Ceph: the ceph.conf configuration workflow

1. How does a setting in ceph.conf take effect, and how can a change be applied immediately after editing? (see the runtime sketch below)

2. What configuration options does ceph.conf contain, and what does each of them mean? (a way to inspect the values actually in effect is sketched after the ceph.conf dump at the end)

The comment character in ceph.conf is ';'.
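
Regarding question 1, most options are read from ceph.conf only when a daemon starts, but many of them can also be changed on a running cluster. A minimal sketch of the two usual runtime paths, assuming the vstart cluster started below is up and using debug_osd purely as an example option:

./bin/ceph tell osd.0 injectargs '--debug-osd 20'   # push a new value via the monitors
./bin/ceph daemon osd.0 config set debug_osd 20     # or talk to the daemon's admin socket directly
./bin/ceph daemon osd.0 config get debug_osd        # confirm the value now in effect

Options that cannot be changed at runtime only take effect after editing ceph.conf and restarting the affected daemons.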

Testing with a Ceph tree built from the git repository; the command used to start the test cluster is:

[harvis@centos7 build]$ ../src/vstart.sh -d -n -x -l
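
Roughly, -d enables debug output, -n creates a brand-new cluster (discarding any previous vstart data), -x enables cephx authentication, and -l binds all daemons to localhost. The matching teardown script lives next to vstart.sh; a sketch, run from the same build directory:

../src/stop.sh     # stop all daemons started by vstart.sh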

Below is the Ceph cluster status (checked a few times while the cluster comes up), followed by the contents of the ceph.conf file.

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e2: 0/0/1 up
        mgr no daemons active
     osdmap e16: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v21: 24 pgs, 3 pools, 0 bytes data, 0 objects
            75710 MB used, 22542 MB / 98253 MB avail
                  16 creating+activating
                   8 activating

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr no daemons active
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v26: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75742 MB used, 22510 MB / 98253 MB avail
                  24 active+clean
  client io 926 B/s wr, 0 op/s rd, 4 op/s wr

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_OK
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr active: x
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v34: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75753 MB used, 22499 MB / 98253 MB avail
                  24 active+clean
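
The three snapshots above show the cluster settling: in the first there is no active mgr and the PGs are still creating+activating/activating, in the second an MDS has become active and all 24 PGs are active+clean but the mgr warning remains, and once mgr.x is active the status turns HEALTH_OK. When the status is not OK, the reason behind each warning can be listed (output naturally varies with the cluster state):

./bin/ceph health detail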

[harvis@centos7 build]$ cat ceph.conf
; generated by vstart.sh on Sun Apr 9 03:40:11 CST 2017
[client.vstart.sh]
        num mon = 3
        num osd = 3
        num mds = 3
        num mgr = 1
        num rgw = 0

[global]
        fsid = d439bdca-809f-4bbe-af91-c67647317172
        osd pg bits = 3
        osd pgp bits = 5 ; (invalid, but ceph should cope!)
        osd pool default size = 3
        osd crush chooseleaf type = 0
        osd pool default min size = 1
        osd failsafe full ratio = .99
        mon osd reporter subtree level = osd
        mon osd full ratio = .99
        mon data avail warn = 10
        mon data avail crit = 1
        erasure code dir = /CEPH/build/lib
        plugin dir = /CEPH/build/lib
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
        rgw frontends = civetweb port=8000
        filestore fd cache size = 32
        run dir = /CEPH/build/out
        enable experimental unrecoverable data corrupting features = *
        lockdep = true
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx

[client]
        keyring = /CEPH/build/keyring
        log file = /CEPH/build/out/$name.$pid.log
        admin socket = /CEPH/build/out/$name.$pid.asok

[mds]
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug ms = 1
        debug mds = 20
        debug auth = 20
        debug monc = 20
        debug mgrc = 20
        mds debug scatterstat = true
        mds verify scatter = true
        mds log max segments = 2
        mds debug frag = true
        mds debug auth pins = true
        mds debug subtrees = true
        mds data = /CEPH/build/dev/mds.$id
        mds root ino uid = 1000
        mds root ino gid = 1000

[mgr]
        mgr modules = rest fsstatus
        mgr data = /CEPH/build/dev/mgr.$id
        mgr module path = /CEPH/src/pybind/mgr
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug ms = 1
        debug monc = 20
        debug mgr = 20

[osd]
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        osd_check_max_object_name_len_on_startup = false
        osd data = /CEPH/build/dev/osd$id
        osd journal = /CEPH/build/dev/osd$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = /CEPH/build/lib
        osd class load list = *
        osd class default list = *
        osd scrub load threshold = 2000.0
        osd debug op order = true
        osd debug misdirected ops = true
        filestore wbthrottle xfs ios start flusher = 10
        filestore wbthrottle xfs ios hard limit = 20
        filestore wbthrottle xfs inodes hard limit = 30
        filestore wbthrottle btrfs ios start flusher = 10
        filestore wbthrottle btrfs ios hard limit = 20
        filestore wbthrottle btrfs inodes hard limit = 30
        osd copyfrom max chunk = 524288
        bluestore fsck on mount = true
        bluestore block create = true
        bluestore block db size = 67108864
        bluestore block db create = true
        bluestore block wal size = 1048576000
        bluestore block wal create = true
        debug ms = 1
        debug osd = 25
        debug objecter = 20
        debug monc = 20
        debug mgrc = 20
        debug journal = 20
        debug filestore = 20
        debug bluestore = 30
        debug bluefs = 20
        debug rocksdb = 10
        debug bdev = 20
        debug rgw = 20
        debug objclass = 20

[mon]
        mon pg warn min per osd = 3
        mon osd allow primary affinity = true
        mon osd allow pg remap = true
        mon reweight min pgs per osd = 4
        mon osd prime pg temp = true
        crushtool = /CEPH/build/bin/crushtool
        mon allow pool delete = true
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1
        mon cluster log file = /CEPH/build/out/cluster.mon.$id.log

[global]

[mon.a]
        host = centos7
        mon data = /CEPH/build/dev/mon.a
        mon addr = 127.0.0.1:40165

[mon.b]
        host = centos7
        mon data = /CEPH/build/dev/mon.b
        mon addr = 127.0.0.1:40166

[mon.c]
        host = centos7
        mon data = /CEPH/build/dev/mon.c
        mon addr = 127.0.0.1:40167

[osd.0]
        host = centos7

[osd.1]
        host = centos7

[osd.2]
        host = centos7

[mds.a]
        host = centos7

[mds.b]
        host = centos7

[mds.c]
        host = centos7

[mgr.x]
        host = centos7

[harvis@centos7 build]$
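
Back to question 2: two ways to inspect what these options resolve to. The admin socket dumps the options a running daemon is actually using, and the ceph-conf helper resolves a key from the file itself, including $id/$name expansion. A sketch, assuming the vstart cluster is still running and that ceph-conf was built into ./bin; exact flags can differ between releases:

./bin/ceph daemon mon.a config show | grep osd_pool_default_size   # value in effect on mon.a
./bin/ceph daemon mon.a config diff                                # only options changed from their defaults
./bin/ceph-conf -c ceph.conf --name osd.0 --lookup 'osd data'      # should print /CEPH/build/dev/osd0
./bin/ceph-conf -c ceph.conf --lookup 'osd pool default size'      # a [global] value: 3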
