The ceph.conf configuration workflow in Ceph
1. How does a setting in ceph.conf take effect, and how can a change be applied immediately? (See the sketch after this list.)
2. Which configuration options does ceph.conf contain, and what does each of them mean?
The comment character in ceph.conf is ';'.
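Answering question 1 first: daemons read ceph.conf only once, at startup, so editing the file by itself changes nothing until the daemon is restarted. To apply a change immediately you push it into the running daemon instead. A minimal sketch against the vstart cluster set up below (note that not every option is runtime-changeable; injectargs reports when one is not):

# inject over the network, via the monitors
./bin/ceph tell osd.0 injectargs '--debug-osd 10'
# or set it directly through the daemon's admin socket
./bin/ceph daemon osd.0 config set debug_osd 10
# verify the value the daemon is actually using
./bin/ceph daemon osd.0 config get debug_osd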
Testing with a build compiled from the Ceph git repository; the test command:
[harvis@centos7 build]$ ../src/vstart.sh -d -n -x -l
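For the record, the flags as I read them from the vstart.sh script: -d raises the debug levels (visible in the generated conf below), -n wipes any previous test cluster and creates a new one, -x enables cephx authentication, and -l binds all daemons to localhost. To tear the test cluster down and start over:

../src/stop.sh
../src/vstart.sh -d -n -x -l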
Below is the cluster status at several points, followed by the contents of the generated ceph.conf:
[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e2: 0/0/1 up
        mgr no daemons active
     osdmap e16: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v21: 24 pgs, 3 pools, 0 bytes data, 0 objects
            75710 MB used, 22542 MB / 98253 MB avail
                  16 creating+activating
                   8 activating
[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr no daemons active
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v26: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75742 MB used, 22510 MB / 98253 MB avail
                  24 active+clean
  client io 926 B/s wr, 0 op/s rd, 4 op/s wr
[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_OK
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr active: x
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v34: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75753 MB used, 22499 MB / 98253 MB avail
                  24 active+clean
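Now that the cluster is HEALTH_OK, the admin socket is also the quickest way to dump every option a running daemon has actually loaded, which is a practical way into question 2. A sketch using the .asok paths that appear in the conf below:

./bin/ceph daemon mon.a config show | less
# equivalently, point at the socket file directly
./bin/ceph --admin-daemon out/mon.a.asok config show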
[harvis@centos7 build]$ cat ceph.conf
; generated by vstart.sh on Sun Apr 9 03:40:11 CST 2017
[client.vstart.sh]
num mon = 3
num osd = 3
num mds = 3
num mgr = 1
num rgw = 0
[global]
fsid = d439bdca-809f-4bbe-af91-c67647317172
osd pg bits = 3
osd pgp bits = 5 ; (invalid, but ceph should cope!)
osd pool default size = 3
osd crush chooseleaf type = 0
osd pool default min size = 1
osd failsafe full ratio = .99
mon osd reporter subtree level = osd
mon osd full ratio = .99
mon data avail warn = 10
mon data avail crit = 1
erasure code dir = /CEPH/build/lib
plugin dir = /CEPH/build/lib
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
rgw frontends = civetweb port=8000
filestore fd cache size = 32
run dir = /CEPH/build/out
enable experimental unrecoverable data corrupting features = *
lockdep = true
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
[client]
keyring = /CEPH/build/keyring
log file = /CEPH/build/out/$name.$pid.log
admin socket = /CEPH/build/out/$name.$pid.asok
[mds]
log file = /CEPH/build/out/$name.log
admin socket = /CEPH/build/out/$name.asok
chdir = ""
pid file = /CEPH/build/out/$name.pid
heartbeat file = /CEPH/build/out/$name.heartbeat
debug ms = 1
debug mds = 20
debug auth = 20
debug monc = 20
debug mgrc = 20
mds debug scatterstat = true
mds verify scatter = true
mds log max segments = 2
mds debug frag = true
mds debug auth pins = true
mds debug subtrees = true
mds data = /CEPH/build/dev/mds.$id
mds root ino uid = 1000
mds root ino gid = 1000
[mgr]
mgr modules = rest fsstatus
mgr data = /CEPH/build/dev/mgr.$id
mgr module path = /CEPH/src/pybind/mgr
log file = /CEPH/build/out/$name.log
admin socket = /CEPH/build/out/$name.asok
chdir = ""
pid file = /CEPH/build/out/$name.pid
heartbeat file = /CEPH/build/out/$name.heartbeat
debug ms = 1
debug monc = 20
debug mgr = 20
[osd]
log file = /CEPH/build/out/$name.log
admin socket = /CEPH/build/out/$name.asok
chdir = ""
pid file = /CEPH/build/out/$name.pid
heartbeat file = /CEPH/build/out/$name.heartbeat
osd_check_max_object_name_len_on_startup = false
osd data = /CEPH/build/dev/osd$id
osd journal = /CEPH/build/dev/osd$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = /CEPH/build/lib
osd class load list = *
osd class default list = *
osd scrub load threshold = 2000.0
osd debug op order = true
osd debug misdirected ops = true
filestore wbthrottle xfs ios start flusher = 10
filestore wbthrottle xfs ios hard limit = 20
filestore wbthrottle xfs inodes hard limit = 30
filestore wbthrottle btrfs ios start flusher = 10
filestore wbthrottle btrfs ios hard limit = 20
filestore wbthrottle btrfs inodes hard limit = 30
osd copyfrom max chunk = 524288
bluestore fsck on mount = true
bluestore block create = true
bluestore block db size = 67108864
bluestore block db create = true
bluestore block wal size = 1048576000
bluestore block wal create = true
debug ms = 1
debug osd = 25
debug objecter = 20
debug monc = 20
debug mgrc = 20
debug journal = 20
debug filestore = 20
debug bluestore = 30
debug bluefs = 20
debug rocksdb = 10
debug bdev = 20
debug rgw = 20
debug objclass = 20
[mon]
mon pg warn min per osd = 3
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon reweight min pgs per osd = 4
mon osd prime pg temp = true
crushtool = /CEPH/build/bin/crushtool
mon allow pool delete = true
log file = /CEPH/build/out/$name.log
admin socket = /CEPH/build/out/$name.asok
chdir = ""
pid file = /CEPH/build/out/$name.pid
heartbeat file = /CEPH/build/out/$name.heartbeat
debug mon = 20
debug paxos = 20
debug auth = 20
debug mgrc = 20
debug ms = 1
mon cluster log file = /CEPH/build/out/cluster.mon.$id.log
[global]
[mon.a]
host = centos7
mon data = /CEPH/build/dev/mon.a
mon addr = 127.0.0.1:40165
[mon.b]
host = centos7
mon data = /CEPH/build/dev/mon.b
mon addr = 127.0.0.1:40166
[mon.c]
host = centos7
mon data = /CEPH/build/dev/mon.c
mon addr = 127.0.0.1:40167
[osd.0]
host = centos7
[osd.1]
host = centos7
[osd.2]
host = centos7
[mds.a]
host = centos7
[mds.b]
host = centos7
[mds.c]
host = centos7
[mgr.x]
host = centos7
[harvis@centos7 build]$
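One thing worth noting about the file above, toward question 2: when a daemon parses ceph.conf, the most specific matching section wins, roughly [global] < [mon]/[osd]/[mds]/[client] < [mon.a]/[osd.0]/[mds.a]/..., so osd.0 here takes its log file from [osd] but its host from [osd.0]. Option names treat spaces and underscores interchangeably ("osd pool default size" in the file is "osd_pool_default_size" on the command line). A minimal sketch of the precedence, with made-up values:

[global]
debug ms = 1    ; default for every daemon
[osd]
debug ms = 5    ; overrides [global] for all OSDs
[osd.0]
debug ms = 20   ; overrides [osd] for osd.0 only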