Ceph 配置文件模板的位置
[root@test-ceph1 ~]# cat /usr/share/doc/ceph/sample.ceph.conf
ceph 配置优先级别
优先级从低到高
程序编译时默认值
中心数据库 # 推荐使用方式
本地配置文件
环境变量
命令行参数
运行时覆盖参数
列出所有的配置选项
[root@test-ceph1 ~]# ceph config ls
查看某个配置选项的帮助信息
[root@test-ceph1 ~]# ceph config help osd_pool_default_size
osd_pool_default_size - the number of copies of an object for new replicated pools
(uint, advanced)
Default: 3
Minimum: 0
Maximum: 10
Can update at runtime: true
Services: [mon]
显示集群中心配置数据库中保存的所有配置项
[root@test-ceph1 ~]# ceph config dump
WHO MASK LEVEL OPTION VALUE RO
global basic container_image quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f *
mon advanced auth_allow_insecure_global_id_reclaim false
mon advanced public_network 192.168.222.0/24 *
mgr advanced mgr/cephadm/container_init True *
mgr advanced mgr/cephadm/migration_current 6 *
mgr advanced mgr/dashboard/ALERTMANAGER_API_HOST http://test-ceph1:9093 *
mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false *
mgr advanced mgr/dashboard/GRAFANA_API_URL https://test-ceph1:3000 *
mgr advanced mgr/dashboard/PROMETHEUS_API_HOST http://test-ceph1:9095 *
mgr advanced mgr/dashboard/ssl_server_port 8443 *
mgr advanced mgr/orchestrator/orchestrator cephadm
osd host:test-ceph4 basic osd_memory_target 1532527138
osd host:test-ceph6 basic osd_memory_target 1532527138
osd advanced osd_memory_target_autotune true
osd.0 basic osd_mclock_max_capacity_iops_ssd 5392.182966
osd.1 basic osd_mclock_max_capacity_iops_ssd 1492.521681
osd.10 basic osd_mclock_max_capacity_iops_ssd 1324.061469
osd.11 basic osd_mclock_max_capacity_iops_ssd 3227.997224
osd.12 basic osd_mclock_max_capacity_iops_ssd 2034.782394
osd.13 basic osd_mclock_max_capacity_iops_ssd 3684.510859
osd.14 basic osd_mclock_max_capacity_iops_ssd 9492.307118
osd.2 basic osd_mclock_max_capacity_iops_ssd 9926.606247
osd.3 basic osd_mclock_max_capacity_iops_ssd 12794.165806
osd.4 basic osd_mclock_max_capacity_iops_ssd 1969.199850
osd.5 basic osd_mclock_max_capacity_iops_ssd 4431.331962
osd.6 basic osd_mclock_max_capacity_iops_ssd 4615.562726
osd.7 basic osd_mclock_max_capacity_iops_ssd 3730.672106
osd.8 basic osd_mclock_max_capacity_iops_ssd 10064.632014
osd.9 basic osd_mclock_max_capacity_iops_ssd 3597.473051
查看某个守护进程当前生效的运行时配置(含配置来源)
[root@test-ceph1 ~]# ceph config show osd.0
NAME VALUE SOURCE OVERRIDES IGNORES
container_image quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f mon
daemonize false override
keyring $osd_data/keyring default
leveldb_log default
log_to_file false default
log_to_journald true default
log_to_stderr false default
mon_host [v2:192.168.222.131:3300/0,v1:192.168.222.131:6789/0] [v2:192.168.222.132:3300/0,v1:192.168.222.132:6789/0] [v2:192.168.222.133:3300/0,v1:192.168.222.133:6789/0] file
no_config_file false override
osd_delete_sleep 0.000000 override
osd_delete_sleep_hdd 0.000000 override
osd_delete_sleep_hybrid 0.000000 override
osd_delete_sleep_ssd 0.000000 override
osd_max_backfills 1 default
osd_mclock_max_capacity_iops_ssd 5392.182966 mon
osd_mclock_scheduler_background_best_effort_lim 0.900000 default
osd_mclock_scheduler_background_best_effort_res 0.000000 default
osd_mclock_scheduler_background_best_effort_wgt 1 default
osd_mclock_scheduler_background_recovery_lim 0.000000 default
osd_mclock_scheduler_background_recovery_res 0.500000 default
osd_mclock_scheduler_background_recovery_wgt 1 default
osd_mclock_scheduler_client_lim 0.000000 default
osd_mclock_scheduler_client_res 0.500000 default
osd_mclock_scheduler_client_wgt 1 default
osd_memory_target 1532527138 mon
osd_memory_target_autotune true mon
osd_recovery_max_active 0 default
osd_recovery_max_active_hdd 3 default
osd_recovery_max_active_ssd 10 default
osd_recovery_sleep 0.000000 override
osd_recovery_sleep_hdd 0.000000 override
osd_recovery_sleep_hybrid 0.000000 override
osd_recovery_sleep_ssd 0.000000 override
osd_scrub_sleep 0.000000 override
osd_snap_trim_sleep 0.000000 override
osd_snap_trim_sleep_hdd 0.000000 override
osd_snap_trim_sleep_hybrid 0.000000

最低0.47元/天 解锁文章
1万+

被折叠的 条评论
为什么被折叠?



