Ceph Command Reference
I. Cluster
1. Start a Ceph daemon
Start the mon daemon:
service ceph start mon.node1
Start the mds daemon:
service ceph start mds.node1
Start the osd daemon:
service ceph start osd.0
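On newer Ceph releases that manage daemons with systemd, the equivalent commands would be the following, assuming the same daemon names as above:
systemctl start ceph-mon@node1
systemctl start ceph-mds@node1
systemctl start ceph-osd@0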
2. Check the cluster health status
[root@client ~]# ceph health
HEALTH_OK
3. Watch the cluster status in real time
[root@client ~]# ceph -w
cluster be1756f2-54f7-4d8f-8790-820c82721f17
health HEALTH_OK
monmap e2: 3 mons at {node1=10.240.240.211:6789/0,node2=10.240.240.212:6789/0,node3=10.240.240.213:6789/0}, election epoch 294, quorum 0,1,2 node1,node2,node3
mdsmap e95: 1/1/1 up {0=node2=up:active}, 1 up:standby
osdmap e88: 3 osds: 3 up, 3 in
pgmap v1164: 448 pgs, 4 pools, 10003 MB data, 2520 objects
23617 MB used, 37792 MB / 61410 MB avail
448 active+clean
2014-06-30 00:48:28.756948 mon.0 [INF] pgmap v1163: 448 pgs: 448 active+clean; 10003 MB data, 23617 MB used, 37792 MB / 61410 MB avail
4. Check the cluster status summary
[root@client ~]# ceph -s
cluster be1756f2-54f7-4d8f-8790-820c82721f17
health HEALTH_OK
monmap e2: 3 mons at {node1=10.240.240.211:6789/0,node2=10.240.240.212:6789/0,node3=10.240.240.213:6789/0}, election epoch 294, quorum 0,1,2 node1,node2,node3
mdsmap e95: 1/1/1 up {0=node2=up:active}, 1 up:standby
osdmap e88: 3 osds: 3 up, 3 in
pgmap v1164: 448 pgs, 4 pools, 10003 MB data, 2520 objects
23617 MB used, 37792 MB / 61410 MB avail
448 active+clean
[root@client ~]#
5. Check Ceph storage usage
[root@client ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
61410M 37792M 23617M 38.46
POOLS:
NAME ID USED %USED OBJECTS
data 0 10000M 16.28 2500
metadata 1 3354k 0 20
rbd 2 0 0 0
jiayuan 3 0 0 0
[root@client ~]#
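For a more detailed per-pool breakdown, ceph df also supports a detail variant:
ceph df detail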
6. Remove all Ceph packages and data from a node (purge removes the installed Ceph packages; purgedata erases the Ceph data and configuration on the node)
[root@node1 ~]# ceph-deploy purge node1
[root@node1 ~]# ceph-deploy purgedata node1
7. Create an admin user for Ceph, generate a key for it, and save the keyring under /etc/ceph:
ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/ceph.client.admin.keyring
or
ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.admin.keyring
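To verify the entry that was just created, the key and capabilities can be read back (a verification step added here for illustration):
ceph auth get client.admin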
8. Create a user and key for osd.0
ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/osd/ceph-0/keyring
9. Create a user and key for mds.node1
ceph auth get-or-create mds.node1 mon 'allow rwx' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mds/ceph-node1/keyring
10. List the authentication users and keys in the cluster
ceph auth list
11. Delete an authentication user from the cluster
ceph auth del osd.0
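If a single user's key and capabilities need to be checked before deleting, ceph auth get can be used, for example:
ceph auth get osd.0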
12. Show a daemon's detailed running configuration
[root@node1 ~]# ceph daemon mon.node1 config show | more
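A single option can also be read over the same admin socket instead of paging through the full dump; the option name below is only an example:
ceph daemon mon.node1 config get mon_data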
13. Show cluster health details
[root@admin ~]# ceph health detail
HEALTH_WARN 12 pgs down; 12 pgs peering; 12 pgs stuck inactive; 12 pgs stuck unclean
pg 3.3b is stuck inactive since forever, current state down+peering, last acting [1,2]
pg 3.36 is stuck inactive since forever, current state down+peering, last acting [1,2]
pg 3.79 is stuck inactive since forever, current state down+peering, last acting [1,0]
pg 3.5 is stuck inactive since forever, current state down+peering, last acting [1,2]
pg 3.30 is stuck inactive since forever, current state down+peering, last acting [1,2]
pg 3.1a is stuck inactive since forever, current state down+peering, last acting [1,0]
pg 3.2d is stuck inactive since forever, current state down+peering, last acting [1,0]
pg 3.16 is stuck inactive since forever, current state down+peering, last acting [1,2]
14. Show the path of a daemon's log file
[root@node1 ~]# ceph-conf --name mon.node1 --show-config-value log_file
/var/log/ceph/ceph-mon.node1.log
II. MON
1. Show mon status
[root@client ~]# ceph mon stat
e2: 3 mons at {node1=10.240.240.211:6789/0,node2=10.240.240.212:6789/0,node3=10.240.240.213:6789/0}, election epoch 294, quorum 0,1,2 node1,node2,node3
2. Show the mon election (quorum) status
[root@client ~]# ceph quorum_status
{"election_epoch":294,"quorum":[0,1,2],"quorum_names":["node1","node2","node3"],"quorum_leader_name":"node1","monmap":{"epoch":2,"fsid":"be1756f2-54f7-4d8f-8790-820c82721f17","modified":"2014-06-26 18:43:51.671106","created":"0.000000","mons":[{"rank":0,"name":"node1","addr":"10.240.240.211:6789\/0"},{"rank":1,"name":"node2","addr":"10.240.240.212:6789\/0"},{"rank":2,"name":"node3","addr":"10.240.240.213:6789\/0"}]}}
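The same information is easier to read when pretty-printed, e.g.:
ceph quorum_status --format json-pretty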
3. Dump the mon map
[root@client ~]# ceph mon dump
dumped monmap epoch 2
epoch 2
fsid be1756f2-54f7-4d8f-8790-820c82721f17
last_changed 2014-06-26 18:43:51.671106
created 0.000000
0: 10.240.240.211:6789/0 mon.node1
1: 10.240.240.212:6789/0 mon.node2
2: 10.240.240.213:6789/0 mon.node3
4. Remove a mon node
[root@node1 ~]# ceph mon remove node1
removed mon.node1 at 10.39.101.1:6789/0, there are now 3 monitors
2014-07-07 18:11:04.974188 7f4d16bfd700 0 monclient: hunting for new mon
5. Get the mon map from the running cluster and save it to the file 1.txt
[root@node3 ~]# ceph mon getmap -o 1.txt
got monmap epoch 6
6. Inspect the map obtained above
[root@node3 ~]# monmaptool --print 1.txt
monmaptool: monmap file 1.txt
epoch 6
fsid 92552333-a0a8-41b8-8b45-c93a8730525e
last_changed 2014-07-07 18:22:51.927205
created 0.000000
0: 10.39.101.1:6789/0 mon.node1
1: 10.39.101.2:6789/0 mon.node2
2: 10.39.101.3:6789/0 mon.node3
[root@node3 ~]#
7. Inject the mon map obtained above into a newly added node
ceph-mon -i node4 --inject-monmap 1.txt
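A rough sequence, assuming the new monitor is node4 as above: the target mon daemon has to be stopped before the map is injected, then started again:
service ceph stop mon.node4
ceph-mon -i node4 --inject-monmap 1.txt
service ceph start mon.node4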
8. Show the mon's admin socket path
[root@node1 ~]# ceph-conf --name mon.node1 --show-config-value admin_socket
/var/run/ceph/ceph-mon.node1.asok
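The socket can also be queried directly with the --admin-daemon option, for example:
ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status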
9. Show the mon's detailed status
[root@node1 ~]# ceph daemon mon.node1 mon_status
{ "name": "node1",
"rank": 0,
"state": "leader",
"election_epoch": 96,
"quorum": [
0,
1,
2],
"outside_quorum": [],
"extra_probe_peers": [
"10.39.101.4:6789\/0"],
"sync_provider": [],
"monmap": { "epoch": 6,
"fsid": "92552333-a0a8-41b8-8b45-c93a8730525e",
"modified": "2014-07-07 18:22:51.927205",
"created": "0.000000",
"mons": [
{ "rank": 0,
"name": "node1",
"addr": "10.39.101.1:6789\/0"},
{ "rank": 1,
"name": "node2",
"addr": "10.39.101.2:6789\/0"},
{ "rank": 2,
"name": "node3",
"addr": "10.39.101.3:6789\/0"}]}
10. Remove a mon node
[root@os-node1 ~]# ceph mon remove os-node1
removed mon.os-node1 at 10.40.10.64:6789/0, there are now 3 monitors
III. MDS