Ceph Tips 1


1. Online resize of a ceph rbd image

Before resizing:

[root@mon0 ceph]# rbd create myrbd/rbd1 -s 1024 --image-format=2
[root@mon0 ceph]# rbd ls myrbd
rbd1
[root@mon0 ceph]# rbd info myrbd/rbd1
rbd image 'rbd1':
    size 1024 MB in 256 objects
    order 22 (4096 kB objects)
    block_name_prefix: rbd_data.12ce6b8b4567
    format: 2
    features: layering

Resizing:

[root@mon0 ceph]# rbd resize myrbd/rbd1 -s 2048
Resizing image: 100% complete...done.
As long as rbd1 has not been formatted and mounted, a plain resize is all it takes. If rbd1 has already been formatted and mounted, a few extra steps are needed:
[root@mon0 ceph]# rbd map myrbd/rbd1
[root@mon0 ceph]# rbd showmapped
id pool  image    snap device
0  test  test.img -    /dev/rbd0
1  myrbd rbd1     -    /dev/rbd1
[root@mon0 ceph]# mkfs.xfs /dev/rbd1
log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/rbd1             isize=256    agcount=9, agsize=64512 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=524288, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@mon0 ceph]# mount /dev/rbd1 /mnt
[root@mon0 ceph]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1      529G   20G  482G   4% /
tmpfs            16G  408K   16G   1% /dev/shm
/dev/sdb       559G   33G  527G   6% /openstack
/dev/sdc       1.9T   75M  1.9T   1% /cephmp1
/dev/sdd       1.9T   61M  1.9T   1% /cephmp2
/dev/rbd1      2.0G   33M  2.0G   2% /mnt
[root@mon0 ceph]# rbd resize myrbd/rbd1 -s 4096
Resizing image: 100% complete...done.
[root@mon0 ceph]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1      529G   20G  482G   4% /
tmpfs            16G  408K   16G   1% /dev/shm
/dev/sdb       559G   33G  527G   6% /openstack
/dev/sdc       1.9T   75M  1.9T   1% /cephmp1
/dev/sdd       1.9T   61M  1.9T   1% /cephmp2
/dev/rbd1      2.0G   33M  2.0G   2% /mnt
[root@mon0 ceph]# xfs_growfs /mnt
meta-data=/dev/rbd1             isize=256    agcount=9, agsize=64512 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=524288, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 524288 to 1048576
[root@mon0 ceph]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1      529G   20G  482G   4% /
tmpfs            16G  408K   16G   1% /dev/shm
/dev/sdb       559G   33G  527G   6% /openstack
/dev/sdc       1.9T   75M  1.9T   1% /cephmp1
/dev/sdd       1.9T   61M  1.9T   1% /cephmp2
/dev/rbd1      4.0G   33M  4.0G   1% /mnt
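The same online flow works for ext4, with resize2fs in place of xfs_growfs; a minimal sketch, assuming /dev/rbd1 had instead carried an ext4 filesystem mounted at /mnt:

rbd resize myrbd/rbd1 -s 4096
resize2fs /dev/rbd1        # ext4 grows online, no unmount needed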
Another case is when rbd1 is already attached to a VM:
virsh domblklist myvm
rbd resize myrbd/rbd1 -s 102400
# the resize has to be propagated to the guest via virsh blockresize
virsh blockresize --domain myvm --path vdb --size 100G
rbd info myrbd/rbd1
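Inside the guest, the enlarged disk should then be visible and the filesystem can be grown as before; a sketch, assuming the disk appears as /dev/vdb with an XFS filesystem mounted at /data:

lsblk /dev/vdb         # confirm the guest sees the new size
xfs_growfs /data       # grow the mounted XFS to fill the device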

2. Using ceph-deploy

Installing Ceph with ceph-deploy is very simple, and ceph.conf can be adjusted afterwards as needed.
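The snippets below use a $cluster variable for the node list. It is never defined in the original, so something along these lines is assumed:

# hypothetical node list; adjust to the actual hosts
cluster="cephnode-01 cephnode-02 cephnode-03 cephnode-04 cephnode-05"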

mkdir ceph-deploy; cd ceph-deploy
ceph-deploy install $cluster
ceph-deploy new cephnode-01 cephnode-02 cephnode-03
ceph-deploy --overwrite-conf mon create cephnode-01 cephnode-02 cephnode-03
ceph-deploy gatherkeys cephnode-01
ceph-deploy osd create \
    cephnode-01:/dev/sdb:/dev/sda5 \
    cephnode-01:/dev/sdc:/dev/sda6 \
    cephnode-01:/dev/sdd:/dev/sda7 \
    cephnode-02:/dev/sdb:/dev/sda5 \
    cephnode-02:/dev/sdc:/dev/sda6 \
    cephnode-02:/dev/sdd:/dev/sda7 \
    cephnode-03:/dev/sdb:/dev/sda5 \
    cephnode-03:/dev/sdc:/dev/sda6 \
    cephnode-03:/dev/sdd:/dev/sda7 \
    cephnode-04:/dev/sdb:/dev/sda5 \
    cephnode-04:/dev/sdc:/dev/sda6 \
    cephnode-04:/dev/sdd:/dev/sda7 \
    cephnode-05:/dev/sdb:/dev/sda5 \
    cephnode-05:/dev/sdc:/dev/sda6 \
    cephnode-05:/dev/sdd:/dev/sda7
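Each cephnode-NN:/dev/sdX:/dev/sdaY triple above is HOST:DATA_DISK:JOURNAL, i.e. the disk that will hold the OSD data and the partition that will hold its journal. For a single OSD the same call looks like:

ceph-deploy osd create cephnode-01:/dev/sdb:/dev/sda5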
Tearing down with ceph-deploy:
ceph-deploy purgedata $cluster
ceph-deploy purge $cluster
 
for host in $cluster
  do
    ssh $host <<EOF
      sudo dd if=/dev/zero of=/dev/sdb bs=1M count=100
      sudo dd if=/dev/zero of=/dev/sdc bs=1M count=100
      sudo dd if=/dev/zero of=/dev/sdd bs=1M count=100
      sudo sgdisk -g --clear /dev/sdb
      sudo sgdisk -g --clear /dev/sdc
      sudo sgdisk -g --clear /dev/sdd
EOF
  done
  done
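The dd/sgdisk wipe above can likely also be done with ceph-deploy's own disk zap subcommand, which clears the partition table and disk labels for you; a sketch using the same HOST:DISK notation as the osd create call:

for host in $cluster; do
  ceph-deploy disk zap $host:/dev/sdb $host:/dev/sdc $host:/dev/sdd
done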

3. Delete the snapshots before deleting an rbd image

rbd snap ls myrbd/rbd1
SNAPID NAME       SIZE
    10 snap1 102400 KB
    12 snap2 102400 KB
rbd snap purge myrbd/rbd1
Removing all snapshots: 100% complete...done.
rbd rm myrbd/rbd1
Removing image: 100% complete...done.
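If one of the snapshots is protected because a clone was made from it, snap purge will fail. The clone has to be flattened and the snapshot unprotected first; a sketch, with myrbd/clone1 standing in as a hypothetical clone of snap1:

rbd children myrbd/rbd1@snap1        # list clones that depend on the snapshot
rbd flatten myrbd/clone1             # detach the hypothetical clone from its parent
rbd snap unprotect myrbd/rbd1@snap1
rbd snap purge myrbd/rbd1
rbd rm myrbd/rbd1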

4. Testing rbd snap

Create and mount rbd1:

[root@mon0 ceph]# rbd create myrbd/rbd1 -s 1024 --image-format=2
[root@mon0 ceph]# rbd map myrbd/rbd1
[root@mon0 ceph]# rbd showmapped
id pool  image    snap device
0  test  test.img -    /dev/rbd0
1  myrbd rbd1     -    /dev/rbd1
[root@mon0 ceph]# mkfs.xfs /dev/rbd1
log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/rbd1             isize=256    agcount=9, agsize=31744 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=262144, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@mon0 ceph]# blkid|grep rbd1
/dev/rbd1: UUID="643b509f-406b-411b-b3a5-fa220fbdfe47" TYPE="xfs"
[root@mon0 ceph]# mount /dev/rbd1 /mnt
[root@mon0 ceph]# mount | grep rbd1
/dev/rbd1 on /mnt type xfs (rw)
Create a snapshot and roll back. Note that the rollback happens at the block level: the still-mounted filesystem keeps showing the newer content (v2) until it is unmounted and remounted, after which the rolled-back state (v1) appears:
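The transcript starts at rbd snap ls, so the snapshot-creation steps are not shown; presumably something like the following happened in between (a reconstruction, not part of the original session):

touch /mnt/v1; sync                    # state that snap1 captures
rbd snap create myrbd/rbd1@snap1
rm /mnt/v1; touch /mnt/v2; sync        # state on the live image after the snapshot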
[root@mon0 ceph]# rbd snap ls myrbd/rbd1
SNAPID NAME     SIZE
     2 snap1 1024 MB
[root@mon0 ceph]# rbd snap rollback myrbd/rbd1@snap1
Rolling back to snapshot: 100% complete...done.
[root@mon0 ceph]# ls /mnt
v2
[root@mon0 ceph]# umount /mnt
[root@mon0 ceph]# mount /dev/rbd1 /mnt
[root@mon0 ceph]# ls /mnt
v1
Mount via the snapshot. ro,norecovery allows mounting even though the snapshot contains an unreplayed XFS log, and nouuid is needed because the snapshot carries the same filesystem UUID as the original:
[root@mon0 ceph]# rbd map myrbd/rbd1@snap1
[root@mon0 ceph]# rbd showmapped
id pool  image    snap  device
0  test  test.img -     /dev/rbd0
1  myrbd rbd1     -     /dev/rbd1
2  myrbd rbd1     snap1 /dev/rbd2
 
[root@mon0 ceph]# mount -t xfs -o ro,norecovery,nouuid /dev/rbd2 /tmp
[root@mon0 ceph]# ls /tmp
v1

5. Locating a file on disk

Upload a file with rados put and look up its placement with ceph osd map:

[root@osd2 software]# md5sum epel-release-6-8.noarch.rpm
2cd0ae668a585a14e07c2ea4f264d79b  epel-release-6-8.noarch.rpm
[root@osd2 software]# rados put -p myrbd epel.rpm ./epel-release-6-8.noarch.rpm
[root@osd2 software]# ceph osd map myrbd epel.rpm
osdmap e88 pool 'myrbd' (4) object 'epel.rpm' -> pg 4.e9ddf5be (4.be) -> up ([4,2], p4) acting ([4,2], p4)
Find the on-disk location and verify:
[root@osd2 software]# ceph osd tree
# id    weight  type name   up/down reweight
-1  10.92   root default
-2  3.64        host mon0
0   1.82            osd.0   up  1  
1   1.82            osd.1   up  1  
-3  3.64        host osd1
2   1.82            osd.2   up  1  
3   1.82            osd.3   up  1  
-4  3.64        host osd2
4   1.82            osd.4   up  1  
5   1.82            osd.5   up  1  
[root@osd2 software]# cd /cephmp1/current/4.be_head/
[root@osd2 4.be_head]# ls
epel.rpm__head_E9DDF5BE__4
[root@osd2 4.be_head]# md5sum epel.rpm__head_E9DDF5BE__4
2cd0ae668a585a14e07c2ea4f264d79b  epel.rpm__head_E9DDF5BE__4
[root@osd2 4.be_head]# ll -h
total 20K
-rw-r--r--. 1 root root 15K Nov  4 17:59 epel.rpm__head_E9DDF5BE__4
Upload with rbd import and verify:
[root@osd2 software]# touch hello.txt
[root@osd2 software]# echo "hello world" >> hello.txt
[root@osd2 software]# rbd import ./hello.txt myrbd/hello.txt
Importing image: 100% complete...done.
[root@osd2 software]# rbd info myrbd/hello.txt
rbd image 'hello.txt':
    size 12 bytes in 1 objects
    order 22 (4096 kB objects)
    block_name_prefix: rb.0.1365.6b8b4567
    format: 1
[root@osd2 software]# rados ls -p myrbd
rbd_data.13446b8b4567.00000000000000ba
rbd_directory
rbd_data.13446b8b4567.000000000000007d
rbd_data.13446b8b4567.000000000000007c
rbd_data.13446b8b4567.000000000000005d
rbd_data.13446b8b4567.000000000000007e
rbd_data.13446b8b4567.00000000000000ff
rb.0.1365.6b8b4567.000000000000
hello.txt.rbd
rbd_data.13446b8b4567.00000000000000d9
rbd_data.13446b8b4567.00000000000000f8
rbd_data.13446b8b4567.000000000000009b
rbd_data.13446b8b4567.0000000000000001
rbd_header.13446b8b4567
epel.rpm
rbd_data.13446b8b4567.000000000000001f
rbd_data.13446b8b4567.000000000000003e
rbd_id.rbd1
rbd_data.13446b8b4567.0000000000000000
 
# the placement information obtained this way is wrong
[root@osd2 software]# ceph osd map myrbd hello.txt
osdmap e88 pool 'myrbd' (4) object 'hello.txt' -> pg 4.d92fd82b (4.2b) -> up ([4,3], p4) acting ([4,3], p4)
 
# the .rbd suffix must be appended
[root@osd2 current]# ceph osd map myrbd hello.txt.rbd
osdmap e88 pool 'myrbd' (4) object 'hello.txt.rbd' -> pg 4.9b9bf373 (4.73) -> up ([3,1], p3) acting ([3,1], p3)
[root@osd2 current]# ssh osd1
[root@osd1 ~]# cd /cephmp2/current/4.73_head/
[root@osd1 4.73_head]# ll -h
total 8.0K
-rw-r--r-- 1 root root 112 Nov  4 18:08 hello.txt.rbd__head_9B9BF373__4
[root@osd1 4.73_head]# cat hello.txt.rbd__head_9B9BF373__4
<<< Rados Block Device Image >>>
rb.0.1365.6b8b4567RBD001.005
 
# for an ordinary format-1 rbd image, the header object is <name>.rbd:
#ceph osd map test test.img.rbd
# for a format-2 rbd image, it is rbd_id.<name>:
#ceph osd map test rbd_id.test.img
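For a format-2 image, the data objects are named block_name_prefix plus a zero-padded object index, so any of them can be located the same way; a sketch that reuses the prefix reported by rbd info above:

# extract block_name_prefix (e.g. rbd_data.13446b8b4567), then map the first data object
prefix=$(rbd info myrbd/rbd1 | awk -F': ' '/block_name_prefix/ {print $2}')
ceph osd map myrbd ${prefix}.0000000000000000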