Ceph one-click deployment


First of all, kudos: Ceph is very powerful, and deployment is quite convenient.

I wrote a semi-automated deployment script; it is my first attempt, so experts please bear with me. Later I plan to turn it into a fully one-click deployment.

 

#!/usr/bin/env bash
# config-ceph.sh -- semi-automated Ceph deployment helper.
# Usage: ./config-ceph.sh <command>
# Commands: install-hosts | install | init | init-dir | install-sshkeygen |
#           install-copy-script | init-server | stop-server | init-node <id>




#main node
#main mon-mds server ip
ip=192.168.3.32


if [ $1 ] && [ $1 = "install-hosts" ]; then

cat >>/etc/hosts <<EOF
# auto-appended Ceph server hosts

192.168.3.32   ubuntu     #mon-mds
192.168.3.32   mon-mds

192.168.3.200  ubuntu201  #osd-00
192.168.3.200  osd-00     #osd-00
192.168.3.201  ubuntu202  #osd-01
192.168.3.201  osd-01     #osd-01
192.168.3.202  ubuntu203  #osd-02
192.168.3.202  osd-02     #osd-02
192.168.3.203  ubuntu204  #osd-03
192.168.3.203  osd-03     #osd-03
EOF

fi;
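

# Not part of the original script: a minimal reachability check you can run
# after install-hosts, to confirm every alias above resolves and answers ping.
if [ $1 ] && [ $1 = "check-hosts" ]; then
for h in mon-mds osd-00 osd-01 osd-02 osd-03; do
 ping -c 1 -W 2 $h >/dev/null 2>&1 && echo "$h ok" || echo "$h UNREACHABLE"
done
fi;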





#main   mon mds
if [ $1 ] && [ $1 = "install-sshkeygen" ]; then
cd ~

# On each server, the file /root/.ssh/authorized_keys must exist first:
# mkdir /root/.ssh
# touch /root/.ssh/authorized_keys


#ssh-keygen  -d
ssh-keygen -t rsa
scp /root/.ssh/id_rsa.pub root@mon-mds:/root/.ssh/authorized_keys
scp /root/.ssh/id_rsa.pub root@osd-00:/root/.ssh/authorized_keys
scp /root/.ssh/id_rsa.pub root@osd-01:/root/.ssh/authorized_keys  
scp /root/.ssh/id_rsa.pub root@osd-02:/root/.ssh/authorized_keys  
scp /root/.ssh/id_rsa.pub root@osd-03:/root/.ssh/authorized_keys  
fi;
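
# Note (not in the original script): the scp above overwrites any existing
# authorized_keys on the target. ssh-copy-id appends the key instead; assuming
# it is available, this alternative subcommand is safer:
if [ $1 ] && [ $1 = "install-sshcopyid" ]; then
for h in mon-mds osd-00 osd-01 osd-02 osd-03; do
 ssh-copy-id root@$h
done
fi;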


#main   mon mds
if [ $1 ] && [ $1 = "install-copy-script" ]; then
cd ~

scp /root/config-ceph.sh root@mon-mds:/root/config-ceph.sh
scp /root/config-ceph.sh root@osd-00:/root/config-ceph.sh
scp /root/config-ceph.sh root@osd-01:/root/config-ceph.sh
scp /root/config-ceph.sh root@osd-02:/root/config-ceph.sh
scp /root/config-ceph.sh root@osd-03:/root/config-ceph.sh
fi;
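
# Not in the original script: with passwordless SSH set up and the script copied
# to every node, the per-node steps can be driven from here in one pass. This
# "run-all" command is a hypothetical addition toward full one-click deployment:
#   ./config-ceph.sh run-all install
if [ $1 ] && [ $1 = "run-all" ]; then
 if [ $2 ]; then
 for h in osd-00 osd-01 osd-02 osd-03; do
  ssh root@$h "/root/config-ceph.sh $2"
 done
 fi;
fi;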





#main node
if [ $1 ] && [ $1 = "install" ]; then

wget -q -O- https://raw.github.com/ceph/ceph/master/keys/release.asc |  apt-key add -
echo deb http://ceph.com/debian/ $(lsb_release -sc) main |  tee /etc/apt/sources.list.d/ceph.list
apt-get update &&  apt-get install ceph -y
fi;
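
# Not in the original script: a quick post-install sanity check, confirming the
# packages installed and printing the running version.
if [ $1 ] && [ $1 = "check-install" ]; then
ceph -v
fi;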


# main
if [ $1 ] && [ $1 = "init" ]; then
 
hostname=$(hostname -s)


cat >/etc/ceph/ceph.conf <<EOF
[global]
        ; enable secure authentication
        ; allow ourselves to open a lot of files
        max open files = 131072
        ; set up logging
        log file = /var/log/ceph/\$name.log
        ; set up pid files
        pid file = /var/run/ceph/\$name.pid

; monitors
;  You need at least one.  You need at least three if you want to
;  tolerate any node failures.  Always create an odd number.
[mon]
        mon data = /data/mon\$id
        debug ms = 1
[mon0]
        host = $hostname
        mon addr = $ip:6789

[mds.a]
        host = $hostname
        ; where the mds keeps its secret encryption keys
        keyring = /data/keyring.\$name
        ; mds logging to debug issues.
        ;debug ms = 1
        ;debug mds = 20

[osd]
        ; This is where the btrfs volume will be mounted.
        osd data = /data/osd\$id
        ; Ideally, make this a separate disk or partition.  A few
        ; hundred MB should be enough; more if you have fast or many
        ; disks.  You can use a file under the osd data dir if need be
        ; (e.g. /data/osd\$id/journal), but it will be slower than a
        ; separate disk or partition.
        ; This is an example of a file-based journal.
        osd journal = /data/osd\$id/journal
        osd journal size = 1000 ; journal size, in megabytes
        ; osd logging to debug osd issues, in order of likelihood of being
        ; helpful
        ;debug ms = 1
        ;debug osd = 20
        ;debug filestore = 20
        ;debug journal = 20
[osd.0]
#       host = $hostname
        host = osd-00
        ; if 'btrfs devs' is not specified, you're responsible for
        ; setting up the 'osd data' dir.  if it is not btrfs, things
        ; will behave up until you try to recover from a crash (which
        ; is usually fine for basic testing).
        btrfs devs = /dev/sdc1
[osd.1]
#       host = $hostname
        host = osd-01
        btrfs devs = /dev/sdc1
[osd.2]
#       host = $hostname
        host = osd-02
        btrfs devs = /dev/sdc1
[osd.3]
#       host = $hostname
        host = osd-03
        btrfs devs = /dev/sdc1

; access control
[group everyone]
        ; you probably want to limit this to a small or a list of
        ; hosts. clients are fully trusted.
        addr = 0.0.0.0/0

[mount /]
        allow = %everyone

EOF



    
cat >/etc/ceph/fetch_config << EOF
#!/bin/sh
conf="\$1"
## fetch ceph.conf from some remote location and save it to \$conf.
##
## make sure this script is executable (chmod +x fetch_config)
##
## examples:
##
## from a locally accessible file
## from a URL:
# wget -q -O \$conf http://somewhere.com/some/ceph.conf
## via scp
# scp -i /path/to/id_dsa user@host:/path/to/ceph.conf \$conf
scp root@mon-mds:/etc/ceph/ceph.conf \$conf
EOF

chmod +x  /etc/ceph/fetch_config


fi;
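
# The generated fetch_config receives the destination path as its first
# argument, so it can be tested by hand before running mkcephfs (the temp
# path below is a hypothetical example):
#   /etc/ceph/fetch_config /tmp/ceph.conf.test && diff /tmp/ceph.conf.test /etc/ceph/ceph.conf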



if [ $1 ] && [ $1 = "init-dir" ]; then
mkdir -p /var/log/ceph
mkdir -p /data
fi;


#main
if [ $1 ] && [ $1 = "init-server" ]; then

 #mkdir /var/lib/ceph/osd/ceph-0
 #mkdir /var/lib/ceph/osd/ceph-1
 #mkdir /var/lib/ceph/mon/ceph-a
#mkdir /var/lib/ceph/mds/ceph-a
mkdir -p /var/log/ceph
mkdir -p /data


cd /etc/ceph
#mkcephfs -a -c /etc/ceph/ceph.conf -k ceph.keyring
#mkcephfs -c /etc/ceph/ceph.conf --allhosts --mkbtrfs -k /etc/ceph/keyring.bin
mkcephfs -c /etc/ceph/ceph.conf --allhosts --mkbtrfs -k /data/keyring.bin
service ceph start
ceph health

fi;
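
# Not in the original script: right after startup the cluster may still be
# peering, so "ceph health" can look worse than it is. A short poll loop
# (assuming your Ceph version prints the HEALTH_OK string) makes the final
# state easier to see:
#   until ceph health | grep -q HEALTH_OK; do echo waiting...; sleep 10; done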




#main
if [ $1 ] && [ $1 = "stop-server" ]; then

service ceph stop
fi;







#node    warning
#  We mount three disks: sda is used for the system, sdb for other data,
#  and sdc for the node data store.
#  You need to change /dev/sdc1 to fit your environment.
# ./config-ceph init-node 0
# ./config-ceph init-node 1
# ./config-ceph init-node 2
#

if [ $1 ] && [ $1 = "init-node" ]; then

 if [ $2 ]; then
  mkdir -pv /data
  mkfs.btrfs /dev/sdc1
  mount /dev/sdc1 /data
  mkdir -pv /data/osd$2
 fi;

fi;
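
# Not in the original script: the mount above is lost on reboot. If you want it
# persistent, an fstab entry along these lines works (adjust the device to your
# environment, as noted in the warning above):
#   echo '/dev/sdc1  /data  btrfs  defaults  0 0' >> /etc/fstab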

 

In my test environment, the mon and mds are installed on a single server, and the data nodes (osd) are installed on 4 servers.

Usage

1. Run on the mon/mds server and on every osd node:

config-ceph.sh install-hosts  # then check that all the hosts can reach each other

config-ceph.sh install  # install the Ceph packages

config-ceph.sh init-dir  # create the Ceph directories

 

2. Run on the mon/mds server:

config-ceph.sh install-sshkeygen  # passwordless SSH login

config-ceph.sh install-copy-script  # copy the script to every node

config-ceph.sh init-server  # once this finishes, Ceph is installed and configured

 

Now you can test from other machines.

 

First, load ceph.ko:

#modprobe ceph  # it seems to be loaded by default
mkdir -p /mnt/ceph  # the mount point must exist
mount -t ceph 192.168.3.32:6789:/ /mnt/ceph
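
To confirm the mount actually works, a quick check plus a throwaway test file (hypothetical name) is enough:

df -h /mnt/ceph
echo hello > /mnt/ceph/test.txt && cat /mnt/ceph/test.txt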

 

Starting and restarting the Ceph file system

Run on the monitor (the -a flag applies the action to every host listed in ceph.conf):
$ /etc/init.d/ceph -a start

$ /etc/init.d/ceph -a restart
