I. Environment

OS: CentOS Linux release 7.7.1908; Disk: 200 GB (system disk)

1. Create loop devices
mkdir -p /data/ceph-disk/
fallocate -l 40G /data/ceph-disk/sdb.img
fallocate -l 40G /data/ceph-disk/sdc.img
fallocate -l 40G /data/ceph-disk/sdd.img

losetup -P /dev/loop1 /data/ceph-disk/sdb.img
losetup -P /dev/loop2 /data/ceph-disk/sdc.img
losetup -P /dev/loop3 /data/ceph-disk/sdd.img

wipefs -a /dev/loop1
wipefs -a /dev/loop2
wipefs -a /dev/loop3
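
Before going further, it does no harm to confirm the three devices are attached; losetup -l lists active loop devices, and lsblk should show each one as TYPE loop:

losetup -l
lsblk /dev/loop1 /dev/loop2 /dev/loop3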
2. Re-attach the loop devices at boot

Loop devices do not persist across reboots, so append the losetup commands to /etc/rc.local:

cat >>/etc/rc.local<<EOF
losetup -P /dev/loop1 /data/ceph-disk/sdb.img
losetup -P /dev/loop2 /data/ceph-disk/sdc.img
losetup -P /dev/loop3 /data/ceph-disk/sdd.img
EOF

# To detach a loop device manually:
#losetup --detach /dev/loop1
#losetup --detach /dev/loop2
#losetup --detach /dev/loop3

chmod a+x /etc/rc.local
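
CentOS 7 runs /etc/rc.local through the static rc-local.service, which only fires when the file is executable; that is what the chmod above is for. After a reboot you can confirm it ran:

systemctl status rc-local
losetup -l    # all three loop devices should be attached again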

II. Install Ceph

1. Configure the yum repository for the Ceph Nautilus (N) release
mkdir -p /etc/yum.repos.d/backup-repo
mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/backup-repo

curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo

cat >/etc/yum.repos.d/ceph.repo<<EOF
[Ceph]
name=Ceph packages for x86_64
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
gpgcheck=0
priority=1
EOF

For the Luminous (L) release, write this repo file instead (note rpm-luminous in the baseurl):
cat >/etc/yum.repos.d/ceph.repo<<EOF
[Ceph]
name=Ceph packages for x86_64
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
gpgcheck=0
priority=1
EOF

yum makecache
yum -y install epel-release
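
A quick sanity check that the new repos resolve before installing anything:

yum repolist enabled | grep -iE 'ceph|epel'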
2. System initialization
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
systemctl disable NetworkManager
systemctl stop NetworkManager

timedatectl set-timezone Asia/Shanghai
yum -y install ntp ntpdate
ntpdate ntp1.aliyun.com  # sync the clock once before starting ntpd; in production, point ntpd at several internal NTP servers, since clock skew will take the cluster down
systemctl enable ntpd
systemctl restart ntpd

swapoff -a

echo '* soft nofile 65535' >>/etc/security/limits.conf
echo '* hard nofile 65535' >>/etc/security/limits.conf
echo 'kernel.pid_max = 4194303' >>/etc/sysctl.conf
echo 'vm.swappiness = 0' >>/etc/sysctl.conf 

sysctl -p
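
To confirm the kernel parameters and the file-descriptor limit took effect (the limits.conf change only applies to new login sessions):

sysctl kernel.pid_max vm.swappiness
ulimit -n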

# hostname -I may return several addresses; take the first
ceph_ip=$(hostname -I | awk '{print $1}')
echo "$ceph_ip $(hostname)" >>/etc/hosts
ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
ssh-copy-id root@`hostname`
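
ceph-deploy drives every step over SSH, even on a single node, so verify that passwordless login works before continuing:

ssh root@`hostname` hostname    # should print the hostname with no password prompt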
3. Install Ceph
mkdir -p ~/cephadmin
cd ~/cephadmin
yum install -y ceph-deploy python-setuptools ceph
ceph-deploy new --public-network 192.168.86.0/24 --cluster-network 192.168.86.0/24 `hostname`  # adjust both subnets to this host's network
ceph-deploy mon create-initial
ceph-deploy admin `hostname`
ceph-deploy mgr create `hostname`
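
The monitor and manager should now be up. ceph -s should answer; until the OSDs are created in step 5 it will warn that no OSDs exist, which is expected here:

ceph -s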

cat >>~/cephadmin/ceph.conf<<EOF
[osd]
osd_op_thread_suicide_timeout = 600
osd_op_thread_timeout = 300
osd_recovery_thread_timeout = 300
osd_recovery_thread_suicide_timeout = 600
osd_memory_target = 2147483648
osd_scrub_begin_hour = 0
osd_scrub_end_hour = 8
osd_max_markdown_count = 10
osd_crush_chooseleaf_type = 0
osd_crush_update_on_start = false
EOF

ceph-deploy --overwrite-conf config push `hostname`
systemctl restart ceph-mon.target
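
Verify the pushed [osd] section actually reached the node's runtime config:

grep osd_memory_target /etc/ceph/ceph.conf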
4. Patch ceph-volume to accept loop devices
ceph-volume only treats devices whose TYPE is "disk" as usable, so widen the check to also accept "loop":
sed -i "s/return TYPE == 'disk'/return TYPE == 'disk' or TYPE == 'loop'/g" /usr/lib/python2.7/site-packages/ceph_volume/util/disk.py
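
A quick check that the substitution landed:

grep -n "TYPE == 'loop'" /usr/lib/python2.7/site-packages/ceph_volume/util/disk.py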
5. Create OSDs
for i in {1..3}; do ceph-deploy osd create --bluestore --data "/dev/loop$i" `hostname`; done
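
All three OSDs should come up automatically; before the CRUSH rearrangement in step 6 they sit under the real host bucket:

ceph osd tree
ceph -s    # expect 3 osds: 3 up, 3 in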
6. Add virtual CRUSH hosts so multiple replicas fit on one node
ceph osd crush add-bucket virtual-node01 host
ceph osd crush add-bucket virtual-node02 host
ceph osd crush add-bucket virtual-node03 host
ceph osd crush move virtual-node01 root=default
ceph osd crush move virtual-node02 root=default
ceph osd crush move virtual-node03 root=default

With only the three OSDs created above, place one under each virtual node (if you later add more OSDs, distribute them round-robin across the virtual nodes in the same way):

ceph osd crush move osd.0 host=virtual-node01
ceph osd crush move osd.1 host=virtual-node02
ceph osd crush move osd.2 host=virtual-node03
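
ceph osd tree should now show exactly one OSD under each virtual-node bucket, so a pool with the default replicated size of 3 can place each replica on a different (virtual) host:

ceph osd tree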

ceph config set mon auth_allow_insecure_global_id_reclaim false  # silences the insecure global_id reclaim health warning (Nautilus 14.2.20+)
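
As a final end-to-end check, write into a throwaway pool with rados bench; the pool name testpool and the PG count of 32 are arbitrary choices for this sketch, not part of the original setup:

ceph osd pool create testpool 32 32
ceph osd pool application enable testpool rbd
rados bench -p testpool 10 write --no-cleanup
rados -p testpool cleanup
# removing the pool afterwards requires mon_allow_pool_delete=true:
# ceph config set mon mon_allow_pool_delete true
# ceph osd pool delete testpool testpool --yes-i-really-really-mean-it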

More configuration references:

https://www.kancloud.cn/willseecloud/ceph/1799256

https://docs.ceph.com/en/pacific/cephadm/install/