Deploying a Ceph Cluster on CentOS 7 and Integrating It with OpenStack
.1. Introduction
Linux has been steadily pushing into scalable computing, and scalable storage in particular. Ceph is a recent addition to Linux's impressive lineup of file system options: a distributed file system that adds replication and fault tolerance while maintaining POSIX compatibility.
The Ceph ecosystem can be divided into four parts (a quick way to see these roles on a running node is sketched after this list):
1. Clients: the data consumers.
2. cmds: the metadata server cluster, which caches and synchronizes the distributed metadata.
3. cosd: the object storage cluster, which stores both data and metadata as objects and performs other key functions.
4. cmon: the cluster monitors, which carry out the monitoring functions.
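In the Luminous release deployed below, these roles correspond to the ceph-mon, ceph-mgr, ceph-osd, and ceph-mds daemons. A minimal sketch, assuming systemd-managed services, for listing which of them a given node is actually running:
[root@controller ~]# systemctl list-units 'ceph*' --type=service
[root@controller ~]# systemctl status ceph-mon@controller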
.2. Preparation
Prepare three CentOS 7 virtual machines: configure the IP address and hostname on each, synchronize the system time, disable the firewall and SELinux, add the IP-to-hostname mappings, and attach an extra disk to every VM (a sketch of these steps follows below).
IP              hostname
192.168.29.145  controller
192.168.29.146  computer
192.168.29.147  storager
Note: if an OpenStack cluster has already been created, delete its instances, images, and volumes first.
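A minimal sketch of the host preparation, assuming the hostnames and addresses in the table above and chrony for time synchronization (run the equivalent commands on computer and storager as well):
[root@controller ~]# hostnamectl set-hostname controller
[root@controller ~]# systemctl stop firewalld && systemctl disable firewalld
[root@controller ~]# setenforce 0
[root@controller ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[root@controller ~]# yum install chrony -y && systemctl enable chronyd && systemctl start chronyd
[root@controller ~]# cat >> /etc/hosts <<EOF
192.168.29.145 controller
192.168.29.146 computer
192.168.29.147 storager
EOF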
Install the EPEL and Ceph release packages
[root@controller ~]# yum install epel-release centos-release-ceph-luminous -y
[root@computer ~]# yum install epel-release centos-release-ceph-luminous -y
[root@storager ~]# yum install epel-release centos-release-ceph-luminous -y
.3. Configure the Ceph repository
[root@controller ~]# vi /etc/yum.repos.d/ceph.repo
[root@computer ~]# vi /etc/yum.repos.d/ceph.repo
[root@storager ~]# vi /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/x86_64
enabled=1
gpgcheck=0
type=rpm-md
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
priority=1
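After saving the repo file on each node, refreshing the yum cache and confirming that the Ceph repositories are visible is a quick sanity check (same commands on all three nodes):
[root@controller ~]# yum clean all && yum makecache
[root@controller ~]# yum repolist enabled | grep -i ceph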
.4. Install the Ceph packages
[root@controller ~]# yum install ceph ceph-deploy -y
[root@computer ~]# yum install ceph ceph-deploy -y
[root@storager ~]# yum install ceph ceph-deploy -y
.5. Install libvirt on the computer node
[root@computer ~]# yum install libvirt -y
.6. Deploy the Ceph cluster
[root@controller ~]# cd /etc/ceph/
[root@controller ceph]# ceph-deploy new controller computer storager
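ceph-deploy new should leave a skeleton ceph.conf, a ceph.mon.keyring, and a deployment log in the working directory; a quick look before editing:
[root@controller ceph]# ls /etc/ceph/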
.7. Edit the configuration file
[root@controller ceph]# vi ceph.conf

[global]
fsid = e5288fdd-d279-414b-8391-e440d82cc925
mon_initial_members = controller, computer, storager
mon_host = 192.168.29.145,192.168.29.146,192.168.29.147
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# keep three replicas
osd_pool_default_size = 3
# public network of the cluster
public_network = 192.168.29.0/24
mon allow pool delete = true
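If ceph.conf needs further changes after the monitors are up, one way to redistribute it to every node from the ceph-deploy working directory (a sketch, assuming the same three hostnames):
[root@controller ceph]# ceph-deploy --overwrite-conf config push controller computer storager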
.8. Initialize the cluster monitors
[root@controller ceph]# ceph-deploy mon create-initial
.9. Create the OSDs
[root@controller ceph]# ceph-deploy disk zap controller:sdb computer:sdb storager:sdb
[root@controller ceph]# ceph-deploy osd create controller:sdb computer:sdb storager:sdb
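Once the OSDs are created, they should show up as up and in in the CRUSH tree:
[root@controller ceph]# ceph osd tree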
.10. Distribute the admin keyring
[root@controller ceph]# ceph-deploy admin controller computer storager
.11. Make the admin keyring readable
[root@controller ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@computer ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@storager ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
.12. Create the mgr daemons
[root@controller ceph]# ceph-deploy mgr create controller computer storager
.13. Check the cluster status
[root@controller ceph]# ceph -s
.14. Check the cluster capacity
[root@controller ceph]# ceph df
.15. Create the pools
[root@controller ceph]# ceph osd pool create volumes 64
[root@controller ceph]# ceph osd pool create vms 64
[root@controller ceph]# ceph osd pool create images 64
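The pg_num of 64 follows the usual small-cluster rule of thumb of roughly (OSDs x 100) / replica count placement groups spread across the pools, rounded to a power of two; each pool's value can be confirmed afterwards:
[root@controller ceph]# ceph osd pool get volumes pg_num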
.16. Enable the rbd application on the pools
[root@controller ceph]# ceph osd pool application enable vms rbd
[root@controller ceph]# ceph osd pool application enable images rbd
[root@controller ceph]# ceph osd pool application enable volumes rbd
.17. Check the mon, OSD, and pool status
[root@controller ceph]# ceph mon stat
[root@controller ceph]# ceph osd status
[root@controller ceph]# ceph osd lspools
.18. List the RBD images in each pool
[root@controller ~]# rbd ls vms
[root@controller ~]# rbd ls volumes
[root@controller ~]# rbd ls images
.19. Integrate the Ceph cluster with OpenStack
[root@controller ceph]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
[root@controller ceph]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
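The capabilities just granted can be inspected at any time:
[root@controller ceph]# ceph auth get client.cinder
[root@controller ceph]# ceph auth get client.glance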
.20. Create the keyring files
[root@controller ceph]# ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring
[root@controller ceph]# ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
.21. Send the cinder key to the computer node
[root@controller ~]# ceph auth get-key client.cinder > client.cinder.key
[root@controller ~]# scp client.cinder.key computer:/root/
.22. Fix the keyring ownership
[root@controller ceph]# chown glance.glance /etc/ceph/ceph.client.glance.keyring
[root@controller ceph]# chown cinder.cinder /etc/ceph/ceph.client.cinder.keyring
.23. Set up the libvirt secret
# Generate a UUID on the computer node
[root@computer ~]# uuidgen
1fad1f90-63fb-4c15-bfc3-366c6559c1fe
.24. Create the secret file
[root@computer ~]# vi secret.xml
<secret ephemeral='no' private='no'>
  <uuid>1fad1f90-63fb-4c15-bfc3-366c6559c1fe</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
.25. Define the secret
[root@computer ~]# virsh secret-define --file secret.xml
.26. Set the secret value
[root@computer ~]# virsh secret-set-value --secret 1fad1f90-63fb-4c15-bfc3-366c6559c1fe --base64 $(cat client.cinder.key) && rm -rf client.cinder.key secret.xml
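The secret should now be visible to libvirt:
[root@computer ~]# virsh secret-list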
.27. Integrate the Glance module
Edit the configuration file
[root@controller ~]# vi /etc/glance/glance-api.conf
[glance_store]
# comment out the existing default store settings first
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
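Optionally, exposing the image locations lets Cinder and Nova create copy-on-write clones from Glance images instead of full copies; this setting goes in the [DEFAULT] section of the same file:
[DEFAULT]
show_image_direct_url = True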
.28. Restart the service
[root@controller ~]# systemctl restart openstack-glance-api
.29. Integrate the Cinder module
[root@controller ~]# vi /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.29.145
enabled_backends = ceph
default_volume_type = ceph

[ceph]
glance_api_version = 2
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
# the UUID generated on the computer node
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe
.30. Sync the database
# If a cinder database already exists, drop it, recreate it, and then sync again
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
.32. Restart the services
[root@controller ~]# systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
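After the restart, the Ceph backend should register as a cinder-volume host (typically shown as controller@ceph):
[root@controller ~]# source admin-openrc
[root@controller ~]# cinder service-list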
.33. Create the ceph volume type and bind it to the backend
[root@controller ~]# source admin-openrc
[root@controller ~]# cinder type-create ceph
[root@controller ~]# cinder type-key ceph set volume_backend_name=ceph
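A quick end-to-end check (the volume name ceph-test is only an example): create a 1 GB volume of the new type and confirm that a matching RBD image appears in the volumes pool.
[root@controller ~]# cinder create --volume-type ceph --name ceph-test 1
[root@controller ~]# rbd ls volumes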
.34. Integrate the nova-compute module
[root@computer ~]# vi /etc/nova/nova.conf
[libvirt]
virt_type = qemu
inject_password = true
inject_partition = -1
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe
disk_cachemodes = "network=writeback"
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard = unmap
[root@computer ~]# vi /etc/ceph/ceph.conf

[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
.35. Create the socket and log directories
[root@computer ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@computer ~]# chmod 777 -R /var/run/ceph/guests/ /var/log/qemu/
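On CentOS 7, /var/run is a tmpfs, so the guests directory vanishes on reboot; an optional sketch for recreating it automatically with systemd-tmpfiles (the file name ceph-guests.conf is arbitrary):
[root@computer ~]# cat > /etc/tmpfiles.d/ceph-guests.conf <<EOF
d /var/run/ceph/guests 0777 root root -
EOF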
.36. Distribute the cinder keyring from the controller
[root@controller ~]# cd /etc/ceph
[root@controller ceph]# scp ceph.client.cinder.keyring root@computer:/etc/ceph
[root@controller ceph]# scp ceph.client.cinder.keyring root@storager:/etc/ceph
.37. Restart the services
[root@computer ~]# systemctl stop libvirtd openstack-nova-compute
[root@computer ~]# systemctl start libvirtd openstack-nova-compute
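As a final check, after booting a test instance (from Horizon or the CLI), its root disk should appear in the vms pool as an image named <instance-uuid>_disk, and any volume created from the ceph type should appear in the volumes pool:
[root@computer ~]# rbd ls vms
[root@computer ~]# rbd ls volumes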