大佬教程收集整理的这篇文章主要介绍了019 Ceph整合openstack,大佬教程大佬觉得挺不错的,现在分享给大家,也给大家做个参考。
[[email protected] ~]# vi ./keystonerc_admin
unset OS_SERVICE_TOKEN export OS_USERNAME=admin export OS_PASSWORD=9f0b699989a04a05 export OS_AUTH_URL=http://172.25.250.11:5000/v2.0 export PS1='[\u@\h \W(keystone_admin)]\$ ' export OS_TENANT_NAME=admin export OS_REGION_NAME=RegionOne
[[email protected] ~(keystone_admin)]# openstack service list
[[email protected] ~(keystone_admin)]# yum -y install ceph-common
[[email protected] ~(keystone_admin)]# chown ceph:ceph /etc/ceph/
[[email protected] ~]# ceph osd pool create images 128 128
pool 'images' created
[[email protected] ~]# ceph osd pool application enable images rbd
enabled application 'rbd' on pool 'images'
[[email protected] ~]# ceph osd pool ls
images
[[email protected] ~]# ceph auth get-or-create client.images mon 'profile rbd' osd 'profile rbd pool=images' -o /etc/ceph/ceph.client.images.keyring
[[email protected] ~]# ll /etc/ceph/ceph.client.images.keyring
-rw-r--r-- 1 root root 64 Mar 30 14:18 /etc/ceph/ceph.client.images.keyring
[[email protected] ~]# ceph auth list|grep -A 4 images
[[email protected] ~]# scp /etc/ceph/ceph.conf [email protected]:/etc/ceph/ceph.conf
ceph.conf 100% 470 725.4KB/s 00:00
[[email protected] ~]# scp /etc/ceph/ceph.client.images.keyring [email protected]:/etc/ceph/ceph.client.images.keyring
ceph.client.images.keyring 100% 64 145.2KB/s 00:00
[[email protected] ~(keystone_admin)]# ceph --id images -s
cluster: id: 2d58e9ec-9bc0-4d43-831c-24b345fc2a94 health: HEALTH_OK services: mon: 3 daemons,quorum serverc,serverd,servere mgr: serverc(active),standbys: serverd,servere osd: 9 osds: 9 up,9 in data: pools: 1 pools,128 pgs objects: 14 objects,25394 kB usage: 1044 MB used,133 GB / 134 GB avail pgs: 128 active+clean
[[email protected] ~(keystone_admin)]# chgrp glance /etc/ceph/ceph.client.images.keyring
[[email protected] ~(keystone_admin)]# chmod 0640 /etc/ceph/ceph.client.images.keyring
[[email protected] ~(keystone_admin)]# vim /etc/glance/glance-api.conf
[glance_store] stores = rbd default_store = rbd filesystem_store_datadir = /var/lib/glance/images/ rbd_store_chunk_size = 8 rbd_store_pool = images rbd_store_user = images rbd_store_ceph_conf = /etc/ceph/ceph.conf rados_connect_timeout = 0 os_region_name=RegionOne
[[email protected] ~(keystone_admin)]# grep -Ev "^$|^[#;]" /etc/glance/glance-api.conf
[DEFAULT] bind_host = 0.0.0.0 bind_port = 9292 workers = 2 image_cache_dir = /var/lib/glance/image-cache registry_host = 0.0.0.0 debug = false log_file = /var/log/glance/api.log log_dir = /var/log/glance [cors] [cors.subdomain] [database] connection = mysql+pymysql://glance:[email protected]/glance [glance_store] stores = rbd default_store = rbd default_store = file filesystem_store_datadir = /var/lib/glance/images/ rbd_store_chunk_size = 8 rbd_store_pool = images rbd_store_user = images rbd_store_ceph_conf = /etc/ceph/ceph.conf rados_connect_timeout = 0 os_region_name=RegionOne [image_format] [keystone_authtoken] auth_uri = http://172.25.250.11:5000/v2.0 auth_type = password project_name=services username=glance password=99b29d9142514f0f auth_url=http://172.25.250.11:35357 [matchmaker_redis] [oslo_concurrency] [oslo_messaging_amqp] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_messaging_zmq] [oslo_middleware] [oslo_policy] policy_file = /etc/glance/policy.json [paste_deploy] flavor = keystone [profiler] [store_type_location_strategy] [task] [taskflow_executor]
[[email protected] ~(keystone_admin)]# systemctl restart openstack-glance-api
[[email protected] ~(keystone_admin)]# wget http://materials/small.img
[[email protected] ~(keystone_admin)]# openstack image create --container-format bare --disk-format raw --file ./small.img "small Image"
+------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | ee1eca47dc88f4879d8a229cc70a07c6 | | container_format | bare | | created_at | 2019-03-30T06:35:40Z | | disk_format | raw | | file | /v2/images/f60f2b0f-8c7d-42c2-b158-53d9a436efc9/file | | id | f60f2b0f-8c7d-42c2-b158-53d9a436efc9 | | min_disk | 0 | | min_ram | 0 | | name | small Image | | owner | 79cf145d371e48ef96f608cbf85d1788 | | protected | false | | schema | /v2/schemas/image | | size | 13287936 | | status | active | | tags | | | updated_at | 2019-03-30T06:35:41Z | | virtual_size | None | | visibility | private | +------------------+------------------------------------------------------+
[[email protected] ~(keystone_admin)]# rbd --id images -p images ls
14135e67-39bb-4b1c-aa11-fd5b26599ee7 42030515-33b2-4875-8b63-119a0dbc12d4 f60f2b0f-8c7d-42c2-b158-53d9a436efc9
[[email protected] ~(keystone_admin)]# glance image-list
+--------------------------------------+-------------+ | ID | Name | +--------------------------------------+-------------+ | f60f2b0f-8c7d-42c2-b158-53d9a436efc9 | small Image | +--------------------------------------+-------------+
[[email protected] ~(keystone_admin)]# rbd --id images info images/f60f2b0f-8c7d-42c2-b158-53d9a436efc9
rbd image 'f60f2b0f-8c7d-42c2-b158-53d9a436efc9': size 12976 kB in 2 objects order 23 (8192 kB objects) block_name_prefix: rbd_data.109981f1d12 format: 2 features: layering,exclusive-lock,object-map,fast-diff,deep-flatten flags: create_timestamp: Sat Mar 30 14:35:41 2019
[[email protected] ~(keystone_admin)]# openstack image delete "small Image"
[[email protected] ~(keystone_admin)]# openstack image list
[[email protected] ~(keystone_admin)]# rbd --id images -p images ls
14135e67-39bb-4b1c-aa11-fd5b26599ee7 42030515-33b2-4875-8b63-119a0dbc12d4
[[email protected] ~(keystone_admin)]# ceph osd pool ls --id images
images
[[email protected] ~]# ceph osd pool create volumes 128
pool 'volumes' created
[[email protected] ~]# ceph osd pool application enable volumes rbd
enabled application 'rbd' on pool 'volumes'
[[email protected] ~]# ceph auth get-or-create client.volumes mon 'profile rbd' osd 'profile rbd pool=volumes,profile rbd pool=images' -o /etc/ceph/ceph.client.volumes.keyring
[[email protected] ~]# ceph auth list|grep -A 4 volumes
installed auth entries: client.volumes key: AQBOEZ9ckRr3BxAAaWB8lpYRrUQ+z/Bgk3Rfbg== caps: [mon] profile rbd caps: [osd] profile rbd pool=volumes,profile rbd pool=images mgr.serverc key: AQAKu55cKGFlHBAAXjlvu2GFiJXUQx04PcidgA== caps: [mds] allow * caps: [mon] allow profile mgr
[[email protected] ~]# scp /etc/ceph/ceph.client.volumes.keyring [email protected]:/etc/ceph/ceph.client.volumes.keyring
[email protected]'s password: ceph.client.volumes.keyring 100% 65 108.3KB/s 00:00
[[email protected] ~]# ceph auth get-key client.volumes|ssh [email protected] tee ./client.volumes.key
[email protected]'s password: AQBOEZ9ckRr3BxAAaWB8lpYRrUQ+z/Bgk3Rfbg==[[email protected] ~]#
[[email protected] ~(keystone_admin)]# ceph --id volumes -s
cluster: id: 2d58e9ec-9bc0-4d43-831c-24b345fc2a94 health: HEALTH_OK services: mon: 3 daemons,quorum serverc,serverd,servere mgr: serverc(active),standbys: serverd,servere osd: 9 osds: 9 up,9 in data: pools: 2 pools,256 pgs objects: 14 objects,25394 kB usage: 1048 MB used,133 GB / 134 GB avail pgs: 256 active+clean
[[email protected] ~(keystone_admin)]# chgrp cinder /etc/ceph/ceph.client.volumes.keyring
[[email protected] ~(keystone_admin)]# chmod 0640 /etc/ceph/ceph.client.volumes.keyring
[[email protected] ~(keystone_admin)]# uuidgen |tee ~/myuuid.txt
f3fbcf03-e208-4fba-9c47-9ff465847468
[[email protected] ~(keystone_admin)]# vi /etc/cinder/cinder.conf
enabled_backends = ceph glance_api_version = 2 #default_volume_type = iscsi [ceph] volume_driver = cinder.volume.drivers.rbd.RBDDriver rbd_pool = volumes rbd_user = volumes rbd_ceph_conf = /etc/ceph/ceph.conf rbd_flatten_volume_from_snapshot = false rbd_secret_uuid = f3fbcf03-e208-4fba-9c47-9ff465847468 rbd_max_clone_depth = 5 rbd_store_chunk_size = 4 rados_connect_timeout = -1 # 指定volume_backend_name,可忽略 volume_backend_name = ceph
[[email protected] ~(keystone_admin)]# systemctl restart openstack-cinder-api
[[email protected] ~(keystone_admin)]# systemctl restart openstack-cinder-volume
[[email protected] ~(keystone_admin)]# systemctl restart openstack-cinder-scheduler
[[email protected] ~]$ sudo tail -20 /var/log/cinder/volume.log
2019-03-30 15:11:05.800 23646 INFO cinder.volume.manager [req-76edfdb3-dd84-4377-9a4e-de5f79391609 - - - - -] Driver initialization completed successfully. 2019-03-30 15:11:05.819 23646 INFO cinder.volume.manager [req-76edfdb3-dd84-4377-9a4e-de5f79391609 - - - - -] Initializing RPC dependent components of volume driver RBDDriver (1.2.0) 2019-03-30 15:11:05.871 23646 INFO cinder.volume.manager [req-76edfdb3-dd84-4377-9a4e-de5f79391609 - - - - -] Driver post RPC initialization completed successfully. 2019-03-30 15:12:30.398 23878 INFO cinder.volume.manager [req-ba2a8ef1-e3f0-4a36-a0eb-5a300367c60c - - - - -] Driver initialization completed successfully. 2019-03-30 15:12:30.420 23878 INFO cinder.volume.manager [req-ba2a8ef1-e3f0-4a36-a0eb-5a300367c60c - - - - -] Initializing RPC dependent components of volume driver RBDDriver (1.2.0) 2019-03-30 15:12:30.474 23878 INFO cinder.volume.manager [req-ba2a8ef1-e3f0-4a36-a0eb-5a300367c60c - - - - -] Driver post RPC initialization completed successfully.
[[email protected] ~(keystone_admin)]# vim ~/ceph.xml
<secret ephemeral="no" private="no"> <uuid>f3fbcf03-e208-4fba-9c47-9ff465847468</uuid> <usage type="ceph"> <name>client.volumes secret</name> </usage> </secret>
[[email protected] ~(keystone_admin)]# virsh secret-define --file ~/ceph.xml
Secret f3fbcf03-e208-4fba-9c47-9ff465847468 created
[[email protected] ~(keystone_admin)]# virsh secret-set-value --secret f3fbcf03-e208-4fba-9c47-9ff465847468 --base64 $(cat /home/ceph/client.volumes.key)
Secret value set
[[email protected] ~(keystone_admin)]# openstack volume create --description "Test Volume" --size 1 testvolume
+---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | None | | created_at | 2019-03-30T07:22:26.436978 | | description | Test Volume | | encrypted | false | | id | b9cf60d5-3cff-4cde-ab1d-4747adff7943 | | migration_status | None | | multiattach | false | | name | testvolume | | properties | | | replication_status | disabled | | size | 1 | | snapshot_id | None | | source_volid | None | | status | creating | | type | None | | updated_at | None | | user_id | 8e0be34493e04722ba03ab30fbbf3bf8 | +---------------------+--------------------------------------+
[[email protected] ~(keystone_admin)]# openstack volume list -c ID -c 'Display Name' -c Status -c Size
+--------------------------------------+--------------+-----------+------+
| ID | Display Name | Status | Size |
+--------------------------------------+--------------+-----------+------+
| b9cf60d5-3cff-4cde-ab1d-4747adff7943 | testvolume | available | 1 |
+--------------------------------------+--------------+-----------+------+
[[email protected] ~(keystone_admin)]# rbd --id volumes -p volumes ls
volume-b9cf60d5-3cff-4cde-ab1d-4747adff7943
[[email protected] ~(keystone_admin)]# rbd --id volumes -p volumes info volumes/volume-b9cf60d5-3cff-4cde-ab1d-4747adff7943
rbd image 'volume-b9cf60d5-3cff-4cde-ab1d-4747adff7943': size 1024 MB in 256 objects order 22 (4096 kB objects) block_name_prefix: rbd_data.10e3589b2aec format: 2 features: layering,deep-flatten flags: create_timestamp: Sat Mar 30 15:22:27 2019
[[email protected] ~(keystone_admin)]# openstack volume delete testvolume
[[email protected] ~(keystone_admin)]# openstack volume list
[[email protected] ~(keystone_admin)]# rbd --id volumes -p volumes ls
[[email protected] ~(keystone_admin)]# rm ~/ceph.xml
[[email protected] ~(keystone_admin)]# rm ~/myuuid.txt
关于对象网关后续更新
博主声明:本文的内容来源主要来自誉天教育晏威老师,由本人实验完成操作验证,需要的博友请联系誉天教育(http://www.yutianedu.com/),获得官方同意或者晏老师(https://www.cnblogs.com/breezey/)本人同意即可转载,谢谢!
以上是大佬教程为你收集整理的019 Ceph整合openstack全部内容,希望文章能够帮你解决019 Ceph整合openstack所遇到的程序开发问题。
如果觉得大佬教程网站内容还不错,欢迎将大佬教程推荐给程序员好友。
本图文内容来源于网友网络收集整理提供,作为学习参考使用,版权属于原作者。
如您有任何意见或建议可联系处理。小编QQ:384754419,请注明来意。