Make the Compute nodes Highly Available

Install Compute & Neutron

  1. [root@compute01 ~]# yum install openstack-selinux python-openstackclient yum-plugin-priorities openstack-nova-compute openstack-utils -y
  2. [root@compute01 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y

Configure Nova

  1. [root@compute01 ~]# mkdir -p /data/nova/instances
  2. [root@compute01 ~]# chown -R nova:nova /data/nova
  3.  
  4. [root@compute01 ~]# cp /etc/nova/nova.conf{,.bak}
  5.  
  6. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT instances_path /data/nova/instances
  7. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
  8. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  9. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.220.51
  10. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
  11. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
  12. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT cpu_allocation_ratio 10
  13.  
  14. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
  15. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
  16.  
  17. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
  18.  
  19. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
  20. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
  21. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
  22. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
  23. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
  24. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
  25. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
  26. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
  27. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
  28.  
  29. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
  30. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
  31. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address compute01
  32. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
  33.  
  34. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
  35.  
  36. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  37.  
  38. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
  39. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
  40. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
  41. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
  42. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
  43. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
  44. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
  45. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
  46.  
  47. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
  48.  
  49. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
  50. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
  51. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
  52. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
  53. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
  54. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
  55. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
  56. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
  57. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron password neutron
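Before starting the services, it is worth spot-checking that the values above actually landed in nova.conf. A minimal sketch, reading a few keys back with the same openstack-config (crudini) tool from openstack-utils that was used to write them:

  1. openstack-config --get /etc/nova/nova.conf DEFAULT transport_url
  2. openstack-config --get /etc/nova/nova.conf placement auth_url
  3. openstack-config --get /etc/nova/nova.conf neutron url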

Configure Neutron

  1. [root@compute01 ~]# cp /etc/neutron/neutron.conf{,.bak}
  2.  
  3. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
  4. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  5.  
  6. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
  7. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
  8. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
  9. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_plugin password
  10. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_id default
  11. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_id default
  12. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
  13. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
  14. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password neutron
  15.  
  16. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp

Configure LinuxBridge Agent

  1. [root@compute01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
  2.  
  3. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:'ens33'
  4.  
  5. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
  6. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  7.  
  8. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false

Start Compute Service

  1. [root@compute01 ~]# systemctl enable libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
  2. [root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
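If the configuration is correct, all three services should report active; a quick check (the log path is the default RDO location):

  1. systemctl is-active libvirtd openstack-nova-compute neutron-linuxbridge-agent
  2. tail -n 20 /var/log/nova/nova-compute.log   # watch for AMQP/Keystone connection errors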

Highly available Nova API

Create Nova Service & Endpoint

  1. [root@controller1 ~]# openstack service create --name nova --description "OpenStack Compute" compute
  2. +-------------+----------------------------------+
  3. | Field       | Value                            |
  4. +-------------+----------------------------------+
  5. | description | OpenStack Compute                |
  6. | enabled     | True                             |
  7. | id          | 365c1378f8c641ba81f48efa7c62cd29 |
  8. | name        | nova                             |
  9. | type        | compute                          |
  10. +-------------+----------------------------------+
  11. [root@controller1 ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
  12. +--------------+----------------------------------+
  13. | Field        | Value                            |
  14. +--------------+----------------------------------+
  15. | enabled      | True                             |
  16. | id           | 2afb267d1bd34f9388f9568321313167 |
  17. | interface    | public                           |
  18. | region       | RegionOne                        |
  19. | region_id    | RegionOne                        |
  20. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  21. | service_name | nova                             |
  22. | service_type | compute                          |
  23. | url          | http://controller:8774/v2.1      |
  24. +--------------+----------------------------------+
  25. [root@controller1 ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
  26. +--------------+----------------------------------+
  27. | Field        | Value                            |
  28. +--------------+----------------------------------+
  29. | enabled      | True                             |
  30. | id           | 92de622e904546cc9bd1ca4087529e98 |
  31. | interface    | internal                         |
  32. | region       | RegionOne                        |
  33. | region_id    | RegionOne                        |
  34. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  35. | service_name | nova                             |
  36. | service_type | compute                          |
  37. | url          | http://controller:8774/v2.1      |
  38. +--------------+----------------------------------+
  39. [root@controller1 ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
  40. +--------------+----------------------------------+
  41. | Field        | Value                            |
  42. +--------------+----------------------------------+
  43. | enabled      | True                             |
  44. | id           | 9295b974edec4e06842ee4db2e2d2458 |
  45. | interface    | admin                            |
  46. | region       | RegionOne                        |
  47. | region_id    | RegionOne                        |
  48. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  49. | service_name | nova                             |
  50. | service_type | compute                          |
  51. | url          | http://controller:8774/v2.1      |
  52. +--------------+----------------------------------+
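With the three endpoints in place, a quick listing confirms they all point at the controller VIP (assumes the admin credentials are already sourced):

  1. openstack endpoint list --service compute -c Interface -c URL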

Create Placement & Endpoint

  1. [root@controller1 ~]# openstack user create --domain default --password=placement placement
  2. +---------------------+----------------------------------+
  3. | Field               | Value                            |
  4. +---------------------+----------------------------------+
  5. | domain_id           | default                          |
  6. | enabled             | True                             |
  7. | id                  | c18a3e3bf63c4114924fb6b31b3305fd |
  8. | name                | placement                        |
  9. | options             | {}                               |
  10. | password_expires_at | None                             |
  11. +---------------------+----------------------------------+
  12. [root@controller1 ~]# openstack role add --project service --user placement admin
  13. [root@controller1 ~]# openstack service create --name placement --description "Placement API" placement
  14. +-------------+----------------------------------+
  15. | Field       | Value                            |
  16. +-------------+----------------------------------+
  17. | description | Placement API                    |
  18. | enabled     | True                             |
  19. | id          | 99a48fd2c3bf496287fa1fcf82376c02 |
  20. | name        | placement                        |
  21. | type        | placement                        |
  22. +-------------+----------------------------------+
  23. [root@controller1 ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
  24. +--------------+----------------------------------+
  25. | Field        | Value                            |
  26. +--------------+----------------------------------+
  27. | enabled      | True                             |
  28. | id           | c271472e882f49c3a77c518296d4045c |
  29. | interface    | public                           |
  30. | region       | RegionOne                        |
  31. | region_id    | RegionOne                        |
  32. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  33. | service_name | placement                        |
  34. | service_type | placement                        |
  35. | url          | http://controller:8778           |
  36. +--------------+----------------------------------+
  37. [root@controller1 ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
  38. +--------------+----------------------------------+
  39. | Field        | Value                            |
  40. +--------------+----------------------------------+
  41. | enabled      | True                             |
  42. | id           | b04f59956eca4d8cab4b3a41a071fc6b |
  43. | interface    | internal                         |
  44. | region       | RegionOne                        |
  45. | region_id    | RegionOne                        |
  46. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  47. | service_name | placement                        |
  48. | service_type | placement                        |
  49. | url          | http://controller:8778           |
  50. +--------------+----------------------------------+
  51. [root@controller1 ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
  52. +--------------+----------------------------------+
  53. | Field        | Value                            |
  54. +--------------+----------------------------------+
  55. | enabled      | True                             |
  56. | id           | 582bf2a041734838adbf92d7dd4b602e |
  57. | interface    | admin                            |
  58. | region       | RegionOne                        |
  59. | region_id    | RegionOne                        |
  60. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  61. | service_name | placement                        |
  62. | service_type | placement                        |
  63. | url          | http://controller:8778           |
  64. +--------------+----------------------------------+

Install Nova

  1. [root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y

Configure Nova

  1. [root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
  2.  
  3. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip controller1
  4. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
  5. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen controller1
  6. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen_port 8774
  7. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen controller1
  8. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen_port 8775
  9. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
  10. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
  11. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  12.  
  13. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
  14. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
  15.  
  16. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
  17.  
  18. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
  19. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
  20. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
  21. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
  22. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
  23. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
  24. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
  25. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
  26. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
  27.  
  28. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
  29. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen controller1
  30. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address controller1
  31. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_host controller1
  32. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_port 6080
  33.  
  34. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
  35.  
  36. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  37.  
  38. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
  39. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
  40. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
  41. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
  42. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
  43. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url  http://controller:35357/v3
  44. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
  45. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
  46.  
  47. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
  48.  
  49. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache enabled true
  50. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
  51. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211

Configure Nova-Placement

Refer to “CLOUD COMPUTING —> OpenStack Pike Installation —> 7.Nova”

  1. [root@controller1 ~]# echo "
  2. > #Placement API
  3. > <Directory /usr/bin>
  4. >    <IfVersion >= 2.4>
  5. >       Require all granted
  6. >    </IfVersion>
  7. >    <IfVersion < 2.4>
  8. >       Order allow,deny
  9. >       Allow from all
  10. >    </IfVersion>
  11. > </Directory>
  12. > ">>/etc/httpd/conf.d/00-nova-placement-api.conf
  14. [root@controller1 ~]# systemctl restart httpd
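A simple way to confirm Apache is now serving the Placement API is to request its root URL, which returns a version document without authentication (still on the default port 8778 at this point):

  1. curl -s http://controller1:8778
  2. # expected: a small JSON document listing the available placement API versions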

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
  2. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
  3. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
  4. [root@controller1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

List Nova Cell

  1. [root@controller1 ~]# nova-manage cell_v2 list_cells
  2. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
  3. |  Name |                 UUID                 |              Transport URL              |               Database Connection               |
  4. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
  5. | cell0 | 00000000-0000-0000-0000-000000000000 |                  none:/                 | mysql+pymysql://nova:****@controller/nova_cell0 |
  6. | cell1 | b55310bc-6d58-4c8d-9b7c-014b77cd48ac | rabbit://openstack:****@controller:5673 |    mysql+pymysql://nova:****@controller/nova    |
  7. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+

Replace Default Placement Port

Move the local Placement vhost from port 8778 to 9778 so that HAProxy can bind 8778 on the controller VIP in the next step.

  1. [root@controller1 ~]# sed -i 's/8778/9778/' /etc/httpd/conf.d/00-nova-placement-api.conf
  2. [root@controller1 ~]# systemctl restart httpd
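To verify the change took effect on controller1, something along these lines can be run:

  1. grep Listen /etc/httpd/conf.d/00-nova-placement-api.conf   # should now show 9778
  2. netstat -antp | grep 9778                                  # httpd listening on 9778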

Configure HA With HAProxy

  1. [root@controller1 ~]# echo '
  2. > ##nova_compute
  3. > listen nova_compute_api_cluster
  4. >   bind controller:8774
  5. >   balance source
  6. >   option tcpka
  7. >   option httpchk
  8. >   option tcplog
  9. >   server controller1 controller1:8774 check inter 2000 rise 2 fall 5
  10. >   server controller2 controller2:8774 check inter 2000 rise 2 fall 5
  11. >   server controller3 controller3:8774 check inter 2000 rise 2 fall 5
  12. > #Nova-api-metadata
  13. > listen Nova-api-metadata_cluster
  14. >   bind controller:8775
  15. >   balance source
  16. >   option tcpka
  17. >   option httpchk
  18. >   option tcplog
  19. >   server controller1 controller1:8775 check inter 2000 rise 2 fall 5
  20. >   server controller2 controller2:8775 check inter 2000 rise 2 fall 5
  21. >   server controller3 controller3:8775 check inter 2000 rise 2 fall 5
  22. > #nova_placement
  23. > listen nova_placement_cluster
  24. >   bind controller:8778
  25. >   balance source
  26. >   option tcpka
  27. >   option tcplog
  28. >   server controller1 controller1:9778 check inter 2000 rise 2 fall 5
  29. >   server controller2 controller2:9778 check inter 2000 rise 2 fall 5
  30. >   server controller3 controller3:9778 check inter 2000 rise 2 fall 5
  31. > '>>/etc/haproxy/haproxy.cfg
  33. [root@controller1 ~]# systemctl restart haproxy.service
  34.  
  35. netstat -antp|grep haproxy
  36. netstat -antp|egrep '8774|8775|8778|6080'

Start Nova Service

  1. [root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  2. [root@controller ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  3. [root@controller ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

List Nova Status

  1. [root@controller1 ~]# openstack catalog list
  2. +-----------+-----------+-----------------------------------------+
  3. | Name      | Type      | Endpoints                               |
  4. +-----------+-----------+-----------------------------------------+
  5. | glance    | image     | RegionOne                               |
  6. |           |           |   internal: http://controller:9292      |
  7. |           |           | RegionOne                               |
  8. |           |           |   admin: http://controller:9292         |
  9. |           |           | RegionOne                               |
  10. |           |           |   public: http://controller:9292        |
  11. |           |           |                                         |
  12. | nova      | compute   | RegionOne                               |
  13. |           |           |   public: http://controller:8774/v2.1   |
  14. |           |           | RegionOne                               |
  15. |           |           |   admin: http://controller:8774/v2.1    |
  16. |           |           | RegionOne                               |
  17. |           |           |   internal: http://controller:8774/v2.1 |
  18. |           |           |                                         |
  19. | placement | placement | RegionOne                               |
  20. |           |           |   admin: http://controller:8778         |
  21. |           |           | RegionOne                               |
  22. |           |           |   internal: http://controller:8778      |
  23. |           |           | RegionOne                               |
  24. |           |           |   public: http://controller:8778        |
  25. |           |           |                                         |
  26. | neutron   | network   | RegionOne                               |
  27. |           |           |   admin: http://controller:9696         |
  28. |           |           | RegionOne                               |
  29. |           |           |   public: http://controller:9696        |
  30. |           |           | RegionOne                               |
  31. |           |           |   internal: http://controller:9696      |
  32. |           |           |                                         |
  33. | keystone  | identity  | RegionOne                               |
  34. |           |           |   admin: http://controller:35357/v3/    |
  35. |           |           | RegionOne                               |
  36. |           |           |   internal: http://controller:5000/v3/  |
  37. |           |           | RegionOne                               |
  38. |           |           |   public: http://controller:5000/v3/    |
  39. |           |           |                                         |
  40. +-----------+-----------+-----------------------------------------+
  41. [root@controller1 ~]# nova-status upgrade check
  42. +--------------------------------------------------------------------+
  43. | Upgrade Check Results                                              |
  44. +--------------------------------------------------------------------+
  45. | Check: Cells v2                                                    |
  46. | Result: Success                                                    |
  47. | Details: No host mappings or compute nodes were found. Remember to |
  48. |   run command 'nova-manage cell_v2 discover_hosts' when new        |
  49. |   compute hosts are deployed.                                      |
  50. +--------------------------------------------------------------------+
  51. | Check: Placement API                                               |
  52. | Result: Success                                                    |
  53. | Details: None                                                      |
  54. +--------------------------------------------------------------------+
  55. | Check: Resource Providers                                          |
  56. | Result: Success                                                    |
  57. | Details: There are no compute resource providers in the Placement  |
  58. |   service nor are there compute nodes in the database.             |
  59. |   Remember to configure new compute nodes to report into the       |
  60. |   Placement service. See                                           |
  61. |   http://docs.openstack.org/developer/nova/placement.html          |
  62. |   for more details.                                                |
  63. +--------------------------------------------------------------------+
  64. [root@controller1 ~]# openstack compute service list
  65. +----+------------------+-------------+----------+---------+-------+----------------------------+
  66. | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
  67. +----+------------------+-------------+----------+---------+-------+----------------------------+
  68. | 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:02:58.000000 |
  69. | 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:02:56.000000 |
  70. | 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
  71. | 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
  72. | 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:03:03.000000 |
  73. | 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:02:57.000000 |
  74. | 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
  75. | 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:03:05.000000 |
  76. | 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
  77. | 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:02:59.000000 |
  78. +----+------------------+-------------+----------+---------+-------+----------------------------+

Install On Other Controller Nodes

  1. [root@controller2 ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y 
  2.  
  3. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/nova/*  /etc/nova/
  4. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
  5. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/haproxy/* /etc/haproxy/
  6.  
  7. [root@controller2 ~]# sed -i '1,9s/controller1/controller2/'  /etc/nova/nova.conf
  8. [root@controller3 ~]# sed -i '1,9s/controller1/controller3/'  /etc/nova/nova.conf
  9.  
  10. [root@controller2 ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  11. [root@controller2 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  12. [root@controller2 ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  13.  
  14. [root@controller2 ~]# systemctl restart httpd haproxy

Discover New Compute Node

  1. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
  2. Found 2 cell mappings.
  3. Skipping cell0 since it does not contain hosts.
  4. Getting compute nodes from cell 'cell1': b55310bc-6d58-4c8d-9b7c-014b77cd48ac
  5. Found 0 unmapped computes in cell: b55310bc-6d58-4c8d-9b7c-014b77cd48ac
  6. [root@controller1 ~]# openstack compute service list
  7. +----+------------------+-------------+----------+---------+-------+----------------------------+
  8. | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
  9. +----+------------------+-------------+----------+---------+-------+----------------------------+
  10. | 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:28:08.000000 |
  11. | 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:28:16.000000 |
  12. | 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:28:10.000000 |
  13. | 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  14. | 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:28:13.000000 |
  15. | 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:28:17.000000 |
  16. | 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  17. | 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  18. | 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:28:09.000000 |
  19. | 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:28:10.000000 |
  20. | 60 | nova-compute     | compute02   | nova     | enabled | up    | 2017-12-19T09:28:16.000000 |
  21. +----+------------------+-------------+----------+---------+-------+----------------------------+

Highly available Horizon API

Install Dashboard

  1. [root@controller ~]# yum install openstack-dashboard -y

Configure Dashboard

  1. [root@controller1 ~]# cp /etc/openstack-dashboard/local_settings{,.bak}
  2. [root@controller1 ~]# DASHBOARD_LOCAL_SETTINGS=/etc/openstack-dashboard/local_settings
  3. [root@controller1 ~]# sed -i 's#_member_#user#g' $DASHBOARD_LOCAL_SETTINGS
  4. [root@controller1 ~]# sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' $DASHBOARD_LOCAL_SETTINGS
  5. [root@controller1 ~]# 
  6. [root@controller1 ~]# sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" $DASHBOARD_LOCAL_SETTINGS
  7. [root@controller1 ~]# 
  8. [root@controller1 ~]# sed -in '153,158s/#//' $DASHBOARD_LOCAL_SETTINGS 
  9. [root@controller1 ~]# sed -in '160,164s/.*/#&/' $DASHBOARD_LOCAL_SETTINGS
  10. [root@controller1 ~]# sed -i 's#UTC#Asia/Shanghai#g' $DASHBOARD_LOCAL_SETTINGS
  11. [root@controller1 ~]# sed -i 's#%s:5000/v2.0#%s:5000/v3#' $DASHBOARD_LOCAL_SETTINGS
  12. [root@controller1 ~]# sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' $DASHBOARD_LOCAL_SETTINGS
  13. [root@controller1 ~]# sed -i "s@^#OPENSTACK_KEYSTONE_DEFAULT@OPENSTACK_KEYSTONE_DEFAULT@" $DASHBOARD_LOCAL_SETTINGS
  14. [root@controller1 ~]# 
  15. [root@controller1 ~]# echo '
  16. > #set
  17. > OPENSTACK_API_VERSIONS = {
  18. >     "identity": 3,
  19. >     "image": 2,
  20. >     "volume": 2,
  21. > }
  22. > #'>>$DASHBOARD_LOCAL_SETTINGS

Configure Other Controller Nodes

  1. [root@controller1 ~]# rsync -avzP  -e 'ssh -p 22'  /etc/openstack-dashboard/local_settings  controller2:/etc/openstack-dashboard/
  2. [root@controller1 ~]# rsync -avzP  -e 'ssh -p 22'  /etc/openstack-dashboard/local_settings  controller3:/etc/openstack-dashboard/

Restart Httpd Service

  1. [root@controller1 ~]# systemctl restart httpd
  2. [root@controller1 ~]# ssh controller2 "systemctl restart httpd" 
  3. [root@controller1 ~]# ssh controller3 "systemctl restart httpd"
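A quick reachability check of the dashboard on each controller (still on port 80 at this point; expect an HTTP 200 or a redirect to the login page):

  1. curl -s -o /dev/null -w '%{http_code}\n' http://controller1/dashboard/
  2. curl -s -o /dev/null -w '%{http_code}\n' http://controller2/dashboard/
  3. curl -s -o /dev/null -w '%{http_code}\n' http://controller3/dashboard/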

Configure HA Dashboard API

  1. sed -i 's#^Listen 80#Listen 8080#'  /etc/httpd/conf/httpd.conf
  2. systemctl restart httpd.service
  3. systemctl daemon-reload
  4.  
  5. echo '
  6. listen dashboard_cluster  
  7.   bind controller:80
  8.   balance  roundrobin  
  9.   option  tcpka  
  10.   option  httpchk  
  11.   option  tcplog  
  12.   server controller1 controller1:8080 check port 8080 inter 2000 rise 2 fall 5
  13.   server controller2 controller2:8080 check port 8080 inter 2000 rise 2 fall 5
  14.   server controller3 controller3:8080 check port 8080 inter 2000 rise 2 fall 5
  15. '>>/etc/haproxy/haproxy.cfg
  16. systemctl restart haproxy.service
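After this change Apache listens on 8080 on every controller while HAProxy serves port 80 on the VIP (on whichever node currently holds it); a minimal sanity check:

  1. netstat -antp | grep httpd   | grep ':8080'   # local Apache moved to 8080
  2. netstat -antp | grep haproxy | grep ':80 '    # dashboard frontend on the VIP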

Now, Open the Dashboard in a Browser

http://192.168.220.20/dashboard/

Linux bridge & L3 HA

Create Neutron Service & Endpoint

  1. [root@controller1 ~]# source admin-openrc 
  2. [root@controller1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
  3. +-------------+----------------------------------+
  4. | Field       | Value                            |
  5. +-------------+----------------------------------+
  6. | description | OpenStack Networking             |
  7. | enabled     | True                             |
  8. | id          | ad17484f2f19423b9ffe8ab2b451906d |
  9. | name        | neutron                          |
  10. | type        | network                          |
  11. +-------------+----------------------------------+
  12. [root@controller1 ~]# openstack endpoint create --region RegionOne network public http://controller:9696
  13. +--------------+----------------------------------+
  14. | Field        | Value                            |
  15. +--------------+----------------------------------+
  16. | enabled      | True                             |
  17. | id           | c4e2c0741118449d933107948c67651d |
  18. | interface    | public                           |
  19. | region       | RegionOne                        |
  20. | region_id    | RegionOne                        |
  21. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  22. | service_name | neutron                          |
  23. | service_type | network                          |
  24. | url          | http://controller:9696           |
  25. +--------------+----------------------------------+
  26. [root@controller1 ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
  27. +--------------+----------------------------------+
  28. | Field        | Value                            |
  29. +--------------+----------------------------------+
  30. | enabled      | True                             |
  31. | id           | f35d94a749ae47d68b243a90015493bb |
  32. | interface    | internal                         |
  33. | region       | RegionOne                        |
  34. | region_id    | RegionOne                        |
  35. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  36. | service_name | neutron                          |
  37. | service_type | network                          |
  38. | url          | http://controller:9696           |
  39. +--------------+----------------------------------+
  40. [root@controller1 ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
  41. +--------------+----------------------------------+
  42. | Field        | Value                            |
  43. +--------------+----------------------------------+
  44. | enabled      | True                             |
  45. | id           | 61e469452d914b78aabbf4bcc0a51732 |
  46. | interface    | admin                            |
  47. | region       | RegionOne                        |
  48. | region_id    | RegionOne                        |
  49. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  50. | service_name | neutron                          |
  51. | service_type | network                          |
  52. | url          | http://controller:9696           |
  53. +--------------+----------------------------------+

Install OpenStack Neutron

  1. [root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y

Configure Neutron

  1. [root@controller1 ~]# cp /etc/neutron/neutron.conf{,.bak2}
  2. [root@controller1 ~]# echo '
  3. > [DEFAULT]
  4. > bind_port = 9696
  5. > bind_host = controller1
  6. > core_plugin = ml2
  7. > service_plugins =
  8. > #service_plugins = trunk
  9. > #service_plugins = router
  10. > allow_overlapping_ips = true
  11. > transport_url = rabbit://openstack:openstack@controller
  12. > auth_strategy = keystone
  13. > notify_nova_on_port_status_changes = true
  14. > notify_nova_on_port_data_changes = true
  15. > 
  16. > [keystone_authtoken]
  17. > auth_uri = http://controller:5000
  18. > auth_url = http://controller:35357
  19. > memcached_servers = controller1:11211
  20. > auth_type = password
  21. > project_domain_name = default
  22. > user_domain_name = default
  23. > project_name = service
  24. > username = neutron
  25. > password = neutron
  26. > 
  27. > [nova]
  28. > auth_url = http://controller:35357
  29. > auth_plugin = password
  30. > project_domain_id = default
  31. > user_domain_id = default
  32. > region_name = RegionOne
  33. > project_name = service
  34. > username = nova
  35. > password = nova
  36. > 
  37. > [database]
  38. > connection = mysql+pymysql://neutron:neutron@controller:3306/neutron
  39. > 
  40. > [oslo_concurrency]
  41. > lock_path = /var/lib/neutron/tmp 
  42. > #'>/etc/neutron/neutron.conf

Configure ML2

  1. [root@controller1 ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [ml2]
  4. > tenant_network_types = 
  5. > type_drivers = vlan,flat
  6. > mechanism_drivers = linuxbridge
  7. > extension_drivers = port_security
  8. > [ml2_type_flat]
  9. > flat_networks = provider
  10. > [securitygroup]
  11. > enable_ipset = True
  12. > #vlan
  13. > # [ml2_type_vlan]
  14. > # network_vlan_ranges = provider:3001:4000
  15. > #'>/etc/neutron/plugins/ml2/ml2_conf.ini
  16. [root@controller1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Configure Linux Bridge

  1. [root@controller1 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [linux_bridge]
  4. > physical_interface_mappings = provider:'ens37'
  5. > [vxlan]
  6. > enable_vxlan = false
  7. > [agent]
  8. > prevent_arp_spoofing = True
  9. > [securitygroup]
  10. > firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  11. > enable_security_group = True
  12. > #'>/etc/neutron/plugins/ml2/linuxbridge_agent.ini

Configure DHCP

  1. [root@controller1 ~]# cp /etc/neutron/dhcp_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [DEFAULT]
  4. > interface_driver = linuxbridge
  5. > dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
  6. > enable_isolated_metadata = true
  7. > #'>/etc/neutron/dhcp_agent.ini

Configure Metadata

  1. [root@controller1 ~]# cp /etc/neutron/metadata_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '
  3. > [DEFAULT]
  4. > nova_metadata_ip = controller
  5. > metadata_proxy_shared_secret = metadata
  6. > #'>/etc/neutron/metadata_agent.ini

Configure Nova

  1. [root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
  2. [root@controller1 ~]# echo '
  3. > #
  4. > [neutron]
  5. > url = http://controller:9696
  6. > auth_url = http://controller:35357
  7. > auth_type = password
  8. > project_domain_name = default
  9. > user_domain_name = default
  10. > region_name = RegionOne
  11. > project_name = service
  12. > username = neutron
  13. > password = neutron
  14. > service_metadata_proxy = true
  15. > metadata_proxy_shared_secret = metadata
  16. > #'>>/etc/nova/nova.conf

Configure L3

  1. [root@controller1 ~]# cp /etc/neutron/l3_agent.ini{,.bak}
  2. [root@controller1 ~]# 
  3. [root@controller1 ~]# echo '
  4. > [DEFAULT]
  5. > interface_driver = linuxbridge
  6. > #'>/etc/neutron/l3_agent.ini

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
  2. [root@controller1 ~]# mysql -h controller -u neutron -pneutron -e "use neutron;show tables;"
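Besides listing the tables, neutron-db-manage can report the current migration revision, which confirms the upgrade actually ran:

  1. su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini current" neutron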

Configure HAProxy For Neutron API

  1. [root@controller1 ~]# echo '
  2. > #Neutron_API
  3. > listen Neutron_API_cluster
  4. >   bind controller:9696
  5. >   balance source
  6. >   option tcpka
  7. >   option tcplog
  8. >   server controller1 controller1:9696 check inter 2000 rise 2 fall 5
  9. >   server controller2 controller2:9696 check inter 2000 rise 2 fall 5
  10. >   server controller3 controller3:9696 check inter 2000 rise 2 fall 5
  11. > '>>/etc/haproxy/haproxy.cfg
  13. [root@controller1 ~]# systemctl restart haproxy.service
  14. [root@controller1 ~]# netstat -antp|grep haproxy
  15. tcp        0      0 192.168.220.20:9292     0.0.0.0:*               LISTEN      76948/haproxy       
  16. tcp        0      0 0.0.0.0:1080            0.0.0.0:*               LISTEN      76948/haproxy       
  17. tcp        0      0 192.168.220.20:35357    0.0.0.0:*               LISTEN      76948/haproxy       
  18. tcp        0      0 192.168.220.20:9696     0.0.0.0:*               LISTEN      76948/haproxy       
  19. tcp        0      0 192.168.220.20:5000     0.0.0.0:*               LISTEN      76948/haproxy

Start Neutron Service

  1. [root@controller1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  2. [root@controller1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  3. [root@controller1 ~]# systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 2 & Start Service

  1. [root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
  2. [root@controller2 ~]# sed -i 's/controller1/controller2/' /etc/neutron/neutron.conf
  3. [root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
  4. [root@controller2 ~]# systemctl restart haproxy
  5. [root@controller2 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  6. [root@controller2 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 3 & Start Service

  1. [root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
  2. [root@controller3 ~]# sed -i 's/controller1/controller3/' /etc/neutron/neutron.conf
  3. [root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
  4. [root@controller3 ~]# systemctl restart haproxy
  5. [root@controller3 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  6. [root@controller3 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure L3 HA on Controller node

  1. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
  2. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
  3. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
  4. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips True
  5. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT router_distributed True
  6. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
  7. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha_net_cidr 169.254.192.0/18
  8. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 3
  9. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 2
  10.  
  11. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vxlan
  12. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
  13. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch,l2population
  14. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
  15. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks external
  16. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
  17.  
  18. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
  19. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
  20. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
  21. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
  22. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
  23.  
  24. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT ha_vrrp_auth_password password
  25. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
  26. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge 
  27. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr_snat

Configure L3 HA on Compute Node

  1. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
  2. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
  3.  
  4. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
  5. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
  6. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
  7.  
  8. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
  9.  
  10. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
  11. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge 
  12. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr
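As on the controllers, the agents on each compute node need a restart before the DVR settings apply; a sketch assuming the Open vSwitch and L3 agents are installed on the compute nodes in this variant:

  1. systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service
  2. systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service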

Verify Service

  1. [root@controller1 ~]# openstack network agent list
  2. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
  3. | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
  4. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
  5. | 1a4efb8b-aa65-4d4a-8092-7213592acd22 | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  6. | 3b35bc6e-4cec-42e2-9fde-e99c601cf609 | DHCP agent         | controller3 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  7. | 42e57e23-eecb-490d-b709-d8e3730107e8 | DHCP agent         | controller2 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  8. | 6b2058a2-d3e3-4342-afbb-717338b1499f | Metadata agent     | controller1 | None              | :-)   | UP    | neutron-metadata-agent    |
  9. | 750b5e5c-c7b6-4f48-ae2f-37580b6e03d9 | DHCP agent         | controller1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  10. | 7e63ce46-3fd5-40ee-9f63-ee8cc52dd5a4 | Metadata agent     | controller3 | None              | :-)   | UP    | neutron-metadata-agent    |
  11. | 92199bf0-08ef-4642-9557-c33360796405 | Linux bridge agent | controller2 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  12. | 9ae5bafa-0075-4408-b827-1be9bb1ccf99 | Linux bridge agent | controller3 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  13. | f1ed9e45-39e7-4980-aaec-10364e42263f | Metadata agent     | controller2 | None              | :-)   | UP    | neutron-metadata-agent    |
  14. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

Highly available Block Storage API

Waiting for testing...

Configure Network

  1. [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
  2. TYPE=Ethernet
  3. PROXY_METHOD=none
  4. BROWSER_ONLY=no
  5. BOOTPROTO=static
  6. DEFROUTE=yes
  7. IPV4_FAILURE_FATAL=no
  8. IPV6INIT=yes
  9. IPV6_AUTOCONF=yes
  10. IPV6_DEFROUTE=yes
  11. IPV6_FAILURE_FATAL=no
  12. IPV6_ADDR_GEN_MODE=stable-privacy
  13. NAME=ens33
  14. UUID=4e333024-b8c8-45e4-baee-e46ece81432c
  15. DEVICE=ens33
  16. ONBOOT=yes
  17. IPADDR=192.168.220.101
  18. NETMASK=255.255.255.0
  19. GATEWAY=192.168.220.2
  20. DNS1=114.114.114.114
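For the static address to come up, the network service has to be restarted (CentOS 7); a quick check afterwards:

  1. systemctl restart network
  2. ip addr show ens33 | grep 192.168.220.101   # confirm the new address is active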

Fdisk the 2nd Disk

  1. [root@localhost ~]# ls -al /dev/sd*
  2. brw-rw----. 1 root disk 8,  0 Dec 26  2017 /dev/sda
  3. brw-rw----. 1 root disk 8,  1 Dec 26  2017 /dev/sda1
  4. brw-rw----. 1 root disk 8,  2 Dec 26  2017 /dev/sda2
  5. brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
  6.  
  7. [root@localhost ~]# fdisk /dev/sdb 
  8. Welcome to fdisk (util-linux 2.23.2).
  9.  
  10. Changes will remain in memory only, until you decide to write them.
  11. Be careful before using the write command.
  12.  
  13.  
  14. Command (m for help): n
  15. Partition type:
  16.    p   primary (0 primary, 0 extended, 4 free)
  17.    e   extended
  18. Select (default p): 
  19. Using default response p
  20. Partition number (1-4, default 1): 
  21. First sector (2048-125829119, default 2048): 
  22. Using default value 2048
  23. Last sector, +sectors or +size{K,M,G} (2048-125829119, default 125829119): +30G
  24. Partition 1 of type Linux and of size 30 GiB is set
  25.  
  26. Command (m for help): n
  27. Partition type:
  28.    p   primary (1 primary, 0 extended, 3 free)
  29.    e   extended
  30. Select (default p): 
  31. Using default response p
  32. Partition number (2-4, default 2): 
  33. First sector (62916608-125829119, default 62916608): 
  34. Using default value 62916608
  35. Last sector, +sectors or +size{K,M,G} (62916608-125829119, default 125829119): 
  36. Using default value 125829119
  37. Partition 2 of type Linux and of size 30 GiB is set
  38.  
  39. Command (m for help): p
  40.  
  41. Disk /dev/sdb: 64.4 GB, 64424509440 bytes, 125829120 sectors
  42. Units = sectors of 1 * 512 = 512 bytes
  43. Sector size (logical/physical): 512 bytes / 512 bytes
  44. I/O size (minimum/optimal): 512 bytes / 512 bytes
  45. Disk label type: dos
  46. Disk identifier: 0x285890cb
  47.  
  48.    Device Boot      Start         End      Blocks   Id  System
  49. /dev/sdb1            2048    62916607    31457280   83  Linux
  50. /dev/sdb2        62916608   125829119    31456256   83  Linux
  51.  
  52. Command (m for help): w
  53. The partition table has been altered!
  54.  
  55. Calling ioctl() to re-read partition table.
  56. Syncing disks.
  57.  
  58. [root@localhost ~]# ls -al /dev/sd*
  59. brw-rw----. 1 root disk 8,  0 Dec 26  2017 /dev/sda
  60. brw-rw----. 1 root disk 8,  1 Dec 26  2017 /dev/sda1
  61. brw-rw----. 1 root disk 8,  2 Dec 26  2017 /dev/sda2
  62. brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
  63. brw-rw----. 1 root disk 8, 17 Dec 26 01:11 /dev/sdb1
  64. brw-rw----. 1 root disk 8, 18 Dec 26 01:11 /dev/sdb2

Format 2nd Disk

  1. [root@localhost ~]# mkfs.ext4 /dev/sdb1
  2. mke2fs 1.42.9 (28-Dec-2013)
  3. Filesystem label=
  4. OS type: Linux
  5. Block size=4096 (log=2)
  6. Fragment size=4096 (log=2)
  7. Stride=0 blocks, Stripe width=0 blocks
  8. 1966080 inodes, 7864320 blocks
  9. 393216 blocks (5.00%) reserved for the super user
  10. First data block=0
  11. Maximum filesystem blocks=2155872256
  12. 240 block groups
  13. 32768 blocks per group, 32768 fragments per group
  14. 8192 inodes per group
  15. Superblock backups stored on blocks: 
  16.         32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
  17.         4096000
  18.  
  19. Allocating group tables: done                            
  20. Writing inode tables: done                            
  21. Creating journal (32768 blocks): done
  22. Writing superblocks and filesystem accounting information: done   
  23.  
  24. [root@localhost ~]# mkfs.ext4 /dev/sdb2
  25. mke2fs 1.42.9 (28-Dec-2013)
  26. Filesystem label=
  27. OS type: Linux
  28. Block size=4096 (log=2)
  29. Fragment size=4096 (log=2)
  30. Stride=0 blocks, Stripe width=0 blocks
  31. 1966080 inodes, 7864064 blocks
  32. 393203 blocks (5.00%) reserved for the super user
  33. First data block=0
  34. Maximum filesystem blocks=2155872256
  35. 240 block groups
  36. 32768 blocks per group, 32768 fragments per group
  37. 8192 inodes per group
  38. Superblock backups stored on blocks: 
  39.         32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
  40.         4096000
  41.  
  42. Allocating group tables: done                            
  43. Writing inode tables: done                            
  44. Creating journal (32768 blocks): done
  45. Writing superblocks and filesystem accounting information: done

Mount /dev/sdb1

  1. [root@localhost ~]# mkdir -p /date
  2. [root@localhost ~]# mount -t ext4 /dev/sdb1 /date
  3. [root@localhost ~]# df -h|grep /dev/sdb1
  4. /dev/sdb1                 30G   45M   28G   1% /date
  5. [root@localhost ~]# echo "mount -t ext4 /dev/sdb1 /date" >>/etc/rc.d/rc.local
  6. [root@localhost ~]# tail -1 /etc/rc.d/rc.local
  7. mount -t ext4 /dev/sdb1 /date
  8. [root@localhost ~]# chmod +x /etc/rc.d/rc.local
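
Appending the mount to rc.local works, but an /etc/fstab entry is the more conventional way to make it persistent (a sketch; replace <sdb1-uuid> with the UUID reported by blkid):

  1. # blkid /dev/sdb1
  2. # echo "UUID=<sdb1-uuid> /date ext4 defaults 0 0" >>/etc/fstab
  3. # mount -a && df -h /date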

Create Volume Group on /dev/sdb2

  1. [root@localhost ~]# yum install lvm2 -y
  2. [root@localhost ~]# systemctl enable lvm2-lvmetad.service
  3. Created symlink from /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.service to /usr/lib/systemd/system/lvm2-lvmetad.service.
  4. [root@localhost ~]# systemctl start lvm2-lvmetad.service
  5.  
  6. [root@localhost ~]# pvcreate /dev/sdb2
  7. WARNING: ext4 signature detected on /dev/sdb2 at offset 1080. Wipe it? [y/n]: y
  8.   Wiping ext4 signature on /dev/sdb2.
  9.   Physical volume "/dev/sdb2" successfully created.
  10. [root@localhost ~]# vgcreate cinder_lvm01 /dev/sdb2
  11.   Volume group "cinder_lvm01" successfully created
  12. [root@localhost ~]# vgdisplay 
  13.   --- Volume group ---
  14.   VG Name               cinder_lvm01
  15.   System ID             
  16.   Format                lvm2
  17.   Metadata Areas        1
  18.   Metadata Sequence No  1
  19.   VG Access             read/write
  20.   VG Status             resizable
  21.   MAX LV                0
  22.   Cur LV                0
  23.   Open LV               0
  24.   Max PV                0
  25.   Cur PV                1
  26.   Act PV                1
  27.   VG Size               <30.00 GiB
  28.   PE Size               4.00 MiB
  29.   Total PE              7679
  30.   Alloc PE / Size       0 / 0   
  31.   Free  PE / Size       7679 / <30.00 GiB
  32.   VG UUID               jyb299-bo5k-E6Z3-Frho-e6Kz-d9Mu-yK0m6c
  33.  
  34.   --- Volume group ---
  35.   VG Name               centos
  36.   System ID             
  37.   Format                lvm2
  38.   Metadata Areas        1
  39.   Metadata Sequence No  4
  40.   VG Access             read/write
  41.   VG Status             resizable
  42.   MAX LV                0
  43.   Cur LV                3
  44.   Open LV               3
  45.   Max PV                0
  46.   Cur PV                1
  47.   Act PV                1
  48.   VG Size               <79.00 GiB
  49.   PE Size               4.00 MiB
  50.   Total PE              20223
  51.   Alloc PE / Size       20222 / 78.99 GiB
  52.   Free  PE / Size       1 / 4.00 MiB
  53.   VG UUID               NHtSF8-nozf-sbB4-vEBt-ogJo-WmuG-F8HYMQ
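
Optional sanity check before handing the volume group to Cinder (a sketch; the test LV is removed immediately):

  1. # pvs /dev/sdb2
  2. # vgs cinder_lvm01
  3. # lvcreate -L 1G -n lv_test cinder_lvm01 && lvremove -f cinder_lvm01/lv_test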

Install NFS

  1. [root@localhost ~]# yum install nfs-utils rpcbind -y
  2. [root@localhost ~]# mkdir -p /date/{cinder_nfs1,cinder_nfs2}
  3. [root@localhost ~]# chmod -R 777 /date
  4. [root@localhost ~]# echo "/date/cinder_nfs1 *(rw,root_squash,sync,anonuid=165,anongid=165)">/etc/exports
  5. [root@localhost ~]# exportfs -r
  6. [root@localhost ~]# systemctl enable rpcbind nfs-server
  7. Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
  8. [root@localhost ~]# systemctl restart rpcbind nfs-server
  9. [root@localhost ~]# showmount -e localhost
  10. Export list for localhost:
  11. /date/cinder_nfs1 *
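
To confirm the export is usable, it can be test-mounted from any client that can reach 192.168.220.101 (a sketch; /mnt is only used as a temporary mount point):

  1. # mount -t nfs 192.168.220.101:/date/cinder_nfs1 /mnt
  2. # df -h /mnt
  3. # umount /mnt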

Install & Configure Cinder

  1. [root@localhost ~]# yum install openstack-cinder targetcli python-keystone lvm2 -y
  2. [root@localhost ~]# cp /etc/cinder/cinder.conf{,.bak}
  3. [root@localhost ~]# cp /etc/lvm/lvm.conf{,.bak}
  4. [root@localhost ~]# sed -i '141a filter = [ "a/sdb2/", "r/.*/"]' /etc/lvm/lvm.conf
  5. [root@localhost ~]# echo '192.168.220.101:/date/cinder_nfs1'>/etc/cinder/nfs_shares
  6. [root@localhost ~]# chmod 640 /etc/cinder/nfs_shares
  7. [root@localhost ~]# chown root:cinder /etc/cinder/nfs_shares

Configure cinder.conf

  1. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
  2. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT log_dir /var/log/cinder
  3. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT state_path /var/lib/cinder
  4. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
  5. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@controller
  6. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm,nfs
  7.  
  8. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@controller/cinder
  9.  
  10. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
  11. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
  12. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
  13. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
  14. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
  15. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
  16. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
  17. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
  18. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password cinder
  19.  
  20. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
  21.  
  22. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
  23. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
  24. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
  25. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder_lvm01
  26. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_ip_address 192.168.220.101
  27. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volumes_dir $state_path/volumes
  28. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_backend_name lvm01
  29.  
  30. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_driver cinder.volume.drivers.nfs.NfsDriver
  31. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_shares_config /etc/cinder/nfs_shares
  32. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_mount_point_base $state_path/mnt
  33. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_backend_name nfs01
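
For reference, after the commands above the backend stanzas in /etc/cinder/cinder.conf should look roughly like this:

  1. [lvm]
  2. volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
  3. iscsi_helper = lioadm
  4. iscsi_protocol = iscsi
  5. volume_group = cinder_lvm01
  6. iscsi_ip_address = 192.168.220.101
  7. volumes_dir = $state_path/volumes
  8. volume_backend_name = lvm01
  9.  
  10. [nfs]
  11. volume_driver = cinder.volume.drivers.nfs.NfsDriver
  12. nfs_shares_config = /etc/cinder/nfs_shares
  13. nfs_mount_point_base = $state_path/mnt
  14. volume_backend_name = nfs01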

Start Cinder Service

  1. [root@localhost ~]# chmod 640 /etc/cinder/cinder.conf
  2. [root@localhost ~]# chgrp cinder /etc/cinder/cinder.conf
  3.  
  4. [root@localhost ~]# systemctl enable openstack-cinder-volume.service target.service
  5. Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
  6. Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.
  7. [root@localhost ~]# systemctl start openstack-cinder-volume.service target.service
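
With the service running, the backends can be verified from a controller (a sketch; it assumes admin credentials such as /root/admin-openrc are already available). Each enabled backend should appear as a cinder-volume service in the form host@lvm and host@nfs:

  1. # source /root/admin-openrc
  2. # openstack volume service list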

Add Cinder to Pacemaker

  1. # pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
  2. # pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
  3. # pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume
  4.  
  5. # pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
  6. # pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
  7. # pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
  8. # pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone
  9.  
  10. # pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone
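
A quick way to confirm the resources and ordering rules were accepted (a sketch using standard pcs queries):

  1. # pcs status resources
  2. # pcs constraint show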

Configure Cinder service

  1. # cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
  2.  
  3. # openstack-config --set /etc/cinder/cinder.conf DEFAULT host cinder-cluster-1
  4.  
  5. # openstack-config --set /etc/cinder/cinder.conf DEFAULT osapi_volume_listen 10.0.0.11
  6.  
  7. # openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
  8. # openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder
  9.  
  10. # openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver
  11. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_shares_config /etc/cinder/nfs_exports
  12. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_sparsed_volumes true
  13. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_mount_options v3
  14.  
  15. # openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:password@10.0.0.11/cinder
  16. # openstack-config --set /etc/cinder/cinder.conf database max_retries -1
  17.  
  18. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken identity_uri http://10.0.0.11:35357/
  19. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://10.0.0.11:5000/
  20. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service
  21. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
  22. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password CINDER_PASS
  23.  
  24. # openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_hosts 10.0.0.12,10.0.0.13,10.0.0.14
  25. # openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_ha_queues True
  26. # openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_timeout_threshold 60
  27. # openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_rate 2

Configure HA Cinder API

  1. $ openstack endpoint create volume --region $KEYSTONE_REGION \
  2.   --publicurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
  3.   --adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
  4.   --internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s'

Highly available Image API

Create Service For Glance

  1. [root@controller1 ~]# openstack service create --name glance --description "OpenStack Image" image
  2. +-------------+----------------------------------+
  3. | Field       | Value                            |
  4. +-------------+----------------------------------+
  5. | description | OpenStack Image                  |
  6. | enabled     | True                             |
  7. | id          | 3083848116cd4706bc39241f81e3475b |
  8. | name        | glance                           |
  9. | type        | image                            |
  10. +-------------+----------------------------------+
  11. [root@controller1 ~]# openstack endpoint create --region RegionOne image public http://controller:9292
  12. +--------------+----------------------------------+
  13. | Field        | Value                            |
  14. +--------------+----------------------------------+
  15. | enabled      | True                             |
  16. | id           | f3eb684e2209471795a04f6e73ce120f |
  17. | interface    | public                           |
  18. | region       | RegionOne                        |
  19. | region_id    | RegionOne                        |
  20. | service_id   | 3083848116cd4706bc39241f81e3475b |
  21. | service_name | glance                           |
  22. | service_type | image                            |
  23. | url          | http://controller:9292           |
  24. +--------------+----------------------------------+
  25. [root@controller1 ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
  26. +--------------+----------------------------------+
  27. | Field        | Value                            |
  28. +--------------+----------------------------------+
  29. | enabled      | True                             |
  30. | id           | bbf93cdcc35645288cfa90ef6239e1b5 |
  31. | interface    | internal                         |
  32. | region       | RegionOne                        |
  33. | region_id    | RegionOne                        |
  34. | service_id   | 3083848116cd4706bc39241f81e3475b |
  35. | service_name | glance                           |
  36. | service_type | image                            |
  37. | url          | http://controller:9292           |
  38. +--------------+----------------------------------+
  39. [root@controller1 ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
  40. +--------------+----------------------------------+
  41. | Field        | Value                            |
  42. +--------------+----------------------------------+
  43. | enabled      | True                             |
  44. | id           | da194b1e1e8148859a45a83fac2c7403 |
  45. | interface    | admin                            |
  46. | region       | RegionOne                        |
  47. | region_id    | RegionOne                        |
  48. | service_id   | 3083848116cd4706bc39241f81e3475b |
  49. | service_name | glance                           |
  50. | service_type | image                            |
  51. | url          | http://controller:9292           |
  52. +--------------+----------------------------------+
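
The three endpoints can be listed afterwards to confirm they all point at the controller VIP (a quick check):

  1. [root@controller1 ~]# openstack endpoint list --service image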

Install OpenStack Glance

  1. [root@controller ~]# yum install openstack-glance python-glance -y

Configure Glance Service

  1. [root@controller1 ~]# cp /etc/glance/glance-api.conf{,.bak}
  2. [root@controller1 ~]# cp /etc/glance/glance-registry.conf{,.bak}
  3. [root@controller ~]# mkdir -p /data/glance
  4.  
  5. [root@controller1 ~]# echo "#
  6. > [DEFAULT]
  7. > debug = False
  8. > verbose = True
  9. > bind_host = controller1
  10. > bind_port = 9292
  11. > auth_region = RegionOne
  12. > registry_client_protocol = http
  13. > [database]
  14. > connection = mysql+pymysql://glance:glance@controller/glance
  15. > [keystone_authtoken]
  16. > auth_uri = http://controller:5000/v3
  17. > auth_url = http://controller:35357/v3
  18. > memcached_servers = controller1:11211,controller2:11211,controller3:11211
  19. > auth_type = password
  20. > project_domain_name = default
  21. > user_domain_name = default
  22. > project_name = service
  23. > username = glance
  24. > password = glance
  25. > [paste_deploy]
  26. > flavor = keystone
  27. > [glance_store]
  28. > stores = file,http
  29. > default_store = file
  30. > filesystem_store_datadir = /data/glance
  31. > [oslo_messaging_rabbit]
  32. > rabbit_userid =openstack
  33. > rabbit_password = openstack
  34. > rabbit_durable_queues=true
  35. > rabbit_ha_queues = True
  36. > rabbit_max_retries=0
  37. > rabbit_port = 5672  
  38. > rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
  39. > #">/etc/glance/glance-api.conf
  40.  
  41. [root@controller1 ~]# echo "#
  42. > [DEFAULT]
  43. > debug = False
  44. > verbose = True
  45. > bind_host = controller1
  46. > bind_port = 9191
  47. > workers = 2
  48. > [database]
  49. > connection = mysql+pymysql://glance:glance@controller/glance
  50. > [keystone_authtoken]
  51. > auth_uri = http://controller:5000/v3
  52. > auth_url = http://controller:35357/v3
  53. > memcached_servers = controller1:11211,controller2:11211,controller3:11211
  54. > auth_type = password
  55. > project_domain_name = default
  56. > user_domain_name = default
  57. > project_name = service
  58. > username = glance
  59. > password = glance
  60. > [paste_deploy]
  61. > flavor = keystone
  62. > [oslo_messaging_rabbit]
  63. > rabbit_userid =openstack
  64. > rabbit_password = openstack
  65. > rabbit_durable_queues=true
  66. > rabbit_ha_queues = True
  67. > rabbit_max_retries=0
  68. > rabbit_port = 5672  
  69. > rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
  70. > #">/etc/glance/glance-registry.conf
  71. [root@controller1 ~]#
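
Overwriting the files with echo works; the same settings could equally be applied with openstack-config, as used elsewhere in this guide. A partial sketch for glance-api.conf:

  1. [root@controller1 ~]# openstack-config --set /etc/glance/glance-api.conf DEFAULT bind_host controller1
  2. [root@controller1 ~]# openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:glance@controller/glance
  3. [root@controller1 ~]# openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
  4. [root@controller1 ~]# openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
  5. [root@controller1 ~]# openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /data/glance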

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
  2. /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1328: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
  3.   expire_on_commit=expire_on_commit, _conf=conf)
  4. INFO  [alembic.runtime.migration] Context impl MySQLImpl.
  5. INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
  6. INFO  [alembic.runtime.migration] Running upgrade  -> liberty, liberty initial
  7. INFO  [alembic.runtime.migration] Running upgrade liberty -> mitaka01, add index on created_at and updated_at columns of 'images' table
  8. INFO  [alembic.runtime.migration] Running upgrade mitaka01 -> mitaka02, update metadef os_nova_server
  9. INFO  [alembic.runtime.migration] Running upgrade mitaka02 -> ocata01, add visibility to and remove is_public from images
  10. INFO  [alembic.runtime.migration] Running upgrade ocata01 -> pike01, drop glare artifacts tables
  11. INFO  [alembic.runtime.migration] Context impl MySQLImpl.
  12. INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
  13. Upgraded database to: pike01, current revision(s): pike01
  14.  
  15. [root@controller1 ~]# mysql -h controller -u glance -pglance -e "use glance;show tables;"
  16. +----------------------------------+
  17. | Tables_in_glance                 |
  18. +----------------------------------+
  19. | alembic_version                  |
  20. | image_locations                  |
  21. | image_members                    |
  22. | image_properties                 |
  23. | image_tags                       |
  24. | images                           |
  25. | metadef_namespace_resource_types |
  26. | metadef_namespaces               |
  27. | metadef_objects                  |
  28. | metadef_properties               |
  29. | metadef_resource_types           |
  30. | metadef_tags                     |
  31. | migrate_version                  |
  32. | task_info                        |
  33. | tasks                            |
  34. +----------------------------------+

Start Glance Service

  1. [root@controller1 ~]# systemctl enable openstack-glance-api openstack-glance-registry
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
  3. Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service to /usr/lib/systemd/system/openstack-glance-registry.service.
  4. [root@controller1 ~]# systemctl restart openstack-glance-api openstack-glance-registry
  5. [root@controller1 ~]# netstat -antp|grep python2
  6. tcp        0      0 192.168.220.21:9292     0.0.0.0:*               LISTEN      13125/python2       
  7. tcp        0      0 192.168.220.21:9191     0.0.0.0:*               LISTEN      13126/python2       
  8. [root@controller1 ~]# netstat -antp|egrep '9292|9191'
  9. tcp        0      0 192.168.220.21:9292     0.0.0.0:*               LISTEN      13125/python2       
  10. tcp        0      0 192.168.220.21:9191     0.0.0.0:*               LISTEN      13126/python2       
  11. tcp       32      0 192.168.220.21:39292    192.168.220.21:2224     CLOSE_WAIT  699/ruby            
  12. tcp        0      0 192.168.220.11:49292    192.168.220.23:2224     ESTABLISHED 699/ruby

Configure HAProxy For Glance

  1. [root@controller1 ~]# echo '
  2. > #glance_api_cluster
  3. > listen glance_api_cluster
  4. >   bind controller:9292
  5. >   #balance  source
  6. >   option  tcpka
  7. >   option  httpchk
  8. >   option  tcplog
  9. >   server controller1 controller1:9292 check inter 2000 rise 2 fall 5
  10. >   server controller2 controller2:9292 check inter 2000 rise 2 fall 5
  11. >   server controller3 controller3:9292 check inter 2000 rise 2 fall 5
  12. > '>>/etc/haproxy/haproxy.cfg
  14. [root@controller1 ~]# systemctl restart haproxy.service
  15. [root@controller1 ~]# netstat -antp|grep haproxy
  16. tcp        0      0 192.168.220.20:9292     0.0.0.0:*               LISTEN      13170/haproxy       
  17. tcp        0      0 0.0.0.0:1080            0.0.0.0:*               LISTEN      13170/haproxy       
  18. tcp        0      0 192.168.220.20:35357    0.0.0.0:*               LISTEN      13170/haproxy       
  19. tcp        0      0 192.168.220.20:5000     0.0.0.0:*               LISTEN      13170/haproxy       
  20. tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      13170/haproxy
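
The load-balanced endpoint can be probed directly; the Image API should answer on the VIP with its version list, typically an HTTP 300 response (a quick check):

  1. [root@controller1 ~]# curl -i http://controller:9292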

Create Cirros Image

  1. [root@controller glance]# wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
  2. --2017-12-18 02:06:45--  http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
  3. Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 64.90.42.85, 2607:f298:6:a036::bd6:a72a
  4. Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|64.90.42.85|:80... connected.
  5. HTTP request sent, awaiting response... 200 OK
  6. Length: 13267968 (13M) [text/plain]
  7. Saving to: ‘cirros-0.3.5-x86_64-disk.img’
  8.  
  9. 100%[==============================================>] 13,267,968  1.04MB/s   in 13s    
  10.  
  11. 2017-12-18 02:06:58 (1023 KB/s) - ‘cirros-0.3.5-x86_64-disk.img’ saved [13267968/13267968]
  12.  
  13. [root@controller1 glance]# openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
  14. +------------------+------------------------------------------------------+
  15. | Field            | Value                                                |
  16. +------------------+------------------------------------------------------+
  17. | checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
  18. | container_format | bare                                                 |
  19. | created_at       | 2017-12-18T07:37:04Z                                 |
  20. | disk_format      | qcow2                                                |
  21. | file             | /v2/images/82e5d7d9-86a0-4266-a599-e50e8c4b0cbe/file |
  22. | id               | 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe                 |
  23. | min_disk         | 0                                                    |
  24. | min_ram          | 0                                                    |
  25. | name             | cirros                                               |
  26. | owner            | 2291724ac1a54d65844cc5dba56f4803                     |
  27. | protected        | False                                                |
  28. | schema           | /v2/schemas/image                                    |
  29. | size             | 13267968                                             |
  30. | status           | active                                               |
  31. | tags             |                                                      |
  32. | updated_at       | 2017-12-18T07:37:05Z                                 |
  33. | virtual_size     | None                                                 |
  34. | visibility       | public                                               |
  35. +------------------+------------------------------------------------------+

List OpenStack Image

  1. [root@controller1 glance]# openstack image list
  2. +--------------------------------------+--------+--------+
  3. | ID                                   | Name   | Status |
  4. +--------------------------------------+--------+--------+
  5. | 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe | cirros | active |
  6. +--------------------------------------+--------+--------+

Configure Other Controller Nodes

  1. [root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller2:/etc/glance/
  2. sending incremental file list
  3. glance-api.conf
  4.          883 100%    0.00kB/s    0:00:00 (xfer#1, to-check=44/45)
  5. glance-api.conf.bak
  6.       150676 100%   14.37MB/s    0:00:00 (xfer#2, to-check=43/45)
  7. glance-registry.conf
  8.          744 100%   72.66kB/s    0:00:00 (xfer#3, to-check=41/45)
  9. glance-registry.conf.bak
  10.        77404 100%    5.27MB/s    0:00:00 (xfer#4, to-check=40/45)
  11. metadefs/
  12. rootwrap.d/
  13.  
  14. sent 61739 bytes  received 126 bytes  123730.00 bytes/sec
  15. total size is 504600  speedup is 8.16
  16.  
  17. [root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller3:/etc/glance/
  18. sending incremental file list
  19. glance-api.conf
  20.          883 100%    0.00kB/s    0:00:00 (xfer#1, to-check=44/45)
  21. glance-api.conf.bak
  22.       150676 100%   28.74MB/s    0:00:00 (xfer#2, to-check=43/45)
  23. glance-registry.conf
  24.          744 100%  121.09kB/s    0:00:00 (xfer#3, to-check=41/45)
  25. glance-registry.conf.bak
  26.        77404 100%    9.23MB/s    0:00:00 (xfer#4, to-check=40/45)
  27. metadefs/
  28. rootwrap.d/
  29.  
  30. sent 61739 bytes  received 126 bytes  123730.00 bytes/sec
  31. total size is 504600  speedup is 8.16
  32.  
  33. [root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller2:/etc/haproxy/
  34. sending incremental file list
  35. haproxy.cfg
  36.         4376 100%    3.51MB/s    0:00:00 (xfer#1, to-check=0/1)
  37.  
  38. sent 75 bytes  received 73 bytes  296.00 bytes/sec
  39. total size is 4376  speedup is 29.57
  40. [root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller3:/etc/haproxy/
  41.  
  42. sending incremental file list
  43. haproxy.cfg
  44.         4376 100%    3.51MB/s    0:00:00 (xfer#1, to-check=0/1)
  45.  
  46. sent 75 bytes  received 73 bytes  296.00 bytes/sec
  47. total size is 4376  speedup is 29.57
  48. [root@controller1 glance]# 
  49. [root@controller1 glance]# ssh controller2 "sed -i '1,10s/controller1/controller2/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
  50. [root@controller1 glance]# ssh controller3 "sed -i '1,10s/controller1/controller3/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"

Start the Other Nodes' Services

  1. [root@controller1 glance]# ssh controller2 "systemctl enable openstack-glance-api openstack-glance-registry"
  2. [root@controller1 glance]# ssh controller2 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service;"
  3. [root@controller1 glance]# ssh controller3 "systemctl enable openstack-glance-api openstack-glance-registry"
  4. [root@controller1 glance]# ssh controller3 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service;"

Add OpenStack Image API resource to Pacemaker

  1. [root@controller1 glance]# cd /usr/lib/ocf/resource.d/openstack
  2. [root@controller1 openstack]# wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
  3. --2017-12-18 03:00:41--  https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
  4. Resolving git.openstack.org (git.openstack.org)... 104.130.246.128, 2001:4800:7819:103:be76:4eff:fe06:63c
  5. Connecting to git.openstack.org (git.openstack.org)|104.130.246.128|:443... connected.
  6. HTTP request sent, awaiting response... 200 OK
  7. Length: 11439 (11K) [text/plain]
  8. Saving to: ‘glance-api’
  9.  
  10. 100%[====================================>] 11,439      --.-K/s   in 0s      
  11.  
  12. 2017-12-18 03:00:42 (31.5 MB/s) - ‘glance-api’ saved [11439/11439]
  13.  
  14. [root@controller1 openstack]# chmod a+rx *
  15. [root@controller1 openstack]# ls
  16. glance-api  nova-compute-wait  NovaEvacuate

Create Local Repository

crmsh is not shipped in the base CentOS 7 repositories, so a local mirror of the network:ha-clustering:Stable repository is prepared first and served to the controllers.
  1. [root@localrepo yum.repos.d]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo
  2. [root@localrepo myrepo]# reposync --repoid=network_ha-clustering_Stable
  3. [root@localrepo myrepo]# createrepo /var/www/html/myrepo/network_ha-clustering_Stable/
  4.  
  5. [root@controller1 openstack]# cat /etc/yum.repos.d/network_ha-clustering_Stable.repo 
  6. [network_ha-clustering_Stable]
  7. name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
  8. type=rpm-md
  9. baseurl=http://192.168.220.200/myrepo/network_ha-clustering_Stable/
  10. gpgcheck=0
  11. gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
  12. enabled=1
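
The baseurl above assumes the repository host (192.168.220.200) serves /var/www/html over HTTP, and that reposync/createrepo are available; a minimal sketch to provide that and verify the metadata is reachable:

  1. [root@localrepo ~]# yum install httpd yum-utils createrepo -y
  2. [root@localrepo ~]# systemctl enable httpd && systemctl start httpd
  3. [root@localrepo ~]# curl -s http://192.168.220.200/myrepo/network_ha-clustering_Stable/repodata/repomd.xml >/dev/null && echo repo OK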

Install CRMSH

  1. [root@controller1 openstack]# yum install crmsh -y
  2.  
  3. [root@controller1 openstack]# crm configure
  4.  
  5. crm(live)configure# primitive p_glance-api ocf:openstack:glance-api \
  6.    >   params config="/etc/glance/glance-api.conf" \
  7.    >   os_password="admin" \
  8.    >   os_username="admin" os_tenant_name="admin" \
  9.    >   os_auth_url="http://controller:5000/v3/" \
  10.    >   op monitor interval="30s" timeout="30s"
  11.  
  12. crm(live)configure# commit

Configure OpenStack Image service API

  1. cp /etc/glance/glance-api.conf{,.bak}
  2.  
  3. # /etc/glance/glance-api.conf
  4. connection = mysql+pymysql://glance:glance@controller/glance
  5. bind_host = controller
  6. registry_host = controller
  7. notifier_strategy = rabbit
  8. rabbit_host = controller
  9.  
  10. # nova.conf on the compute nodes: point the [glance] section at the HA endpoint
  11. [glance]
  12. api_servers = 10.0.0.11

Configure OpenStack services to use the highly available OpenStack Image API

  1. $ openstack endpoint create --region $KEYSTONE_REGION image public http://10.0.0.10:9292
  2. $ openstack endpoint create --region $KEYSTONE_REGION image admin http://10.0.0.11:9292
  3. $ openstack endpoint create --region $KEYSTONE_REGION image internal http://10.0.0.11:9292

Highly available Identity API

Installing KeyStone On Controllers

  1. [root@controller ~]# yum install openstack-keystone httpd mod_wsgi python-openstackclient openstack-utils -y

Configure MemCached

  1. [root@controller ~]# sed -i 's/127.0.0.1/0.0.0.0/' /etc/sysconfig/memcached
  2. [root@controller ~]# cat /etc/sysconfig/memcached
  3. PORT="11211"
  4. USER="memcached"
  5. MAXCONN="1024"
  6. CACHESIZE="64"
  7. OPTIONS="-l 0.0.0.0,::1"

Start MemCached Service

  1. [root@controller ~]# systemctl enable memcached.service
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
  3. [root@controller ~]# systemctl start memcached.service    
  4. [root@controller ~]# systemctl status memcached.service
  5. ● memcached.service - memcached daemon
  6.    Loaded: loaded (/usr/lib/systemd/system/memcached.service; enabled; vendor preset: disabled)
  7.    Active: active (running) since Sun 2017-12-17 22:07:25 EST; 1s ago
  8.  Main PID: 7500 (memcached)
  9.    CGroup: /system.slice/memcached.service
  10.            └─7500 /usr/bin/memcached -p 11211 -u memcached -m 64 -c 1024 -l 0.0.0.0,::1
  11.  
  12. Dec 17 22:07:25 controller1 systemd[1]: Started memcached daemon.
  13. Dec 17 22:07:25 controller1 systemd[1]: Starting memcached daemon...

Configure Httpd Service

  1. [root@controller ~]# cp /etc/httpd/conf/httpd.conf{,.bak}
  2. [root@controller1 ~]# echo "ServerName controller1">>/etc/httpd/conf/httpd.conf
  3. [root@controller2 ~]# echo "ServerName controller2">>/etc/httpd/conf/httpd.conf
  4. [root@controller3 ~]# echo "ServerName controller3">>/etc/httpd/conf/httpd.conf
  5. [root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

Configure KeyStone

Apache's keystone vhost is moved to ports 4999 and 35356 so that HAProxy can bind the standard Identity ports 5000 and 35357 on the VIP.

  1. [root@controller ~]# cp /usr/share/keystone/wsgi-keystone.conf{,.bak}
  2. [root@controller ~]# sed -i 's/5000/4999/' /usr/share/keystone/wsgi-keystone.conf
  3. [root@controller ~]# sed -i 's/35357/35356/' /usr/share/keystone/wsgi-keystone.conf

Start Httpd Service

  1. [root@controller ~]# systemctl enable httpd.service
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.
  3. [root@controller ~]# systemctl restart httpd.service
  4. [root@controller ~]# netstat -antp|egrep 'httpd'
  5. tcp6       0      0 :::80                   :::*                    LISTEN      1946/httpd          
  6. tcp6       0      0 :::35356                :::*                    LISTEN      1946/httpd          
  7. tcp6       0      0 :::4999                 :::*                    LISTEN      1946/httpd

Configure HAProxy For KeyStone

  1. [root@controller ~]# echo '
  2. > #keystone
  3. > listen keystone_admin_cluster
  4. > bind controller:35357
  5. > #balance  source
  6. > option  tcpka
  7. > option  httpchk 
  8. > option  tcplog
  9. > server controller1 controller1:35356 check inter 2000 rise 2 fall 5
  10. > server controller2 controller2:35356 check inter 2000 rise 2 fall 5
  11. > server controller3 controller3:35356 check inter 2000 rise 2 fall 5
  12. > 
  13. > listen keystone_public_cluster
  14. > bind controller:5000
  15. > #balance  source
  16. > option  tcpka
  17. > option  httpchk 
  18. > option  tcplog
  19. > server controller1 controller1:4999 check inter 2000 rise 2 fall 5
  20. > server controller2 controller2:4999 check inter 2000 rise 2 fall 5
  21. > server controller3 controller3:4999 check inter 2000 rise 2 fall 5
  22. > '>>/etc/haproxy/haproxy.cfg
  24. [root@controller ~]# systemctl restart haproxy.service
  25. [root@controller ~]# netstat -antp|egrep 'haproxy|httpd'
  26. tcp        0      0 0.0.0.0:1080            0.0.0.0:*               LISTEN      2111/haproxy        
  27. tcp        0      0 192.168.220.20:35357    0.0.0.0:*               LISTEN      2111/haproxy        
  28. tcp        0      0 192.168.220.20:5000     0.0.0.0:*               LISTEN      2111/haproxy        
  29. tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      2111/haproxy        
  30. tcp6       0      0 :::80                   :::*                    LISTEN      1946/httpd          
  31. tcp6       0      0 :::35356                :::*                    LISTEN      1946/httpd          
  32. tcp6       0      0 :::4999                 :::*                    LISTEN      1946/httpd

Configure KeyStone

  1. [root@controller1 ~]# KEYSTONE_SECRET=$(openssl rand -hex 10)
  2. [root@controller1 ~]# 
  3. [root@controller1 ~]# cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
  4. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $KEYSTONE_SECRET
  5. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf DEFAULT verbose true
  6. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:keystone@controller/keystone
  7. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache backend oslo_cache.memcache_pool
  8. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache enabled true
  9. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211
  10. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf memcache servers controller1:11211,controller2:11211,controller3:11211
  11. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf token driver memcache
  12. [root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf token provider fernet

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
  2. [root@controller1 ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
  3. [root@controller1 ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
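
The fernet and credential key repositories created above live under /etc/keystone and are exactly what gets copied to the other controllers in the next step (a quick check):

  1. [root@controller1 ~]# ls /etc/keystone/fernet-keys /etc/keystone/credential-keys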

Configure Other Controller Nodes

  1. [root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/keystone/* controller2:/etc/keystone/
  2. sending incremental file list
  3. keystone.conf
  4.       115180 100%  108.49MB/s    0:00:00 (xfer#1, to-check=10/13)
  5. keystone.conf.bak
  6.       114875 100%   54.78MB/s    0:00:00 (xfer#2, to-check=9/13)
  7. credential-keys/
  8. credential-keys/0
  9.           44 100%   21.48kB/s    0:00:00 (xfer#3, to-check=3/13)
  10. credential-keys/1
  11.           44 100%   21.48kB/s    0:00:00 (xfer#4, to-check=2/13)
  12. fernet-keys/
  13. fernet-keys/0
  14.           44 100%   21.48kB/s    0:00:00 (xfer#5, to-check=1/13)
  15. fernet-keys/1
  16.           44 100%   21.48kB/s    0:00:00 (xfer#6, to-check=0/13)
  17.  
  18. sent 2209 bytes  received 2114 bytes  2882.00 bytes/sec
  19. total size is 236741  speedup is 54.76
  20. [root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/keystone/* controller3:/etc/keystone/
  21. sending incremental file list
  22. keystone.conf
  23.       115180 100%  108.49MB/s    0:00:00 (xfer#1, to-check=10/13)
  24. keystone.conf.bak
  25.       114875 100%   54.78MB/s    0:00:00 (xfer#2, to-check=9/13)
  26. credential-keys/
  27. credential-keys/0
  28.           44 100%   21.48kB/s    0:00:00 (xfer#3, to-check=3/13)
  29. credential-keys/1
  30.           44 100%   21.48kB/s    0:00:00 (xfer#4, to-check=2/13)
  31. fernet-keys/
  32. fernet-keys/0
  33.           44 100%   21.48kB/s    0:00:00 (xfer#5, to-check=1/13)
  34. fernet-keys/1
  35.           44 100%   21.48kB/s    0:00:00 (xfer#6, to-check=0/13)
  36.  
  37. sent 2209 bytes  received 2114 bytes  8646.00 bytes/sec
  38. total size is 236741  speedup is 54.76

Restart Httpd Service

  1. [root@controller1 ~]# systemctl restart httpd.service
  2. [root@controller1 ~]# ssh controller2 "systemctl restart httpd.service"
  3. [root@controller1 ~]# ssh controller3 "systemctl restart httpd.service"

Create Admin Role

  1. [root@controller1 ~]# keystone-manage bootstrap --bootstrap-password admin \
  2. > --bootstrap-admin-url http://controller:35357/v3/ \
  3. > --bootstrap-internal-url http://controller:5000/v3/ \
  4. > --bootstrap-public-url http://controller:5000/v3/ \
  5. > --bootstrap-region-id RegionOne

Configure Admin Resource

  1. [root@controller1 ~]# echo "
  2. > export OS_PROJECT_DOMAIN_NAME=default
  3. > export OS_USER_DOMAIN_NAME=default 
  4. > export OS_PROJECT_NAME=admin 
  5. > export OS_USERNAME=admin
  6. > export OS_PASSWORD=admin
  7. > export OS_AUTH_URL=http://controller:35357/v3
  8. > export OS_IDENTITY_API_VERSION=3
  9. > export OS_IMAGE_API_VERSION=2
  10. > ">/root/admin-openrc
  11. [root@controller1 ~]# "
  12. [root@controller1 ~]# source /root/admin-openrc
  13. [root@controller1 ~]# openstack token issue
  14. +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
  15. | Field      | Value                                                                                                                                                                                   |
  16. +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
  17. | expires    | 2017-12-15T11:24:26+0000                                                                                                                                                                |
  18. | id         | gAAAAABaM6LaRTUjdiPkk1_5ydJV38A7d8ksrrD270fHt5Rc6SZZiIqhQXD70YdFVZqzfK0wWnxqF2jpAy1yBB6Tt-_v9VGbwyGORDJ-MesmmcmychP65oL_2dY8O4N09Mb8RZZm29wkJzOjgQffiFkmmjm3H7mAjfEHqbUxS-RdNcrnFEY0sTQ |
  19. | project_id | 2291724ac1a54d65844cc5dba56f4803                                                                                                                                                        |
  20. | user_id    | c69e3e92d2e9485dabc42d845574d965                                                                                                                                                        |
  21. +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

Create OpenStack Project

  1. [root@controller1 ~]# openstack project create --domain default --description "Service Project" service
  2. +-------------+----------------------------------+
  3. | Field       | Value                            |
  4. +-------------+----------------------------------+
  5. | description | Service Project                  |
  6. | domain_id   | default                          |
  7. | enabled     | True                             |
  8. | id          | 78757402f85a467995bcbd69b2183ba5 |
  9. | is_domain   | False                            |
  10. | name        | service                          |
  11. | parent_id   | default                          |
  12. +-------------+----------------------------------+
  13. [root@controller1 ~]# openstack user create --domain default --password=glance glance
  14. +---------------------+----------------------------------+
  15. | Field               | Value                            |
  16. +---------------------+----------------------------------+
  17. | domain_id           | default                          |
  18. | enabled             | True                             |
  19. | id                  | 1072761f1a714aa8ad31a8e3f32fdc94 |
  20. | name                | glance                           |
  21. | options             | {}                               |
  22. | password_expires_at | None                             |
  23. +---------------------+----------------------------------+
  24. [root@controller1 ~]# openstack role add --project service --user glance admin
  25. [root@controller1 ~]# openstack user create --domain default --password=nova nova
  26. +---------------------+----------------------------------+
  27. | Field               | Value                            |
  28. +---------------------+----------------------------------+
  29. | domain_id           | default                          |
  30. | enabled             | True                             |
  31. | id                  | 83ce33fed0fe4c1894b6448cc17c32f7 |
  32. | name                | nova                             |
  33. | options             | {}                               |
  34. | password_expires_at | None                             |
  35. +---------------------+----------------------------------+
  36. [root@controller1 ~]# openstack role add --project service --user nova admin
  37. [root@controller1 ~]# openstack user create --domain default --password=neutron neutron
  38. +---------------------+----------------------------------+
  39. | Field               | Value                            |
  40. +---------------------+----------------------------------+
  41. | domain_id           | default                          |
  42. | enabled             | True                             |
  43. | id                  | d0ed457a96824cffb030d3c57b4a8218 |
  44. | name                | neutron                          |
  45. | options             | {}                               |
  46. | password_expires_at | None                             |
  47. +---------------------+----------------------------------+
  48. [root@controller1 ~]# openstack role add --project service --user neutron admin
  49.  
  50. [root@controller1 ~]# openstack project create --domain default --description "Demo Project" demo
  51. +-------------+----------------------------------+
  52. | Field       | Value                            |
  53. +-------------+----------------------------------+
  54. | description | Demo Project                     |
  55. | domain_id   | default                          |
  56. | enabled     | True                             |
  57. | id          | 3ddffab721d24934a0cbd49def5aa615 |
  58. | is_domain   | False                            |
  59. | name        | demo                             |
  60. | parent_id   | default                          |
  61. +-------------+----------------------------------+
  62. [root@controller1 ~]# openstack user create --domain default --password=demo demo
  63. +---------------------+----------------------------------+
  64. | Field               | Value                            |
  65. +---------------------+----------------------------------+
  66. | domain_id           | default                          |
  67. | enabled             | True                             |
  68. | id                  | 7884786780534d82afa0085028d2eb9b |
  69. | name                | demo                             |
  70. | options             | {}                               |
  71. | password_expires_at | None                             |
  72. +---------------------+----------------------------------+
  73. [root@controller1 ~]# openstack role create user
  74. +-----------+----------------------------------+
  75. | Field     | Value                            |
  76. +-----------+----------------------------------+
  77. | domain_id | None                             |
  78. | id        | 0e067a05c0334234be3e19cad51cc1b5 |
  79. | name      | user                             |
  80. +-----------+----------------------------------+
  81. [root@controller1 ~]# openstack role add --project demo --user demo user

Add OpenStack Identity resource to Pacemaker

  1. [root@controller1 ~]# pcs resource create openstack-keystone systemd:openstack-keystone --clone interleave=true

Configure OpenStack Identity service

  1. # cat keystone.conf
  2.  
  3. bind_host = 10.0.0.12
  4. public_bind_host = 10.0.0.12
  5. admin_bind_host = 10.0.0.12
  6.  
  7. [catalog]
  8. driver = keystone.catalog.backends.sql.Catalog
  9. # ...
  10. [identity]
  11. driver = keystone.identity.backends.sql.Identity
  12. # ...

Configure OpenStack services to use the highly available OpenStack Identity

  1. # cat api-paste.ini
  2.  
  3. auth_host = 10.0.0.11
  4.  
  5. $ openstack endpoint create --region $KEYSTONE_REGION $service-type public http://PUBLIC_VIP:5000/v2.0
  6. $ openstack endpoint create --region $KEYSTONE_REGION $service-type admin http://10.0.0.11:35357/v2.0
  7. $ openstack endpoint create --region $KEYSTONE_REGION $service-type internal http://10.0.0.11:5000/v2.0
  8.  
  9. # cat local_settings.py
  10.  
  11. OPENSTACK_HOST = 10.0.0.11

Configure the VIP & HAProxy

Configure the VIP

  1. [root@controller1 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip="192.168.220.20" cidr_netmask="24" op monitor interval="30s"

Installing HAProxy On Controllers

  1. [root@controller ~]# yum install haproxy httpd -y

Configuring HAProxy

  1. [root@controller1 ~]# cp /etc/haproxy/haproxy.cfg{,.bak}
  2.  
  3. [root@controller1 ~]# diff /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
  4. 87,99d86
  5. < listen stats  
  6. <       bind 0.0.0.0:1080 
  7. <       mode http
  8. <       option httplog
  9. <       log 127.0.0.1 local0 err  
  10. <       maxconn 10
  11. <       stats refresh 30s
  12. <       stats uri /admin
  13. <       stats realm Haproxy\ Statistics
  14. <       stats auth admin:admin
  15. <       stats hide-version
  16. <       stats admin if TRUE
  17. < 
  18.  
  19. [root@controller1 ~]# scp /etc/haproxy/haproxy.cfg root@controller2:/etc/haproxy/haproxy.cfg
  20. [root@controller1 ~]# scp /etc/haproxy/haproxy.cfg root@controller3:/etc/haproxy/haproxy.cfg
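
Before restarting HAProxy on each node, the merged configuration can be validated with its built-in syntax check (a quick sanity check):

  1. [root@controller1 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg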

Allow Non-Local IP Binding

This allows HAProxy to bind to the cluster VIP on nodes that do not currently hold it.

  1. [root@controller ~]# echo "net.ipv4.ip_nonlocal_bind = 1" >>/etc/sysctl.conf
  2. [root@controller ~]# sysctl -p
  3. net.ipv4.ip_nonlocal_bind = 1

Add HAProxy To The Cluster

  1. [root@controller1 ~]# pcs resource create lb-haproxy systemd:haproxy --clone
  2. [root@controller1 ~]# pcs constraint order start vip then lb-haproxy-clone kind=Optional
  3. Adding vip lb-haproxy-clone (kind: Optional) (Options: first-action=start then-action=start)
  4. [root@controller1 ~]# pcs constraint colocation add lb-haproxy-clone with vip
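
At this point the VIP and the haproxy clone should both be running in the cluster, with the VIP plumbed on whichever node owns it (a verification sketch):

  1. [root@controller1 ~]# pcs status resources
  2. [root@controller1 ~]# ip addr | grep 192.168.220.20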

Check the HAProxy Stats Page

Open http://192.168.220.20:1080/admin in a browser and log in with admin / admin (the credentials from the stats block above).

Add RabbitMQ To HAProxy

  1. [root@controller1 ~]# echo '#RabbitMQ
  2. > listen RabbitMQ-Server
  3. > bind controller:5673
  4. > mode tcp
  5. > balance roundrobin
  6. > option tcpka
  7. > timeout client  3h
  8. > timeout server  3h
  9. > option          clitcpka
  10. > server controller1 controller1:5672 check inter 5s rise 2 fall 3
  11. > server controller2 controller2:5672 check inter 5s rise 2 fall 3
  12. > server controller3 controller3:5672 check inter 5s rise 2 fall 3
  13. > 
  14. > listen RabbitMQ-Web
  15. > bind controller:15673
  16. > mode tcp
  17. > balance roundrobin
  18. > option tcpka
  19. > server controller1 controller1:15672 check inter 5s rise 2 fall 3
  20. > server controller2 controller2:15672 check inter 5s rise 2 fall 3
  21. > server controller3 controller3:15672 check inter 5s rise 2 fall 3
  22. > '>>/etc/haproxy/haproxy.cfg
  23. [root@controller1 ~]# 
  24. [root@controller1 ~]# systemctl restart haproxy.service

Pacemaker Cluster Stack

Install Packages On Controller Nodes

  1. [root@controller ~]# yum install corosync pacemaker pcs fence-agents resource-agents -y

Set-Up the Cluster

  1. [root@controller ~]# systemctl enable pcsd
  2. [root@controller ~]# systemctl start pcsd
  3.  
  4. [root@controller ~]# echo myhaclusterpwd | passwd --stdin hacluster
  5.  
  6. [root@controller ~]# pcs cluster auth controller1 controller2 controller3  -u hacluster -p myhaclusterpwd --force
  7.  
  8. [root@controller1 ~]# pcs cluster setup --force --name my-cluster controller1 controller2 controller3
  9. Destroying cluster on nodes: controller1, controller2, controller3...
  10. controller1: Stopping Cluster (pacemaker)...
  11. controller2: Stopping Cluster (pacemaker)...
  12. controller3: Stopping Cluster (pacemaker)...
  13. controller3: Successfully destroyed cluster
  14. controller1: Successfully destroyed cluster
  15. controller2: Successfully destroyed cluster
  16.  
  17. Sending 'pacemaker_remote authkey' to 'controller1', 'controller2', 'controller3'
  18. controller1: successful distribution of the file 'pacemaker_remote authkey'
  19. controller3: successful distribution of the file 'pacemaker_remote authkey'
  21. controller2: successful distribution of the file 'pacemaker_remote authkey'
  22. Sending cluster config files to the nodes...
  23. controller1: Succeeded
  24. controller2: Succeeded
  25. controller3: Succeeded
  26.  
  27. Synchronizing pcsd certificates on nodes controller1, controller2, controller3...
  28. controller3: Success
  29. controller2: Success
  30. controller1: Success
  31. Restarting pcsd on the nodes in order to reload the certificates...
  32.  
  33. controller3: Success
  34. controller2: Success
  35. controller1: Success
  36. [root@controller1 ~]# pcs cluster start --all
  37. controller1: Starting Cluster...
  38. controller2: Starting Cluster...
  39. controller3: Starting Cluster...
  40. [root@controller1 ~]# pcs cluster enable --all
  41. controller1: Cluster Enabled
  42. controller2: Cluster Enabled
  43. controller3: Cluster Enabled
  44. [root@controller1 ~]# pcs cluster status
  45. Cluster Status:
  46.  Stack: unknown
  47.  Current DC: NONE
  48.  Last updated: Fri Dec 15 00:21:36 2017
  49.  Last change: Fri Dec 15 00:21:24 2017 by hacluster via crmd on controller1
  50.  3 nodes configured
  51.  0 resources configured
  52. PCSD Status:
  53.   controller3: Online
  54.   controller2: Online
  55.   controller1: Online

Start Corosync On Controllers

  1. [root@controller ~]# systemctl start corosync
  2.  
  3. [root@controller1 ~]# corosync-cfgtool -s
  4. Printing ring status.
  5. Local node ID 1
  6. RING ID 0
  7.         id      = 192.168.220.21
  8.         status  = ring 0 active with no faults
  9. [root@controller2 ~]# corosync-cfgtool -s
  10. Printing ring status.
  11. Local node ID 2
  12. RING ID 0
  13.         id      = 192.168.220.22
  14.         status  = ring 0 active with no faults
  15. [root@controller3 ~]# corosync-cfgtool -s
  16. Printing ring status.
  17. Local node ID 3
  18. RING ID 0
  19.         id      = 192.168.220.23
  20.         status  = ring 0 active with no faults
  21.  
  22. [root@controller ~]# corosync-cmapctl runtime.totem.pg.mrp.srp.members
  23. runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
  24. runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.220.21) 
  25. runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
  26. runtime.totem.pg.mrp.srp.members.1.status (str) = joined
  27. runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
  28. runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.220.22) 
  29. runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1
  30. runtime.totem.pg.mrp.srp.members.2.status (str) = joined
  31. runtime.totem.pg.mrp.srp.members.3.config_version (u64) = 0
  32. runtime.totem.pg.mrp.srp.members.3.ip (str) = r(0) ip(192.168.220.23) 
  33. runtime.totem.pg.mrp.srp.members.3.join_count (u32) = 1
  34. runtime.totem.pg.mrp.srp.members.3.status (str) = joined

Start Pacemaker

  1. [root@controller1 ~]# systemctl start pacemaker
  2. [root@controller1 ~]# crm_mon -1
  3. Stack: corosync
  4. Current DC: controller1 (version 1.1.16-12.el7_4.5-94ff4df) - partition with quorum
  5. Last updated: Fri Dec 15 00:34:25 2017
  6. Last change: Fri Dec 15 00:21:45 2017 by hacluster via crmd on controller1
  7.  
  8. 3 nodes configured
  9. 0 resources configured
  10.  
  11. Online: [ controller1 controller2 controller3 ]
  12.  
  13. No active resources

Set Basic Cluster Properties

  1. [root@controller1 ~]# pcs property set pe-warn-series-max=1000 \
  2. >   pe-input-series-max=1000 \
  3. >   pe-error-series-max=1000 \
  4. >   cluster-recheck-interval=5min
  5. [root@controller1 ~]# pcs property set stonith-enabled=false

RabbitMQ Cluster Setup

Install RabbitMQ

  1. [root@controller1 ~]# yum install erlang rabbitmq-server -y
  2. [root@controller2 ~]# yum install erlang rabbitmq-server -y
  3. [root@controller3 ~]# yum install erlang rabbitmq-server -y
  4.  
  5. [root@controller1 ~]# systemctl enable rabbitmq-server.service
  6. Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
  7. [root@controller2 ~]# systemctl enable rabbitmq-server.service
  8. Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
  9. [root@controller3 ~]# systemctl enable rabbitmq-server.service
  10. Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
  11.  
  12. [root@controller1 ~]# systemctl start rabbitmq-server.service
  13.  
  14. [root@controller1 ~]# rabbitmq-plugins enable rabbitmq_management
  15. The following plugins have been enabled:
  16.   mochiweb
  17.   webmachine
  18.   rabbitmq_web_dispatch
  19.   amqp_client
  20.   rabbitmq_management_agent
  21.   rabbitmq_management
  22.  
  23. Applying plugin configuration to rabbit@controller1... started 6 plugins.
  24. [root@controller2 ~]# rabbitmq-plugins enable rabbitmq_management
  25. The following plugins have been enabled:
  26.   mochiweb
  27.   webmachine
  28.   rabbitmq_web_dispatch
  29.   amqp_client
  30.   rabbitmq_management_agent
  31.   rabbitmq_management
  32.  
  33. Applying plugin configuration to rabbit@controller2... started 6 plugins.
  34. [root@controller3 ~]# rabbitmq-plugins enable rabbitmq_management
  35. The following plugins have been enabled:
  36.   mochiweb
  37.   webmachine
  38.   rabbitmq_web_dispatch
  39.   amqp_client
  40.   rabbitmq_management_agent
  41.   rabbitmq_management
  42.  
  43. Applying plugin configuration to rabbit@controller3... started 6 plugins.
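
If you want to double-check that the management plugin is enabled on a node, you can list the explicitly enabled plugins (the -e flag limits the listing to them):

  1. [root@controller1 ~]# rabbitmq-plugins list -e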

Configure RabbitMQ for HA queues

  1. [root@controller1 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@controller2:/var/lib/rabbitmq/.erlang.cookie
  2. .erlang.cookie                                                             100%   20    19.8KB/s   00:00    
  3. [root@controller1 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@controller3:/var/lib/rabbitmq/.erlang.cookie
  4. .erlang.cookie                                                             100%   20    34.2KB/s   00:00
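
All RabbitMQ nodes must share the same Erlang cookie, or clustering will fail with authentication errors. A simple way to confirm the copies match is to compare a checksum on each controller (md5sum is just one option; any hashing tool works):

  1. [root@controller1 ~]# md5sum /var/lib/rabbitmq/.erlang.cookie
  2. [root@controller2 ~]# md5sum /var/lib/rabbitmq/.erlang.cookie
  3. [root@controller3 ~]# md5sum /var/lib/rabbitmq/.erlang.cookie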

On Each Node

Fix the cookie's ownership and permissions, then enable and start RabbitMQ. Note that at this point each controller still reports itself as a standalone single-node cluster; the next step joins controller2 and controller3 to controller1.

  1. # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
  2. # chmod 400 /var/lib/rabbitmq/.erlang.cookie
  3.  
  4. # systemctl enable rabbitmq-server.service
  5. # systemctl start rabbitmq-server.service
  6.  
  7. [root@controller1 ~]# rabbitmqctl cluster_status
  8. Cluster status of node rabbit@controller1 ...
  9. [{nodes,[{disc,[rabbit@controller1]}]},
  10.  {running_nodes,[rabbit@controller1]},
  11.  {cluster_name,<<"rabbit@controller1">>},
  12.  {partitions,[]},
  13.  {alarms,[{rabbit@controller1,[]}]}]
  14. [root@controller2 ~]# rabbitmqctl cluster_status
  15. Cluster status of node rabbit@controller2 ...
  16. [{nodes,[{disc,[rabbit@controller2]}]},
  17.  {running_nodes,[rabbit@controller2]},
  18.  {cluster_name,<<"rabbit@controller2">>},
  19.  {partitions,[]},
  20.  {alarms,[{rabbit@controller2,[]}]}]
  21. [root@controller3 ~]# rabbitmqctl cluster_status
  22. Cluster status of node rabbit@controller3 ...
  23. [{nodes,[{disc,[rabbit@controller3]}]},
  24.  {running_nodes,[rabbit@controller3]},
  25.  {cluster_name,<<"rabbit@controller3">>},
  26.  {partitions,[]},
  27.  {alarms,[{rabbit@controller3,[]}]}]

Join the Other Nodes to the Cluster

  1. [root@controller2 ~]# rabbitmqctl stop_app
  2. Stopping node rabbit@controller2 ...
  3. [root@controller3 ~]# rabbitmqctl stop_app
  4. Stopping node rabbit@controller3 ...
  5.  
  6. [root@controller2 ~]# rabbitmqctl join_cluster --ram rabbit@controller1
  7. Clustering node rabbit@controller2 with rabbit@controller1 ...
  8. [root@controller3 ~]# rabbitmqctl join_cluster --ram rabbit@controller1
  9. Clustering node rabbit@controller3 with rabbit@controller1 ...
  10.  
  11. [root@controller2 ~]# rabbitmqctl start_app
  12. Starting node rabbit@controller2 ...
  13. [root@controller3 ~]# rabbitmqctl start_app
  14. Starting node rabbit@controller3 ...
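
Because controller2 and controller3 joined with --ram, only controller1 stores the cluster metadata on disk. If you would rather have every node persist it, the node type can be changed after joining; this is optional and not part of the original steps:

  1. [root@controller2 ~]# rabbitmqctl stop_app
  2. [root@controller2 ~]# rabbitmqctl change_cluster_node_type disc
  3. [root@controller2 ~]# rabbitmqctl start_app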

Set the HA-Mode Policy

  1. [root@controller1 ~]# rabbitmqctl cluster_status
  2. Cluster status of node rabbit@controller1 ...
  3. [{nodes,[{disc,[rabbit@controller1]},
  4.          {ram,[rabbit@controller3,rabbit@controller2]}]},
  5.  {running_nodes,[rabbit@controller3,rabbit@controller2,rabbit@controller1]},
  6.  {cluster_name,<<"rabbit@controller1">>},
  7.  {partitions,[]},
  8.  {alarms,[{rabbit@controller3,[]},
  9.           {rabbit@controller2,[]},
  10.           {rabbit@controller1,[]}]}]
  11. [root@controller2 ~]# rabbitmqctl cluster_status
  12. Cluster status of node rabbit@controller2 ...
  13. [{nodes,[{disc,[rabbit@controller1]},
  14.          {ram,[rabbit@controller3,rabbit@controller2]}]},
  15.  {running_nodes,[rabbit@controller3,rabbit@controller1,rabbit@controller2]},
  16.  {cluster_name,<<"rabbit@controller1">>},
  17.  {partitions,[]},
  18.  {alarms,[{rabbit@controller3,[]},
  19.           {rabbit@controller1,[]},
  20.           {rabbit@controller2,[]}]}]
  21. [root@controller3 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
  22. Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" ...
  23. [root@controller3 ~]# 
  24. [root@controller3 ~]# 
  25. [root@controller3 ~]# rabbitmqctl cluster_status
  26. Cluster status of node rabbit@controller3 ...
  27. [{nodes,[{disc,[rabbit@controller1]},
  28.          {ram,[rabbit@controller3,rabbit@controller2]}]},
  29.  {running_nodes,[rabbit@controller2,rabbit@controller1,rabbit@controller3]},
  30.  {cluster_name,<<"rabbit@controller1">>},
  31.  {partitions,[]},
  32.  {alarms,[{rabbit@controller2,[]},
  33.           {rabbit@controller1,[]},
  34.           {rabbit@controller3,[]}]}]
  35.  
  36. [root@controller1 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
  37. Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" ...
  38. [root@controller2 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
  39. Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" ...
  40. [root@controller3 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
  41. Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" ...
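
Policies are stored in the shared cluster metadata, so setting ha-all on one node is sufficient; repeating the command on every controller, as above, is harmless but redundant. You can confirm the policy is visible cluster-wide with:

  1. [root@controller1 ~]# rabbitmqctl list_policies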

Now, Check the Management UI in a Browser

http://192.168.220.21:15672

Username / password: guest/guest

(From RabbitMQ 3.3 onward, the built-in guest account can only log in from localhost by default; for remote access to the management UI, use the admin account created at the end of this section.)

Configure OpenStack services to use RabbitMQ HA queues

  1. transport_url = rabbit://RABBIT_USER:RABBIT_PASS@rabbit1:5672,RABBIT_USER:RABBIT_PASS@rabbit2:5672,RABBIT_USER:RABBIT_PASS@rabbit3:5672
  2. rabbit_retry_interval=1
  3. rabbit_retry_backoff=2
  4. rabbit_max_retries=0
  5. rabbit_durable_queues=true
  6. rabbit_ha_queues=true
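
As a concrete example, the same style of openstack-config commands used elsewhere in this guide could apply these options to nova.conf. This is a sketch only: the hostnames, credentials, and the choice to point at all three brokers directly on port 5672 (rather than a load-balanced endpoint) are assumptions to adapt to your environment.

  1. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller1:5672,openstack:openstack@controller2:5672,openstack:openstack@controller3:5672
  2. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_retry_interval 1
  3. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_retry_backoff 2
  4. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_max_retries 0
  5. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_ha_queues true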

For an Existing (Old) Set-Up

If a node was previously part of a different RabbitMQ cluster, reset its state before joining it to the new cluster:

  1. # rabbitmqctl stop_app
  2. # rabbitmqctl reset
  3. # rabbitmqctl start_app

Create RabbitMQ Admin

  1. [root@controller1 ~]# rabbitmqctl  add_user admin admin
  2. Creating user "admin" ...
  3. [root@controller1 ~]# rabbitmqctl  set_user_tags admin administrator
  4. Setting tags for user "admin" to [administrator] ...
  5. [root@controller1 ~]# rabbitmqctl add_user openstack openstack
  6. Creating user "openstack" ...
  7. [root@controller1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*" 
  8. Setting permissions for user "openstack" in vhost "/" ...
  9. [root@controller1 ~]# rabbitmqctl  set_user_tags openstack administrator
  10. Setting tags for user "openstack" to [administrator] ...
  11. [root@controller1 ~]# systemctl restart rabbitmq-server.service
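
Note that only the openstack user is granted vhost permissions above. If the admin account should also be able to publish or manage messages in the default vhost (not just log in to the management UI), give it a matching grant; listing the users afterwards is a quick way to confirm both accounts and their tags exist:

  1. [root@controller1 ~]# rabbitmqctl set_permissions admin ".*" ".*" ".*"
  2. [root@controller1 ~]# rabbitmqctl list_users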