Config Network

[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=4e333024-b8c8-45e4-baee-e46ece81432c
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.200
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=8.8.8.8
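
To make the static addressing above take effect, restarting the legacy network service and checking the interface is usually enough on CentOS 7 (a quick sanity check, assuming the NIC really is ens33):

[root@localhost ~]# systemctl restart network
[root@localhost ~]# ip addr show ens33 | grep 'inet '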

Install Compute & Neutron

[root@compute01 ~]# yum install openstack-selinux python-openstackclient yum-plugin-priorities openstack-nova-compute openstack-utils -y
[root@compute01 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y

Configure Nova

[root@compute01 ~]# mkdir -p /data/nova/instances
[root@compute01 ~]# chown -R nova:nova /data/nova

[root@compute01 ~]# cp /etc/nova/nova.conf{,.bak}

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT instances_path /data/nova/instances
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.220.51
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT cpu_allocation_ratio 10

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address compute01
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement password placement

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
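
virt_type is set to qemu here because this lab runs inside virtual machines. A quick way to check whether the host actually supports hardware acceleration (if the count below is non-zero, kvm could be used instead):

[root@compute01 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo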

[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron password neutron

Configure Neutron

[root@compute01 ~]# cp /etc/neutron/neutron.conf{,.bak}

[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673

[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_plugin password
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_id default
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_id default
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password neutron

[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp

Configure LinuxBridge Agent

[root@compute01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33

[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false

Start Compute Service

[root@compute01 ~]# systemctl enable libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
[root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
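
Once the controller services configured below are running, the new node should register itself. A hedged verification from any controller (IDs and timestamps will differ):

[root@controller1 ~]# openstack compute service list --service nova-compute
[root@controller1 ~]# openstack network agent list --host compute01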

Create Nova Service & Endpoint

[root@controller1 ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 365c1378f8c641ba81f48efa7c62cd29 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2afb267d1bd34f9388f9568321313167 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 92de622e904546cc9bd1ca4087529e98 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 9295b974edec4e06842ee4db2e2d2458 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+

Create Placement & Endpoint

[root@controller1 ~]# openstack user create --domain default --password=placement placement
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | c18a3e3bf63c4114924fb6b31b3305fd |
| name                | placement                        |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user placement admin
[root@controller1 ~]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Placement API                    |
| enabled     | True                             |
| id          | 99a48fd2c3bf496287fa1fcf82376c02 |
| name        | placement                        |
| type        | placement                        |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | c271472e882f49c3a77c518296d4045c |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | b04f59956eca4d8cab4b3a41a071fc6b |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 582bf2a041734838adbf92d7dd4b602e |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+

Install Nova

[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-cert openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y

Configure Nova

[root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen_port 8774
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen_port 8775
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_host controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_port 6080

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement password placement

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300

[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache enabled true
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211

Configure Nova-Placement

Refer to "CLOUD COMPUTING -> OpenStack Pike Installation -> 7.Nova"

[root@controller1 ~]# echo "
#Placement API
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
">>/etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller1 ~]# systemctl restart httpd

Synchronize Database

[root@controller1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

List Nova Cell

[root@controller1 ~]# nova-manage cell_v2 list_cells
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
| Name  | UUID                                 | Transport URL                           | Database Connection                             |
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/                                  | mysql+pymysql://nova:****@controller/nova_cell0 |
| cell1 | b55310bc-6d58-4c8d-9b7c-014b77cd48ac | rabbit://openstack:****@controller:5673 | mysql+pymysql://nova:****@controller/nova       |
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+

Replace Default IP

[root@controller1 ~]# sed -i 's/8778/9778/' /etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller1 ~]# systemctl restart httpd
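
Moving the Placement vhost to 9778 frees port 8778 for the HAProxy frontend on the VIP. A quick check that Apache picked up the new port:

[root@controller1 ~]# netstat -antp | grep 9778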

Configure HA With HAProxy

[root@controller1 ~]# echo '

##nova_compute
listen nova_compute_api_cluster
bind controller:8774
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:8774 check inter 2000 rise 2 fall 5
server controller2 controller2:8774 check inter 2000 rise 2 fall 5
server controller3 controller3:8774 check inter 2000 rise 2 fall 5
#Nova-api-metadata
listen Nova-api-metadata_cluster
bind controller:8775
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:8775 check inter 2000 rise 2 fall 5
server controller2 controller2:8775 check inter 2000 rise 2 fall 5
server controller3 controller3:8775 check inter 2000 rise 2 fall 5
#nova_placement
listen nova_placement_cluster
bind controller:8778
balance source
option tcpka
option tcplog
server controller1 controller1:9778 check inter 2000 rise 2 fall 5
server controller2 controller2:9778 check inter 2000 rise 2 fall 5
server controller3 controller3:9778 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]# systemctl restart haproxy.service

netstat -antp | grep haproxy
netstat -antpe | grep -E '8774|8775|8778|6080'

Start Nova Service

[root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

List Nova Status

[root@controller1 ~]# openstack catalog list
+-----------+-----------+------------------------------------------+
| Name      | Type      | Endpoints                                |
+-----------+-----------+------------------------------------------+
| glance    | image     | RegionOne                                |
|           |           |   internal: http://controller:9292       |
|           |           | RegionOne                                |
|           |           |   admin: http://controller:9292          |
|           |           | RegionOne                                |
|           |           |   public: http://controller:9292         |
|           |           |                                          |
| nova      | compute   | RegionOne                                |
|           |           |   public: http://controller:8774/v2.1    |
|           |           | RegionOne                                |
|           |           |   admin: http://controller:8774/v2.1     |
|           |           | RegionOne                                |
|           |           |   internal: http://controller:8774/v2.1  |
|           |           |                                          |
| placement | placement | RegionOne                                |
|           |           |   admin: http://controller:8778          |
|           |           | RegionOne                                |
|           |           |   internal: http://controller:8778       |
|           |           | RegionOne                                |
|           |           |   public: http://controller:8778         |
|           |           |                                          |
| neutron   | network   | RegionOne                                |
|           |           |   admin: http://controller:9696          |
|           |           | RegionOne                                |
|           |           |   public: http://controller:9696         |
|           |           | RegionOne                                |
|           |           |   internal: http://controller:9696       |
|           |           |                                          |
| keystone  | identity  | RegionOne                                |
|           |           |   admin: http://controller:35357/v3/     |
|           |           | RegionOne                                |
|           |           |   internal: http://controller:5000/v3/   |
|           |           | RegionOne                                |
|           |           |   public: http://controller:5000/v3/     |
|           |           |                                          |
+-----------+-----------+------------------------------------------+
[root@controller1 ~]# nova-status upgrade check
+---------------------------------------------------------------------+
| Upgrade Check Results                                               |
+---------------------------------------------------------------------+
| Check: Cells v2                                                     |
| Result: Success                                                     |
| Details: No host mappings or compute nodes were found. Remember to  |
|          run command 'nova-manage cell_v2 discover_hosts' when new  |
|          compute hosts are deployed.                                |
+---------------------------------------------------------------------+
| Check: Placement API                                                |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
| Check: Resource Providers                                           |
| Result: Success                                                     |
| Details: There are no compute resource providers in the Placement   |
|          service nor are there compute nodes in the database.       |
|          Remember to configure new compute nodes to report into the |
|          Placement service. See                                     |
|          http://docs.openstack.org/developer/nova/placement.html    |
|          for more details.                                          |
+---------------------------------------------------------------------+
[root@controller1 ~]# openstack compute service list
+----+------------------+-------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
+----+------------------+-------------+----------+---------+-------+----------------------------+
| 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:02:58.000000 |
| 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:02:56.000000 |
| 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
| 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
| 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:03:03.000000 |
| 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:02:57.000000 |
| 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
| 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:03:05.000000 |
| 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
| 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:02:59.000000 |
+----+------------------+-------------+----------+---------+-------+----------------------------+

Install On Other Controller Nodes

[root@controller2 ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y

[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/nova/* /etc/nova/
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/

[root@controller2 ~]# sed -i '1,9s/controller1/controller2/' /etc/nova/nova.conf
[root@controller3 ~]# sed -i '1,9s/controller1/controller3/' /etc/nova/nova.conf

[root@controller2 ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller2 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller2 ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

[root@controller2 ~]# systemctl restart httpd haproxy

Discover New Compute Node

[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting compute nodes from cell 'cell1': b55310bc-6d58-4c8d-9b7c-014b77cd48ac
Found 0 unmapped computes in cell: b55310bc-6d58-4c8d-9b7c-014b77cd48ac
[root@controller1 ~]# openstack compute service list
+----+------------------+-------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
+----+------------------+-------------+----------+---------+-------+----------------------------+
| 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:28:08.000000 |
| 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:28:16.000000 |
| 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:28:10.000000 |
| 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
| 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:28:13.000000 |
| 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:28:17.000000 |
| 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
| 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
| 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:28:09.000000 |
| 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:28:10.000000 |
| 60 | nova-compute     | compute02   | nova     | enabled | up    | 2017-12-19T09:28:16.000000 |
+----+------------------+-------------+----------+---------+-------+----------------------------+

Install Dashboard

[root@controller ~]# yum install openstack-dashboard -y

Configure Dashboard

[root@controller1 ~]# cp /etc/openstack-dashboard/local_settings{,.bak}
[root@controller1 ~]# DASHBOARD_LOCAL_SETTINGS=/etc/openstack-dashboard/local_settings
[root@controller1 ~]# sed -i 's#_member_#user#g' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# sed -in '153,158s/#//' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -in '160,164s/.*/#&/' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#UTC#Asia/Shanghai#g' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#%s:5000/v2.0#%s:5000/v3#' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i "s@^#OPENSTACK_KEYSTONE_DEFAULT@OPENSTACK_KEYSTONE_DEFAULT@" $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# echo '

#set
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
#'>>$DASHBOARD_LOCAL_SETTINGS
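
A quick grep can confirm that the edits above actually landed in local_settings (a sanity check, not part of the original procedure):

[root@controller1 ~]# grep -E 'OPENSTACK_HOST|ALLOWED_HOSTS|OPENSTACK_API_VERSIONS' $DASHBOARD_LOCAL_SETTINGS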

Configure Other Controller Nodes

[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/openstack-dashboard/local_settings controller2:/etc/openstack-dashboard/
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/openstack-dashboard/local_settings controller3:/etc/openstack-dashboard/

Restart Httpd Service

[root@controller1 ~]# systemctl restart httpd
[root@controller1 ~]# ssh controller2 "systemctl restart httpd"
[root@controller1 ~]# ssh controller3 "systemctl restart httpd"

Configure HA Dashboard API

sed -i 's#^Listen 80#Listen 8080#' /etc/httpd/conf/httpd.conf
systemctl restart httpd.service
systemctl daemon-reload

echo '
listen dashboard_cluster
bind controller:80
balance roundrobin
option tcpka
option httpchk
option tcplog
server controller1 controller1:8080 check port 8080 inter 2000 rise 2 fall 5
server controller2 controller2:8080 check port 8080 inter 2000 rise 2 fall 5
server controller3 controller3:8080 check port 8080 inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
systemctl restart haproxy.service
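
With Apache moved to 8080 on each node and HAProxy answering on port 80 of the VIP, the dashboard should respond through the VIP (a hedged check; the exact redirect depends on the WEBROOT setting):

curl -I http://controller/dashboard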

Now, Open the Browser

![Dashboard](img/Dashboard-V2.gif)

Create OpenStack Service

[root@controller1 ~]# source admin-openrc
[root@controller1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | ad17484f2f19423b9ffe8ab2b451906d |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | c4e2c0741118449d933107948c67651d |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f35d94a749ae47d68b243a90015493bb |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 61e469452d914b78aabbf4bcc0a51732 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

Install OpenStack Neutron

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y

Configure Neutron

[root@controller1 ~]# cp /etc/neutron/neutron.conf{,.bak2}
[root@controller1 ~]# echo '

[DEFAULT]
bind_port = 9696
bind_host = controller1
core_plugin = ml2
service_plugins =
#service_plugins = trunk
#service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[nova]
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[database]
connection = mysql://neutron:neutron@controller:3306/neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#'>/etc/neutron/neutron.conf

Configure ML2

[root@controller1 ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
[root@controller1 ~]# echo '#

[ml2]
tenant_network_types =
type_drivers = vlan,flat
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[securitygroup]
enable_ipset = True
#vlan

[ml2_type_vlan]

network_vlan_ranges = provider:3001:4000

#'>/etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
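
neutron-server reads its plugin configuration from /etc/neutron/plugin.ini, hence the symlink. One way to review the effective (non-comment) ML2 settings before starting the service:

[root@controller1 ~]# egrep -v '^#|^$' /etc/neutron/plugin.ini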

Configure Linux Bridge

[root@controller1 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@controller1 ~]# echo '#

[linux_bridge]
physical_interface_mappings = provider:ens37
[vxlan]
enable_vxlan = false
[agent]
prevent_arp_spoofing = True
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = True
#'>/etc/neutron/plugins/ml2/linuxbridge_agent.ini

Configure DHCP

[root@controller1 ~]# cp /etc/neutron/dhcp_agent.ini{,.bak}
[root@controller1 ~]# echo '#

[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
#'>/etc/neutron/dhcp_agent.ini

Configure Metadata

[root@controller1 ~]# cp /etc/neutron/metadata_agent.ini{,.bak}
[root@controller1 ~]# echo '

[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = metadata
#'>/etc/neutron/metadata_agent.ini

Configure Nova

[root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
[root@controller1 ~]# echo '

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = metadata
#'>>/etc/nova/nova.conf

Configure L3

[root@controller1 ~]# cp /etc/neutron/l3_agent.ini{,.bak}
[root@controller1 ~]#
[root@controller1 ~]# echo '

[DEFAULT]
interface_driver = linuxbridge
#'>/etc/neutron/l3_agent.ini

Synchronize Database

[root@controller1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller1 ~]# mysql -h controller -u neutron -pneutron -e "use neutron;show tables;"

Configure HAProxy For Neutron API

[root@controller1 ~]# echo '

#Neutron_API
listen Neutron_API_cluster
bind controller:9696
balance source
option tcpka
option tcplog
server controller1 controller1:9696 check inter 2000 rise 2 fall 5
server controller2 controller2:9696 check inter 2000 rise 2 fall 5
server controller3 controller3:9696 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]# systemctl restart haproxy.service
[root@controller1 ~]# netstat -antp | grep haproxy
tcp 0 0 192.168.220.20:9292 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:35357 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:9696 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:5000 0.0.0.0:* LISTEN 76948/haproxy

Start Neutron Service

[root@controller1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller1 ~]# systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 2 & Start Service

[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
[root@controller2 ~]# sed -i 's/controller1/controller2/' /etc/neutron/neutron.conf
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
[root@controller2 ~]# systemctl restart haproxy
[root@controller2 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller2 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 3 & Start Service

[root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
[root@controller3 ~]# sed -i 's/controller1/controller3/' /etc/neutron/neutron.conf
[root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
[root@controller3 ~]# systemctl restart haproxy
[root@controller3 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller3 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure L3 HA on Controller node

[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT router_distributed True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha_net_cidr 169.254.192.0/18
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 3
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 2

[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch,l2population
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks external
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000

[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True

[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT ha_vrrp_auth_password password
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr_snat
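
These options only take effect once the Neutron services are restarted. After that, an HA router can be created to exercise VRRP across the L3 agents (a hedged example using the legacy neutron client; it assumes the router service plugin is enabled as configured above):

[root@controller ~]# systemctl restart neutron-server neutron-l3-agent
[root@controller ~]# neutron router-create --ha True router-test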

Configure L3 HA on Compute Node

openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex

openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True

openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr

Verify Service

[root@controller1 ~]# openstack network agent list
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
| 1a4efb8b-aa65-4d4a-8092-7213592acd22 | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 3b35bc6e-4cec-42e2-9fde-e99c601cf609 | DHCP agent         | controller3 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 42e57e23-eecb-490d-b709-d8e3730107e8 | DHCP agent         | controller2 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 6b2058a2-d3e3-4342-afbb-717338b1499f | Metadata agent     | controller1 | None              | :-)   | UP    | neutron-metadata-agent    |
| 750b5e5c-c7b6-4f48-ae2f-37580b6e03d9 | DHCP agent         | controller1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 7e63ce46-3fd5-40ee-9f63-ee8cc52dd5a4 | Metadata agent     | controller3 | None              | :-)   | UP    | neutron-metadata-agent    |
| 92199bf0-08ef-4642-9557-c33360796405 | Linux bridge agent | controller2 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 9ae5bafa-0075-4408-b827-1be9bb1ccf99 | Linux bridge agent | controller3 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| f1ed9e45-39e7-4980-aaec-10364e42263f | Metadata agent     | controller2 | None              | :-)   | UP    | neutron-metadata-agent    |
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

Waiting For Test ...

Config Network

[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=4e333024-b8c8-45e4-baee-e46ece81432c
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.101
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Fdisk the 2nd Disk

[root@localhost ~]# ls -al /dev/sd*
brw-rw----. 1 root disk 8, 0 Dec 26 2017 /dev/sda
brw-rw----. 1 root disk 8, 1 Dec 26 2017 /dev/sda1
brw-rw----. 1 root disk 8, 2 Dec 26 2017 /dev/sda2
brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb

[root@localhost ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Command (m for help): n
Partition type:
p primary (0 primary, 0 extended, 4 free)
e extended
Select (default p):
Using default response p
Partition number (1-4, default 1):
First sector (2048-125829119, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-125829119, default 125829119): +30G
Partition 1 of type Linux and of size 30 GiB is set

Command (m for help): n
Partition type:
p primary (1 primary, 0 extended, 3 free)
e extended
Select (default p):
Using default response p
Partition number (2-4, default 2):
First sector (62916608-125829119, default 62916608):
Using default value 62916608
Last sector, +sectors or +size{K,M,G} (62916608-125829119, default 125829119):
Using default value 125829119
Partition 2 of type Linux and of size 30 GiB is set

Command (m for help): p

Disk /dev/sdb: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x285890cb

Device Boot Start End Blocks Id System
/dev/sdb1 2048 62916607 31457280 83 Linux
/dev/sdb2 62916608 125829119 31456256 83 Linux

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

[root@localhost ~]# ls -al /dev/sd*
brw-rw----. 1 root disk 8, 0 Dec 26 2017 /dev/sda
brw-rw----. 1 root disk 8, 1 Dec 26 2017 /dev/sda1
brw-rw----. 1 root disk 8, 2 Dec 26 2017 /dev/sda2
brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
brw-rw----. 1 root disk 8, 17 Dec 26 01:11 /dev/sdb1
brw-rw----. 1 root disk 8, 18 Dec 26 01:11 /dev/sdb2

Format 2nd Disk

[root@localhost ~]# mkfs.ext4 /dev/sdb1
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1966080 inodes, 7864320 blocks
393216 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2155872256
240 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000

Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

[root@localhost ~]# mkfs.ext4 /dev/sdb2
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1966080 inodes, 7864064 blocks
393203 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2155872256
240 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000

Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

Mount /dev/sdb1

[root@localhost ~]# mkdir -p /date
[root@localhost ~]# mount -t ext4 /dev/sdb1 /date
[root@localhost ~]# df -h | grep /dev/sdb1
/dev/sdb1 30G 45M 28G 1% /date
[root@localhost ~]# echo "mount -t ext4 /dev/sdb1 /date" >>/etc/rc.d/rc.local
[root@localhost ~]# tail -1 /etc/rc.d/rc.local
mount -t ext4 /dev/sdb1 /date
[root@localhost ~]# chmod +x /etc/rc.d/rc.local
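
Appending the mount command to rc.local works, but an fstab entry is the more conventional way to make the mount persistent (an alternative sketch, same device and mount point):

[root@localhost ~]# echo "/dev/sdb1 /date ext4 defaults 0 0" >>/etc/fstab
[root@localhost ~]# mount -a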

Create Volume on /dev/sdb2

[root@localhost ~]# yum install lvm2 -y
[root@localhost ~]# systemctl enable lvm2-lvmetad.service
Created symlink from /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.service to /usr/lib/systemd/system/lvm2-lvmetad.service.
[root@localhost ~]# systemctl start lvm2-lvmetad.service

[root@localhost ~]# pvcreate /dev/sdb2
WARNING: ext4 signature detected on /dev/sdb2 at offset 1080. Wipe it? [y/n]: y
Wiping ext4 signature on /dev/sdb2.
Physical volume "/dev/sdb2" successfully created.
[root@localhost ~]# vgcreate cinder_lvm01 /dev/sdb2
Volume group "cinder_lvm01" successfully created
[root@localhost ~]# vgdisplay
--- Volume group ---
VG Name cinder_lvm01
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <30.00 GiB
PE Size 4.00 MiB
Total PE 7679
Alloc PE / Size 0 / 0
Free PE / Size 7679 / <30.00 GiB
VG UUID jyb299-bo5k-E6Z3-Frho-e6Kz-d9Mu-yK0m6c

--- Volume group ---
VG Name centos
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 3
Max PV 0
Cur PV 1
Act PV 1
VG Size <79.00 GiB
PE Size 4.00 MiB
Total PE 20223
Alloc PE / Size 20222 / 78.99 GiB
Free PE / Size 1 / 4.00 MiB
VG UUID NHtSF8-nozf-sbB4-vEBt-ogJo-WmuG-F8HYMQ

Install NFS

[root@localhost ~]# yum install nfs-utils rpcbind -y
[root@localhost ~]# mkdir -p /date/{cinder_nfs1,cinder_nfs2}
[root@localhost ~]# chmod -R 777 /date
[root@localhost ~]# echo "/date/cinder_nfs1 *(rw,root_squash,sync,anonuid=165,anongid=165)">/etc/exports
[root@localhost ~]# exportfs -r
[root@localhost ~]# systemctl enable rpcbind nfs-server
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@localhost ~]# systemctl restart rpcbind nfs-server
[root@localhost ~]# showmount -e localhost
Export list for localhost:
/date/cinder_nfs1 *
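
Before pointing Cinder at the export, it can be test-mounted locally (a quick check; unmount it again afterwards):

[root@localhost ~]# mount -t nfs 192.168.220.101:/date/cinder_nfs1 /mnt
[root@localhost ~]# df -h /mnt
[root@localhost ~]# umount /mnt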

Install Cinder & Configure

[root@localhost ~]# yum install openstack-cinder targetcli python-keystone lvm2 -y
[root@localhost ~]# cp /etc/cinder/cinder.conf{,.bak}
[root@localhost ~]# cp /etc/lvm/lvm.conf{,.bak}
[root@localhost ~]# sed -i '141a filter = [ "a/sdb2/", "r/.*/"]' /etc/lvm/lvm.conf
[root@localhost ~]# echo '192.168.220.101:/date/cinder_nfs1'>/etc/cinder/nfs_shares
[root@localhost ~]# chmod 640 /etc/cinder/nfs_shares
[root@localhost ~]# chown root:cinder /etc/cinder/nfs_shares

Configure cinder.conf

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT log_dir /var/log/cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT state_path /var/lib/cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@controller
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm,nfs

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@controller/cinder

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password cinder

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder_lvm01
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_ip_address 192.168.220.101
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volumes_dir $state_path/volumes
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_backend_name lvm01

[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_driver cinder.volume.drivers.nfs.NfsDriver
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_shares_config /etc/cinder/nfs_shares
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_mount_point_base $state_path/mnt
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_backend_name nfs01

Start Cinder Service

[root@localhost ~]# chmod 640 /etc/cinder/cinder.conf
[root@localhost ~]# chgrp cinder /etc/cinder/cinder.conf

[root@localhost ~]# systemctl enable openstack-cinder-volume.service target.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.
[root@localhost ~]# systemctl start openstack-cinder-volume.service target.service
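
If the controller services are already running, the two new backends should appear as <hostname>@lvm and <hostname>@nfs (a hedged check, run with admin credentials on a controller):

[root@controller1 ~]# openstack volume service list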

Add Cinder to Pacemaker

# pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true

pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true

pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume

pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone

pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone

pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume

pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone

pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone

Configure Cinder service

# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak

openstack-config --set /etc/cinder/cinder.conf DEFAULT host cinder-cluster-1

openstack-config --set /etc/cinder/cinder.conf DEFAULT osapi_volume_listen 10.0.0.11

openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone

openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder

openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver

openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_shares_config /etc/cinder/nfs_exports

openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_sparsed_volumes true

openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_mount_options v3

openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:password@10.0.0.11/cinder

openstack-config --set /etc/cinder/cinder.conf database max_retries -1

openstack-config --set /etc/cinder/cinder.conf keystone_authtoken identity_uri http://10.0.0.11:35357/

openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://10.0.0.11:5000/

openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service

openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder

openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password CINDER_PASS

openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_hosts 10.0.0.12,10.0.0.13,10.0.0.14

openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_ha_queues True

openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_timeout_threshold 60

openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_rate 2

Configure HA Cinder API

$ openstack endpoint create volume --region $KEYSTONE_REGION \
--publicurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
--adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
--internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s'

Create Service For Glance

[root@controller1 ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | 3083848116cd4706bc39241f81e3475b |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | f3eb684e2209471795a04f6e73ce120f |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 3083848116cd4706bc39241f81e3475b |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | bbf93cdcc35645288cfa90ef6239e1b5 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 3083848116cd4706bc39241f81e3475b |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | da194b1e1e8148859a45a83fac2c7403 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 3083848116cd4706bc39241f81e3475b |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

Install OpenStack Glance

[root@controller ~]# yum install openstack-glance python-glance -y

Configure Glance Service

[root@controller1 ~]# cp /etc/glance/glance-api.conf{,.bak}
[root@controller1 ~]# cp /etc/glance/glance-registry.conf{,.bak}
[root@controller ~]# mkdir -p /data/glance

[root@controller1 ~]# echo "#

[DEFAULT]
debug = False
verbose = True
bind_host = controller1
bind_port = 9292
auth_region = RegionOne
registry_client_protocol = http
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v3
auth_url = http://controller:35357/v3
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /data/glance
[oslo_messaging_rabbit]
rabbit_userid =openstack
rabbit_password = openstack
rabbit_durable_queues=true
rabbit_ha_queues = True
rabbit_max_retries=0
rabbit_port = 5672
rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
#">/etc/glance/glance-api.conf

[root@controller1 ~]# echo "#

[DEFAULT]
debug = False
verbose = True
bind_host = controller1
bind_port = 9191
workers = 2
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v3
auth_url = http://controller:35357/v3
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[oslo_messaging_rabbit]
rabbit_userid =openstack
rabbit_password = openstack
rabbit_durable_queues=true
rabbit_ha_queues = True
rabbit_max_retries=0
rabbit_port = 5672
rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
#">/etc/glance/glance-registry.conf
[root@controller1 ~]#

Synchronize Database

[root@controller1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1328: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
expire_on_commit=expire_on_commit, _conf=conf)
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> liberty, liberty initial
INFO [alembic.runtime.migration] Running upgrade liberty -> mitaka01, add index on created_at and updated_at columns of 'images' table
INFO [alembic.runtime.migration] Running upgrade mitaka01 -> mitaka02, update metadef os_nova_server
INFO [alembic.runtime.migration] Running upgrade mitaka02 -> ocata01, add visibility to and remove is_public from images
INFO [alembic.runtime.migration] Running upgrade ocata01 -> pike01, drop glare artifacts tables
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: pike01, current revision(s): pike01

[root@controller1 ~]# mysql -h controller -u glance -pglance -e "use glance;show tables;"
+----------------------------------+
| Tables_in_glance                 |
+----------------------------------+
| alembic_version                  |
| image_locations                  |
| image_members                    |
| image_properties                 |
| image_tags                       |
| images                           |
| metadef_namespace_resource_types |
| metadef_namespaces               |
| metadef_objects                  |
| metadef_properties               |
| metadef_resource_types           |
| metadef_tags                     |
| migrate_version                  |
| task_info                        |
| tasks                            |
+----------------------------------+

Start Glance Service

[root@controller1 ~]# systemctl enable openstack-glance-api openstack-glance-registry
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service to /usr/lib/systemd/system/openstack-glance-registry.service.
[root@controller1 ~]# systemctl restart openstack-glance-api openstack-glance-registry
[root@controller1 ~]# netstat -antp | grep python2
tcp 0 0 192.168.220.21:9292 0.0.0.0:* LISTEN 13125/python2
tcp 0 0 192.168.220.21:9191 0.0.0.0:* LISTEN 13126/python2
[root@controller1 ~]# netstat -antp | egrep '9292|9191'
tcp 0 0 192.168.220.21:9292 0.0.0.0:* LISTEN 13125/python2
tcp 0 0 192.168.220.21:9191 0.0.0.0:* LISTEN 13126/python2
tcp 32 0 192.168.220.21:39292 192.168.220.21:2224 CLOSE_WAIT 699/ruby
tcp 0 0 192.168.220.11:49292 192.168.220.23:2224 ESTABLISHED 699/ruby

Configure HAProxy For Glance

[root@controller1 ~]# echo '

#glance_api_cluster
listen glance_api_cluster
bind controller:9292
#balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:9292 check inter 2000 rise 2 fall 5
server controller2 controller2:9292 check inter 2000 rise 2 fall 5
server controller3 controller3:9292 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]#
[root@controller1 ~]# systemctl restart haproxy.service
[root@controller1 ~]# netstat -antp | grep haproxy
tcp 0 0 192.168.220.20:9292 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 192.168.220.20:35357 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 192.168.220.20:5000 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 0.0.0.0:5000 0.0.0.0:* LISTEN 13170/haproxy
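
With HAProxy now bound to the VIP, the load-balanced endpoint can be exercised directly. Glance answers version negotiation on its root URL (typically an HTTP 300 Multiple Choices response listing the supported API versions), so a simple curl works as a liveness check:

[root@controller1 ~]# curl -i http://controller:9292/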

Create Cirros Image

[root@controller glance]# wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
--2017-12-18 02:06:45-- http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 64.90.42.85, 2607:f298:6:a036::bd6:a72a
Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|64.90.42.85|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 13267968 (13M) [text/plain]
Saving to: 'cirros-0.3.5-x86_64-disk.img'

100%[==============================================>] 13,267,968 1.04MB/s in 13s

2017-12-18 02:06:58 (1023 KB/s) - 'cirros-0.3.5-x86_64-disk.img' saved [13267968/13267968]

[root@controller1 glance]# openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
| container_format | bare                                                 |
| created_at       | 2017-12-18T07:37:04Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/82e5d7d9-86a0-4266-a599-e50e8c4b0cbe/file |
| id               | 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros                                               |
| owner            | 2291724ac1a54d65844cc5dba56f4803                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13267968                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2017-12-18T07:37:05Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+

List OpenStack Image

[root@controller1 glance]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe | cirros | active |
+--------------------------------------+--------+--------+
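
Glance records an MD5 checksum for every uploaded image, so the checksum field shown above can be verified against the local file:

[root@controller1 glance]# md5sum cirros-0.3.5-x86_64-disk.img
f8ab98ff5e73ebab884d80c9dc9c7290  cirros-0.3.5-x86_64-disk.img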

Configure Other Controller Nodes

[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller2:/etc/glance/
sending incremental file list
glance-api.conf
883 100% 0.00kB/s 0:00:00 (xfer#1, to-check=44/45)
glance-api.conf.bak
150676 100% 14.37MB/s 0:00:00 (xfer#2, to-check=43/45)
glance-registry.conf
744 100% 72.66kB/s 0:00:00 (xfer#3, to-check=41/45)
glance-registry.conf.bak
77404 100% 5.27MB/s 0:00:00 (xfer#4, to-check=40/45)
metadefs/
rootwrap.d/

sent 61739 bytes received 126 bytes 123730.00 bytes/sec
total size is 504600 speedup is 8.16

[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller3:/etc/glance/
sending incremental file list
glance-api.conf
883 100% 0.00kB/s 0:00:00 (xfer#1, to-check=44/45)
glance-api.conf.bak
150676 100% 28.74MB/s 0:00:00 (xfer#2, to-check=43/45)
glance-registry.conf
744 100% 121.09kB/s 0:00:00 (xfer#3, to-check=41/45)
glance-registry.conf.bak
77404 100% 9.23MB/s 0:00:00 (xfer#4, to-check=40/45)
metadefs/
rootwrap.d/

sent 61739 bytes received 126 bytes 123730.00 bytes/sec
total size is 504600 speedup is 8.16

[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller2:/etc/haproxy/
sending incremental file list
haproxy.cfg
4376 100% 3.51MB/s 0:00:00 (xfer#1, to-check=0/1)

sent 75 bytes received 73 bytes 296.00 bytes/sec
total size is 4376 speedup is 29.57
[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller3:/etc/haproxy/

sending incremental file list
haproxy.cfg
4376 100% 3.51MB/s 0:00:00 (xfer#1, to-check=0/1)

sent 75 bytes received 73 bytes 296.00 bytes/sec
total size is 4376 speedup is 29.57
[root@controller1 glance]#
[root@controller1 glance]# ssh controller2 "sed -i '1,10s/controller1/controller2/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
[root@controller1 glance]# ssh controller3 "sed -i '1,10s/controller1/controller3/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
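
A quick sanity check that the per-node substitution landed (bind_host sits within the first ten lines of each file, so the sed above should have rewritten it):

[root@controller1 glance]# ssh controller2 "grep ^bind_host /etc/glance/glance-api.conf"
bind_host = controller2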

Start the Other Nodes' Services

[root@controller1 glance]# ssh controller2 "systemctl enable openstack-glance-api openstack-glance-registry"
[root@controller1 glance]# ssh controller2 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service"
[root@controller1 glance]# ssh controller3 "systemctl enable openstack-glance-api openstack-glance-registry"
[root@controller1 glance]# ssh controller3 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service"

Add OpenStack Image API resource to Pacemaker

[root@controller1 glance]# cd /usr/lib/ocf/resource.d/openstack
[root@controller1 openstack]# wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
--2017-12-18 03:00:41-- https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
Resolving git.openstack.org (git.openstack.org)... 104.130.246.128, 2001:4800:7819:103:be76:4eff:fe06:63c
Connecting to git.openstack.org (git.openstack.org)|104.130.246.128|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 11439 (11K) [text/plain]
Saving to: 'glance-api'

100%[====================================>] 11,439 --.-K/s in 0s

2017-12-18 03:00:42 (31.5 MB/s) - 'glance-api' saved [11439/11439]

[root@controller1 openstack]# chmod a+rx *
[root@controller1 openstack]# ls
glance-api nova-compute-wait NovaEvacuate

Create Local Repository

[root@localrepo yum.repos.d]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo
[root@localrepo myrepo]# reposync --repoid=network_ha-clustering_Stable
[root@localrepo myrepo]# createrepo /var/www/html/myrepo/network_ha-clustering_Stable/

[root@controller1 openstack]# cat /etc/yum.repos.d/network_ha-clustering_Stable.repo
[network_ha-clustering_Stable]
name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
type=rpm-md
baseurl=http://192.168.220.200/myrepo/network_ha-clustering_Stable/
gpgcheck=0
gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
enabled=1
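
After dropping the repo file in place, confirm yum can actually see the repository before installing from it:

[root@controller1 openstack]# yum clean all
[root@controller1 openstack]# yum repolist | grep ha-clustering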

Install CRMSH

[root@controller1 openstack]# yum install crmsh -y

[root@controller1 openstack]# crm configure

crm(live)configure# primitive p_glance-api ocf:openstack:glance-api \

params config="/etc/glance/glance-api.conf" \
os_password="admin" \
os_username="admin" os_tenant_name="admin" \
os_auth_url="http://controller:5000/v3/" \
op monitor interval="30s" timeout="30s"

crm(live)configure# commit
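
Once committed, the new primitive should appear in the cluster. A quick look with crmsh, after leaving the configure shell:

crm(live)configure# exit
[root@controller1 openstack]# crm status
[root@controller1 openstack]# crm configure show p_glance-api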

Configure OpenStack Image service API

cp /etc/glance/glance-api.conf{,.bak}

connection = mysql+pymysql://glance:glance@controller/glance
bind_host = controller
registry_host = controller
notifier_strategy = rabbit
rabbit_host = controller

nova.conf

[glance]

api_servers = 10.0.0.11

Configure OpenStack services to use the highly available OpenStack Image API

$ openstack endpoint create --region $KEYSTONE_REGION image public http://10.0.0.10:9292
$ openstack endpoint create --region $KEYSTONE_REGION image admin http://10.0.0.11:9292
$ openstack endpoint create --region $KEYSTONE_REGION image internal http://10.0.0.11:9292

Installing KeyStone On Controllers

[root@controller ~]# yum install openstack-keystone httpd mod_wsgi python-openstackclient openstack-utils -y

Configure MemCached

[root@controller ~]# sed -i 's/127.0.0.1/0.0.0.0/' /etc/sysconfig/memcached
[root@controller ~]# cat /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 0.0.0.0,::1"

Start MemCached Service

[root@controller ~]# systemctl enable memcached.service
Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
[root@controller ~]# systemctl start memcached.service
[root@controller ~]# systemctl status memcached.service
● memcached.service - memcached daemon
Loaded: loaded (/usr/lib/systemd/system/memcached.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2017-12-17 22:07:25 EST; 1s ago
Main PID: 7500 (memcached)
CGroup: /system.slice/memcached.service
└─7500 /usr/bin/memcached -p 11211 -u memcached -m 64 -c 1024 -l 0.0.0.0,::1

Dec 17 22:07:25 controller1 systemd[1]: Started memcached daemon.
Dec 17 22:07:25 controller1 systemd[1]: Starting memcached daemon...
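
Since memcached now listens on all interfaces, every controller should answer the stats command remotely. A minimal check, assuming nc (nmap-ncat) is installed:

[root@controller1 ~]# echo stats | nc -w1 controller1 11211 | head -3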

Configure Httpd Service

[root@controller ~]# cp /etc/httpd/conf/httpd.conf{,.bak}
[root@controller1 ~]# echo "ServerName controller1">>/etc/httpd/conf/httpd.conf
[root@controller2 ~]# echo "ServerName controller2">>/etc/httpd/conf/httpd.conf
[root@controller3 ~]# echo "ServerName controller3">>/etc/httpd/conf/httpd.conf
[root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

Configure KeyStone

[root@controller ~]# cp /usr/share/keystone/wsgi-keystone.conf{,.bak}
[root@controller ~]# sed -i 's/5000/4999/' /usr/share/keystone/wsgi-keystone.conf
[root@controller ~]# sed -i 's/35357/35356/' /usr/share/keystone/wsgi-keystone.conf

Start Httpd Service

[root@controller ~]# systemctl enable httpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.
[root@controller ~]# systemctl restart httpd.service
[root@controller ~]# netstat -antp | egrep 'httpd'
tcp6 0 0 :::80 :::* LISTEN 1946/httpd
tcp6 0 0 :::35356 :::* LISTEN 1946/httpd
tcp6 0 0 :::4999 :::* LISTEN 1946/httpd

Configure HAProxy For KeyStone

[root@controller ~]# echo '

#keystone
listen keystone_admin_cluster
bind controller:35357
#balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:35356 check inter 2000 rise 2 fall 5
server controller2 controller2:35356 check inter 2000 rise 2 fall 5
server controller3 controller3:35356 check inter 2000 rise 2 fall 5

listen keystone_public_cluster
bind controller:5000
#balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:4999 check inter 2000 rise 2 fall 5
server controller2 controller2:4999 check inter 2000 rise 2 fall 5
server controller3 controller3:4999 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
[root@controller ~]#
[root@controller ~]# systemctl restart haproxy.service
[root@controller ~]# netstat -antp | egrep 'haproxy|httpd'
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 2111/haproxy
tcp 0 0 192.168.220.20:35357 0.0.0.0:* LISTEN 2111/haproxy
tcp 0 0 192.168.220.20:5000 0.0.0.0:* LISTEN 2111/haproxy
tcp 0 0 0.0.0.0:5000 0.0.0.0:* LISTEN 2111/haproxy
tcp6 0 0 :::80 :::* LISTEN 1946/httpd
tcp6 0 0 :::35356 :::* LISTEN 1946/httpd
tcp6 0 0 :::4999 :::* LISTEN 1946/httpd

Configure KeyStone

[root@controller1 ~]# KEYSTONE_SECRET=$(openssl rand -hex 10)
[root@controller1 ~]#
[root@controller1 ~]# cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $KEYSTONE_SECRET
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf DEFAULT verbose true
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:keystone@controller/keystone
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache backend oslo_cache.memcache_pool
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache enabled true
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf memcache servers controller1:11211,controller2:11211,controller3:11211
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf token driver memcache
[root@controller1 ~]# openstack-config --set /etc/keystone/keystone.conf token provider fernet
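
openstack-config can read values back as well, which makes for a quick spot check of what was just written:

[root@controller1 ~]# openstack-config --get /etc/keystone/keystone.conf token provider
fernet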

Synchronize Database

[root@controller1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@controller1 ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
[root@controller1 ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
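
Both setup commands create a key repository under /etc/keystone; each should now hold keys 0 and 1, which are exactly what gets rsynced to the other controllers below:

[root@controller1 ~]# ls /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/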

Configure Other Controller Nodes

[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/keystone/* controller2:/etc/keystone/
sending incremental file list
keystone.conf
115180 100% 108.49MB/s 0:00:00 (xfer#1, to-check=10/13)
keystone.conf.bak
114875 100% 54.78MB/s 0:00:00 (xfer#2, to-check=9/13)
credential-keys/
credential-keys/0
44 100% 21.48kB/s 0:00:00 (xfer#3, to-check=3/13)
credential-keys/1
44 100% 21.48kB/s 0:00:00 (xfer#4, to-check=2/13)
fernet-keys/
fernet-keys/0
44 100% 21.48kB/s 0:00:00 (xfer#5, to-check=1/13)
fernet-keys/1
44 100% 21.48kB/s 0:00:00 (xfer#6, to-check=0/13)

sent 2209 bytes received 2114 bytes 2882.00 bytes/sec
total size is 236741 speedup is 54.76
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/keystone/* controller3:/etc/keystone/
sending incremental file list
keystone.conf
115180 100% 108.49MB/s 0:00:00 (xfer#1, to-check=10/13)
keystone.conf.bak
114875 100% 54.78MB/s 0:00:00 (xfer#2, to-check=9/13)
credential-keys/
credential-keys/0
44 100% 21.48kB/s 0:00:00 (xfer#3, to-check=3/13)
credential-keys/1
44 100% 21.48kB/s 0:00:00 (xfer#4, to-check=2/13)
fernet-keys/
fernet-keys/0
44 100% 21.48kB/s 0:00:00 (xfer#5, to-check=1/13)
fernet-keys/1
44 100% 21.48kB/s 0:00:00 (xfer#6, to-check=0/13)

sent 2209 bytes received 2114 bytes 8646.00 bytes/sec
total size is 236741 speedup is 54.76

Restart Httpd Service

[root@controller1 ~]# systemctl restart httpd.service
[root@controller1 ~]# ssh controller2 “systemctl restart httpd.service”
[root@controller1 ~]# ssh controller3 “systemctl restart httpd.service”

Create Admin Role

[root@controller1 ~]# keystone-manage bootstrap --bootstrap-password admin \

--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne

Configure Admin Resource

[root@controller1 ~]# echo "

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
">/root/admin-openrc
[root@controller1 ~]#
[root@controller1 ~]# source /root/admin-openrc
[root@controller1 ~]# openstack token issue
+------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                             |
+------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2017-12-15T11:24:26+0000                                                                                                                                          |
| id         | gAAAAABaM6LaRTUjdiPkk1_5ydJV38A7d8ksrrD270fHt5Rc6SZZiIqhQXD70YdFVZqzfK0wWnxqF2jpAy1yBB6Tt-_v9VGbwyGORDJ-MesmmcmychP65oL_2dY8O4N09Mb8RZZm29wkJzOjgQffiFkmmjm3H7mAjfEHqbUxS-RdNcrnFEY0sTQ |
| project_id | 2291724ac1a54d65844cc5dba56f4803                                                                                                                                  |
| user_id    | c69e3e92d2e9485dabc42d845574d965                                                                                                                                  |
+------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

Create OpenStack Project

[root@controller1 ~]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 78757402f85a467995bcbd69b2183ba5 |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@controller1 ~]# openstack user create --domain default --password=glance glance
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 1072761f1a714aa8ad31a8e3f32fdc94 |
| name                | glance                           |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user glance admin
[root@controller1 ~]# openstack user create --domain default --password=nova nova
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 83ce33fed0fe4c1894b6448cc17c32f7 |
| name                | nova                             |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user nova admin
[root@controller1 ~]# openstack user create --domain default --password=neutron neutron
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | d0ed457a96824cffb030d3c57b4a8218 |
| name                | neutron                          |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user neutron admin

[root@controller1 ~]# openstack project create --domain default --description "Demo Project" demo
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 3ddffab721d24934a0cbd49def5aa615 |
| is_domain   | False                            |
| name        | demo                             |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@controller1 ~]# openstack user create --domain default --password=demo demo
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 7884786780534d82afa0085028d2eb9b |
| name                | demo                             |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role create user
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | None                             |
| id        | 0e067a05c0334234be3e19cad51cc1b5 |
| name      | user                             |
+-----------+----------------------------------+
[root@controller1 ~]# openstack role add --project demo --user demo user
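
With all the users and role assignments in place, the whole mapping can be reviewed at once (recent clients accept --names to resolve IDs into readable names):

[root@controller1 ~]# openstack role assignment list --names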

Add OpenStack Identity resource to Pacemaker

[root@controller1 ~]# pcs resource create openstack-keystone systemd:openstack-keystone --clone interleave=true

Configure OpenStack Identity service

# cat keystone.conf

bind_host = 10.0.0.12
public_bind_host = 10.0.0.12
admin_bind_host = 10.0.0.12

[catalog]
driver = keystone.catalog.backends.sql.Catalog

[identity]
driver = keystone.identity.backends.sql.Identity

Configure OpenStack services to use the highly available OpenStack Identity

# cat api-paste.ini

auth_host = 10.0.0.11

$ openstack endpoint create --region $KEYSTONE_REGION $service-type public http://PUBLIC_VIP:5000/v2.0
$ openstack endpoint create --region $KEYSTONE_REGION $service-type admin http://10.0.0.11:35357/v2.0
$ openstack endpoint create --region $KEYSTONE_REGION $service-type internal http://10.0.0.11:5000/v2.0

# cat local_settings.py

OPENSTACK_HOST = "10.0.0.11"

Configure the VIP

[root@controller1 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip="192.168.220.20" cidr_netmask="24" op monitor interval="30s"
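
The VIP should come up on exactly one node. Two quick checks, one for the resource state and one for the address itself:

[root@controller1 ~]# pcs status | grep vip
[root@controller1 ~]# ip addr | grep 192.168.220.20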

Installing HAProxy On Controllers

[root@controller ~]# yum install haproxy httpd -y

Configuring HAProxy

[root@controller1 ~]# cp /etc/haproxy/haproxy.cfg{,.bak}

[root@controller1 ~]# diff /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
87,99d86
< listen stats
< bind 0.0.0.0:1080
< mode http
< option httplog
< log 127.0.0.1 local0 err
< maxconn 10
< stats refresh 30s
< stats uri /admin
< stats realm Haproxy\ Statistics
< stats auth admin:admin
< stats hide-version
< stats admin if TRUE
<

[root@controller1 ~]# scp /etc/haproxy/haproxy.cfg root@controller2:/etc/haproxy/haproxy.cfg
[root@controller1 ~]# scp /etc/haproxy/haproxy.cfg root@controller3:/etc/haproxy/haproxy.cfg

Allow Non-Local IP Binding

[root@controller ~]# echo "net.ipv4.ip_nonlocal_bind = 1" >>/etc/sysctl.conf
[root@controller ~]# sysctl -p
net.ipv4.ip_nonlocal_bind = 1

Add HAProxy To The Cluster

[root@controller1 ~]# pcs resource create lb-haproxy systemd:haproxy --clone
[root@controller1 ~]# pcs constraint order start vip then lb-haproxy-clone kind=Optional
Adding vip lb-haproxy-clone (kind: Optional) (Options: first-action=start then-action=start)
[root@controller1 ~]# pcs constraint colocation add lb-haproxy-clone with vip
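
The ordering and colocation rules can be read back to confirm they were recorded:

[root@controller1 ~]# pcs constraint show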

Now, Check in the Browser

The stats section added to haproxy.cfg earlier binds the statistics page to port 1080 with stats uri /admin, so it is reachable at http://192.168.220.20:1080/admin.

(Screenshot: HAProxy statistics page, img/HAProxy-V1.gif)

username/password: admin/admin

Add RabbitMQ To The HAProxy

[root@controller1 ~]# echo '#RabbitMQ

listen RabbitMQ-Server
bind controller:5673
mode tcp
balance roundrobin
option tcpka
timeout client 3h
timeout server 3h
option clitcpka
server controller1 controller1:5672 check inter 5s rise 2 fall 3
server controller2 controller2:5672 check inter 5s rise 2 fall 3
server controller3 controller3:5672 check inter 5s rise 2 fall 3

listen RabbitMQ-Web
bind controller:15673
mode tcp
balance roundrobin
option tcpka
server controller1 controller1:15672 check inter 5s rise 2 fall 3
server controller2 controller2:15672 check inter 5s rise 2 fall 3
server controller3 controller3:15672 check inter 5s rise 2 fall 3
'>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]#
[root@controller1 ~]# systemctl restart haproxy.service
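
After the restart, the two new frontends should be listening on the VIP. The same netstat pattern used earlier confirms it:

[root@controller1 ~]# netstat -antp | grep haproxy | egrep '5673|15673'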