Category: Cloud Computing
Regression: Caffe
Based on LeNet-5
Deploy with GPU
name: "RegressionExample"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape: {
      dim: 100  # batch size (N)
      dim: 1    # channels (C)
      dim: 100  # height (H)
      dim: 100  # width (W)
    }
  }
}
……
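The prototxt above is truncated. Assuming a solver.prototxt that references this network (file names here are placeholders), a GPU run with the stock caffe CLI looks roughly like:
caffe train --solver=solver.prototxt --gpu 0   # training
caffe test --model=deploy.prototxt --weights=net.caffemodel --gpu 0   # scoring a trained net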
Convolution Visualization
How to Use Microsoft’s RemoteFX Feature In Hyper-V
Requirement Example
Turn On Hyper-V (Host)
Turn On RemoteFX (Host)
Turn On RemoteFX (Client)
Check Display Adapters (Client)
The display adapter must be “Microsoft RemoteFX Graphics Device – WDDM”.
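To verify from inside the client VM, the display adapter can also be queried from PowerShell (a quick sketch; the exact name string varies by Windows version):
Get-WmiObject Win32_VideoController | Select-Object Name
# Expect: Microsoft RemoteFX Graphics Device - WDDM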
How to Use KVM to Create a Virtual Machine
Configure Firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
(The config edit takes effect only after a reboot; setenforce 0 switches SELinux to permissive mode immediately.)
Install KVM & Virt
yum install qemu-kvm libvirt -y
yum install virt-install -y
Start Virt Service
systemctl start libvirtd && systemctl enable libvirtd
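Before creating guests, it is worth confirming that hardware virtualization is enabled and the KVM modules are loaded:
egrep -c '(vmx|svm)' /proc/cpuinfo   # non-zero means VT-x/AMD-V is available
lsmod | grep kvm                     # expect kvm plus kvm_intel or kvm_amd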
[root@localhost ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.220.202 netmask 255.255.255.0 broadcast 192.168.220.255
inet6 fe80::c269:7c04:a06b:dce7 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:4e:32:2a txqueuelen 1000 (Ethernet)
RX packets 3394211 bytes 4731781088 (4.4 GiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 264816 bytes 35363147 (33.7 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1 (Local Loopback)
RX packets 68 bytes 5920 (5.7 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 68 bytes 5920 (5.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
virbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
ether 52:54:00:a5:ea:48 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Create Disk
[root@localhost ~]# qemu-img create -f raw /opt/CentOS-7-x86_64.raw 10G
Formatting '/opt/CentOS-7-x86_64.raw', fmt=raw size=10737418240
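raw allocates the full 10G up front; if thin provisioning and snapshots matter more than raw performance, qcow2 is the usual alternative:
qemu-img create -f qcow2 /opt/CentOS-7-x86_64.qcow2 10G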
Upload System ISO
[root@localhost ~]# ls /ISO
CentOS-7-x86_64-DVD-1708.iso
Begin Install OS
[root@localhost ~]# virt-install --virt-type kvm --name CentOS-7-x86_64 --ram 1024 --cdrom=/ISO/CentOS-7-x86_64-DVD-1708.iso --disk path=/opt/CentOS-7-x86_64.raw --network network=default --graphics vnc,listen=0.0.0.0 --noautoconsole
Starting install...
Domain installation still in progress. You can reconnect to
the console to complete the installation process.
Connect With VNC
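Since the install was started with --graphics vnc,listen=0.0.0.0, the console is reachable over VNC. Query the display number with virsh (display :0 maps to TCP port 5900) and point a VNC client at the host, e.g. 192.168.220.202:5900:
virsh vncdisplay CentOS-7-x86_64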
List Virtual Machine
[root@localhost ~]# virsh list --all
Id Name State
----------------------------------------------------
- CentOS-7-x86_64 shut off
Start Virtual Machine
[root@localhost ~]# virsh start CentOS-7-x86_64
Domain CentOS-7-x86_64 started
[root@localhost ~]# virsh list --all
Id Name State
----------------------------------------------------
3 CentOS-7-x86_64 running
Test Virtual Machine
List Virbr
[root@localhost ~]# brctl show
bridge name bridge id STP enabled interfaces
virbr0 8000.525400a5ea48 yes virbr0-nic
vnet0
Create the br0 Script
[root@localhost ~]# vi create-br0.sh
[root@localhost ~]# cat create-br0.sh
brctl addbr br0                              # create the bridge
brctl addif br0 ens33                        # enslave the physical NIC
ip addr del dev ens33 192.168.220.202/24     # move the host IP off the NIC...
ifconfig br0 192.168.220.202/24 up           # ...and onto the bridge
route add default gw 192.168.220.2           # restore the default route
[root@localhost ~]# chmod +x create-br0.sh
[root@localhost ~]# ./create-br0.sh
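A bridge built by this script does not survive a reboot. For a persistent setup, the same result is usually expressed as ifcfg files (a sketch matching the addressing above):
# /etc/sysconfig/network-scripts/ifcfg-br0
DEVICE=br0
TYPE=Bridge
BOOTPROTO=static
IPADDR=192.168.220.202
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
ONBOOT=yes
# /etc/sysconfig/network-scripts/ifcfg-ens33
DEVICE=ens33
TYPE=Ethernet
BRIDGE=br0
ONBOOT=yes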
Check br0 Status
[root@localhost network-scripts]# ifconfig
br0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.220.202 netmask 255.255.255.0 broadcast 192.168.220.255
inet6 fe80::20c:29ff:fe4e:322a prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:4e:32:2a txqueuelen 1000 (Ethernet)
RX packets 144 bytes 12890 (12.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 96 bytes 17020 (16.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet6 fe80::c269:7c04:a06b:dce7 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:4e:32:2a txqueuelen 1000 (Ethernet)
RX packets 3432987 bytes 4745607852 (4.4 GiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 297978 bytes 44267836 (42.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1 (Local Loopback)
RX packets 68 bytes 5920 (5.7 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 68 bytes 5920 (5.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
ether 52:54:00:a5:ea:48 txqueuelen 1000 (Ethernet)
RX packets 3970 bytes 167470 (163.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 4261 bytes 11695250 (11.1 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
vnet0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet6 fe80::fc54:ff:feaf:a499 prefixlen 64 scopeid 0x20<link>
ether fe:54:00:af:a4:99 txqueuelen 1000 (Ethernet)
RX packets 3970 bytes 223050 (217.8 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 5218 bytes 11745246 (11.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Edit the Domain XML to Use the Bridge Network
[root@localhost ~]# virsh edit CentOS-7-x86_64
Domain CentOS-7-x86_64 XML configuration edited.
Before Edit
After Edit
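The before/after screenshots are not reproduced here. Assuming the guest was created on the default NAT network, the edit swaps the <interface> element from the 'default' network to the br0 bridge, roughly:
Before:
<interface type='network'>
  <source network='default'/>
</interface>
After:
<interface type='bridge'>
  <source bridge='br0'/>
</interface>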
Restart Virtual Machine
[root@localhost ~]# virsh shutdown CentOS-7-x86_64
Domain CentOS-7-x86_64 is being shutdown
[root@localhost ~]# virsh start CentOS-7-x86_64
Domain CentOS-7-x86_64 started
Look at the Virtual Machine IP
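On the default NAT network the lease can be read from libvirt directly; once the guest is bridged onto the LAN it takes an address from the LAN's DHCP server, so check there or log in via the console. A couple of starting points:
virsh net-dhcp-leases default        # NAT case
arp -n | grep 52:54:00               # bridged case: look up the guest's MAC (KVM default OUI) on the LAN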
Make the Compute Nodes Highly Available
Install Compute & Neutron
[root@compute01 ~]# yum install openstack-selinux python-openstackclient yum-plugin-priorities openstack-nova-compute openstack-utils -y
[root@compute01 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y
Configure Nova
[root@compute01 ~]# mkdir -p /data/nova/instances
[root@compute01 ~]# chown -R nova:nova /data/nova
[root@compute01 ~]# cp /etc/nova/nova.conf{,.bak}
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT instances_path /data/nova/instances
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.220.51
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT cpu_allocation_ratio 10
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address compute01
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
[root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron password neutron
Configure Neutron
[root@compute01 ~]# cp /etc/neutron/neutron.conf{,.bak}
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:[email protected]:5673
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_plugin password
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_id default
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_id default
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password neutron
[root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
Configure LinuxBridge Agent
[root@compute01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:'ens33'
[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false
Start Compute Service
[root@compute01 ~]# systemctl enable libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
[root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
Highly available Nova API
Create Nova Service & Endpoint
[root@controller1 ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | 365c1378f8c641ba81f48efa7c62cd29 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 2afb267d1bd34f9388f9568321313167 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 92de622e904546cc9bd1ca4087529e98 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 9295b974edec4e06842ee4db2e2d2458 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 365c1378f8c641ba81f48efa7c62cd29 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
Create Placement & Endpoint
[root@controller1 ~]# openstack user create --domain default --password=placement placement
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | c18a3e3bf63c4114924fb6b31b3305fd |
| name | placement |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user placement admin
[root@controller1 ~]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | 99a48fd2c3bf496287fa1fcf82376c02 |
| name | placement |
| type | placement |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | c271472e882f49c3a77c518296d4045c |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | b04f59956eca4d8cab4b3a41a071fc6b |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 582bf2a041734838adbf92d7dd4b602e |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 99a48fd2c3bf496287fa1fcf82376c02 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
Install Nova
[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-cert openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
Configure Nova
[root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen_port 8774
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen_port 8775
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_host controller1
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_port 6080
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache enabled true
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
[root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211
Configure Nova-Placement
Refer to “CLOUD COMPUTING —> OpenStack Pike Installation —> 7.Nova”
[root@controller1 ~]# echo "
> #Placement API
> <Directory /usr/bin>
> <IfVersion >= 2.4>
> Require all granted
> </IfVersion>
> <IfVersion < 2.4>
> Order allow,deny
> Allow from all
> </IfVersion>
> </Directory>
> ">>/etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller1 ~]# systemctl restart httpd
Synchronize Database
[root@controller1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
[root@controller1 ~]# su -s /bin/sh -c "nova-manage db sync" nova
List Nova Cell
[root@controller1 ~]# nova-manage cell_v2 list_cells
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
| Name | UUID | Transport URL | Database Connection |
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 |
| cell1 | b55310bc-6d58-4c8d-9b7c-014b77cd48ac | rabbit://openstack:****@controller:5673 | mysql+pymysql://nova:****@controller/nova |
+-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
Replace the Default Placement Port
Move the local placement API from 8778 to 9778 so HAProxy can bind 8778 on the VIP:
[root@controller1 ~]# sed -i 's/8778/9778/' /etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller1 ~]# systemctl restart httpd
Configure HA With HAProxy
[root@controller1 ~]# echo '
> ##nova_compute
> listen nova_compute_api_cluster
> bind controller:8774
> balance source
> option tcpka
> option httpchk
> option tcplog
> server controller1 controller1:8774 check inter 2000 rise 2 fall 5
> server controller2 controller2:8774 check inter 2000 rise 2 fall 5
> server controller3 controller3:8774 check inter 2000 rise 2 fall 5
> #Nova-api-metadata
> listen Nova-api-metadata_cluster
> bind controller:8775
> balance source
> option tcpka
> option httpchk
> option tcplog
> server controller1 controller1:8775 check inter 2000 rise 2 fall 5
> server controller2 controller2:8775 check inter 2000 rise 2 fall 5
> server controller3 controller3:8775 check inter 2000 rise 2 fall 5
> #nova_placement
> listen nova_placement_cluster
> bind controller:8778
> balance source
> option tcpka
> option tcplog
> server controller1 controller1:9778 check inter 2000 rise 2 fall 5
> server controller2 controller2:9778 check inter 2000 rise 2 fall 5
> server controller3 controller3:9778 check inter 2000 rise 2 fall 5
> '>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]# systemctl restart haproxy.service
netstat -antp|grep haproxy
netstat -antp|egrep '8774|8775|8778|6080'
Start Nova Service
[root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
List Nova Status
[root@controller1 ~]# openstack catalog list
+-----------+-----------+-----------------------------------------+
| Name | Type | Endpoints |
+-----------+-----------+-----------------------------------------+
| glance | image | RegionOne |
| | | internal: http://controller:9292 |
| | | RegionOne |
| | | admin: http://controller:9292 |
| | | RegionOne |
| | | public: http://controller:9292 |
| | | |
| nova | compute | RegionOne |
| | | public: http://controller:8774/v2.1 |
| | | RegionOne |
| | | admin: http://controller:8774/v2.1 |
| | | RegionOne |
| | | internal: http://controller:8774/v2.1 |
| | | |
| placement | placement | RegionOne |
| | | admin: http://controller:8778 |
| | | RegionOne |
| | | internal: http://controller:8778 |
| | | RegionOne |
| | | public: http://controller:8778 |
| | | |
| neutron | network | RegionOne |
| | | admin: http://controller:9696 |
| | | RegionOne |
| | | public: http://controller:9696 |
| | | RegionOne |
| | | internal: http://controller:9696 |
| | | |
| keystone | identity | RegionOne |
| | | admin: http://controller:35357/v3/ |
| | | RegionOne |
| | | internal: http://controller:5000/v3/ |
| | | RegionOne |
| | | public: http://controller:5000/v3/ |
| | | |
+-----------+-----------+-----------------------------------------+
[root@controller1 ~]# nova-status upgrade check
+--------------------------------------------------------------------+
| Upgrade Check Results |
+--------------------------------------------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: No host mappings or compute nodes were found. Remember to |
| run command 'nova-manage cell_v2 discover_hosts' when new |
| compute hosts are deployed. |
+--------------------------------------------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
| Check: Resource Providers |
| Result: Success |
| Details: There are no compute resource providers in the Placement |
| service nor are there compute nodes in the database. |
| Remember to configure new compute nodes to report into the |
| Placement service. See |
| http://docs.openstack.org/developer/nova/placement.html |
| for more details. |
+--------------------------------------------------------------------+
[root@controller1 ~]# openstack compute service list
+----+------------------+-------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+-------------+----------+---------+-------+----------------------------+
| 15 | nova-conductor | controller1 | internal | enabled | up | 2017-12-19T09:02:58.000000 |
| 18 | nova-scheduler | controller1 | internal | enabled | up | 2017-12-19T09:02:56.000000 |
| 21 | nova-consoleauth | controller1 | internal | enabled | up | 2017-12-19T09:02:59.000000 |
| 27 | nova-consoleauth | controller2 | internal | enabled | up | 2017-12-19T09:03:04.000000 |
| 30 | nova-conductor | controller2 | internal | enabled | up | 2017-12-19T09:03:03.000000 |
| 39 | nova-scheduler | controller2 | internal | enabled | up | 2017-12-19T09:02:57.000000 |
| 42 | nova-consoleauth | controller3 | internal | enabled | up | 2017-12-19T09:03:04.000000 |
| 45 | nova-conductor | controller3 | internal | enabled | up | 2017-12-19T09:03:05.000000 |
| 54 | nova-scheduler | controller3 | internal | enabled | up | 2017-12-19T09:02:59.000000 |
| 57 | nova-compute | compute01 | nova | enabled | up | 2017-12-19T09:02:59.000000 |
+----+------------------+-------------+----------+---------+-------+----------------------------+
Install On Other Controller Nodes
[root@controller2 ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/nova/* /etc/nova/
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
[root@controller2 ~]# sed -i '1,9s/controller1/controller2/' /etc/nova/nova.conf
[root@controller3 ~]# sed -i '1,9s/controller1/controller3/' /etc/nova/nova.conf
[root@controller2 ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller2 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller2 ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller2 ~]# systemctl restart httpd haproxy
Discover New Compute Node
[root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting compute nodes from cell 'cell1': b55310bc-6d58-4c8d-9b7c-014b77cd48ac
Found 0 unmapped computes in cell: b55310bc-6d58-4c8d-9b7c-014b77cd48ac
[root@controller1 ~]# openstack compute service list
+----+------------------+-------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+-------------+----------+---------+-------+----------------------------+
| 15 | nova-conductor | controller1 | internal | enabled | up | 2017-12-19T09:28:08.000000 |
| 18 | nova-scheduler | controller1 | internal | enabled | up | 2017-12-19T09:28:16.000000 |
| 21 | nova-consoleauth | controller1 | internal | enabled | up | 2017-12-19T09:28:10.000000 |
| 27 | nova-consoleauth | controller2 | internal | enabled | up | 2017-12-19T09:28:15.000000 |
| 30 | nova-conductor | controller2 | internal | enabled | up | 2017-12-19T09:28:13.000000 |
| 39 | nova-scheduler | controller2 | internal | enabled | up | 2017-12-19T09:28:17.000000 |
| 42 | nova-consoleauth | controller3 | internal | enabled | up | 2017-12-19T09:28:15.000000 |
| 45 | nova-conductor | controller3 | internal | enabled | up | 2017-12-19T09:28:15.000000 |
| 54 | nova-scheduler | controller3 | internal | enabled | up | 2017-12-19T09:28:09.000000 |
| 57 | nova-compute | compute01 | nova | enabled | up | 2017-12-19T09:28:10.000000 |
| 60 | nova-compute | compute02 | nova | enabled | up | 2017-12-19T09:28:16.000000 |
+----+------------------+-------------+----------+---------+-------+----------------------------+
Highly available Horizon API
Install Dashboard
[root@controller ~]# yum install openstack-dashboard -y
Configure Dashboard
[root@controller1 ~]# cp /etc/openstack-dashboard/local_settings{,.bak}
[root@controller1 ~]# DASHBOARD_LOCAL_SETTINGS=/etc/openstack-dashboard/local_settings
[root@controller1 ~]# sed -i 's#_member_#user#g' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# sed -in '153,158s/#//' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -in '160,164s/.*/#&/' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#UTC#Asia/Shanghai#g' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i 's#%s:5000/v2.0#%s:5000/v3#' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]# sed -i "[email protected]^#[email protected][email protected]" $DASHBOARD_LOCAL_SETTINGS
[root@controller1 ~]#
[root@controller1 ~]# echo '
> #set
> OPENSTACK_API_VERSIONS = {
> "identity": 3,
> "image": 2,
> "volume": 2,
> }
> #'>>$DASHBOARD_LOCAL_SETTINGS
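After these edits, the session and cache settings in local_settings should end up looking roughly like this (a sketch based on the standard memcached-backed configuration; adjust the server list to your controllers):
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': ['controller1:11211', 'controller2:11211', 'controller3:11211'],
    }
}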
Configure Other Controller Nodes
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/openstack-dashboard/local_settings controller2:/etc/openstack-dashboard/
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/openstack-dashboard/local_settings controller3:/etc/openstack-dashboard/
Restart Httpd Service
[root@controller1 ~]# systemctl restart httpd
[root@controller1 ~]# ssh controller2 "systemctl restart httpd"
[root@controller1 ~]# ssh controller3 "systemctl restart httpd"
Configure HA Dashboard API
Move Apache from port 80 to 8080 on each controller so HAProxy can bind the VIP on port 80:
sed -i 's#^Listen 80#Listen 8080#' /etc/httpd/conf/httpd.conf
systemctl restart httpd.service
systemctl daemon-reload
echo '
listen dashboard_cluster
bind controller:80
balance roundrobin
option tcpka
option httpchk
option tcplog
server controller1 controller1:8080 check port 8080 inter 2000 rise 2 fall 5
server controller2 controller2:8080 check port 8080 inter 2000 rise 2 fall 5
server controller3 controller3:8080 check port 8080 inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
systemctl restart haproxy.service
Now open the dashboard in a browser:
http://192.168.220.20/dashboard/
Linux bridge & L3 HA
Create OpenStack Service
[root@controller1 ~]# source admin-openrc
[root@controller1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Networking |
| enabled | True |
| id | ad17484f2f19423b9ffe8ab2b451906d |
| name | neutron |
| type | network |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | c4e2c0741118449d933107948c67651d |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | f35d94a749ae47d68b243a90015493bb |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 61e469452d914b78aabbf4bcc0a51732 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | ad17484f2f19423b9ffe8ab2b451906d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
Install OpenStack Neutron
[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y
Configure Neutron
[root@controller1 ~]# cp /etc/neutron/neutron.conf{,.bak2}
[root@controller1 ~]# echo '
> [DEFAULT]
> bind_port = 9696
> bind_host = controller1
> core_plugin = ml2
> service_plugins =
> #service_plugins = trunk
> #service_plugins = router
> allow_overlapping_ips = true
> transport_url = rabbit://openstack:openstack@controller
> auth_strategy = keystone
> notify_nova_on_port_status_changes = true
> notify_nova_on_port_data_changes = true
>
> [keystone_authtoken]
> auth_uri = http://controller:5000
> auth_url = http://controller:35357
> memcached_servers = controller1:11211
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> project_name = service
> username = neutron
> password = neutron
>
> [nova]
> auth_url = http://controller:35357
> auth_plugin = password
> project_domain_id = default
> user_domain_id = default
> region_name = RegionOne
> project_name = service
> username = nova
> password = nova
>
> [database]
> connection = mysql://neutron:neutron@controller:3306/neutron
>
> [oslo_concurrency]
> lock_path = /var/lib/neutron/tmp
> #'>/etc/neutron/neutron.conf
Configure ML2
[root@controller1 ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
[root@controller1 ~]# echo '#
> [ml2]
> tenant_network_types =
> type_drivers = vlan,flat
> mechanism_drivers = linuxbridge
> extension_drivers = port_security
> [ml2_type_flat]
> flat_networks = provider
> [securitygroup]
> enable_ipset = True
> #vlan
> # [ml2_type_vlan]
> # network_vlan_ranges = provider:3001:4000
> #'>/etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
Configure Linux Bridge
[root@controller1 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@controller1 ~]# echo '#
> [linux_bridge]
> physical_interface_mappings = provider:'ens37'
> [vxlan]
> enable_vxlan = false
> [agent]
> prevent_arp_spoofing = True
> [securitygroup]
> firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
> enable_security_group = True
> #'>/etc/neutron/plugins/ml2/linuxbridge_agent.ini
Configure DHCP
[root@controller1 ~]# cp /etc/neutron/dhcp_agent.ini{,.bak}
[root@controller1 ~]# echo '#
> [DEFAULT]
> interface_driver = linuxbridge
> dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
> enable_isolated_metadata = true
> #'>/etc/neutron/dhcp_agent.ini
Configure Metadata
[root@controller1 ~]# cp /etc/neutron/metadata_agent.ini{,.bak}
[root@controller1 ~]# echo '
> [DEFAULT]
> nova_metadata_ip = controller
> metadata_proxy_shared_secret = metadata
> #'>/etc/neutron/metadata_agent.ini
Configure Nova
[root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
[root@controller1 ~]# echo '
> #
> [neutron]
> url = http://controller:9696
> auth_url = http://controller:35357
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> region_name = RegionOne
> project_name = service
> username = neutron
> password = neutron
> service_metadata_proxy = true
> metadata_proxy_shared_secret = metadata
> #'>>/etc/nova/nova.conf
Configure L3
[root@controller1 ~]# cp /etc/neutron/l3_agent.ini{,.bak}
[root@controller1 ~]#
[root@controller1 ~]# echo '
> [DEFAULT]
> interface_driver = linuxbridge
> #'>/etc/neutron/l3_agent.ini
Synchronize Database
[root@controller1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller1 ~]# mysql -h controller -u neutron -pneutron -e "use neutron;show tables;"
Configure HAProxy For Neutron API
[root@controller1 ~]# echo '
> #Neutron_API
> listen Neutron_API_cluster
> bind controller:9696
> balance source
> option tcpka
> option tcplog
> server controller1 controller1:9696 check inter 2000 rise 2 fall 5
> server controller2 controller2:9696 check inter 2000 rise 2 fall 5
> server controller3 controller3:9696 check inter 2000 rise 2 fall 5
> '>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]# systemctl restart haproxy.service
[root@controller1 ~]# netstat -antp|grep haproxy
tcp 0 0 192.168.220.20:9292 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:35357 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:9696 0.0.0.0:* LISTEN 76948/haproxy
tcp 0 0 192.168.220.20:5000 0.0.0.0:* LISTEN 76948/haproxy
Start Neutron Service
[root@controller1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller1 ~]# systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
Configure Controller Node 2 & Start Service
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
[root@controller2 ~]# sed -i 's/controller1/controller2/' /etc/neutron/neutron.conf
[root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
[root@controller2 ~]# systemctl restart haproxy
[root@controller2 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller2 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
Configure Controller Node 3 & Start Service
[root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
[root@controller3 ~]# sed -i 's/controller1/controller3/' /etc/neutron/neutron.conf
[root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
[root@controller3 ~]# systemctl restart haproxy
[root@controller3 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller3 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
Configure L3 HA on Controller node
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT router_distributed True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha_net_cidr 169.254.192.0/18
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 3
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 2
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch,l2population
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks external
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT ha_vrrp_auth_password password
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr_snat
Configure L3 HA on Compute Node
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr
Verify Service
[root@controller1 ~]# openstack network agent list
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
| 1a4efb8b-aa65-4d4a-8092-7213592acd22 | Linux bridge agent | controller1 | None | :-) | UP | neutron-linuxbridge-agent |
| 3b35bc6e-4cec-42e2-9fde-e99c601cf609 | DHCP agent | controller3 | nova | :-) | UP | neutron-dhcp-agent |
| 42e57e23-eecb-490d-b709-d8e3730107e8 | DHCP agent | controller2 | nova | :-) | UP | neutron-dhcp-agent |
| 6b2058a2-d3e3-4342-afbb-717338b1499f | Metadata agent | controller1 | None | :-) | UP | neutron-metadata-agent |
| 750b5e5c-c7b6-4f48-ae2f-37580b6e03d9 | DHCP agent | controller1 | nova | :-) | UP | neutron-dhcp-agent |
| 7e63ce46-3fd5-40ee-9f63-ee8cc52dd5a4 | Metadata agent | controller3 | None | :-) | UP | neutron-metadata-agent |
| 92199bf0-08ef-4642-9557-c33360796405 | Linux bridge agent | controller2 | None | :-) | UP | neutron-linuxbridge-agent |
| 9ae5bafa-0075-4408-b827-1be9bb1ccf99 | Linux bridge agent | controller3 | None | :-) | UP | neutron-linuxbridge-agent |
| f1ed9e45-39e7-4980-aaec-10364e42263f | Metadata agent | controller2 | None | :-) | UP | neutron-metadata-agent |
+--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
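With l3_ha enabled, each new router gets active/standby instances spread across the L3 agents. A quick way to exercise this (router name is illustrative):
openstack router create --ha demo-router
neutron l3-agent-list-hosting-router demo-router   # shows which agent holds the active instance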
Highly available Block Storage API
Waiting for test...
Configure Network
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=4e333024-b8c8-45e4-baee-e46ece81432c
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.101
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114
Fdisk the 2nd Disk
[root@localhost ~]# ls -al /dev/sd*
brw-rw----. 1 root disk 8, 0 Dec 26 2017 /dev/sda
brw-rw----. 1 root disk 8, 1 Dec 26 2017 /dev/sda1
brw-rw----. 1 root disk 8, 2 Dec 26 2017 /dev/sda2
brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
[root@localhost ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Command (m for help): n
Partition type:
p primary (0 primary, 0 extended, 4 free)
e extended
Select (default p):
Using default response p
Partition number (1-4, default 1):
First sector (2048-125829119, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-125829119, default 125829119): +30G
Partition 1 of type Linux and of size 30 GiB is set
Command (m for help): n
Partition type:
p primary (1 primary, 0 extended, 3 free)
e extended
Select (default p):
Using default response p
Partition number (2-4, default 2):
First sector (62916608-125829119, default 62916608):
Using default value 62916608
Last sector, +sectors or +size{K,M,G} (62916608-125829119, default 125829119):
Using default value 125829119
Partition 2 of type Linux and of size 30 GiB is set
Command (m for help): p
Disk /dev/sdb: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x285890cb
Device Boot Start End Blocks Id System
/dev/sdb1 2048 62916607 31457280 83 Linux
/dev/sdb2 62916608 125829119 31456256 83 Linux
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
[root@localhost ~]# ls -al /dev/sd*
brw-rw----. 1 root disk 8, 0 Dec 26 2017 /dev/sda
brw-rw----. 1 root disk 8, 1 Dec 26 2017 /dev/sda1
brw-rw----. 1 root disk 8, 2 Dec 26 2017 /dev/sda2
brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
brw-rw----. 1 root disk 8, 17 Dec 26 01:11 /dev/sdb1
brw-rw----. 1 root disk 8, 18 Dec 26 01:11 /dev/sdb2
Format 2nd Disk
[root@localhost ~]# mkfs.ext4 /dev/sdb1
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1966080 inodes, 7864320 blocks
393216 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2155872256
240 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
[root@localhost ~]# mkfs.ext4 /dev/sdb2
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1966080 inodes, 7864064 blocks
393203 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2155872256
240 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
Mount /dev/sdb1
[root@localhost ~]# mkdir -p /date
[root@localhost ~]# mount -t ext4 /dev/sdb1 /date
[root@localhost ~]# df -h|grep /dev/sdb1
/dev/sdb1 30G 45M 28G 1% /date
[root@localhost ~]# echo "mount -t ext4 /dev/sdb1 /date" >>/etc/rc.d/rc.local
[root@localhost ~]# tail -1 /etc/rc.d/rc.local
mount -t ext4 /dev/sdb1 /date
[root@localhost ~]# chmod +x /etc/rc.d/rc.local
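rc.local works, but the conventional way to persist the mount is an fstab entry (equivalent effect):
echo "/dev/sdb1 /date ext4 defaults 0 0" >>/etc/fstab
mount -a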
Create an LVM Volume Group on /dev/sdb2
[root@localhost ~]# yum install lvm2 -y
[root@localhost ~]# systemctl enable lvm2-lvmetad.service
Created symlink from /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.service to /usr/lib/systemd/system/lvm2-lvmetad.service.
[root@localhost ~]# systemctl start lvm2-lvmetad.service
[root@localhost ~]# pvcreate /dev/sdb2
WARNING: ext4 signature detected on /dev/sdb2 at offset 1080. Wipe it? [y/n]: y
Wiping ext4 signature on /dev/sdb2.
Physical volume "/dev/sdb2" successfully created.
[root@localhost ~]# vgcreate cinder_lvm01 /dev/sdb2
Volume group "cinder_lvm01" successfully created
[root@localhost ~]# vgdisplay
--- Volume group ---
VG Name cinder_lvm01
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <30.00 GiB
PE Size 4.00 MiB
Total PE 7679
Alloc PE / Size 0 / 0
Free PE / Size 7679 / <30.00 GiB
VG UUID jyb299-bo5k-E6Z3-Frho-e6Kz-d9Mu-yK0m6c
--- Volume group ---
VG Name centos
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 3
Max PV 0
Cur PV 1
Act PV 1
VG Size <79.00 GiB
PE Size 4.00 MiB
Total PE 20223
Alloc PE / Size 20222 / 78.99 GiB
Free PE / Size 1 / 4.00 MiB
VG UUID NHtSF8-nozf-sbB4-vEBt-ogJo-WmuG-F8HYMQ
Install NFS
[root@localhost ~]# yum install nfs-utils rpcbind -y
[root@localhost ~]# mkdir -p /date/{cinder_nfs1,cinder_nfs2}
[root@localhost ~]# chmod -R 777 /date
[root@localhost ~]# echo "/date/cinder_nfs1 *(rw,root_squash,sync,anonuid=165,anongid=165)">/etc/exports
[root@localhost ~]# exportfs -r
[root@localhost ~]# systemctl enable rpcbind nfs-server
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@localhost ~]# systemctl restart rpcbind nfs-server
[root@localhost ~]# showmount -e localhost
Export list for localhost:
/date/cinder_nfs1 *
Install & Configure Cinder
[root@localhost ~]# yum install openstack-cinder targetcli python-keystone lvm2 -y
[root@localhost ~]# cp /etc/cinder/cinder.conf{,.bak}
[root@localhost ~]# cp /etc/lvm/lvm.conf{,.bak}
[root@localhost ~]# sed -i '141a filter = [ "a/sda/", "a/sdb2/", "r/.*/"]' /etc/lvm/lvm.conf
(The filter must also accept sda, since the centos root VG lives there; accepting only sdb2 would hide the root PV from LVM.)
[root@localhost ~]# echo '192.168.220.101:/date/cinder_nfs1'>/etc/cinder/nfs_shares
[root@localhost ~]# chmod 640 /etc/cinder/nfs_shares
[root@localhost ~]# chown root:cinder /etc/cinder/nfs_shares
Configure cinder.conf
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT log_dir /var/log/cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT state_path /var/lib/cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@controller
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm,nfs
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@controller/cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password cinder
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder_lvm01
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_ip_address 192.168.220.101
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volumes_dir '$state_path/volumes'
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_backend_name lvm01
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_driver cinder.volume.drivers.nfs.NfsDriver
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_shares_config /etc/cinder/nfs_shares
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_mount_point_base '$state_path/mnt'
[root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_backend_name nfs01
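Taken together, the commands above should leave the backend sections of /etc/cinder/cinder.conf looking like this (shown for reference only; every value mirrors a command above):
[DEFAULT]
enabled_backends = lvm,nfs

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
iscsi_helper = lioadm
iscsi_protocol = iscsi
volume_group = cinder_lvm01
iscsi_ip_address = 192.168.220.101
volumes_dir = $state_path/volumes
volume_backend_name = lvm01

[nfs]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config = /etc/cinder/nfs_shares
nfs_mount_point_base = $state_path/mnt
volume_backend_name = nfs01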
Start Cinder Service
[root@localhost ~]# chmod 640 /etc/cinder/cinder.conf
[root@localhost ~]# chgrp cinder /etc/cinder/cinder.conf
[root@localhost ~]# systemctl enable openstack-cinder-volume.service target.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.
[root@localhost ~]# systemctl start openstack-cinder-volume.service target.service
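Once the service is running, both backends should register with the scheduler. From a node with admin credentials sourced, something like the following confirms it (a verification sketch; the host column depends on the node's hostname):
openstack volume service list            # expect cinder-volume entries for host@lvm and host@nfs, state "up"
tail -n 20 /var/log/cinder/volume.log    # watch for backend initialization errors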
Add Cinder to Pacemaker
# pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
# pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
# pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume
# pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
# pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
# pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
# pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone
# pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone
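The resulting resources and rules can be confirmed with the standard pcs queries (not part of the original transcript):
pcs status          # api/scheduler clones plus the single cinder-volume resource
pcs constraint      # lists the order and colocation rules created above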
Configure Cinder Service
# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
# openstack-config --set /etc/cinder/cinder.conf DEFAULT host cinder-cluster-1
# openstack-config --set /etc/cinder/cinder.conf DEFAULT osapi_volume_listen 10.0.0.11
# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
# openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder
# openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver
# openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_shares_config /etc/cinder/nfs_exports
# openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_sparsed_volumes true
# openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_mount_options v3
# openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:CINDER_DBPASS@10.0.0.11/cinder
# openstack-config --set /etc/cinder/cinder.conf database max_retries -1
# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken identity_uri http://10.0.0.11:35357/
# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://10.0.0.11:5000/
# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service
# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password CINDER_PASS
# openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_hosts 10.0.0.12,10.0.0.13,10.0.0.14
# openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_ha_queues True
# openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_timeout_threshold 60
# openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_rate 2
Configure HA Cinder API
$ openstack endpoint create volume --region $KEYSTONE_REGION \
--publicurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
--adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
--internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s'
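With admin credentials sourced, the new endpoint can be read back from the catalog (a quick check, assuming the usual CLI environment):
openstack endpoint list | grep volume    # all three URLs should point at the 10.0.0.11 VIP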
Highly Available Image API
Create Service For Glance
[root@controller1 ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Image |
| enabled | True |
| id | 3083848116cd4706bc39241f81e3475b |
| name | glance |
| type | image |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | f3eb684e2209471795a04f6e73ce120f |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 3083848116cd4706bc39241f81e3475b |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | bbf93cdcc35645288cfa90ef6239e1b5 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 3083848116cd4706bc39241f81e3475b |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | da194b1e1e8148859a45a83fac2c7403 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 3083848116cd4706bc39241f81e3475b |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
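The Glance configuration written below authenticates as a glance service user with password glance; if that user does not exist yet, it would be created in the usual way (a prerequisite sketch, not part of the original transcript):
openstack user create --domain default --password glance glance
openstack role add --project service --user glance admin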
Install OpenStack Glance
[root@controller ~]# yum install openstack-glance python-glance -y
Configure Glance Service
[root@controller1 ~]# cp /etc/glance/glance-api.conf{,.bak}
[root@controller1 ~]# cp /etc/glance/glance-registry.conf{,.bak}
[root@controller ~]# mkdir -p /data/glance
[root@controller1 ~]# echo "#
> [DEFAULT]
> debug = False
> verbose = True
> bind_host = controller1
> bind_port = 9292
> auth_region = RegionOne
> registry_client_protocol = http
> [database]
> connection = mysql+pymysql://glance:glance@controller/glance
> [keystone_authtoken]
> auth_uri = http://controller:5000/v3
> auth_url = http://controller:35357/v3
> memcached_servers = controller1:11211,controller2:11211,controller3:11211
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> project_name = service
> username = glance
> password = glance
> [paste_deploy]
> flavor = keystone
> [glance_store]
> stores = file,http
> default_store = file
> filesystem_store_datadir = /data/glance
> [oslo_messaging_rabbit]
> rabbit_userid = openstack
> rabbit_password = openstack
> rabbit_durable_queues = true
> rabbit_ha_queues = true
> rabbit_max_retries = 0
> rabbit_port = 5672
> rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
> #">/etc/glance/glance-api.conf
[root@controller1 ~]# echo "#
> [DEFAULT]
> debug = False
> verbose = True
> bind_host = controller1
> bind_port = 9191
> workers = 2
> [database]
> connection = mysql+pymysql://glance:glance@controller/glance
> [keystone_authtoken]
> auth_uri = http://controller:5000/v3
> auth_url = http://controller:35357/v3
> memcached_servers = controller1:11211,controller2:11211,controller3:11211
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> project_name = service
> username = glance
> password = glance
> [paste_deploy]
> flavor = keystone
> [oslo_messaging_rabbit]
> rabbit_userid = openstack
> rabbit_password = openstack
> rabbit_durable_queues = true
> rabbit_ha_queues = true
> rabbit_max_retries = 0
> rabbit_port = 5672
> rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
> #">/etc/glance/glance-registry.conf
[root@controller1 ~]#
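Because both files were written with a single quoted echo, a quick read-back guards against quoting mistakes (openstack-config also supports --get):
openstack-config --get /etc/glance/glance-api.conf glance_store filesystem_store_datadir    # expect /data/glance
openstack-config --get /etc/glance/glance-api.conf database connection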
Synchronize Database
[root@controller1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1328: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
expire_on_commit=expire_on_commit, _conf=conf)
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> liberty, liberty initial
INFO [alembic.runtime.migration] Running upgrade liberty -> mitaka01, add index on created_at and updated_at columns of 'images' table
INFO [alembic.runtime.migration] Running upgrade mitaka01 -> mitaka02, update metadef os_nova_server
INFO [alembic.runtime.migration] Running upgrade mitaka02 -> ocata01, add visibility to and remove is_public from images
INFO [alembic.runtime.migration] Running upgrade ocata01 -> pike01, drop glare artifacts tables
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: pike01, current revision(s): pike01
[root@controller1 ~]# mysql -h controller -u glance -pglance -e "use glance;show tables;"
+----------------------------------+
| Tables_in_glance |
+----------------------------------+
| alembic_version |
| image_locations |
| image_members |
| image_properties |
| image_tags |
| images |
| metadef_namespace_resource_types |
| metadef_namespaces |
| metadef_objects |
| metadef_properties |
| metadef_resource_types |
| metadef_tags |
| migrate_version |
| task_info |
| tasks |
+----------------------------------+
Start Glance Service
[root@controller1 ~]# systemctl enable openstack-glance-api openstack-glance-registry
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service to /usr/lib/systemd/system/openstack-glance-registry.service.
[root@controller1 ~]# systemctl restart openstack-glance-api openstack-glance-registry
[root@controller1 ~]# netstat -antp|grep python2
tcp 0 0 192.168.220.21:9292 0.0.0.0:* LISTEN 13125/python2
tcp 0 0 192.168.220.21:9191 0.0.0.0:* LISTEN 13126/python2
[root@controller1 ~]# netstat -antp|egrep '9292|9191'
tcp 0 0 192.168.220.21:9292 0.0.0.0:* LISTEN 13125/python2
tcp 0 0 192.168.220.21:9191 0.0.0.0:* LISTEN 13126/python2
tcp 32 0 192.168.220.21:39292 192.168.220.21:2224 CLOSE_WAIT 699/ruby
tcp 0 0 192.168.220.11:49292 192.168.220.23:2224 ESTABLISHED 699/ruby
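Beyond netstat, the API can be exercised directly; Glance answers version negotiation without authentication, which makes a convenient liveness probe (a sketch, not from the original transcript):
curl -s http://controller1:9292/versions    # returns a JSON list of supported image API versions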
Configure HAProxy For Glance
[root@controller1 ~]# echo '
> #glance_api_cluster
> listen glance_api_cluster
> bind controller:9292
> #balance source
> option tcpka
> option httpchk
> option tcplog
> server controller1 controller1:9292 check inter 2000 rise 2 fall 5
> server controller2 controller2:9292 check inter 2000 rise 2 fall 5
> server controller3 controller3:9292 check inter 2000 rise 2 fall 5
> '>>/etc/haproxy/haproxy.cfg
[root@controller1 ~]# systemctl restart haproxy.service
[root@controller1 ~]# netstat -antp|grep haproxy
tcp 0 0 192.168.220.20:9292 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 192.168.220.20:35357 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 192.168.220.20:5000 0.0.0.0:* LISTEN 13170/haproxy
tcp 0 0 0.0.0.0:5000 0.0.0.0:* LISTEN 13170/haproxy
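The same probe through the cluster hostname confirms HAProxy is actually forwarding to a healthy backend (assuming controller resolves to the 192.168.220.20 VIP, as the netstat output suggests):
curl -s http://controller:9292/versions    # should return the same version document via HAProxy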
Create Cirros Image
[root@controller glance]# wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
--2017-12-18 02:06:45-- http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 64.90.42.85, 2607:f298:6:a036::bd6:a72a
Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|64.90.42.85|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 13267968 (13M) [text/plain]
Saving to: ‘cirros-0.3.5-x86_64-disk.img’
100%[==============================================>] 13,267,968 1.04MB/s in 13s
2017-12-18 02:06:58 (1023 KB/s) - ‘cirros-0.3.5-x86_64-disk.img’ saved [13267968/13267968]
[root@controller1 glance]# openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field | Value |
+------------------+------------------------------------------------------+
| checksum | f8ab98ff5e73ebab884d80c9dc9c7290 |
| container_format | bare |
| created_at | 2017-12-18T07:37:04Z |
| disk_format | qcow2 |
| file | /v2/images/82e5d7d9-86a0-4266-a599-e50e8c4b0cbe/file |
| id | 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros |
| owner | 2291724ac1a54d65844cc5dba56f4803 |
| protected | False |
| schema | /v2/schemas/image |
| size | 13267968 |
| status | active |
| tags | |
| updated_at | 2017-12-18T07:37:05Z |
| virtual_size | None |
| visibility | public |
+------------------+------------------------------------------------------+
List OpenStack Image
[root@controller1 glance]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe | cirros | active |
+--------------------------------------+--------+--------+
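Since the store is file-backed, the uploaded image should also be visible on disk under its UUID (a quick cross-check against the table above):
ls -lh /data/glance/            # expect 82e5d7d9-86a0-4266-a599-e50e8c4b0cbe, about 13 MB
openstack image show cirros     # status should be active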
Configure Other Controller Nodes
[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller2:/etc/glance/
sending incremental file list
glance-api.conf
883 100% 0.00kB/s 0:00:00 (xfer#1, to-check=44/45)
glance-api.conf.bak
150676 100% 14.37MB/s 0:00:00 (xfer#2, to-check=43/45)
glance-registry.conf
744 100% 72.66kB/s 0:00:00 (xfer#3, to-check=41/45)
glance-registry.conf.bak
77404 100% 5.27MB/s 0:00:00 (xfer#4, to-check=40/45)
metadefs/
rootwrap.d/
sent 61739 bytes received 126 bytes 123730.00 bytes/sec
total size is 504600 speedup is 8.16
[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/glance/* controller3:/etc/glance/
sending incremental file list
glance-api.conf
883 100% 0.00kB/s 0:00:00 (xfer#1, to-check=44/45)
glance-api.conf.bak
150676 100% 28.74MB/s 0:00:00 (xfer#2, to-check=43/45)
glance-registry.conf
744 100% 121.09kB/s 0:00:00 (xfer#3, to-check=41/45)
glance-registry.conf.bak
77404 100% 9.23MB/s 0:00:00 (xfer#4, to-check=40/45)
metadefs/
rootwrap.d/
sent 61739 bytes received 126 bytes 123730.00 bytes/sec
total size is 504600 speedup is 8.16
[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller2:/etc/haproxy/
sending incremental file list
haproxy.cfg
4376 100% 3.51MB/s 0:00:00 (xfer#1, to-check=0/1)
sent 75 bytes received 73 bytes 296.00 bytes/sec
total size is 4376 speedup is 29.57
[root@controller1 glance]# rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg controller3:/etc/haproxy/
sending incremental file list
haproxy.cfg
4376 100% 3.51MB/s 0:00:00 (xfer#1, to-check=0/1)
sent 75 bytes received 73 bytes 296.00 bytes/sec
total size is 4376 speedup is 29.57
[root@controller1 glance]#
[root@controller1 glance]# ssh controller2 "sed -i '1,10s/controller1/controller2/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
[root@controller1 glance]# ssh controller3 "sed -i '1,10s/controller1/controller3/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
Start Other Nodes’ Services
[root@controller1 glance]# ssh controller2 "systemctl enable openstack-glance-api openstack-glance-registry"
[root@controller1 glance]# ssh controller2 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service;"
[root@controller1 glance]# ssh controller3 "systemctl enable openstack-glance-api openstack-glance-registry"
[root@controller1 glance]# ssh controller3 "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service;"
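A short loop verifies every node is actually listening before HAProxy starts health-checking them (a sketch reusing the same netstat filter as above):
for node in controller2 controller3; do
    ssh $node "netstat -antp | egrep '9292|9191'"
done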
Add OpenStack Image API resource to Pacemaker
[root@controller1 glance]# cd /usr/lib/ocf/resource.d/openstack
[root@controller1 openstack]# wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
--2017-12-18 03:00:41-- https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
Resolving git.openstack.org (git.openstack.org)... 104.130.246.128, 2001:4800:7819:103:be76:4eff:fe06:63c
Connecting to git.openstack.org (git.openstack.org)|104.130.246.128|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 11439 (11K) [text/plain]
Saving to: ‘glance-api’
100%[====================================>] 11,439 --.-K/s in 0s
2017-12-18 03:00:42 (31.5 MB/s) - ‘glance-api’ saved [11439/11439]
[root@controller1 openstack]# chmod a+rx *
[root@controller1 openstack]# ls
glance-api nova-compute-wait NovaEvacuate
Create Local Repository
[root@localrepo yum.repos.d]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo
[root@localrepo myrepo]# reposync --repoid=network_ha-clustering_Stable
[root@localrepo myrepo]# createrepo /var/www/html/myrepo/network_ha-clustering_Stable/
[root@controller1 openstack]# cat /etc/yum.repos.d/network_ha-clustering_Stable.repo
[network_ha-clustering_Stable]
name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
type=rpm-md
baseurl=http://192.168.220.200/myrepo/network_ha-clustering_Stable/
gpgcheck=0
gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
enabled=1
Install CRMSH
[root@controller1 openstack]# yum install crmsh -y
[root@controller1 openstack]# crm configure
crm(live)configure# primitive p_glance-api ocf:openstack:glance-api \
> params config="/etc/glance/glance-api.conf" \
> os_password="admin" \
> os_username="admin" os_tenant_name="admin" \
> os_auth_url="http://controller:5000/v3/" \
> op monitor interval="30s" timeout="30s"
crm(live)configure# commit
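After the commit, the primitive can be inspected and its state confirmed with the standard crmsh commands:
crm configure show p_glance-api    # dumps the primitive exactly as committed
crm status                         # p_glance-api should report Started on one node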
Configure OpenStack Image service API
Back up the configuration file, then edit /etc/glance/glance-api.conf so the Image API uses the highly available database, binds to the cluster hostname, and reaches the registry and RabbitMQ through it:
cp /etc/glance/glance-api.conf{,.bak}
connection = mysql+pymysql://glance:glance@controller/glance
bind_host = controller
registry_host = controller
notifier_strategy = rabbit
rabbit_host = controller
Then point the services that consume images at the highly available Image API. In nova.conf on every node (see the sketch below):
[glance]
api_servers = 10.0.0.11
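If nova.conf is managed the same way as cinder.conf earlier, the equivalent one-liner would be (a sketch using the same openstack-config tool):
openstack-config --set /etc/nova/nova.conf glance api_servers 10.0.0.11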
Configure OpenStack services to use the highly available OpenStack Image API
$ openstack endpoint create --region $KEYSTONE_REGION image public http://10.0.0.10:9292
$ openstack endpoint create --region $KEYSTONE_REGION image admin http://10.0.0.11:9292
$ openstack endpoint create --region $KEYSTONE_REGION image internal http://10.0.0.11:9292
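Finally, with admin credentials sourced, the three image endpoints can be read back to confirm the split between the public and internal VIPs:
openstack endpoint list | grep image    # public on 10.0.0.10, admin/internal on 10.0.0.11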