How to Use KVM to Create a Virtual Machine

Configure Firewall

  1. systemctl stop firewalld.service
  2. systemctl disable firewalld.service
  3. firewall-cmd --state
  4.  
  5. sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
  6. grep --color=auto '^SELINUX' /etc/selinux/config
  7. setenforce 0
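
To confirm without a reboot (setenforce 0 drops SELinux to permissive immediately; the config change takes effect on the next boot):

  1. getenforce
  2. Permissive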

Install KVM & Virt

  1. yum install qemu-kvm libvirt -y
  2. yum install virt-install -y
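
KVM needs hardware virtualization support; before going further it is worth confirming the CPU flag is present and the kvm modules loaded:

  1. egrep -c '(vmx|svm)' /proc/cpuinfo
  2. lsmod | grep kvm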

Start Libvirt Service

  1. systemctl start libvirtd && systemctl enable libvirtd
  2.  
  3. [root@localhost ~]# ifconfig
  4. ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
  5.         inet 192.168.220.202  netmask 255.255.255.0  broadcast 192.168.220.255
  6.         inet6 fe80::c269:7c04:a06b:dce7  prefixlen 64  scopeid 0x20<link>
  7.         ether 00:0c:29:4e:32:2a  txqueuelen 1000  (Ethernet)
  8.         RX packets 3394211  bytes 4731781088 (4.4 GiB)
  9.         RX errors 0  dropped 0  overruns 0  frame 0
  10.         TX packets 264816  bytes 35363147 (33.7 MiB)
  11.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  12.  
  13. lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
  14.         inet 127.0.0.1  netmask 255.0.0.0
  15.         inet6 ::1  prefixlen 128  scopeid 0x10<host>
  16.         loop  txqueuelen 1  (Local Loopback)
  17.         RX packets 68  bytes 5920 (5.7 KiB)
  18.         RX errors 0  dropped 0  overruns 0  frame 0
  19.         TX packets 68  bytes 5920 (5.7 KiB)
  20.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  21.  
  22. virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
  23.         inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
  24.         ether 52:54:00:a5:ea:48  txqueuelen 1000  (Ethernet)
  25.         RX packets 0  bytes 0 (0.0 B)
  26.         RX errors 0  dropped 0  overruns 0  frame 0
  27.         TX packets 0  bytes 0 (0.0 B)
  28.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Create Disk

  1. [root@localhost ~]# qemu-img create -f raw /opt/CentOS-7-x86_64.raw 10G
  2. Formatting '/opt/CentOS-7-x86_64.raw', fmt=raw size=10737418240
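
raw is simple and fast; if you want snapshots and thin provisioning, qcow2 is the usual alternative (an optional variant, not used below):

  1. qemu-img create -f qcow2 /opt/CentOS-7-x86_64.qcow2 10G
  2. qemu-img info /opt/CentOS-7-x86_64.qcow2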

Upload System ISO

  1. [root@localhost ~]# ls /ISO
  2. CentOS-7-x86_64-DVD-1708.iso

Begin Install OS

  1. [root@localhost ~]# virt-install --virt-type kvm --name CentOS-7-x86_64 --ram 1024 --cdrom=/ISO/CentOS-7-x86_64-DVD-1708.iso --disk path=/opt/CentOS-7-x86_64.raw --network network=default --graphics vnc,listen=0.0.0.0 --noautoconsole
  2.  
  3. Starting install...
  4. Domain installation still in progress. You can reconnect to 
  5. the console to complete the installation process.

Connect With VNC
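
(Screenshot omitted.) The guest's console is exposed over VNC; the first guest listens on display :0 (TCP 5900). Confirm the display with virsh, then connect with any VNC client (vncviewer is just one example) and complete the CentOS installer:

  1. [root@localhost ~]# virsh vncdisplay CentOS-7-x86_64
  2. :0
  3.  
  4. vncviewer 192.168.220.202:5900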

List Virtual Machine

  1. [root@localhost ~]# virsh list --all
  2.  Id    Name                           State
  3. ----------------------------------------------------
  4.  -     CentOS-7-x86_64                shut off

Start Virtual Machine

  1. [root@localhost ~]# virsh start CentOS-7-x86_64
  2. Domain CentOS-7-x86_64 started
  3.  
  4. [root@localhost ~]# virsh list --all
  5.  Id    Name                           State
  6. ----------------------------------------------------
  7.  3     CentOS-7-x86_64                running

Test Virtual Machine

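(Screenshot omitted.) Connect over VNC as above, log in, and check networking from inside the guest (a minimal smoke test on the default NAT network):

  1. ping -c 3 192.168.122.1
  2. ping -c 3 8.8.8.8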

List Virbr

  1. [root@localhost ~]# brctl show
  2. bridge name     bridge id               STP enabled     interfaces
  3. virbr0          8000.525400a5ea48       yes             virbr0-nic
  4.                                                         vnet0
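
vnet0 is the tap device libvirt created for the running guest; virsh shows the mapping (output abridged, the MAC is this host's example):

  1. [root@localhost ~]# virsh domiflist CentOS-7-x86_64
  2. Interface  Type     Source   Model   MAC
  3. -----------------------------------------------------
  4. vnet0      network  default  virtio  52:54:00:af:a4:99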

Create br0

  1. [root@localhost ~]# vi create-br0.sh
  2.  
  3. [root@localhost ~]# cat create-br0.sh
  4. brctl addbr br0
  5. brctl addif br0 ens33
  6. ip addr del 192.168.220.202/24 dev ens33
  7. ifconfig br0 192.168.220.202/24 up
  8. route add default gw 192.168.220.2
  9.  
  10. [root@localhost ~]# chmod +x create-br0.sh 
  11. [root@localhost ~]# ./create-br0.sh

List br0 Status

  1. [root@localhost network-scripts]# ifconfig
  2. br0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
  3.         inet 192.168.220.202  netmask 255.255.255.0  broadcast 192.168.220.255
  4.         inet6 fe80::20c:29ff:fe4e:322a  prefixlen 64  scopeid 0x20<link>
  5.         ether 00:0c:29:4e:32:2a  txqueuelen 1000  (Ethernet)
  6.         RX packets 144  bytes 12890 (12.5 KiB)
  7.         RX errors 0  dropped 0  overruns 0  frame 0
  8.         TX packets 96  bytes 17020 (16.6 KiB)
  9.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  10.  
  11. ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
  12.         inet6 fe80::c269:7c04:a06b:dce7  prefixlen 64  scopeid 0x20<link>
  13.         ether 00:0c:29:4e:32:2a  txqueuelen 1000  (Ethernet)
  14.         RX packets 3432987  bytes 4745607852 (4.4 GiB)
  15.         RX errors 0  dropped 0  overruns 0  frame 0
  16.         TX packets 297978  bytes 44267836 (42.2 MiB)
  17.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  18.  
  19. lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
  20.         inet 127.0.0.1  netmask 255.0.0.0
  21.         inet6 ::1  prefixlen 128  scopeid 0x10<host>
  22.         loop  txqueuelen 1  (Local Loopback)
  23.         RX packets 68  bytes 5920 (5.7 KiB)
  24.         RX errors 0  dropped 0  overruns 0  frame 0
  25.         TX packets 68  bytes 5920 (5.7 KiB)
  26.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  27.  
  28. virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
  29.         inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
  30.         ether 52:54:00:a5:ea:48  txqueuelen 1000  (Ethernet)
  31.         RX packets 3970  bytes 167470 (163.5 KiB)
  32.         RX errors 0  dropped 0  overruns 0  frame 0
  33.         TX packets 4261  bytes 11695250 (11.1 MiB)
  34.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  35.  
  36. vnet0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
  37.         inet6 fe80::fc54:ff:feaf:a499  prefixlen 64  scopeid 0x20<link>
  38.         ether fe:54:00:af:a4:99  txqueuelen 1000  (Ethernet)
  39.         RX packets 3970  bytes 223050 (217.8 KiB)
  40.         RX errors 0  dropped 0  overruns 0  frame 0
  41.         TX packets 5218  bytes 11745246 (11.2 MiB)
  42.         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Virsh Edit to Bridge Network

  1. [root@localhost ~]# virsh edit CentOS-7-x86_64
  2. Domain CentOS-7-x86_64 XML configuration edited.
Before Edit
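
(Screenshot omitted.) The fragment virt-install generated for the default NAT network looks like this (MAC and model vary per host):

  1. <interface type='network'>
  2.   <mac address='52:54:00:af:a4:99'/>
  3.   <source network='default'/>
  4.   <model type='virtio'/>
  5. </interface>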

After Edit
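
(Screenshot omitted.) Only the interface type and source change, so the guest attaches to br0 instead of the NAT network:

  1. <interface type='bridge'>
  2.   <mac address='52:54:00:af:a4:99'/>
  3.   <source bridge='br0'/>
  4.   <model type='virtio'/>
  5. </interface>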

Restart Virtual Machine

  1. [root@localhost ~]# virsh shutdown CentOS-7-x86_64 
  2. Domain CentOS-7-x86_64 is being shutdown
  3.  
  4. [root@localhost ~]# virsh start CentOS-7-x86_64
  5. Domain CentOS-7-x86_64 started

Look at the Virtual Machine IP

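(Screenshot omitted.) With the NIC bridged to ens33 the guest now gets an address on 192.168.220.0/24; one way to find it from the host is the neighbor table, keyed on the guest MAC shown above:

  1. [root@localhost ~]# ip neigh | grep -i 52:54:00:af:a4:99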

How to Secure Tunnels to Localhost

Install Golang 1.4

  1. tar -C /usr/local -xzf go1.4.3.linux-amd64.tar.gz 
  2. mkdir $HOME/go
  3. echo 'export GOROOT=/usr/local/go'>> ~/.bashrc
  4. echo 'export GOPATH=$HOME/go'>> ~/.bashrc 
  5. echo 'export PATH=$PATH:$GOROOT/bin'>> ~/.bashrc 
  6. source /root/.bashrc
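
Verify the toolchain is on PATH before building:

  1. go version
  2. go version go1.4.3 linux/amd64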

Install Git

  1. yum install mercurial git bzr subversion -y

Install Ngrok

  1. cd /usr/local/src/
  2. git clone https://github.com/inconshreveable/ngrok.git

Generate CERT

  1. export NGROK_DOMAIN="qinuu.com"
  2.  
  3. cd ngrok/
  4. openssl genrsa -out rootCA.key 2048
  5. openssl req -x509 -new -nodes -key rootCA.key -subj "/CN=$NGROK_DOMAIN" -days 5000 -out rootCA.pem
  6. openssl genrsa -out device.key 2048
  7. openssl req -new -key device.key -subj "/CN=$NGROK_DOMAIN" -out device.csr
  8. openssl x509 -req -in device.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out device.crt -days 5000
  9.  
  10. cp rootCA.pem assets/client/tls/ngrokroot.crt
  11. cp device.crt assets/server/tls/snakeoil.crt
  12. cp device.key assets/server/tls/snakeoil.key

Make Linux Server

  1. GOOS=linux GOARCH=amd64 make release-server release-client
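
On a linux/amd64 host these values match the defaults, so they mainly serve as documentation; a successful build drops the binaries into bin/ (listing abridged):

  1. ls bin/
  2. ngrok  ngrokd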

Make Windows Client

  1. cd /usr/local/go/src
  2. GOOS=windows GOARCH=amd64 CGO_ENABLED=0 ./make.bash
  3. cd  /usr/local/src/ngrok/
  4. GOOS=windows GOARCH=amd64 make release-server release-client

Start Ngrok Service

  1. nohup bin/ngrokd -domain="qinuu.com" -httpAddr=":8080" -httpsAddr=":8443" &
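
ngrokd's client tunnel port defaults to 4443; a quick check that all three listeners are up:

  1. netstat -antp | egrep '4443|8080|8443'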

Download Windows Client

  1. ls -al bin/windows_amd64/

Configure Client CFG File
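
A minimal ngrok.cfg, assuming DNS for qinuu.com and *.qinuu.com points at this server; save it next to ngrok.exe on the Windows machine:

  1. server_addr: "qinuu.com:4443"
  2. trust_host_root_certs: false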

Start Windows Client
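
Start a tunnel to a local service; the subdomain and local port here are placeholders:

  1. ngrok -config=ngrok.cfg -subdomain=test 80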

How to Configure Static IP in VMware NAT Mode

Just Do It !

  1. [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
  2. TYPE=Ethernet
  3. PROXY_METHOD=none
  4. BROWSER_ONLY=no
  5. BOOTPROTO=static
  6. DEFROUTE=yes
  7. IPV4_FAILURE_FATAL=no
  8. IPV6INIT=yes
  9. IPV6_AUTOCONF=yes
  10. IPV6_DEFROUTE=yes
  11. IPV6_FAILURE_FATAL=no
  12. IPV6_ADDR_GEN_MODE=stable-privacy
  13. NAME=ens33
  14. UUID=4e333024-b8c8-45e4-baee-e46ece81432c
  15. DEVICE=ens33
  16. ONBOOT=yes
  17. IPADDR=192.168.220.200
  18. NETMASK=255.255.255.0
  19. GATEWAY=192.168.220.2
  20. DNS1=8.8.8.8
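
Restart networking so the static address takes effect:

  1. [root@localhost ~]# systemctl restart network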

Make the Compute Nodes Highly Available

Install Compute & Neutron

  1. [root@compute01 ~]# yum install openstack-selinux python-openstackclient yum-plugin-priorities openstack-nova-compute openstack-utils -y
  2. [root@compute01 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y

Configure Nova

  1. [root@compute01 ~]# mkdir -p /data/nova/instances
  2. [root@compute01 ~]# chown -R nova:nova /data/nova
  3.  
  4. [root@compute01 ~]# cp /etc/nova/nova.conf{,.bak}
  5.  
  6. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT instances_path /data/nova/instances
  7. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
  8. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  9. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.220.51
  10. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
  11. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
  12. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT cpu_allocation_ratio 10
  13.  
  14. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
  15. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
  16.  
  17. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
  18.  
  19. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
  20. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
  21. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
  22. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
  23. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
  24. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
  25. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
  26. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
  27. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
  28.  
  29. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
  30. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
  31. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address compute01
  32. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
  33.  
  34. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
  35.  
  36. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  37.  
  38. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
  39. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
  40. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
  41. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
  42. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
  43. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
  44. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
  45. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
  46.  
  47. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
  48.  
  49. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
  50. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
  51. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
  52. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
  53. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
  54. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
  55. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
  56. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
  57. [root@compute01 ~]# openstack-config --set /etc/nova/nova.conf neutron password neutron

Configure Neutron

  1. [root@compute01 ~]# cp /etc/neutron/neutron.conf{,.bak}
  2.  
  3. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
  4. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  5.  
  6. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
  7. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
  8. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
  9. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
  10. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_id default
  11. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_id default
  12. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
  13. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
  14. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password neutron
  15.  
  16. [root@compute01 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp

Configure LinuxBridge Agent

  1. [root@compute01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
  2.  
  3. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:'ens33'
  4.  
  5. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
  6. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  7.  
  8. [root@compute01 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false

Start Compute Service

  1. [root@compute01 ~]# systemctl enable libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
  2. [root@compute01 ~]# systemctl restart libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
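
Once the controllers (next section) are up, the new hypervisor should appear from a controller node (assuming admin credentials are sourced):

  1. [root@controller1 ~]# openstack hypervisor list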

Highly Available Nova API

Create Nova Service & Endpoint

  1. [root@controller1 ~]# openstack service create --name nova --description "OpenStack Compute" compute
  2. +-------------+----------------------------------+
  3. | Field       | Value                            |
  4. +-------------+----------------------------------+
  5. | description | OpenStack Compute                |
  6. | enabled     | True                             |
  7. | id          | 365c1378f8c641ba81f48efa7c62cd29 |
  8. | name        | nova                             |
  9. | type        | compute                          |
  10. +-------------+----------------------------------+
  11. [root@controller1 ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
  12. +--------------+----------------------------------+
  13. | Field        | Value                            |
  14. +--------------+----------------------------------+
  15. | enabled      | True                             |
  16. | id           | 2afb267d1bd34f9388f9568321313167 |
  17. | interface    | public                           |
  18. | region       | RegionOne                        |
  19. | region_id    | RegionOne                        |
  20. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  21. | service_name | nova                             |
  22. | service_type | compute                          |
  23. | url          | http://controller:8774/v2.1      |
  24. +--------------+----------------------------------+
  25. [root@controller1 ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
  26. +--------------+----------------------------------+
  27. | Field        | Value                            |
  28. +--------------+----------------------------------+
  29. | enabled      | True                             |
  30. | id           | 92de622e904546cc9bd1ca4087529e98 |
  31. | interface    | internal                         |
  32. | region       | RegionOne                        |
  33. | region_id    | RegionOne                        |
  34. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  35. | service_name | nova                             |
  36. | service_type | compute                          |
  37. | url          | http://controller:8774/v2.1      |
  38. +--------------+----------------------------------+
  39. [root@controller1 ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
  40. +--------------+----------------------------------+
  41. | Field        | Value                            |
  42. +--------------+----------------------------------+
  43. | enabled      | True                             |
  44. | id           | 9295b974edec4e06842ee4db2e2d2458 |
  45. | interface    | admin                            |
  46. | region       | RegionOne                        |
  47. | region_id    | RegionOne                        |
  48. | service_id   | 365c1378f8c641ba81f48efa7c62cd29 |
  49. | service_name | nova                             |
  50. | service_type | compute                          |
  51. | url          | http://controller:8774/v2.1      |
  52. +--------------+----------------------------------+

Create Placement & Endpoint

  1. [root@controller1 ~]# openstack user create --domain default --password=placement placement
  2. +---------------------+----------------------------------+
  3. | Field               | Value                            |
  4. +---------------------+----------------------------------+
  5. | domain_id           | default                          |
  6. | enabled             | True                             |
  7. | id                  | c18a3e3bf63c4114924fb6b31b3305fd |
  8. | name                | placement                        |
  9. | options             | {}                               |
  10. | password_expires_at | None                             |
  11. +---------------------+----------------------------------+
  12. [root@controller1 ~]# openstack role add --project service --user placement admin
  13. [root@controller1 ~]# openstack service create --name placement --description "Placement API" placement
  14. +-------------+----------------------------------+
  15. | Field       | Value                            |
  16. +-------------+----------------------------------+
  17. | description | Placement API                    |
  18. | enabled     | True                             |
  19. | id          | 99a48fd2c3bf496287fa1fcf82376c02 |
  20. | name        | placement                        |
  21. | type        | placement                        |
  22. +-------------+----------------------------------+
  23. [root@controller1 ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
  24. +--------------+----------------------------------+
  25. | Field        | Value                            |
  26. +--------------+----------------------------------+
  27. | enabled      | True                             |
  28. | id           | c271472e882f49c3a77c518296d4045c |
  29. | interface    | public                           |
  30. | region       | RegionOne                        |
  31. | region_id    | RegionOne                        |
  32. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  33. | service_name | placement                        |
  34. | service_type | placement                        |
  35. | url          | http://controller:8778           |
  36. +--------------+----------------------------------+
  37. [root@controller1 ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
  38. +--------------+----------------------------------+
  39. | Field        | Value                            |
  40. +--------------+----------------------------------+
  41. | enabled      | True                             |
  42. | id           | b04f59956eca4d8cab4b3a41a071fc6b |
  43. | interface    | internal                         |
  44. | region       | RegionOne                        |
  45. | region_id    | RegionOne                        |
  46. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  47. | service_name | placement                        |
  48. | service_type | placement                        |
  49. | url          | http://controller:8778           |
  50. +--------------+----------------------------------+
  51. [root@controller1 ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
  52. +--------------+----------------------------------+
  53. | Field        | Value                            |
  54. +--------------+----------------------------------+
  55. | enabled      | True                             |
  56. | id           | 582bf2a041734838adbf92d7dd4b602e |
  57. | interface    | admin                            |
  58. | region       | RegionOne                        |
  59. | region_id    | RegionOne                        |
  60. | service_id   | 99a48fd2c3bf496287fa1fcf82376c02 |
  61. | service_name | placement                        |
  62. | service_type | placement                        |
  63. | url          | http://controller:8778           |
  64. +--------------+----------------------------------+

Install Nova

  1. [root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y

Configure Nova

  1. [root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
  2.  
  3. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip controller1
  4. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
  5. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen controller1
  6. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen_port 8774
  7. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen controller1
  8. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen_port 8775
  9. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
  10. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
  11. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller:5673
  12.  
  13. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
  14. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
  15.  
  16. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
  17.  
  18. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
  19. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
  20. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
  21. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
  22. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
  23. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
  24. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
  25. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
  26. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
  27.  
  28. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled true
  29. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen controller1
  30. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address controller1
  31. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_host controller1
  32. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_port 6080
  33.  
  34. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
  35.  
  36. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  37.  
  38. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
  39. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
  40. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement project_name service
  41. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_type password
  42. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
  43. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement auth_url  http://controller:35357/v3
  44. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement username placement
  45. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf placement password placement
  46.  
  47. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
  48.  
  49. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache enabled true
  50. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
  51. [root@controller1 ~]# openstack-config --set /etc/nova/nova.conf cache memcache_servers controller1:11211,controller2:11211,controller3:11211

Configure Nova-Placement

Refer to “CLOUD COMPUTING —> OpenStack Pike Installation —> 7.Nova”

  1. [root@controller1 ~]# echo "
  2. > #Placement API
  3. > <Directory /usr/bin>
  4. >    <IfVersion >= 2.4>
  5. >       Require all granted
  6. >    </IfVersion>
  7. >    <IfVersion < 2.4>
  8. >       Order allow,deny
  9. >       Allow from all
  10. >    </IfVersion>
  11. > </Directory>
  12. > ">>/etc/httpd/conf.d/00-nova-placement-api.conf
  13. [root@controller1 ~]# systemctl restart httpd

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
  2. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
  3. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
  4. [root@controller1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

List Nova Cell

  1. [root@controller1 ~]# nova-manage cell_v2 list_cells
  2. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
  3. |  Name |                 UUID                 |              Transport URL              |               Database Connection               |
  4. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+
  5. | cell0 | 00000000-0000-0000-0000-000000000000 |                  none:/                 | mysql+pymysql://nova:****@controller/nova_cell0 |
  6. | cell1 | b55310bc-6d58-4c8d-9b7c-014b77cd48ac | rabbit://openstack:****@controller:5673 |    mysql+pymysql://nova:****@controller/nova    |
  7. +-------+--------------------------------------+-----------------------------------------+-------------------------------------------------+

Replace Default Placement Port

The placement API runs under httpd; moving its vhost from 8778 to 9778 frees 8778 for the HAProxy frontend configured in the next step:

  1. [root@controller1 ~]# sed -i 's/8778/9778/' /etc/httpd/conf.d/00-nova-placement-api.conf
  2. [root@controller1 ~]# systemctl restart httpd

Configure HA With HAProxy

  1. [root@controller1 ~]# echo '
  2. > ##nova_compute
  3. > listen nova_compute_api_cluster
  4. >   bind controller:8774
  5. >   balance source
  6. >   option tcpka
  7. >   option httpchk
  8. >   option tcplog
  9. >   server controller1 controller1:8774 check inter 2000 rise 2 fall 5
  10. >   server controller2 controller2:8774 check inter 2000 rise 2 fall 5
  11. >   server controller3 controller3:8774 check inter 2000 rise 2 fall 5
  12. > #Nova-api-metadata
  13. > listen Nova-api-metadata_cluster
  14. >   bind controller:8775
  15. >   balance source
  16. >   option tcpka
  17. >   option httpchk
  18. >   option tcplog
  19. >   server controller1 controller1:8775 check inter 2000 rise 2 fall 5
  20. >   server controller2 controller2:8775 check inter 2000 rise 2 fall 5
  21. >   server controller3 controller3:8775 check inter 2000 rise 2 fall 5
  22. > #nova_placement
  23. > listen nova_placement_cluster
  24. >   bind controller:8778
  25. >   balance source
  26. >   option tcpka
  27. >   option tcplog
  28. >   server controller1 controller1:9778 check inter 2000 rise 2 fall 5
  29. >   server controller2 controller2:9778 check inter 2000 rise 2 fall 5
  30. >   server controller3 controller3:9778 check inter 2000 rise 2 fall 5
  31. > '>>/etc/haproxy/haproxy.cfg
  32. [root@controller1 ~]# systemctl restart haproxy.service
  33.  
  34. netstat -antp|grep haproxy
  35. netstat -antp|egrep '8774|8775|8778|6080'

Start Nova Service

  1. [root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  2. [root@controller ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  3. [root@controller ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

List Nova Status

  1. [root@controller1 ~]# openstack catalog list
  2. +-----------+-----------+-----------------------------------------+
  3. | Name      | Type      | Endpoints                               |
  4. +-----------+-----------+-----------------------------------------+
  5. | glance    | image     | RegionOne                               |
  6. |           |           |   internal: http://controller:9292      |
  7. |           |           | RegionOne                               |
  8. |           |           |   admin: http://controller:9292         |
  9. |           |           | RegionOne                               |
  10. |           |           |   public: http://controller:9292        |
  11. |           |           |                                         |
  12. | nova      | compute   | RegionOne                               |
  13. |           |           |   public: http://controller:8774/v2.1   |
  14. |           |           | RegionOne                               |
  15. |           |           |   admin: http://controller:8774/v2.1    |
  16. |           |           | RegionOne                               |
  17. |           |           |   internal: http://controller:8774/v2.1 |
  18. |           |           |                                         |
  19. | placement | placement | RegionOne                               |
  20. |           |           |   admin: http://controller:8778         |
  21. |           |           | RegionOne                               |
  22. |           |           |   internal: http://controller:8778      |
  23. |           |           | RegionOne                               |
  24. |           |           |   public: http://controller:8778        |
  25. |           |           |                                         |
  26. | neutron   | network   | RegionOne                               |
  27. |           |           |   admin: http://controller:9696         |
  28. |           |           | RegionOne                               |
  29. |           |           |   public: http://controller:9696        |
  30. |           |           | RegionOne                               |
  31. |           |           |   internal: http://controller:9696      |
  32. |           |           |                                         |
  33. | keystone  | identity  | RegionOne                               |
  34. |           |           |   admin: http://controller:35357/v3/    |
  35. |           |           | RegionOne                               |
  36. |           |           |   internal: http://controller:5000/v3/  |
  37. |           |           | RegionOne                               |
  38. |           |           |   public: http://controller:5000/v3/    |
  39. |           |           |                                         |
  40. +-----------+-----------+-----------------------------------------+
  41. [root@controller1 ~]# nova-status upgrade check
  42. +--------------------------------------------------------------------+
  43. | Upgrade Check Results                                              |
  44. +--------------------------------------------------------------------+
  45. | Check: Cells v2                                                    |
  46. | Result: Success                                                    |
  47. | Details: No host mappings or compute nodes were found. Remember to |
  48. |   run command 'nova-manage cell_v2 discover_hosts' when new        |
  49. |   compute hosts are deployed.                                      |
  50. +--------------------------------------------------------------------+
  51. | Check: Placement API                                               |
  52. | Result: Success                                                    |
  53. | Details: None                                                      |
  54. +--------------------------------------------------------------------+
  55. | Check: Resource Providers                                          |
  56. | Result: Success                                                    |
  57. | Details: There are no compute resource providers in the Placement  |
  58. |   service nor are there compute nodes in the database.             |
  59. |   Remember to configure new compute nodes to report into the       |
  60. |   Placement service. See                                           |
  61. |   http://docs.openstack.org/developer/nova/placement.html          |
  62. |   for more details.                                                |
  63. +--------------------------------------------------------------------+
  64. [root@controller1 ~]# openstack compute service list
  65. +----+------------------+-------------+----------+---------+-------+----------------------------+
  66. | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
  67. +----+------------------+-------------+----------+---------+-------+----------------------------+
  68. | 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:02:58.000000 |
  69. | 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:02:56.000000 |
  70. | 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
  71. | 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
  72. | 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:03:03.000000 |
  73. | 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:02:57.000000 |
  74. | 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:03:04.000000 |
  75. | 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:03:05.000000 |
  76. | 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:02:59.000000 |
  77. | 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:02:59.000000 |
  78. +----+------------------+-------------+----------+---------+-------+----------------------------+

Install On Other Controller Nodes

  1. [root@controller2 ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y 
  2.  
  3. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/nova/*  /etc/nova/
  4. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
  5. [root@controller2 ~]# rsync -avzP  -e 'ssh -p 22'  controller1:/etc/haproxy/* /etc/haproxy/
  6.  
  7. [root@controller2 ~]# sed -i '1,9s/controller1/controller2/'  /etc/nova/nova.conf
  8. [root@controller3 ~]# sed -i '1,9s/controller1/controller3/'  /etc/nova/nova.conf
  9.  
  10. [root@controller2 ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  11. [root@controller2 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  12. [root@controller2 ~]# systemctl status openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  13.  
  14. [root@controller2 ~]# systemctl restart httpd haproxy

Discover New Compute Node

  1. [root@controller1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
  2. Found 2 cell mappings.
  3. Skipping cell0 since it does not contain hosts.
  4. Getting compute nodes from cell 'cell1': b55310bc-6d58-4c8d-9b7c-014b77cd48ac
  5. Found 0 unmapped computes in cell: b55310bc-6d58-4c8d-9b7c-014b77cd48ac
  6. [root@controller1 ~]# openstack compute service list
  7. +----+------------------+-------------+----------+---------+-------+----------------------------+
  8. | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
  9. +----+------------------+-------------+----------+---------+-------+----------------------------+
  10. | 15 | nova-conductor   | controller1 | internal | enabled | up    | 2017-12-19T09:28:08.000000 |
  11. | 18 | nova-scheduler   | controller1 | internal | enabled | up    | 2017-12-19T09:28:16.000000 |
  12. | 21 | nova-consoleauth | controller1 | internal | enabled | up    | 2017-12-19T09:28:10.000000 |
  13. | 27 | nova-consoleauth | controller2 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  14. | 30 | nova-conductor   | controller2 | internal | enabled | up    | 2017-12-19T09:28:13.000000 |
  15. | 39 | nova-scheduler   | controller2 | internal | enabled | up    | 2017-12-19T09:28:17.000000 |
  16. | 42 | nova-consoleauth | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  17. | 45 | nova-conductor   | controller3 | internal | enabled | up    | 2017-12-19T09:28:15.000000 |
  18. | 54 | nova-scheduler   | controller3 | internal | enabled | up    | 2017-12-19T09:28:09.000000 |
  19. | 57 | nova-compute     | compute01   | nova     | enabled | up    | 2017-12-19T09:28:10.000000 |
  20. | 60 | nova-compute     | compute02   | nova     | enabled | up    | 2017-12-19T09:28:16.000000 |
  21. +----+------------------+-------------+----------+---------+-------+----------------------------+

Highly Available Horizon API

Install Dashboard

  1. [root@controller ~]# yum install openstack-dashboard -y

Configure Dashboard

  1. [root@controller1 ~]# cp /etc/openstack-dashboard/local_settings{,.bak}
  2. [root@controller1 ~]# DASHBOARD_LOCAL_SETTINGS=/etc/openstack-dashboard/local_settings
  3. [root@controller1 ~]# sed -i 's#_member_#user#g' $DASHBOARD_LOCAL_SETTINGS
  4. [root@controller1 ~]# sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' $DASHBOARD_LOCAL_SETTINGS
  5. [root@controller1 ~]# 
  6. [root@controller1 ~]# sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" $DASHBOARD_LOCAL_SETTINGS
  7. [root@controller1 ~]# 
  8. [root@controller1 ~]# sed -in '153,158s/#//' $DASHBOARD_LOCAL_SETTINGS 
  9. [root@controller1 ~]# sed -in '160,164s/.*/#&/' $DASHBOARD_LOCAL_SETTINGS
  10. [root@controller1 ~]# sed -i 's#UTC#Asia/Shanghai#g' $DASHBOARD_LOCAL_SETTINGS
  11. [root@controller1 ~]# sed -i 's#%s:5000/v2.0#%s:5000/v3#' $DASHBOARD_LOCAL_SETTINGS
  12. [root@controller1 ~]# sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' $DASHBOARD_LOCAL_SETTINGS
  13. [root@controller1 ~]# sed -i "s@^#OPENSTACK_KEYSTONE_DEFAULT@OPENSTACK_KEYSTONE_DEFAULT@" $DASHBOARD_LOCAL_SETTINGS
  14. [root@controller1 ~]# 
  15. [root@controller1 ~]# echo '
  16. > #set
  17. > OPENSTACK_API_VERSIONS = {
  18. >     "identity": 3,
  19. >     "image": 2,
  20. >     "volume": 2,
  21. > }
  22. > #'>>$DASHBOARD_LOCAL_SETTINGS

Configure Other Controller Nodes

  1. [root@controller1 ~]# rsync -avzP  -e 'ssh -p 22'  /etc/openstack-dashboard/local_settings  controller2:/etc/openstack-dashboard/
  2. [root@controller1 ~]# rsync -avzP  -e 'ssh -p 22'  /etc/openstack-dashboard/local_settings  controller3:/etc/openstack-dashboard/

Restart Httpd Service

  1. [root@controller1 ~]# systemctl restart httpd
  2. [root@controller1 ~]# ssh controller2 "systemctl restart httpd" 
  3. [root@controller1 ~]# ssh controller3 "systemctl restart httpd"

Configure HA Dashboard API

  1. sed -i 's#^Listen 80#Listen 8080#'  /etc/httpd/conf/httpd.conf
  2. systemctl restart httpd.service
  3. systemctl daemon-reload
  4.  
  5. echo '
  6. listen dashboard_cluster  
  7.   bind controller:80
  8.   balance  roundrobin  
  9.   option  tcpka  
  10.   option  httpchk  
  11.   option  tcplog  
  12.   server controller1 controller1:8080 check port 8080 inter 2000 rise 2 fall 5
  13.   server controller2 controller2:8080 check port 8080 inter 2000 rise 2 fall 5
  14.   server controller3 controller3:8080 check port 8080 inter 2000 rise 2 fall 5
  15. '>>/etc/haproxy/haproxy.cfg
  16. systemctl restart haproxy.service

Now, Open the Browser

http://192.168.220.20/dashboard/

Linux bridge & L3 HA

Create OpenStack Service

  1. [root@controller1 ~]# source admin-openrc 
  2. [root@controller1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
  3. +-------------+----------------------------------+
  4. | Field       | Value                            |
  5. +-------------+----------------------------------+
  6. | description | OpenStack Networking             |
  7. | enabled     | True                             |
  8. | id          | ad17484f2f19423b9ffe8ab2b451906d |
  9. | name        | neutron                          |
  10. | type        | network                          |
  11. +-------------+----------------------------------+
  12. [root@controller1 ~]# openstack endpoint create --region RegionOne network public http://controller:9696
  13. +--------------+----------------------------------+
  14. | Field        | Value                            |
  15. +--------------+----------------------------------+
  16. | enabled      | True                             |
  17. | id           | c4e2c0741118449d933107948c67651d |
  18. | interface    | public                           |
  19. | region       | RegionOne                        |
  20. | region_id    | RegionOne                        |
  21. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  22. | service_name | neutron                          |
  23. | service_type | network                          |
  24. | url          | http://controller:9696           |
  25. +--------------+----------------------------------+
  26. [root@controller1 ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
  27. +--------------+----------------------------------+
  28. | Field        | Value                            |
  29. +--------------+----------------------------------+
  30. | enabled      | True                             |
  31. | id           | f35d94a749ae47d68b243a90015493bb |
  32. | interface    | internal                         |
  33. | region       | RegionOne                        |
  34. | region_id    | RegionOne                        |
  35. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  36. | service_name | neutron                          |
  37. | service_type | network                          |
  38. | url          | http://controller:9696           |
  39. +--------------+----------------------------------+
  40. [root@controller1 ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
  41. +--------------+----------------------------------+
  42. | Field        | Value                            |
  43. +--------------+----------------------------------+
  44. | enabled      | True                             |
  45. | id           | 61e469452d914b78aabbf4bcc0a51732 |
  46. | interface    | admin                            |
  47. | region       | RegionOne                        |
  48. | region_id    | RegionOne                        |
  49. | service_id   | ad17484f2f19423b9ffe8ab2b451906d |
  50. | service_name | neutron                          |
  51. | service_type | network                          |
  52. | url          | http://controller:9696           |
  53. +--------------+----------------------------------+

Install OpenStack Neutron

  1. [root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y

Configure Neutron

  1. [root@controller1 ~]# cp /etc/neutron/neutron.conf{,.bak2}
  2. [root@controller1 ~]# echo '
  3. > [DEFAULT]
  4. > bind_port = 9696
  5. > bind_host = controller1
  6. > core_plugin = ml2
  7. > service_plugins =
  8. > #service_plugins = trunk
  9. > #service_plugins = router
  10. > allow_overlapping_ips = true
  11. > transport_url = rabbit://openstack:openstack@controller
  12. > auth_strategy = keystone
  13. > notify_nova_on_port_status_changes = true
  14. > notify_nova_on_port_data_changes = true
  15. > 
  16. > [keystone_authtoken]
  17. > auth_uri = http://controller:5000
  18. > auth_url = http://controller:35357
  19. > memcached_servers = controller1:11211
  20. > auth_type = password
  21. > project_domain_name = default
  22. > user_domain_name = default
  23. > project_name = service
  24. > username = neutron
  25. > password = neutron
  26. > 
  27. > [nova]
  28. > auth_url = http://controller:35357
  29. > auth_plugin = password
  30. > project_domain_id = default
  31. > user_domain_id = default
  32. > region_name = RegionOne
  33. > project_name = service
  34. > username = nova
  35. > password = nova
  36. > 
  37. > [database]
  38. > connection = mysql+pymysql://neutron:neutron@controller:3306/neutron
  39. > 
  40. > [oslo_concurrency]
  41. > lock_path = /var/lib/neutron/tmp 
  42. > #'>/etc/neutron/neutron.conf

Configure ML2

  1. [root@controller1 ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [ml2]
  4. > tenant_network_types = 
  5. > type_drivers = vlan,flat
  6. > mechanism_drivers = linuxbridge
  7. > extension_drivers = port_security
  8. > [ml2_type_flat]
  9. > flat_networks = provider
  10. > [securitygroup]
  11. > enable_ipset = True
  12. > #vlan
  13. > # [ml2_type_vlan]
  14. > # network_vlan_ranges = provider:3001:4000
  15. > #'>/etc/neutron/plugins/ml2/ml2_conf.ini
  16. [root@controller1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Configure Linux Bridge

  1. [root@controller1 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [linux_bridge]
  4. > physical_interface_mappings = provider:'ens37'
  5. > [vxlan]
  6. > enable_vxlan = false
  7. > [agent]
  8. > prevent_arp_spoofing = True
  9. > [securitygroup]
  10. > firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  11. > enable_security_group = True
  12. > #'>/etc/neutron/plugins/ml2/linuxbridge_agent.ini

Configure DHCP

  1. [root@controller1 ~]# cp /etc/neutron/dhcp_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '#
  3. > [DEFAULT]
  4. > interface_driver = linuxbridge
  5. > dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
  6. > enable_isolated_metadata = true
  7. > #'>/etc/neutron/dhcp_agent.ini

Configure Metadata

  1. [root@controller1 ~]# cp /etc/neutron/metadata_agent.ini{,.bak}
  2. [root@controller1 ~]# echo '
  3. > [DEFAULT]
  4. > nova_metadata_ip = controller
  5. > metadata_proxy_shared_secret = metadata
  6. > #'>/etc/neutron/metadata_agent.ini

Configure Nova

  1. [root@controller1 ~]# cp /etc/nova/nova.conf{,.bak}
  2. [root@controller1 ~]# echo '
  3. > #
  4. > [neutron]
  5. > url = http://controller:9696
  6. > auth_url = http://controller:35357
  7. > auth_type = password
  8. > project_domain_name = default
  9. > user_domain_name = default
  10. > region_name = RegionOne
  11. > project_name = service
  12. > username = neutron
  13. > password = neutron
  14. > service_metadata_proxy = true
  15. > metadata_proxy_shared_secret = metadata
  16. > #'>>/etc/nova/nova.conf

Configure L3

  1. [root@controller1 ~]# cp /etc/neutron/l3_agent.ini{,.bak}
  2. [root@controller1 ~]# 
  3. [root@controller1 ~]# echo '
  4. > [DEFAULT]
  5. > interface_driver = linuxbridge
  6. > #'>/etc/neutron/l3_agent.ini

Synchronize Database

  1. [root@controller1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
  2. [root@controller1 ~]# mysql -h controller -u neutron -pneutron -e "use neutron;show tables;"

Configure HAProxy For Neutron API

  1. [root@controller1 ~]# echo '
  2. > #Neutron_API
  3. > listen Neutron_API_cluster
  4. > bind controller:9696
  5. > balance source
  6. > option tcpka
  7. > option tcplog
  8. > server controller1 controller1:9696 check inter 2000 rise 2 fall 5
  9. > server controller2 controller2:9696 check inter 2000 rise 2 fall 5
  10. > server controller3 controller3:9696 check inter 2000 rise 2 fall 5
  11. > '>>/etc/haproxy/haproxy.cfg
  12. [root@controller1 ~]# systemctl restart haproxy.service
  13. [root@controller1 ~]# netstat -antp|grep haproxy
  14. tcp        0      0 192.168.220.20:9292     0.0.0.0:*               LISTEN      76948/haproxy       
  15. tcp        0      0 0.0.0.0:1080            0.0.0.0:*               LISTEN      76948/haproxy       
  16. tcp        0      0 192.168.220.20:35357    0.0.0.0:*               LISTEN      76948/haproxy       
  17. tcp        0      0 192.168.220.20:9696     0.0.0.0:*               LISTEN      76948/haproxy       
  18. tcp        0      0 192.168.220.20:5000     0.0.0.0:*               LISTEN      76948/haproxy

Start Neutron Service

  1. [root@controller1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  2. [root@controller1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  3. [root@controller1 ~]# systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 2 & Start Service

  1. [root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
  2. [root@controller2 ~]# sed -i 's/controller1/controller2/' /etc/neutron/neutron.conf
  3. [root@controller2 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
  4. [root@controller2 ~]# systemctl restart haproxy
  5. [root@controller2 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  6. [root@controller2 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure Controller Node 3 & Start Service

  1. [root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
  2. [root@controller3 ~]# sed -i 's/controller1/controller3/' /etc/neutron/neutron.conf
  3. [root@controller3 ~]# rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
  4. [root@controller3 ~]# systemctl restart haproxy
  5. [root@controller3 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
  6. [root@controller3 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

Configure L3 HA on Controller Nodes

  1. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
  2. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
  3. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips True
  4. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT router_distributed True
  5. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
  6. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha_net_cidr 169.254.192.0/18
  7. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 3
  8. [root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 2
  9.  
  10. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vxlan
  11. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
  12. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch,l2population
  13. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
  14. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks external
  15. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
  16.  
  17. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
  18. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
  19. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
  20. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
  21. [root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
  22.  
  23. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT ha_vrrp_auth_password password
  24. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
  25. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge 
  26. [root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr_snat
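
Once the services pick up the new settings, L3 HA can be confirmed end to end: every router created from here on gets an active/standby pair of keepalived-managed namespaces. A sketch, assuming neutron-l3-agent is installed on the controllers (the l3_agent.ini edits above imply it) and using a hypothetical router name demo-router:

  1. # systemctl restart neutron-server.service neutron-l3-agent.service
  2. # neutron router-create demo-router
  3. # neutron l3-agent-list-hosting-router demo-router

The last command should list up to three hosting agents, with an ha_state column showing one active and the rest standby.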

Configure L3 HA on Compute Node

  1. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip TUNNEL_INTERFACE_IP_ADDRESS
  2. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings external:br-ex
  3.  
  4. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent enable_distributed_routing True
  5. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent tunnel_types vxlan
  6. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini agent l2_population True
  7.  
  8. openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
  9.  
  10. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch
  11. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge 
  12. openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr
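
These settings only take effect after the agents re-read their configuration. A restart along the lines below should do it; the service names assume the standard RDO packages for the Open vSwitch agent (matching the openvswitch mechanism driver above) and the L3 agent:

  1. systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service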

Verify Service

  1. [root@controller1 ~]# openstack network agent list
  2. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
  3. | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
  4. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
  5. | 1a4efb8b-aa65-4d4a-8092-7213592acd22 | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  6. | 3b35bc6e-4cec-42e2-9fde-e99c601cf609 | DHCP agent         | controller3 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  7. | 42e57e23-eecb-490d-b709-d8e3730107e8 | DHCP agent         | controller2 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  8. | 6b2058a2-d3e3-4342-afbb-717338b1499f | Metadata agent     | controller1 | None              | :-)   | UP    | neutron-metadata-agent    |
  9. | 750b5e5c-c7b6-4f48-ae2f-37580b6e03d9 | DHCP agent         | controller1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
  10. | 7e63ce46-3fd5-40ee-9f63-ee8cc52dd5a4 | Metadata agent     | controller3 | None              | :-)   | UP    | neutron-metadata-agent    |
  11. | 92199bf0-08ef-4642-9557-c33360796405 | Linux bridge agent | controller2 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  12. | 9ae5bafa-0075-4408-b827-1be9bb1ccf99 | Linux bridge agent | controller3 | None              | :-)   | UP    | neutron-linuxbridge-agent |
  13. | f1ed9e45-39e7-4980-aaec-10364e42263f | Metadata agent     | controller2 | None              | :-)   | UP    | neutron-metadata-agent    |
  14. +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

Highly Available Block Storage API

Waiting for test ...

Configure Network

  1. [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
  2. TYPE=Ethernet
  3. PROXY_METHOD=none
  4. BROWSER_ONLY=no
  5. BOOTPROTO=static
  6. DEFROUTE=yes
  7. IPV4_FAILURE_FATAL=no
  8. IPV6INIT=yes
  9. IPV6_AUTOCONF=yes
  10. IPV6_DEFROUTE=yes
  11. IPV6_FAILURE_FATAL=no
  12. IPV6_ADDR_GEN_MODE=stable-privacy
  13. NAME=ens33
  14. UUID=4e333024-b8c8-45e4-baee-e46ece81432c
  15. DEVICE=ens33
  16. ONBOOT=yes
  17. IPADDR=192.168.220.101
  18. NETMASK=255.255.255.0
  19. GATEWAY=192.168.220.2
  20. DNS1=114.114.114.114
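
For the static address to take effect, restart networking and confirm the interface picked it up:

  1. [root@localhost ~]# systemctl restart network
  2. [root@localhost ~]# ip addr show ens33 | grep 'inet '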

Fdisk the 2nd Disk

  1. [root@localhost ~]# ls -al /dev/sd*
  2. brw-rw----. 1 root disk 8,  0 Dec 26  2017 /dev/sda
  3. brw-rw----. 1 root disk 8,  1 Dec 26  2017 /dev/sda1
  4. brw-rw----. 1 root disk 8,  2 Dec 26  2017 /dev/sda2
  5. brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
  6.  
  7. [root@localhost ~]# fdisk /dev/sdb 
  8. Welcome to fdisk (util-linux 2.23.2).
  9.  
  10. Changes will remain in memory only, until you decide to write them.
  11. Be careful before using the write command.
  12.  
  13.  
  14. Command (m for help): n
  15. Partition type:
  16.    p   primary (0 primary, 0 extended, 4 free)
  17.    e   extended
  18. Select (default p): 
  19. Using default response p
  20. Partition number (1-4, default 1): 
  21. First sector (2048-125829119, default 2048): 
  22. Using default value 2048
  23. Last sector, +sectors or +size{K,M,G} (2048-125829119, default 125829119): +30G
  24. Partition 1 of type Linux and of size 30 GiB is set
  25.  
  26. Command (m for help): n
  27. Partition type:
  28.    p   primary (1 primary, 0 extended, 3 free)
  29.    e   extended
  30. Select (default p): 
  31. Using default response p
  32. Partition number (2-4, default 2): 
  33. First sector (62916608-125829119, default 62916608): 
  34. Using default value 62916608
  35. Last sector, +sectors or +size{K,M,G} (62916608-125829119, default 125829119): 
  36. Using default value 125829119
  37. Partition 2 of type Linux and of size 30 GiB is set
  38.  
  39. Command (m for help): p
  40.  
  41. Disk /dev/sdb: 64.4 GB, 64424509440 bytes, 125829120 sectors
  42. Units = sectors of 1 * 512 = 512 bytes
  43. Sector size (logical/physical): 512 bytes / 512 bytes
  44. I/O size (minimum/optimal): 512 bytes / 512 bytes
  45. Disk label type: dos
  46. Disk identifier: 0x285890cb
  47.  
  48.    Device Boot      Start         End      Blocks   Id  System
  49. /dev/sdb1            2048    62916607    31457280   83  Linux
  50. /dev/sdb2        62916608   125829119    31456256   83  Linux
  51.  
  52. Command (m for help): w
  53. The partition table has been altered!
  54.  
  55. Calling ioctl() to re-read partition table.
  56. Syncing disks.
  57.  
  58. [root@localhost ~]# ls -al /dev/sd*
  59. brw-rw----. 1 root disk 8,  0 Dec 26  2017 /dev/sda
  60. brw-rw----. 1 root disk 8,  1 Dec 26  2017 /dev/sda1
  61. brw-rw----. 1 root disk 8,  2 Dec 26  2017 /dev/sda2
  62. brw-rw----. 1 root disk 8, 16 Dec 26 01:11 /dev/sdb
  63. brw-rw----. 1 root disk 8, 17 Dec 26 01:11 /dev/sdb1
  64. brw-rw----. 1 root disk 8, 18 Dec 26 01:11 /dev/sdb2

Format the 2nd Disk

  1. [root@localhost ~]# mkfs.ext4 /dev/sdb1
  2. mke2fs 1.42.9 (28-Dec-2013)
  3. Filesystem label=
  4. OS type: Linux
  5. Block size=4096 (log=2)
  6. Fragment size=4096 (log=2)
  7. Stride=0 blocks, Stripe width=0 blocks
  8. 1966080 inodes, 7864320 blocks
  9. 393216 blocks (5.00%) reserved for the super user
  10. First data block=0
  11. Maximum filesystem blocks=2155872256
  12. 240 block groups
  13. 32768 blocks per group, 32768 fragments per group
  14. 8192 inodes per group
  15. Superblock backups stored on blocks: 
  16.         32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
  17.         4096000
  18.  
  19. Allocating group tables: done                            
  20. Writing inode tables: done                            
  21. Creating journal (32768 blocks): done
  22. Writing superblocks and filesystem accounting information: done   
  23.  
  24. [root@localhost ~]# mkfs.ext4 /dev/sdb2
  25. mke2fs 1.42.9 (28-Dec-2013)
  26. Filesystem label=
  27. OS type: Linux
  28. Block size=4096 (log=2)
  29. Fragment size=4096 (log=2)
  30. Stride=0 blocks, Stripe width=0 blocks
  31. 1966080 inodes, 7864064 blocks
  32. 393203 blocks (5.00%) reserved for the super user
  33. First data block=0
  34. Maximum filesystem blocks=2155872256
  35. 240 block groups
  36. 32768 blocks per group, 32768 fragments per group
  37. 8192 inodes per group
  38. Superblock backups stored on blocks: 
  39.         32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
  40.         4096000
  41.  
  42. Allocating group tables: done                            
  43. Writing inode tables: done                            
  44. Creating journal (32768 blocks): done
  45. Writing superblocks and filesystem accounting information: done
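
Before mounting, it is worth confirming both filesystems were actually written; blkid should now report TYPE="ext4" for each partition:

  1. [root@localhost ~]# blkid /dev/sdb1 /dev/sdb2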

Mount /dev/sdb1

  1. [root@localhost ~]# mkdir -p /date
  2. [root@localhost ~]# mount -t ext4 /dev/sdb1 /date
  3. [root@localhost ~]# df -h|grep /dev/sdb1
  4. /dev/sdb1                 30G   45M   28G   1% /date
  5. [root@localhost ~]# echo "mount -t ext4 /dev/sdb1 /date" >>/etc/rc.d/rc.local
  6. [root@localhost ~]# tail -1 /etc/rc.d/rc.local
  7. mount -t ext4 /dev/sdb1 /date
  8. [root@localhost ~]# chmod +x /etc/rc.d/rc.local
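
The rc.local approach above works, but the conventional way to persist a mount on CentOS 7 is an /etc/fstab entry; a minimal sketch, using the device path rather than a UUID:

  1. [root@localhost ~]# echo "/dev/sdb1 /date ext4 defaults 0 0" >>/etc/fstab
  2. [root@localhost ~]# mount -a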

Create Volume Group on /dev/sdb2

  1. [root@localhost ~]# yum install lvm2 -y
  2. [root@localhost ~]# systemctl enable lvm2-lvmetad.service
  3. Created symlink from /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.service to /usr/lib/systemd/system/lvm2-lvmetad.service.
  4. [root@localhost ~]# systemctl start lvm2-lvmetad.service
  5.  
  6. [root@localhost ~]# pvcreate /dev/sdb2
  7. WARNING: ext4 signature detected on /dev/sdb2 at offset 1080. Wipe it? [y/n]: y
  8.   Wiping ext4 signature on /dev/sdb2.
  9.   Physical volume "/dev/sdb2" successfully created.
  10. [root@localhost ~]# vgcreate cinder_lvm01 /dev/sdb2
  11.   Volume group "cinder_lvm01" successfully created
  12. [root@localhost ~]# vgdisplay 
  13.   --- Volume group ---
  14.   VG Name               cinder_lvm01
  15.   System ID             
  16.   Format                lvm2
  17.   Metadata Areas        1
  18.   Metadata Sequence No  1
  19.   VG Access             read/write
  20.   VG Status             resizable
  21.   MAX LV                0
  22.   Cur LV                0
  23.   Open LV               0
  24.   Max PV                0
  25.   Cur PV                1
  26.   Act PV                1
  27.   VG Size               <30.00 GiB
  28.   PE Size               4.00 MiB
  29.   Total PE              7679
  30.   Alloc PE / Size       0 / 0   
  31.   Free  PE / Size       7679 / <30.00 GiB
  32.   VG UUID               jyb299-bo5k-E6Z3-Frho-e6Kz-d9Mu-yK0m6c
  33.  
  34.   --- Volume group ---
  35.   VG Name               centos
  36.   System ID             
  37.   Format                lvm2
  38.   Metadata Areas        1
  39.   Metadata Sequence No  4
  40.   VG Access             read/write
  41.   VG Status             resizable
  42.   MAX LV                0
  43.   Cur LV                3
  44.   Open LV               3
  45.   Max PV                0
  46.   Cur PV                1
  47.   Act PV                1
  48.   VG Size               <79.00 GiB
  49.   PE Size               4.00 MiB
  50.   Total PE              20223
  51.   Alloc PE / Size       20222 / 78.99 GiB
  52.   Free  PE / Size       1 / 4.00 MiB
  53.   VG UUID               NHtSF8-nozf-sbB4-vEBt-ogJo-WmuG-F8HYMQ
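
A terser check that the new volume group is empty and ready for Cinder:

  1. [root@localhost ~]# vgs cinder_lvm01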

Install NFS

  1. [root@localhost ~]# yum install nfs-utils rpcbind -y
  2. [root@localhost ~]# mkdir -p /date/{cinder_nfs1,cinder_nfs2}
  3. [root@localhost ~]# chmod -R 777 /date
  4. [root@localhost ~]# echo "/date/cinder_nfs1 *(rw,root_squash,sync,anonuid=165,anongid=165)">/etc/exports
  5. [root@localhost ~]# exportfs -r
  6. [root@localhost ~]# systemctl enable rpcbind nfs-server
  7. Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
  8. [root@localhost ~]# systemctl restart rpcbind nfs-server
  9. [root@localhost ~]# showmount -e localhost
  10. Export list for localhost:
  11. /date/cinder_nfs1 *
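
To verify the export accepts writes, a throwaway mount from any client (here the server itself, with /mnt as a scratch mountpoint) is a reasonable test:

  1. [root@localhost ~]# mount -t nfs 192.168.220.101:/date/cinder_nfs1 /mnt
  2. [root@localhost ~]# touch /mnt/testfile && rm -f /mnt/testfile
  3. [root@localhost ~]# umount /mnt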

Install & Configure Cinder

  1. [root@localhost ~]# yum install openstack-cinder targetcli python-keystone lvm2 -y
  2. [root@localhost ~]# cp /etc/cinder/cinder.conf{,.bak}
  3. [root@localhost ~]# cp /etc/lvm/lvm.conf{,.bak}
  4. [root@localhost ~]# sed -i '141a filter = [ "a/sdb2/", "r/.*/"]' /etc/lvm/lvm.conf
  5. [root@localhost ~]# echo '192.168.220.101:/date/cinder_nfs1'>/etc/cinder/nfs_shares
  6. [root@localhost ~]# chmod 640 /etc/cinder/nfs_shares
  7. [root@localhost ~]# chown root:cinder /etc/cinder/nfs_shares

Configure cinder.conf

  1. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
  2. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT log_dir /var/log/cinder
  3. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT state_path /var/lib/cinder
  4. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
  5. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@controller
  6. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm,nfs
  7.  
  8. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@controller/cinder
  9.  
  10. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
  11. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
  12. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller1:11211,controller2:11211,controller3:11211
  13. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
  14. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
  15. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
  16. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
  17. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
  18. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password cinder
  19.  
  20. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
  21.  
  22. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
  23. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
  24. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
  25. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder_lvm01
  26. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm iscsi_ip_address 192.168.220.101
  27. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volumes_dir $state_path/volumes
  28. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf lvm volume_backend_name lvm01
  29.  
  30. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_driver cinder.volume.drivers.nfs.NfsDriver
  31. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_shares_config /etc/cinder/nfs_shares
  32. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_mount_point_base $state_path/mnt
  33. [root@localhost ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_backend_name nfs01
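
openstack-config can read values back as well as set them, which makes for a quick spot-check that the backend sections landed where expected:

  1. [root@localhost ~]# openstack-config --get /etc/cinder/cinder.conf DEFAULT enabled_backends
  2. lvm,nfs
  3. [root@localhost ~]# openstack-config --get /etc/cinder/cinder.conf lvm volume_backend_name
  4. lvm01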

Start Cinder Service

  1. [root@localhost ~]# chmod 640 /etc/cinder/cinder.conf
  2. [root@localhost ~]# chgrp cinder /etc/cinder/cinder.conf
  3.  
  4. [root@localhost ~]# systemctl enable openstack-cinder-volume.service target.service
  5. Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
  6. Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.
  7. [root@localhost ~]# systemctl start openstack-cinder-volume.service target.service
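
With the service running, each enabled backend should register as its own cinder-volume host in the form host@backend (localhost@lvm and localhost@nfs here). From a shell with admin credentials loaded:

  1. [root@localhost ~]# openstack volume service list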

Add Cinder to Pacemaker

  1. # pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
  2. # pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
  3. # pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume
  4.  
  5. # pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
  6. # pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
  7. # pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
  8. # pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone
  9.  
  10. # pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone
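
pcs can confirm that the resources started and that the ordering and colocation rules registered:

  1. # pcs status resources
  2. # pcs constraint list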

Configure Cinder Service

  1. # cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
  2.  
  3. # openstack-config --set /etc/cinder/cinder.conf DEFAULT host cinder-cluster-1
  4.  
  5. # openstack-config --set /etc/cinder/cinder.conf DEFAULT osapi_volume_listen 10.0.0.11
  6.  
  7. # openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
  8. # openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder
  9.  
  10. # openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver
  11. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_shares_config /etc/cinder/nfs_exports
  12. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_sparsed_volumes true
  13. # openstack-config --set /etc/cinder/cinder.conf DEFAULT nfs_mount_options v3
  14.  
  15. # openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:password@10.0.0.11/cinder
  16. # openstack-config --set /etc/cinder/cinder.conf database max_retries -1
  17.  
  18. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken identity_uri http://10.0.0.11:35357/
  19. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://10.0.0.11:5000/
  20. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service
  21. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
  22. # openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password CINDER_PASS
  23.  
  24. # openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_hosts 10.0.0.12,10.0.0.13,10.0.0.14
  25. # openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_ha_queues True
  26. # openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_timeout_threshold 60
  27. # openstack-config --set /etc/cinder/cinder.conf DEFAULT heartbeat_rate 2

Configure HA Cinder API

  1. $ openstack endpoint create volume --region $KEYSTONE_REGION \
  2.   --publicurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
  3.   --adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
  4.   --internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s'
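
With the endpoint registered against the VIP, an end-to-end test is simply to create a small volume and check that it reaches the available state:

  1. $ openstack volume create --size 1 test-volume
  2. $ openstack volume list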