Water's Home

Just another Life Style

0%

Install Packages On Controller Nodes

[root@controller ~]# yum install corosync pacemaker pcs fence-agents resource-agents -y

Set-Up the Cluster

[root@controller ~]# systemctl enable pcsd
[root@controller ~]# systemctl start pcsd

[root@controller ~]# echo myhaclusterpwd | passwd --stdin hacluster

[root@controller ~]# pcs cluster auth controller1 controller2 controller3 -u hacluster -p myhaclusterpwd --force

[root@controller1 ~]# pcs cluster setup --force --name my-cluster controller1 controller2 controller3
Destroying cluster on nodes: controller1, controller2, controller3…
controller1: Stopping Cluster (pacemaker)…
controller2: Stopping Cluster (pacemaker)…
controller3: Stopping Cluster (pacemaker)…
controller3: Successfully destroyed cluster
controller1: Successfully destroyed cluster
controller2: Successfully destroyed cluster

Sending ‘pacemaker_remote authkey’ to ‘controller1’, ‘controller2’, ‘controller3’
controller1: successful distribution of the file ‘pacemaker_remote authkey’
controller3: successful distribution of the file ‘pacemaker_remote authkey’
controller3: successful distribution of the file ‘pacemaker_remote authkey’
controller2: successful distribution of the file ‘pacemaker_remote authkey’
Sending cluster config files to the nodes…
controller1: Succeeded
controller2: Succeeded
controller3: Succeeded

Synchronizing pcsd certificates on nodes controller1, controller2, controller3…
controller3: Success
controller2: Success
controller1: Success
Restarting pcsd on the nodes in order to reload the certificates…

controller3: Success
controller2: Success
controller1: Success
[root@controller1 ~]# pcs cluster start --all
controller1: Starting Cluster…
controller2: Starting Cluster…
controller3: Starting Cluster…
[root@controller1 ~]# pcs cluster enable --all
controller1: Cluster Enabled
controller2: Cluster Enabled
controller3: Cluster Enabled
[root@controller1 ~]# pcs cluster status
Cluster Status:
Stack: unknown
Current DC: NONE
Last updated: Fri Dec 15 00:21:36 2017
Last change: Fri Dec 15 00:21:24 2017 by hacluster via crmd on controller1
3 nodes configured
0 resources configured
PCSD Status:
controller3: Online
controller2: Online
controller1: Online

Start Corosync On Controllers

[root@controller ~]# systemctl start corosync

[root@controller1 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
id = 192.168.220.21
status = ring 0 active with no faults
[root@controller2 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 2
RING ID 0
id = 192.168.220.22
status = ring 0 active with no faults
[root@controller3 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 3
RING ID 0
id = 192.168.220.23
status = ring 0 active with no faults

[root@controller ~]# corosync-cmapctl | grep runtime.totem.pg.mrp.srp.members
runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.220.21)
runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.1.status (str) = joined
runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.220.22)
runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.2.status (str) = joined
runtime.totem.pg.mrp.srp.members.3.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.3.ip (str) = r(0) ip(192.168.220.23)
runtime.totem.pg.mrp.srp.members.3.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.3.status (str) = joined

Start Pacemaker

[root@controller1 ~]# systemctl start pacemaker
[root@controller1 ~]# crm_mon -1
Stack: corosync
Current DC: controller1 (version 1.1.16-12.el7_4.5-94ff4df) - partition with quorum
Last updated: Fri Dec 15 00:34:25 2017
Last change: Fri Dec 15 00:21:45 2017 by hacluster via crmd on controller1

3 nodes configured
0 resources configured

Online: [ controller1 controller2 controller3 ]

No active resources

Set Basic Cluster Properties

[root@controller1 ~]# pcs property set pe-warn-series-max=1000 \
pe-input-series-max=1000 \
pe-error-series-max=1000 \
cluster-recheck-interval=5min
[root@controller1 ~]# pcs property set stonith-enabled=false

Install RabbitMQ

[root@controller1 ~]# yum install erlang rabbitmq-server -y
[root@controller2 ~]# yum install erlang rabbitmq-server -y
[root@controller3 ~]# yum install erlang rabbitmq-server -y

[root@controller1 ~]# systemctl enable rabbitmq-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
[root@controller2 ~]# systemctl enable rabbitmq-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
[root@controller3 ~]# systemctl enable rabbitmq-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.

[root@controller1 ~]# systemctl start rabbitmq-server.service

[root@controller1 ~]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
mochiweb
webmachine
rabbitmq_web_dispatch
amqp_client
rabbitmq_management_agent
rabbitmq_management

Applying plugin configuration to rabbit@controller1… started 6 plugins.
[root@controller2 ~]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
mochiweb
webmachine
rabbitmq_web_dispatch
amqp_client
rabbitmq_management_agent
rabbitmq_management

Applying plugin configuration to rabbit@controller2… started 6 plugins.
[root@controller3 ~]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
mochiweb
webmachine
rabbitmq_web_dispatch
amqp_client
rabbitmq_management_agent
rabbitmq_management

Applying plugin configuration to rabbit@controller3… started 6 plugins.

Configure RabbitMQ for HA queues

[root@controller1 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@controller2:/var/lib/rabbitmq/.erlang.cookie
.erlang.cookie 100% 20 19.8KB/s 00:00
[root@controller1 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@controller3:/var/lib/rabbitmq/.erlang.cookie
.erlang.cookie 100% 20 34.2KB/s 00:00

On Each Nodes

# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie

chmod 400 /var/lib/rabbitmq/.erlang.cookie

systemctl enable rabbitmq-server.service

systemctl start rabbitmq-server.service

[root@controller1 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller1 …
[{nodes,[{disc,[rabbit@controller1]}]},
{running_nodes,[rabbit@controller1]},
{cluster_name,<<”rabbit@controller1”>>},
{partitions,[]},
{alarms,[{rabbit@controller1,[]}]}]
[root@controller2 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller2 …
[{nodes,[{disc,[rabbit@controller2]}]},
{running_nodes,[rabbit@controller2]},
{cluster_name,<<”rabbit@controller2”>>},
{partitions,[]},
{alarms,[{rabbit@controller2,[]}]}]
[root@controller3 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller3 …
[{nodes,[{disc,[rabbit@controller3]}]},
{running_nodes,[rabbit@controller3]},
{cluster_name,<<”rabbit@controller3”>>},
{partitions,[]},
{alarms,[{rabbit@controller3,[]}]}]

On Other Nodes

[root@controller2 ~]# rabbitmqctl stop_app
Stopping node rabbit@controller2 …
[root@controller3 ~]# rabbitmqctl stop_app
Stopping node rabbit@controller3 …

[root@controller2 ~]# rabbitmqctl join_cluster --ram rabbit@controller1
Clustering node rabbit@controller2 with rabbit@controller1 …
[root@controller3 ~]# rabbitmqctl join_cluster --ram rabbit@controller1
Clustering node rabbit@controller3 with rabbit@controller1 …

[root@controller2 ~]# rabbitmqctl start_app
Starting node rabbit@controller2 …
[root@controller3 ~]# rabbitmqctl start_app
Starting node rabbit@controller3 …

Set the HA-Mode Policy

[root@controller1 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller1 …
[{nodes,[{disc,[rabbit@controller1]},
{ram,[rabbit@controller3,rabbit@controller2]}]},
{running_nodes,[rabbit@controller3,rabbit@controller2,rabbit@controller1]},
{cluster_name,<<”rabbit@controller1”>>},
{partitions,[]},
{alarms,[{rabbit@controller3,[]},
{rabbit@controller2,[]},
{rabbit@controller1,[]}]}]
[root@controller2 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller2 …
[{nodes,[{disc,[rabbit@controller1]},
{ram,[rabbit@controller3,rabbit@controller2]}]},
{running_nodes,[rabbit@controller3,rabbit@controller1,rabbit@controller2]},
{cluster_name,<<”rabbit@controller1”>>},
{partitions,[]},
{alarms,[{rabbit@controller3,[]},
{rabbit@controller1,[]},
{rabbit@controller2,[]}]}]
[root@controller3 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" …
[root@controller3 ~]#
[root@controller3 ~]#
[root@controller3 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller3 …
[{nodes,[{disc,[rabbit@controller1]},
{ram,[rabbit@controller3,rabbit@controller2]}]},
{running_nodes,[rabbit@controller2,rabbit@controller1,rabbit@controller3]},
{cluster_name,<<”rabbit@controller1”>>},
{partitions,[]},
{alarms,[{rabbit@controller2,[]},
{rabbit@controller1,[]},
{rabbit@controller3,[]}]}]

[root@controller1 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" …
[root@controller2 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" …
[root@controller3 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" …

Now, Take the Browser

![HA RabbitMQ management console](img/HA-RabbitMQ-V1.gif)

username/password :

guest/guest

Configure OpenStack services to use RabbitMQ HA queues

transport_url = rabbit://RABBIT_USER:RABBIT_PASS@rabbit1:5672,
RABBIT_USER:RABBIT_PASS@rabbit2:5672,RABBIT_USER:RABBIT_PASS@rabbit3:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

For the Old Set-Up

# rabbitmqctl stop_app

rabbitmqctl reset

rabbitmqctl start_app

Create RabbitMQ Admin

[root@controller1 ~]# rabbitmqctl add_user admin admin
Creating user “admin” …
[root@controller1 ~]# rabbitmqctl set_user_tags admin administrator
Setting tags for user “admin” to [administrator] …
[root@controller1 ~]# rabbitmqctl add_user openstack openstack
Creating user “openstack” …
[root@controller1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" …
[root@controller1 ~]# rabbitmqctl set_user_tags openstack administrator
Setting tags for user “openstack” to [administrator] …
[root@controller1 ~]# systemctl restart rabbitmq-server.service

Install Database On Controller Nodes

[root@controller1 ~]# yum install memcached python-memcached -y
[root@controller1 ~]# yum install mariadb-galera-server mariadb-galera-common galera rsync -y

Database configuration(controller1)

[root@controller1 ~]# cat /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
binlog_format=ROW
bind-address=192.168.220.21 # The management IP address of the controller node

InnoDB Configuration

default_storage_engine=innodb
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=122M

!includedir /etc/my.cnf.d/

[root@controller1 ~]# cat /etc/my.cnf.d/galera.cnf | grep '^[^#].*'
[mysqld]
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0
wsrep_on=1
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="my_wsrep_cluster"
wsrep_cluster_address="gcomm://controller1,controller2,controller3"
wsrep_node_name=controller1
wsrep_node_address=192.168.220.21
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
wsrep_sst_auth=root:

Database configuration(controller2,controller3)

[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/my.cnf controller2:/etc/
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/my.cnf.d/galera.cnf controller2:/etc/my.cnf.d/
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/my.cnf controller3:/etc/
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/my.cnf.d/galera.cnf controller3:/etc/my.cnf.d/

[root@controller2 ~]# cat /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
binlog_format=ROW
bind-address=192.168.220.22 # The management IP address of the controller node

InnoDB Configuration

default_storage_engine=innodb
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=122M

!includedir /etc/my.cnf.d/

[root@controller2 ~]# cat /etc/my.cnf.d/galera.cnf | grep '^[^#].*'
[mysqld]
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0
wsrep_on=1
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="my_wsrep_cluster"
wsrep_cluster_address="gcomm://controller1,controller2,controller3"
wsrep_node_name=controller2
wsrep_node_address=192.168.220.22
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
wsrep_sst_auth=root:

[root@controller3 ~]# cat /etc/my.cnf
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
binlog_format=ROW
bind-address=192.168.220.23 # The management IP address of the controller node

InnoDB Configuration

default_storage_engine=innodb
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=0
innodb_buffer_pool_size=122M

!includedir /etc/my.cnf.d/

[root@controller3 ~]# cat /etc/my.cnf.d/galera.cnf | grep '^[^#].*'
[mysqld]
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0
wsrep_on=1
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="my_wsrep_cluster"
wsrep_cluster_address="gcomm://controller1,controller2,controller3"
wsrep_node_name=controller3
wsrep_node_address=192.168.220.23
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
wsrep_sst_auth=root:

Database Management

Initialize the Primary Component on one cluster node

[root@controller1 ~]# /usr/libexec/mysqld --wsrep-new-cluster --user=root &

Start the database server on all other cluster nodes

[root@controller2 ~]# systemctl start mariadb
[root@controller3 ~]# systemctl start mariadb

Check MariaDB Cluster
The Key Value :
  • wsrep_connected ON
  • wsrep_cluster_size 3
  • wsrep_local_index 0 (controller1 : 0 controller2 : 1 controller3 : 2)
  • wsrep_incoming_addresses 192.168.220.21:3306,192.168.220.22:3306,192.168.220.23:3306

[root@controller1 ~]# mysql -uroot -proot -e "show status like 'wsrep_%'"
+——————————+————————————————————-+
Variable_name Value
+——————————+————————————————————-+
wsrep_apply_oooe 0.000000
wsrep_apply_oool 0.000000
wsrep_apply_window 0.000000
wsrep_causal_reads 0
wsrep_cert_deps_distance 0.000000
wsrep_cert_index_size 0
wsrep_cert_interval 0.000000
wsrep_cluster_conf_id 3
wsrep_cluster_size 3
wsrep_cluster_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_cluster_status Primary
wsrep_commit_oooe 0.000000
wsrep_commit_oool 0.000000
wsrep_commit_window 0.000000
wsrep_connected ON
wsrep_desync_count 0
wsrep_evs_delayed
wsrep_evs_evict_list
wsrep_evs_repl_latency 0/0/0/0/0
wsrep_evs_state OPERATIONAL
wsrep_flow_control_paused 0.000000
wsrep_flow_control_paused_ns 0
wsrep_flow_control_recv 0
wsrep_flow_control_sent 0
wsrep_gcomm_uuid 37644373-e137-11e7-8fef-c76eb92175c6
wsrep_incoming_addresses 192.168.220.21:3306,192.168.220.22:3306,192.168.220.23:3306
wsrep_last_committed 0
wsrep_local_bf_aborts 0
wsrep_local_cached_downto 18446744073709551615
wsrep_local_cert_failures 0
wsrep_local_commits 0
wsrep_local_index 0
wsrep_local_recv_queue 0
wsrep_local_recv_queue_avg 0.100000
wsrep_local_recv_queue_max 2
wsrep_local_recv_queue_min 0
wsrep_local_replays 0
wsrep_local_send_queue 0
wsrep_local_send_queue_avg 0.000000
wsrep_local_send_queue_max 1
wsrep_local_send_queue_min 0
wsrep_local_state 4
wsrep_local_state_comment Synced
wsrep_local_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_protocol_version 7
wsrep_provider_name Galera
wsrep_provider_vendor Codership Oy wsrep_provider_version 3.16(r5c765eb)
wsrep_ready ON
wsrep_received 10
wsrep_received_bytes 794
wsrep_repl_data_bytes 0
wsrep_repl_keys 0
wsrep_repl_keys_bytes 0
wsrep_repl_other_bytes 0
wsrep_replicated 0
wsrep_replicated_bytes 0
wsrep_thread_count 2
+——————————+————————————————————-+
[root@controller2 ~]# mysql -uroot -proot -e "show status like 'wsrep_%'"
+——————————+————————————————————-+
Variable_name Value
+——————————+————————————————————-+
wsrep_apply_oooe 0.000000
wsrep_apply_oool 0.000000
wsrep_apply_window 0.000000
wsrep_causal_reads 0
wsrep_cert_deps_distance 0.000000
wsrep_cert_index_size 0
wsrep_cert_interval 0.000000
wsrep_cluster_conf_id 3
wsrep_cluster_size 3
wsrep_cluster_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_cluster_status Primary
wsrep_commit_oooe 0.000000
wsrep_commit_oool 0.000000
wsrep_commit_window 0.000000
wsrep_connected ON
wsrep_desync_count 0
wsrep_evs_delayed
wsrep_evs_evict_list
wsrep_evs_repl_latency 0.000204825/0.00047307/0.000741314/0.000268244/2
wsrep_evs_state OPERATIONAL
wsrep_flow_control_paused 0.000000
wsrep_flow_control_paused_ns 0
wsrep_flow_control_recv 0
wsrep_flow_control_sent 0
wsrep_gcomm_uuid 781401a8-e137-11e7-bba3-9e3f83e88f32
wsrep_incoming_addresses 192.168.220.21:3306,192.168.220.22:3306,192.168.220.23:3306
wsrep_last_committed 0
wsrep_local_bf_aborts 0
wsrep_local_cached_downto 18446744073709551615
wsrep_local_cert_failures 0
wsrep_local_commits 0
wsrep_local_index 1
wsrep_local_recv_queue 0
wsrep_local_recv_queue_avg 0.000000
wsrep_local_recv_queue_max 1
wsrep_local_recv_queue_min 0
wsrep_local_replays 0
wsrep_local_send_queue 0
wsrep_local_send_queue_avg 0.000000
wsrep_local_send_queue_max 1
wsrep_local_send_queue_min 0
wsrep_local_state 4
wsrep_local_state_comment Synced
wsrep_local_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_protocol_version 7
wsrep_provider_name Galera
wsrep_provider_vendor Codership Oy wsrep_provider_version 3.16(r5c765eb)
wsrep_ready ON
wsrep_received 4
wsrep_received_bytes 529
wsrep_repl_data_bytes 0
wsrep_repl_keys 0
wsrep_repl_keys_bytes 0
wsrep_repl_other_bytes 0
wsrep_replicated 0
wsrep_replicated_bytes 0
wsrep_thread_count 2
+——————————+————————————————————-+
[root@controller3 ~]# mysql -uroot -proot -e "show status like 'wsrep_%'"
+——————————+————————————————————-+
Variable_name Value
+——————————+————————————————————-+
wsrep_apply_oooe 0.000000
wsrep_apply_oool 0.000000
wsrep_apply_window 0.000000
wsrep_causal_reads 0
wsrep_cert_deps_distance 0.000000
wsrep_cert_index_size 0
wsrep_cert_interval 0.000000
wsrep_cluster_conf_id 3
wsrep_cluster_size 3
wsrep_cluster_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_cluster_status Primary
wsrep_commit_oooe 0.000000
wsrep_commit_oool 0.000000
wsrep_commit_window 0.000000
wsrep_connected ON
wsrep_desync_count 0
wsrep_evs_delayed
wsrep_evs_evict_list
wsrep_evs_repl_latency 0.000224679/0.000465885/0.000643407/0.000164076/5
wsrep_evs_state OPERATIONAL
wsrep_flow_control_paused 0.000000
wsrep_flow_control_paused_ns 0
wsrep_flow_control_recv 0
wsrep_flow_control_sent 0
wsrep_gcomm_uuid 7ce71d07-e137-11e7-b720-baa5cd647a15
wsrep_incoming_addresses 192.168.220.21:3306,192.168.220.22:3306,192.168.220.23:3306
wsrep_last_committed 0
wsrep_local_bf_aborts 0
wsrep_local_cached_downto 18446744073709551615
wsrep_local_cert_failures 0
wsrep_local_commits 0
wsrep_local_index 2
wsrep_local_recv_queue 0
wsrep_local_recv_queue_avg 0.000000
wsrep_local_recv_queue_max 1
wsrep_local_recv_queue_min 0
wsrep_local_replays 0
wsrep_local_send_queue 0
wsrep_local_send_queue_avg 0.333333
wsrep_local_send_queue_max 2
wsrep_local_send_queue_min 0
wsrep_local_state 4
wsrep_local_state_comment Synced
wsrep_local_state_uuid 37649504-e137-11e7-815b-1ec404fbf23e
wsrep_protocol_version 7
wsrep_provider_name Galera
wsrep_provider_vendor Codership Oy wsrep_provider_version 3.16(r5c765eb)
wsrep_ready ON
wsrep_received 3
wsrep_received_bytes 311
wsrep_repl_data_bytes 0
wsrep_repl_keys 0
wsrep_repl_keys_bytes 0
wsrep_repl_other_bytes 0
wsrep_replicated 0
wsrep_replicated_bytes 0
wsrep_thread_count 2
+——————————+————————————————————-+

Test MariaDB Cluster

[root@controller1 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
information_schema
mysql
performance_schema
+——————–+
[root@controller2 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
information_schema
mysql
performance_schema
+——————–+
[root@controller3 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
information_schema
mysql
performance_schema
+——————–+
[root@controller1 ~]# mysql -uroot -proot -e "create database galera_test"
[root@controller1 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
galera_test
information_schema
mysql
performance_schema
+——————–+
[root@controller2 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
galera_test
information_schema
mysql
performance_schema
+——————–+
[root@controller3 ~]# mysql -uroot -proot -e "show databases"
+——————–+
Database
+——————–+
galera_test
information_schema
mysql
performance_schema
+——————–+

Database Management

echo '
MYSQL_USERNAME="clustercheck_user"
MYSQL_PASSWORD="my_clustercheck_password"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
' > /etc/sysconfig/clustercheck

Grant the clustercheck user

[root@controller1 ~]# mysql -uroot -proot -e "GRANT PROCESS ON *.* TO 'clustercheck_user'@'localhost' IDENTIFIED BY 'my_clustercheck_password';FLUSH PRIVILEGES;"
[root@controller1 ~]# mysql -uroot -proot -e "SELECT User, Host, Password FROM mysql.user;"
+——————-+———–+——————————————-+
User Host Password
+——————-+———–+——————————————-+
root localhost *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B
root 127.0.0.1 *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B
root ::1 *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B
clustercheck_user localhost *B7ECF4F5C3B1DDB87695D91C5D27631AEC569993
+——————-+———–+——————————————-+

Create a configuration file for the HAProxy monitor service

echo '
service galera-monitor
{
port = 9200
disable = no
socket_type = stream
protocol = tcp
wait = no
user = root
group = root
groups = yes
server = /usr/bin/clustercheck
type = UNLISTED
per_source = UNLIMITED
log_on_success =
log_on_failure = HOST
flags = REUSE
}
' > /etc/xinetd.d/galera-monitor

Create OpenStack Users & Databases

[root@controller1 ~]# mysql -u root -proot -e "

create database keystone;
grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'keystone';
grant all privileges on keystone.* to 'keystone'@'%' identified by 'keystone';
create database glance;
grant all privileges on glance.* to 'glance'@'localhost' identified by 'glance';
grant all privileges on glance.* to 'glance'@'%' identified by 'glance';

create database nova;
grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova.* to 'nova'@'%' identified by 'nova';
create database nova_api;
grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova';
create database nova_cell0;
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova';

create database neutron;
grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'neutron';
grant all privileges on neutron.* to 'neutron'@'%' identified by 'neutron';

flush privileges;
select user,host from mysql.user;
show databases;
"


+——————-+———–+
user host
+——————-+———–+
glance %
keystone %
neutron %
nova %
root 127.0.0.1
root ::1
clustercheck_user localhost
glance localhost
keystone localhost
neutron localhost
nova localhost
root localhost
+——————-+———–+
+——————–+
Database
+——————–+
galera_test
glance
information_schema
keystone
mysql
neutron
nova
nova_api
nova_cell0
performance_schema
+——————–+

Start the xinetd daemon for clustercheck

[root@controller1 ~]# systemctl daemon-reload
[root@controller1 ~]# systemctl enable xinetd
[root@controller1 ~]# systemctl start xinetd

Install the packages

# yum install memcached python-memcached

Edit the /etc/sysconfig/memcached file

[root@controller1 ~]# cat /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"
Memcached_servers = controller1:11211,controller2:11211,controller3:11211

PCS Create Memcached

# pcs resource delete memcached --force

pcs resource create memcached systemd:memcached --clone interleave=true

pcs status

Firewall Configure

[root@localhost ~]# systemctl stop firewalld.service
[root@localhost ~]# systemctl disable firewalld.service
[root@localhost ~]# firewall-cmd --state

Disable SELinux

[root@localhost ~]# sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
[root@localhost ~]# sed -i 's/^SELINUXTYPE=.*/SELINUXTYPE=disabled/g' /etc/selinux/config
[root@localhost ~]# grep --color=auto '^SELINUX' /etc/selinux/config
[root@localhost ~]# setenforce 0

Synchronize time

Refer to “CLOUD COMPUTING —> OpenStack High Availability —> 3.Create Local NTP”

Set HostName

[root@localhost ~]# hostnamectl set-hostname controller1
[root@localhost ~]# hostnamectl set-hostname controller2
[root@localhost ~]# hostnamectl set-hostname controller3
[root@localhost ~]# hostnamectl set-hostname compute01
[root@localhost ~]# hostnamectl set-hostname compute02

Network Configure

192.168.200.0 (Host-only network), 192.168.220.0 (NAT network)

Controller1

[root@controller1 ~]# ls -al /etc/sysconfig/network-scripts/ifcfg-*
-rw-r--r-- 1 root root 369 Dec 13 22:22 /etc/sysconfig/network-scripts/ifcfg-ens33
-rw-r--r-- 1 root root 369 Dec 13 22:23 /etc/sysconfig/network-scripts/ifcfg-ens37
-rw-r--r-- 1 root root 326 Dec 13 22:26 /etc/sysconfig/network-scripts/ifcfg-ens38
-rw-r--r-- 1 root root 326 Dec 13 22:28 /etc/sysconfig/network-scripts/ifcfg-ens39
-rw-r--r--. 1 root root 254 May 3 2017 /etc/sysconfig/network-scripts/ifcfg-lo

Update the 1st network adapter:

[root@controller1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=9eb00433-c8b9-4be2-af53-50cab2247226
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.11
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 2nd network adapter:

[root@controller1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=cf7a62c9-e135-4488-b252-99717664941c
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.220.21
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 3rd network adapter:

[root@controller1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens38
UUID=b077534d-3154-43ca-a240-ac811f255f42
DEVICE=ens38
ONBOOT=yes
IPADDR=192.168.200.11
NETMASK=255.255.255.0

Update the 4th network adapter:

[root@controller1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens39
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens39
UUID=ea5e0f92-8f8c-457f-a693-f946c623d0e7
DEVICE=ens39
ONBOOT=yes
IPADDR=192.168.200.21
NETMASK=255.255.255.0

Controller2

[root@controller2 ~]# ls -al /etc/sysconfig/network-scripts/ifcfg-*
-rw-r--r-- 1 root root 369 Dec 13 22:22 /etc/sysconfig/network-scripts/ifcfg-ens33
-rw-r--r-- 1 root root 369 Dec 13 22:23 /etc/sysconfig/network-scripts/ifcfg-ens37
-rw-r--r-- 1 root root 326 Dec 13 22:26 /etc/sysconfig/network-scripts/ifcfg-ens38
-rw-r--r-- 1 root root 326 Dec 13 22:28 /etc/sysconfig/network-scripts/ifcfg-ens39
-rw-r--r--. 1 root root 254 May 3 2017 /etc/sysconfig/network-scripts/ifcfg-lo

Update the 1st network adapter:

[root@controller2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=14a0d64d-bca2-4b37-ac1d-460897567fff
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.12
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 2nd network adapter:

[root@controller2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=c382316f-97d8-4e3f-a82b-b48cd6d0d025
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.220.22
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 3rd network adapter:

[root@controller2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens38
UUID=f727a6f1-0ae0-4c0d-aa54-fbaefb3570c7
DEVICE=ens38
ONBOOT=yes
IPADDR=192.168.200.12
NETMASK=255.255.255.0

Update the 4th network adapter:

[root@controller2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens39
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens39
UUID=b56c3af9-2844-47e3-9e30-ef078640b2f1
DEVICE=ens39
ONBOOT=yes
IPADDR=192.168.200.22
NETMASK=255.255.255.0

Controller3

[root@controller3 ~]# ls -al /etc/sysconfig/network-scripts/ifcfg-*
-rw-r--r-- 1 root root 369 Dec 13 22:22 /etc/sysconfig/network-scripts/ifcfg-ens33
-rw-r--r-- 1 root root 369 Dec 13 22:23 /etc/sysconfig/network-scripts/ifcfg-ens37
-rw-r--r-- 1 root root 326 Dec 13 22:26 /etc/sysconfig/network-scripts/ifcfg-ens38
-rw-r--r-- 1 root root 326 Dec 13 22:28 /etc/sysconfig/network-scripts/ifcfg-ens39
-rw-r--r--. 1 root root 254 May 3 2017 /etc/sysconfig/network-scripts/ifcfg-lo

Update the 1st network adapter:

[root@controller3 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=5ca30d63-789d-4df4-9f98-29aa74945e1b
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.13
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 2nd network adapter:

[root@controller3 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=8cb6cd8b-304f-4148-96bb-326976276d17
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.220.23
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 3rd network adapter:

[root@controller3 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens38
UUID=9d8e2ebb-ded2-4d51-8b77-072ccfc269c3
DEVICE=ens38
ONBOOT=yes
IPADDR=192.168.200.13
NETMASK=255.255.255.0

Update the 4th network adapter:

[root@controller3 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens39
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens39
UUID=639fdbe3-b6f4-4f8d-b988-b9e01ef5041e
DEVICE=ens39
ONBOOT=yes
IPADDR=192.168.200.23
NETMASK=255.255.255.0

Compute01

[root@compute01 ~]# ls -al /etc/sysconfig/network-scripts/ifcfg-*
-rw-r–r– 1 root root 369 Dec 13 22:22 /etc/sysconfig/network-scripts/ifcfg-ens33
-rw-r–r– 1 root root 326 Dec 13 22:23 /etc/sysconfig/network-scripts/ifcfg-ens37
-rw-r–r–. 1 root root 254 May 3 2017 /etc/sysconfig/network-scripts/ifcfg-lo

Update the 1st network adapter:

[root@compute01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=fdab3eb4-d1c6-46e4-a09e-f3cff2c23514
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.51
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 2nd network adapter:

[root@compute01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=0157d35c-ced3-4c0c-a519-48cb2a11e8db
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.200.51
NETMASK=255.255.255.0

Compute02

[root@compute02 ~]# ls -al /etc/sysconfig/network-scripts/ifcfg-*
-rw-r–r– 1 root root 369 Dec 13 22:22 /etc/sysconfig/network-scripts/ifcfg-ens33
-rw-r–r– 1 root root 326 Dec 13 22:23 /etc/sysconfig/network-scripts/ifcfg-ens37
-rw-r–r–. 1 root root 254 May 3 2017 /etc/sysconfig/network-scripts/ifcfg-lo

Update the 1st network adapter:

[root@compute02 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=e8cf9148-b8dd-4879-8851-f523bb0cefd0
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.220.61
NETMASK=255.255.255.0
GATEWAY=192.168.220.2
DNS1=114.114.114.114

Update the 2nd network adapter:

[root@compute02 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=308add2b-09df-495f-b706-5afbdff0ff9e
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.200.61
NETMASK=255.255.255.0

Update HostName

[root@controller1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.220.20 controller # virtual IP
192.168.220.21 controller1
192.168.220.22 controller2
192.168.220.23 controller3
192.168.220.51 compute01
192.168.220.61 compute02

Controller SSH

[root@controller1 ~]# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
Generating public/private rsa key pair.
Created directory ‘/root/.ssh’.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:RJz627PplvqRVorpdiRPXLZ15aiQD/YruvXT/Cu1sSo root@controller1
The key's randomart image is:
+—[RSA 2048]—-+

.o .
.. . o.
.. = o o o
.So B.+ .
oo++= o
oB*. +. +
.ooOEo.oo
.=O=+ooooo
+—-[SHA256]—–+
[root@controller1 ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@controller2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: “/root/.ssh/id_rsa.pub”
The authenticity of host ‘controller2 (192.168.220.22)’ can’t be established.
ECDSA key fingerprint is SHA256:Q5vLVMvx0+FWYxqM263f4//dH72IwEgT/bWeOIlwTfY.
ECDSA key fingerprint is MD5:b5:6a:fb:3f:be:98:f7:d9:71:4e:d1:89:c4:7f:8f:00.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed – if you are prompted now it is to install the new keys
root@controller2’s password:

Number of key(s) added: 1

Now try logging into the machine, with: “ssh ‘root@controller2’”
and check to make sure that only the key(s) you wanted were added.

[root@controller1 ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@controller3
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: “/root/.ssh/id_rsa.pub”
The authenticity of host ‘controller3 (192.168.220.23)’ can’t be established.
ECDSA key fingerprint is SHA256:Q5vLVMvx0+FWYxqM263f4//dH72IwEgT/bWeOIlwTfY.
ECDSA key fingerprint is MD5:b5:6a:fb:3f:be:98:f7:d9:71:4e:d1:89:c4:7f:8f:00.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed – if you are prompted now it is to install the new keys
root@controller3’s password:

Number of key(s) added: 1

Now try logging into the machine, with: “ssh ‘root@controller3’”
and check to make sure that only the key(s) you wanted were added.

[root@controller1 ~]# ssh controller2
Last login: Thu Dec 14 00:47:15 2017 from 192.168.220.1
[root@controller2 ~]# exit
logout
Connection to controller2 closed.
[root@controller1 ~]# ssh controller3
Last login: Thu Dec 14 00:47:16 2017 from 192.168.220.1
[root@controller3 ~]# exit
logout
Connection to controller3 closed.
[root@controller1 ~]#

Use Local Repository

Refer to "CLOUD COMPUTING —> OpenStack High Availability —> 2. Create Local Repository"

Use Ali Mirror

[root@localrepo ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@localrepo ~]# cat /etc/yum.repos.d/CentOS-Pike.repo

[centos-openstack-pike]
name=centos-openstack-pike
baseurl=https://mirrors.aliyun.com/centos/$releasever/cloud/$basearch/openstack-pike/
gpgcheck=0
enabled=1

[centos-qemu-ev]
name=centos-qemu-ev
baseurl=https://mirrors.aliyun.com/centos/$releasever/virt/$basearch/kvm-common/
gpgcheck=0
enabled=1

Sync Repo

[root@localrepo myrepo]# reposync --repoid=centos-openstack-pike
[root@localrepo myrepo]# reposync --repoid=centos-qemu-ev
[root@localrepo myrepo]# ls -al
total 120
drwxr-xr-x. 7 root root 98 Dec 14 02:51 .
drwxr-xr-x. 3 root root 20 Dec 13 03:03 ..
drwxr-xr-x. 4 root root 38 Dec 14 02:30 base
drwxr-xr-x. 3 root root 90112 Dec 14 02:50 centos-openstack-pike
drwxr-xr-x. 2 root root 4096 Dec 14 02:51 centos-qemu-ev
drwxr-xr-x. 4 root root 38 Dec 14 02:31 extras
drwxr-xr-x. 4 root root 38 Dec 14 02:31 updates

Create Repo

[root@localrepo myrepo]# createrepo /var/www/html/myrepo/centos-openstack-pike
Spawning worker 0 with 1091 pkgs
Spawning worker 1 with 1091 pkgs
Workers Finished
Saving Primary metadata
Saving file lists metadata
Saving other metadata
Generating sqlite DBs
Sqlite DBs complete
[root@localrepo myrepo]# createrepo /var/www/html/myrepo/centos-qemu-ev/
Spawning worker 0 with 18 pkgs
Spawning worker 1 with 17 pkgs
Workers Finished
Saving Primary metadata
Saving file lists metadata
Saving other metadata
Generating sqlite DBs
Sqlite DBs complete

Add Ali Mirror On Client Node

[root@controller1 ~]# cat /etc/yum.repos.d/CentOS-Pike.repo

[centos-openstack-pike]
name=centos-openstack-pike
baseurl=http://192.168.220.200/myrepo/centos-openstack-pike/
gpgcheck=0
enabled=1

[centos-qemu-ev]
name=centos-qemu-ev
baseurl=http://192.168.220.200/myrepo/centos-qemu-ev/
gpgcheck=0
enabled=1

[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/yum.repos.d/CentOS-Pike.repo controller2:/etc/yum.repos.d/
sending incremental file list
CentOS-Pike.repo
247 100% 0.00kB/s 0:00:00 (xfer#1, to-check=0/1)

sent 195 bytes received 31 bytes 452.00 bytes/sec
total size is 247 speedup is 1.09
[root@controller1 ~]# rsync -avzP -e 'ssh -p 22' /etc/yum.repos.d/CentOS-Pike.repo controller3:/etc/yum.repos.d/
sending incremental file list
CentOS-Pike.repo
247 100% 0.00kB/s 0:00:00 (xfer#1, to-check=0/1)

sent 195 bytes received 31 bytes 452.00 bytes/sec
total size is 247 speedup is 1.09

Install OpenStack Environment On All Nodes

[root@controller1 ~]# yum install centos-release-openstack-pike -y
[root@controller1 ~]# yum install python-openstackclient openstack-selinux python2-PyMySQL openstack-utils -y
[root@controller1 ~]# yum update -y

Controller node

Install Chrony

[root@localhost ~]# yum install chrony -y

Edit the /etc/chrony.conf file

[root@localntp ~]# diff /etc/chrony.conf /etc/chrony.conf_bak
3,7c3,6
< #server 0.centos.pool.ntp.org iburst
< #server 1.centos.pool.ntp.org iburst
< #server 2.centos.pool.ntp.org iburst
< #server 3.centos.pool.ntp.org iburst
< server 192.168.220.201 iburst
---
> server 0.centos.pool.ntp.org iburst
> server 1.centos.pool.ntp.org iburst
> server 2.centos.pool.ntp.org iburst
> server 3.centos.pool.ntp.org iburst
28d26
< allow 192.168.220.0/24

Start the NTP service

[root@localhost ~]# systemctl enable chronyd.service
[root@localhost ~]# systemctl start chronyd.service
[root@localhost ~]# systemctl restart chronyd.service

Other nodes

Install Chrony

[root@localhost ~]# yum install chrony -y

Edit the /etc/chrony.conf file

[root@localhost ~]# diff /etc/chrony.conf /etc/chrony.conf_bak
3,7c3,6
< #server 0.centos.pool.ntp.org iburst
< #server 1.centos.pool.ntp.org iburst
< #server 2.centos.pool.ntp.org iburst
< #server 3.centos.pool.ntp.org iburst
< server 192.168.220.201 iburst
---
> server 0.centos.pool.ntp.org iburst
> server 1.centos.pool.ntp.org iburst
> server 2.centos.pool.ntp.org iburst
> server 3.centos.pool.ntp.org iburst

Start the NTP service

[root@localhost ~]# systemctl enable chronyd.service
[root@localhost ~]# systemctl start chronyd.service
[root@localhost ~]# systemctl restart chronyd.service

Verify operation

controller node

[root@localhost ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^? localhost.localdomain 0 8 0 - +0ns[ +0ns] +/- 0ns

other nodes

[root@localhost ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^? 192.168.220.201 0 8 0 - +0ns[ +0ns] +/- 0ns

Configure Firewall

[root@localhost ~]# systemctl stop firewalld.service
[root@localhost ~]# systemctl disable firewalld.service

Install Httpd & Start Service

[root@localhost ~]# yum install httpd -y
[root@localhost ~]# systemctl enable httpd.service
[root@localhost ~]# systemctl restart httpd.service
[root@localhost ~]# systemctl status httpd.service

Now, Take the Browser

http://192.168.220.200

Install Reposync

[root@localhost ~]# yum install yum-utils -y

Create Repo Dir

[root@localhost ~]# mkdir /var/www/html/myrepo
[root@localhost ~]# cd /var/www/html/myrepo

List Repo

[root@localhost myrepo]# yum repolist
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.163.com
* extras: mirrors.163.com
* updates: mirrors.aliyun.com
repo id repo name status
base/7/x86_64 CentOS-7 - Base 9,591
extras/7/x86_64 CentOS-7 - Extras 284
updates/7/x86_64 CentOS-7 - Updates 1,490
repolist: 11,365

Sync Repo

[root@localhost myrepo]# reposync --repoid=base
[root@localhost myrepo]# reposync --repoid=updates
[root@localhost myrepo]# reposync --repoid=extras
[root@localhost myrepo]# pwd
/var/www/html/myrepo
[root@localhost myrepo]# ls -al
total 0
drwxr-xr-x. 5 root root 47 Dec 6 20:18 .
drwxr-xr-x. 3 root root 20 Dec 6 03:12 ..
drwxr-xr-x. 3 root root 22 Dec 6 03:48 base
drwxr-xr-x. 3 root root 22 Dec 6 20:18 extras
drwxr-xr-x. 3 root root 22 Dec 6 20:06 updates

Install Createrepo

[root@localhost ~]# yum install createrepo -y

Create Repo

[root@localhost ~]# createrepo /var/www/html/myrepo/base
Spawning worker 0 with 9591 pkgs
Workers Finished
Saving Primary metadata
Saving file lists metadata
Saving other metadata
Generating sqlite DBs
Sqlite DBs complete
[root@localhost ~]# createrepo /var/www/html/myrepo/updates
Spawning worker 0 with 1490 pkgs
Workers Finished
Saving Primary metadata
Saving file lists metadata
Saving other metadata
Generating sqlite DBs
Sqlite DBs complete
[root@localhost ~]# createrepo /var/www/html/myrepo/extras
Spawning worker 0 with 284 pkgs
Workers Finished
Saving Primary metadata
Saving file lists metadata
Saving other metadata
Generating sqlite DBs
Sqlite DBs complete

Now, Take the Browser

![Local repository served over HTTP](img/LocalRepo-V1.png)

Update Client Repo

[root@localhost ~]# diff /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak
15c15
< baseurl=http://192.168.220.200/myrepo/base
---
> mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
17c17
< gpgcheck=0
---
> gpgcheck=1
23c23
< baseurl=http://192.168.220.200/myrepo/updates
---
> mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
25c25
< gpgcheck=0
---
> gpgcheck=1
31c31
< baseurl=http://192.168.220.200/myrepo/extras
---
> mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
33c33
< gpgcheck=0
---
> gpgcheck=1

Make Client Cache

[root@localhost ~]# yum clean all && yum makecache

Client Example

[root@localhost ~]# yum install vim
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Resolving Dependencies
–> Running transaction check
—> Package vim-enhanced.x86_64 2:7.4.160-2.el7 will be installed
–> Processing Dependency: vim-common = 2:7.4.160-2.el7 for package: 2:vim-enhanced-7.4.160-2.el7.x86_64
–> Processing Dependency: perl(:MODULE_COMPAT_5.16.3) for package: 2:vim-enhanced-7.4.160-2.el7.x86_64
–> Processing Dependency: libperl.so()(64bit) for package: 2:vim-enhanced-7.4.160-2.el7.x86_64
–> Processing Dependency: libgpm.so.2()(64bit) for package: 2:vim-enhanced-7.4.160-2.el7.x86_64
–> Running transaction check
—> Package gpm-libs.x86_64 0:1.20.7-5.el7 will be installed
—> Package perl.x86_64 4:5.16.3-292.el7 will be installed
–> Processing Dependency: perl(Socket) >= 1.3 for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Scalar::Util) >= 1.10 for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl-macros for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(threads::shared) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(threads) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(constant) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Time::Local) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Time::HiRes) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Storable) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Socket) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Scalar::Util) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Pod::Simple::XHTML) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Pod::Simple::Search) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Getopt::Long) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Filter::Util::Call) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(File::Temp) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(File::Spec::Unix) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(File::Spec::Functions) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(File::Spec) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(File::Path) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Exporter) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Cwd) for package: 4:perl-5.16.3-292.el7.x86_64
–> Processing Dependency: perl(Carp) for package: 4:perl-5.16.3-292.el7.x86_64
—> Package perl-libs.x86_64 4:5.16.3-292.el7 will be installed
—> Package vim-common.x86_64 2:7.4.160-2.el7 will be installed
–> Processing Dependency: vim-filesystem for package: 2:vim-common-7.4.160-2.el7.x86_64
–> Running transaction check
—> Package perl-Carp.noarch 0:1.26-244.el7 will be installed
—> Package perl-Exporter.noarch 0:5.68-3.el7 will be installed
—> Package perl-File-Path.noarch 0:2.09-2.el7 will be installed
—> Package perl-File-Temp.noarch 0:0.23.01-3.el7 will be installed
—> Package perl-Filter.x86_64 0:1.49-3.el7 will be installed
—> Package perl-Getopt-Long.noarch 0:2.40-2.el7 will be installed
–> Processing Dependency: perl(Pod::Usage) >= 1.14 for package: perl-Getopt-Long-2.40-2.el7.noarch
–> Processing Dependency: perl(Text::ParseWords) for package: perl-Getopt-Long-2.40-2.el7.noarch
—> Package perl-PathTools.x86_64 0:3.40-5.el7 will be installed
—> Package perl-Pod-Simple.noarch 1:3.28-4.el7 will be installed
–> Processing Dependency: perl(Pod::Escapes) >= 1.04 for package: 1:perl-Pod-Simple-3.28-4.el7.noarch
–> Processing Dependency: perl(Encode) for package: 1:perl-Pod-Simple-3.28-4.el7.noarch
—> Package perl-Scalar-List-Utils.x86_64 0:1.27-248.el7 will be installed
—> Package perl-Socket.x86_64 0:2.010-4.el7 will be installed
—> Package perl-Storable.x86_64 0:2.45-3.el7 will be installed
—> Package perl-Time-HiRes.x86_64 4:1.9725-3.el7 will be installed
—> Package perl-Time-Local.noarch 0:1.2300-2.el7 will be installed
—> Package perl-constant.noarch 0:1.27-2.el7 will be installed
—> Package perl-macros.x86_64 4:5.16.3-292.el7 will be installed
—> Package perl-threads.x86_64 0:1.87-4.el7 will be installed
—> Package perl-threads-shared.x86_64 0:1.43-6.el7 will be installed
—> Package vim-filesystem.x86_64 2:7.4.160-2.el7 will be installed
–> Running transaction check
—> Package perl-Encode.x86_64 0:2.51-7.el7 will be installed
—> Package perl-Pod-Escapes.noarch 1:1.04-292.el7 will be installed
—> Package perl-Pod-Usage.noarch 0:1.63-3.el7 will be installed
–> Processing Dependency: perl(Pod::Text) >= 3.15 for package: perl-Pod-Usage-1.63-3.el7.noarch
–> Processing Dependency: perl-Pod-Perldoc for package: perl-Pod-Usage-1.63-3.el7.noarch
—> Package perl-Text-ParseWords.noarch 0:3.29-4.el7 will be installed
–> Running transaction check
—> Package perl-Pod-Perldoc.noarch 0:3.20-4.el7 will be installed
–> Processing Dependency: perl(parent) for package: perl-Pod-Perldoc-3.20-4.el7.noarch
–> Processing Dependency: perl(HTTP::Tiny) for package: perl-Pod-Perldoc-3.20-4.el7.noarch
—> Package perl-podlators.noarch 0:2.5.1-3.el7 will be installed
–> Running transaction check
—> Package perl-HTTP-Tiny.noarch 0:0.033-3.el7 will be installed
—> Package perl-parent.noarch 1:0.225-244.el7 will be installed
–> Finished Dependency Resolution

Dependencies Resolved

=========================================================================================================================
Package Arch Version Repository Size
=========================================================================================================================
Installing:
vim-enhanced x86_64 2:7.4.160-2.el7 base 1.0 M
Installing for dependencies:
gpm-libs x86_64 1.20.7-5.el7 base 32 k
perl x86_64 4:5.16.3-292.el7 base 8.0 M
perl-Carp noarch 1.26-244.el7 base 19 k
perl-Encode x86_64 2.51-7.el7 base 1.5 M
perl-Exporter noarch 5.68-3.el7 base 28 k
perl-File-Path noarch 2.09-2.el7 base 26 k
perl-File-Temp noarch 0.23.01-3.el7 base 56 k
perl-Filter x86_64 1.49-3.el7 base 76 k
perl-Getopt-Long noarch 2.40-2.el7 base 56 k
perl-HTTP-Tiny noarch 0.033-3.el7 base 38 k
perl-PathTools x86_64 3.40-5.el7 base 82 k
perl-Pod-Escapes noarch 1:1.04-292.el7 base 51 k
perl-Pod-Perldoc noarch 3.20-4.el7 base 87 k
perl-Pod-Simple noarch 1:3.28-4.el7 base 216 k
perl-Pod-Usage noarch 1.63-3.el7 base 27 k
perl-Scalar-List-Utils x86_64 1.27-248.el7 base 36 k
perl-Socket x86_64 2.010-4.el7 base 49 k
perl-Storable x86_64 2.45-3.el7 base 77 k
perl-Text-ParseWords noarch 3.29-4.el7 base 14 k
perl-Time-HiRes x86_64 4:1.9725-3.el7 base 45 k
perl-Time-Local noarch 1.2300-2.el7 base 24 k
perl-constant noarch 1.27-2.el7 base 19 k
perl-libs x86_64 4:5.16.3-292.el7 base 688 k
perl-macros x86_64 4:5.16.3-292.el7 base 43 k
perl-parent noarch 1:0.225-244.el7 base 12 k
perl-podlators noarch 2.5.1-3.el7 base 112 k
perl-threads x86_64 1.87-4.el7 base 49 k
perl-threads-shared x86_64 1.43-6.el7 base 39 k
vim-common x86_64 2:7.4.160-2.el7 base 5.9 M
vim-filesystem x86_64 2:7.4.160-2.el7 base 9.8 k

Transaction Summary

Install 1 Package (+30 Dependent packages)

Total download size: 18 M
Installed size: 60 M
Is this ok [y/d/N]: y
Downloading packages:
(1/31): gpm-libs-1.20.7-5.el7.x86_64.rpm 32 kB 00:00:00
(2/31): perl-Carp-1.26-244.el7.noarch.rpm 19 kB 00:00:00
(3/31): perl-5.16.3-292.el7.x86_64.rpm 8.0 MB 00:00:00
(4/31): perl-Encode-2.51-7.el7.x86_64.rpm 1.5 MB 00:00:00
(5/31): perl-Exporter-5.68-3.el7.noarch.rpm 28 kB 00:00:00
(6/31): perl-File-Path-2.09-2.el7.noarch.rpm 26 kB 00:00:00
(7/31): perl-Filter-1.49-3.el7.x86_64.rpm 76 kB 00:00:00
(8/31): perl-Getopt-Long-2.40-2.el7.noarch.rpm 56 kB 00:00:00
(9/31): perl-File-Temp-0.23.01-3.el7.noarch.rpm 56 kB 00:00:00
(10/31): perl-HTTP-Tiny-0.033-3.el7.noarch.rpm 38 kB 00:00:00
(11/31): perl-Pod-Escapes-1.04-292.el7.noarch.rpm 51 kB 00:00:00
(12/31): perl-PathTools-3.40-5.el7.x86_64.rpm 82 kB 00:00:00
(13/31): perl-Pod-Perldoc-3.20-4.el7.noarch.rpm 87 kB 00:00:00
(14/31): perl-Pod-Usage-1.63-3.el7.noarch.rpm 27 kB 00:00:00
(15/31): perl-Pod-Simple-3.28-4.el7.noarch.rpm 216 kB 00:00:00
(16/31): perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm 36 kB 00:00:00
(17/31): perl-Socket-2.010-4.el7.x86_64.rpm 49 kB 00:00:00
(18/31): perl-Storable-2.45-3.el7.x86_64.rpm 77 kB 00:00:00
(19/31): perl-Text-ParseWords-3.29-4.el7.noarch.rpm 14 kB 00:00:00
(20/31): perl-Time-HiRes-1.9725-3.el7.x86_64.rpm 45 kB 00:00:00
(21/31): perl-Time-Local-1.2300-2.el7.noarch.rpm 24 kB 00:00:00
(22/31): perl-constant-1.27-2.el7.noarch.rpm 19 kB 00:00:00
(23/31): perl-macros-5.16.3-292.el7.x86_64.rpm 43 kB 00:00:00
(24/31): perl-libs-5.16.3-292.el7.x86_64.rpm 688 kB 00:00:00
(25/31): perl-parent-0.225-244.el7.noarch.rpm 12 kB 00:00:00
(26/31): perl-threads-1.87-4.el7.x86_64.rpm 49 kB 00:00:00
(27/31): perl-threads-shared-1.43-6.el7.x86_64.rpm 39 kB 00:00:00
(28/31): perl-podlators-2.5.1-3.el7.noarch.rpm 112 kB 00:00:00
(29/31): vim-common-7.4.160-2.el7.x86_64.rpm 5.9 MB 00:00:00
(30/31): vim-filesystem-7.4.160-2.el7.x86_64.rpm 9.8 kB 00:00:00
(31/31): vim-enhanced-7.4.160-2.el7.x86_64.rpm 1.0 MB 00:00:00


Total 41 MB/s 18 MB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : 1:perl-parent-0.225-244.el7.noarch 1/31
Installing : perl-HTTP-Tiny-0.033-3.el7.noarch 2/31
Installing : perl-podlators-2.5.1-3.el7.noarch 3/31
Installing : perl-Pod-Perldoc-3.20-4.el7.noarch 4/31
Installing : 1:perl-Pod-Escapes-1.04-292.el7.noarch 5/31
Installing : perl-Text-ParseWords-3.29-4.el7.noarch 6/31
Installing : perl-Encode-2.51-7.el7.x86_64 7/31
Installing : perl-Pod-Usage-1.63-3.el7.noarch 8/31
Installing : 4:perl-macros-5.16.3-292.el7.x86_64 9/31
Installing : 4:perl-libs-5.16.3-292.el7.x86_64 10/31
Installing : perl-Storable-2.45-3.el7.x86_64 11/31
Installing : perl-Exporter-5.68-3.el7.noarch 12/31
Installing : perl-constant-1.27-2.el7.noarch 13/31
Installing : perl-Time-Local-1.2300-2.el7.noarch 14/31
Installing : perl-Socket-2.010-4.el7.x86_64 15/31
Installing : perl-Carp-1.26-244.el7.noarch 16/31
Installing : perl-PathTools-3.40-5.el7.x86_64 17/31
Installing : perl-Scalar-List-Utils-1.27-248.el7.x86_64 18/31
Installing : perl-File-Temp-0.23.01-3.el7.noarch 19/31
Installing : perl-File-Path-2.09-2.el7.noarch 20/31
Installing : perl-threads-shared-1.43-6.el7.x86_64 21/31
Installing : perl-threads-1.87-4.el7.x86_64 22/31
Installing : 4:perl-Time-HiRes-1.9725-3.el7.x86_64 23/31
Installing : perl-Filter-1.49-3.el7.x86_64 24/31
Installing : 1:perl-Pod-Simple-3.28-4.el7.noarch 25/31
Installing : perl-Getopt-Long-2.40-2.el7.noarch 26/31
Installing : 4:perl-5.16.3-292.el7.x86_64 27/31
Installing : gpm-libs-1.20.7-5.el7.x86_64 28/31
Installing : 2:vim-filesystem-7.4.160-2.el7.x86_64 29/31
Installing : 2:vim-common-7.4.160-2.el7.x86_64 30/31
Installing : 2:vim-enhanced-7.4.160-2.el7.x86_64 31/31
Verifying : perl-HTTP-Tiny-0.033-3.el7.noarch 1/31
Verifying : perl-threads-shared-1.43-6.el7.x86_64 2/31
Verifying : perl-Storable-2.45-3.el7.x86_64 3/31
Verifying : 2:vim-filesystem-7.4.160-2.el7.x86_64 4/31
Verifying : perl-Exporter-5.68-3.el7.noarch 5/31
Verifying : perl-constant-1.27-2.el7.noarch 6/31
Verifying : perl-PathTools-3.40-5.el7.x86_64 7/31
Verifying : 4:perl-macros-5.16.3-292.el7.x86_64 8/31
Verifying : 1:perl-parent-0.225-244.el7.noarch 9/31
Verifying : 4:perl-5.16.3-292.el7.x86_64 10/31
Verifying : perl-File-Temp-0.23.01-3.el7.noarch 11/31
Verifying : 1:perl-Pod-Simple-3.28-4.el7.noarch 12/31
Verifying : perl-Time-Local-1.2300-2.el7.noarch 13/31
Verifying : gpm-libs-1.20.7-5.el7.x86_64 14/31
Verifying : 4:perl-libs-5.16.3-292.el7.x86_64 15/31
Verifying : perl-Pod-Perldoc-3.20-4.el7.noarch 16/31
Verifying : perl-Socket-2.010-4.el7.x86_64 17/31
Verifying : perl-Carp-1.26-244.el7.noarch 18/31
Verifying : 2:vim-common-7.4.160-2.el7.x86_64 19/31
Verifying : perl-Scalar-List-Utils-1.27-248.el7.x86_64 20/31
Verifying : 1:perl-Pod-Escapes-1.04-292.el7.noarch 21/31
Verifying : perl-Pod-Usage-1.63-3.el7.noarch 22/31
Verifying : perl-Encode-2.51-7.el7.x86_64 23/31
Verifying : perl-podlators-2.5.1-3.el7.noarch 24/31
Verifying : perl-Getopt-Long-2.40-2.el7.noarch 25/31
Verifying : 2:vim-enhanced-7.4.160-2.el7.x86_64 26/31
Verifying : perl-File-Path-2.09-2.el7.noarch 27/31
Verifying : perl-threads-1.87-4.el7.x86_64 28/31
Verifying : 4:perl-Time-HiRes-1.9725-3.el7.x86_64 29/31
Verifying : perl-Filter-1.49-3.el7.x86_64 30/31
Verifying : perl-Text-ParseWords-3.29-4.el7.noarch 31/31

Installed:
vim-enhanced.x86_64 2:7.4.160-2.el7

Dependency Installed:
gpm-libs.x86_64 0:1.20.7-5.el7 perl.x86_64 4:5.16.3-292.el7
perl-Carp.noarch 0:1.26-244.el7 perl-Encode.x86_64 0:2.51-7.el7
perl-Exporter.noarch 0:5.68-3.el7 perl-File-Path.noarch 0:2.09-2.el7
perl-File-Temp.noarch 0:0.23.01-3.el7 perl-Filter.x86_64 0:1.49-3.el7
perl-Getopt-Long.noarch 0:2.40-2.el7 perl-HTTP-Tiny.noarch 0:0.033-3.el7
perl-PathTools.x86_64 0:3.40-5.el7 perl-Pod-Escapes.noarch 1:1.04-292.el7
perl-Pod-Perldoc.noarch 0:3.20-4.el7 perl-Pod-Simple.noarch 1:3.28-4.el7
perl-Pod-Usage.noarch 0:1.63-3.el7 perl-Scalar-List-Utils.x86_64 0:1.27-248.el7
perl-Socket.x86_64 0:2.010-4.el7 perl-Storable.x86_64 0:2.45-3.el7
perl-Text-ParseWords.noarch 0:3.29-4.el7 perl-Time-HiRes.x86_64 4:1.9725-3.el7
perl-Time-Local.noarch 0:1.2300-2.el7 perl-constant.noarch 0:1.27-2.el7
perl-libs.x86_64 4:5.16.3-292.el7 perl-macros.x86_64 4:5.16.3-292.el7
perl-parent.noarch 1:0.225-244.el7 perl-podlators.noarch 0:2.5.1-3.el7
perl-threads.x86_64 0:1.87-4.el7 perl-threads-shared.x86_64 0:1.43-6.el7
vim-common.x86_64 2:7.4.160-2.el7 vim-filesystem.x86_64 2:7.4.160-2.el7

Complete!

Basic Environment

Intel Core i7 processor

32GB memory

512GB SSD + 2TB storage

For example:

Dell OptiPlex 7050 Intel® Core i7 7700 32GB (2x16GB) DDR4-2400 512GB Solid State Drive SATA hard drive 2TB SATA hard drive (7200RPM)

VMware For OpenStack Requirement

3 * Controller

1 CPU (2 Core) 4GB RAM 100 GB Storage 4 NIC(NAT + NAT + HostOnly + HostOnly) Virtualize Intel VT-x/EPT or AMD-V/RVI

2 * Compute

1 CPU (2 Core) 4GB RAM 100 GB Storage 2 NIC(NAT + HostOnly) Virtualize Intel VT-x/EPT or AMD-V/RVI

Create Provider

[root@controller ~]# openstack network create provider --share --external --provider-network-type flat --provider-physical-network provider

Create Provider Sub-Net

[root@controller ~]# openstack subnet create --network provider --allocation-pool start=9.1.1.50,end=9.1.1.90 --dns-nameserver 8.8.8.8 --gateway 9.1.1.1 --subnet-range 9.1.1.0/24 provider-sub

Create Private Network

[root@controller ~]# openstack network create private --provider-network-type vxlan --share --internal

Create Private Sub-Net

[root@controller ~]# openstack subnet create --network private --gateway 192.168.1.1 --subnet-range 192.168.1.0/24 private-subnet

Create Private-Office Network

[root@controller ~]# openstack network create private-office --provider-network-type vxlan --share --internal

Create Private-Office Sub-Net

[root@controller ~]# openstack subnet create --network private-office --gateway 192.168.2.1 --subnet-range 192.168.2.0/24 office-net

Create Private-Sale Network

[root@controller ~]# openstack network create private-sale --provider-network-type vxlan --share --internal

Create Private-Sale Sub-Net

[root@controller ~]# openstack subnet create --network private-sale --gateway 192.168.3.1 --subnet-range 192.168.3.0/24 sale-net

Create Private-Technology Network

[root@controller ~]# openstack network create private-technology --provider-network-type vxlan --share --internal

Create Private-Technology Sub-Net

[root@controller ~]# openstack subnet create --network private-technology --gateway 192.168.4.1 --subnet-range 192.168.4.0/24 technology-net

List Network

[root@controller ~]# openstack network list

Create Provider Router

[root@controller ~]# openstack router create router

Set External-Gateway

[root@controller ~]# openstack router set --external-gateway provider router

Add Route Interface

[root@controller ~]# openstack router add subnet router private-subnet

List Network Agent

[root@controller ~]# openstack network agent list