Wednesday, October 30, 2019

Pacemaker Cluster on OCI


Keepalived Configuration:

On OCI, the VCN only delivers traffic to IP addresses that are actually assigned to a VNIC, so the gratuitous ARP that VRRP sends is not enough to move a virtual IP. The notify scripts below therefore also reassign the secondary private IP through the OCI CLI whenever a node becomes master.

[root@vip1 ~]# cat /etc/redhat-release
CentOS release 6.9 (Final)



Simple Keepalived Configuration:

[root@vip1 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from svr1@localhost
   smtp_server localhost
   smtp_connect_timeout 30
}
vrrp_instance VRRP1 {
#    debug 2
    state MASTER
#   Specify the network interface to which the virtual address is assigned
    interface eth0
#   The virtual router ID must be unique to each VRRP instance that you define
    virtual_router_id 41
    unicast_src_ip 10.0.0.3
    unicast_peer {
        10.0.0.4
    }
#   Set the value of priority higher on the master server than on a backup server
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1066
    }
    virtual_ipaddress {
        10.0.0.100/24
    }
    nopreempt
    notify_master /etc/keepalived/vip.sh
}
[root@vip1 keepalived]#


[root@vip1 keepalived]# cat vip.sh
#!/bin/bash
# Reassign the floating IP to this node's VNIC when keepalived promotes it to master.
VNIC=ocid1.vnic.oc1.phx.abyhqljstoq64rxxkzl4yf3f6jixbckjhtxkf22i5znfpqxi2aasqyxltsda
/root/bin/oci network vnic assign-private-ip --vnic-id $VNIC --ip-address 10.0.0.100 --unassign-if-already-assigned --region us-phoenix-1
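When keepalived promotes a node, notify_master runs this script, which moves the secondary private IP to that node's VNIC through the OCI CLI. To confirm the move actually took effect at the VCN layer, list the private IPs attached to the VNIC; a quick check, assuming the OCI CLI is installed at /root/bin/oci as above:

[root@vip1 ~]# /root/bin/oci network private-ip list --region us-phoenix-1 \
    --vnic-id ocid1.vnic.oc1.phx.abyhqljstoq64rxxkzl4yf3f6jixbckjhtxkf22i5znfpqxi2aasqyxltsda \
    | grep '"ip-address"'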



Updated keepalived.conf, extended with an httpd health-check script and an IPVS virtual server definition:
[root@vip1 keepalived]# cat keepalived.conf
global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from svr1@localhost
   smtp_server localhost
   smtp_connect_timeout 30
}
vrrp_script chk_httpd {
    script "pidof httpd"
    interval 2
}
vrrp_instance VRRP1 {
#    debug 2
    state MASTER
#   Specify the network interface to which the virtual address is assigned
    interface eth0
#   The virtual router ID must be unique to each VRRP instance that you define
    virtual_router_id 41
    unicast_src_ip 10.0.0.3 # Private IP
    unicast_peer {
        10.0.0.4 # Peer IP
    }
#   Set the value of priority higher on the master server than on a backup server
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1066
    }
    track_script {
        chk_httpd
    }
    virtual_ipaddress {
        10.0.0.100/24
    }
    nopreempt
    notify_master /etc/keepalived/vip.sh
}
virtual_server 10.0.0.100 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP
    real_server 10.0.0.3 80 {
        weight 1
        # notify_down /etc/keepalived/check_httpd.sh
        # Health check
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            connect_port 80
        }
    }
}
[root@vip1 keepalived]#


[root@vip1 keepalived]# cat check_httpd.sh
#!/bin/bash
# Reassign the floating IP via the OCI CLI when the local httpd check fails.
VNIC=ocid1.vnic.oc1.phx.abyhqljsmtjaqvakuivgjqs4fd3rltx2uc2epwicrj6j52fuzaopbgbcs33q
/root/bin/oci network vnic assign-private-ip --vnic-id $VNIC --ip-address 10.0.0.100 --unassign-if-already-assigned --region us-phoenix-1
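To exercise the failover end to end, stop httpd on the current master and watch the backup take over; a sketch of the test (CentOS 6 service syntax), which produces log output like the trace below:

# On the master: make the chk_httpd track script fail
[root@vip1 ~]# service httpd stop

# On the backup: watch for the transition to MASTER
[root@vip2 ~]# tail -f /var/log/messages | grep Keepalived

# From another host in the subnet: the VIP should now answer from vip2
$ curl http://10.0.0.100/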

Logs from the backup node (vip2) as it transitions to the MASTER state:

[root@vip2 ~]# tail -50  /var/log/messages
Jun 30 09:47:28 vip2 Keepalived[2379]: Starting Keepalived v1.2.13 (03/19,2015)
Jun 30 09:47:28 vip2 Keepalived[2380]: Starting Healthcheck child process, pid=2381
Jun 30 09:47:28 vip2 Keepalived[2380]: Starting VRRP child process, pid=2382
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Netlink reflector reports IP 10.0.0.4 added
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Netlink reflector reports IP fe80::200:17ff:fe01:4eae added
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Registering Kernel netlink reflector
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Registering Kernel netlink command channel
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Registering gratuitous ARP shared channel
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Configuration is using : 66719 Bytes
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: Using LinkWatch kernel netlink reflector...
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) Entering BACKUP STATE
Jun 30 09:47:28 vip2 Keepalived_vrrp[2382]: VRRP sockpool: [ifindex(2), proto(112), unicast(1), fd(10,11)]
Jun 30 09:47:29 vip2 Keepalived_vrrp[2382]: VRRP_Script(chk_httpd) succeeded
Jun 30 09:47:29 vip2 kernel: IPVS: Registered protocols (TCP, UDP, SCTP, AH, ESP)
Jun 30 09:47:29 vip2 kernel: IPVS: Connection hash table configured (size=4096, memory=64Kbytes)
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Netlink reflector reports IP 10.0.0.4 added
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Netlink reflector reports IP fe80::200:17ff:fe01:4eae added
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Registering Kernel netlink reflector
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Registering Kernel netlink command channel
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Configuration is using : 11990 Bytes
Jun 30 09:47:29 vip2 kernel: IPVS: ipvs loaded.
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Using LinkWatch kernel netlink reflector...
Jun 30 09:47:29 vip2 Keepalived_healthcheckers[2381]: Activating healthchecker for service [10.0.0.4]:80
Jun 30 09:47:29 vip2 kernel: IPVS: [wrr] scheduler registered.
Jun 30 09:50:42 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) Transition to MASTER STATE
Jun 30 09:50:43 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) Entering MASTER STATE
Jun 30 09:50:43 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) setting protocol VIPs.
Jun 30 09:50:43 vip2 Keepalived_healthcheckers[2381]: Netlink reflector reports IP 10.0.0.100 added
Jun 30 09:50:43 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) Sending gratuitous ARPs on eth0 for 10.0.0.100
Jun 30 09:50:44 vip2 ntpd[2238]: Listen normally on 6 eth0 10.0.0.100 UDP 123
Jun 30 09:50:48 vip2 Keepalived_vrrp[2382]: VRRP_Instance(VRRP1) Sending gratuitous ARPs on eth0 for 10.0.0.100
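At any point you can confirm which node currently holds the VIP; keepalived plumbs it as a secondary address on the tracked interface:

# The node holding the VIP will list 10.0.0.100 on eth0.
[root@vip2 ~]# ip addr show eth0 | grep 'inet 10.0.0.100'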






Pacemaker / Corosync

  1. Install the cluster packages:
#  yum install -y pacemaker pcs psmisc policycoreutils-python

  2. Set up the firewall:
# firewall-cmd --permanent --add-service=high-availability --add-service=http --add-service=https
# firewall-cmd --reload

Ports required to be open: TCP 2224 (pcsd), 3121 (pacemaker_remote), and 21064 (dlm), plus UDP 5405 (corosync).
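If your firewalld build does not ship the high-availability service definition, the same ports can be opened individually, equivalent to the service shorthand above:

# firewall-cmd --permanent --add-port=2224/tcp --add-port=3121/tcp --add-port=21064/tcp --add-port=5405/udp
# firewall-cmd --reload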

  3. Start and enable the pcs daemon:
# systemctl start pcsd.service
# systemctl enable pcsd.service
ln -s '/usr/lib/systemd/system/pcsd.service' '/etc/systemd/system/multi-user.target.wants/pcsd.service'

  4. Set a password for the hacluster user:
# echo '<password>' | passwd --stdin hacluster
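The password must be identical on every node. With root SSH access, one loop covers the whole cluster (a sketch; substitute a real password for the placeholder):

# for h in node1 node2; do ssh $h 'echo "<password>" | passwd --stdin hacluster'; done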

  5. Configure Corosync:
On one of the nodes:
# pcs cluster auth node1 node2
Username: hacluster
Password:
node1: Authorized
node2: Authorized


# pcs cluster setup --name mycluster node1 node2
Shutting down pacemaker/corosync services...
Redirecting to /bin/systemctl stop  pacemaker.service
Redirecting to /bin/systemctl stop  corosync.service
Killing any remaining services...
Removing all cluster configuration files...
node1: Succeeded
node2: Succeeded

Start the cluster:
# pcs cluster start --all
node1: Starting Cluster...
node2: Starting Cluster...

[root@node1 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
    id    = 10.0.0.12
    status    = ring 0 active with no faults

[root@node1 ~]# corosync-cmapctl  | grep members
runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(10.0.0.12)
runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.1.status (str) = joined
runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(10.0.0.14)
runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.2.status (str) = joined
[root@node1 ~]#

  6. Disable STONITH (acceptable for a test cluster only):
# pcs property set stonith-enabled=false
# crm_verify -L -V
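Disabling STONITH is fine for a lab, but a production cluster needs a working fencing device before it can safely recover from a split-brain. Confirm the property took effect (pcs 0.9 syntax on EL7):

# pcs property show stonith-enabled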

  7. Add the floating IP:

Update the IPaddr2 resource agent so that, in addition to plumbing the address locally, it reassigns the private IP at the OCI VCN layer (via the OCI CLI) whenever the resource moves between nodes.

sudo sed -i '64i\##### OCI vNIC variables\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '65i\server="`hostname -s`"\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '66i\node1vnic="ocid1.vnic.oc1.phx.abyhqljs2qwsjkgsi7ujg735xig3xfnq2w5h2slvl33lqw24wn5rtjpfqvia"\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '67i\node2vnic="ocid1.vnic.oc1.phx.abyhqljs6qpbs6w5peguzucokmx3eh6wvu7jauxwntsgz5zj2krfgrgzclzq"\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '68i\vnicip="10.0.0.200"\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2


sudo sed -i '614i\##### OCI/IPaddr Integration\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '615i\        if [ $server = "node1" ]; then\' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '616i\                /root/bin/oci network vnic assign-private-ip --unassign-if-already-assigned --vnic-id $node1vnic  --ip-address $vnicip \' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '617i\        else \' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '618i\                /root/bin/oci network vnic assign-private-ip --unassign-if-already-assigned --vnic-id $node2vnic  --ip-address $vnicip \' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
sudo sed -i '619i\        fi \' /usr/lib/ocf/resource.d/heartbeat/IPaddr2
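Note that the line numbers (64 and 614) match the resource-agents build used here and will drift across versions. Before running the sed commands on a different build, locate the variables section near the top of the agent and the point where the address is brought up (in many versions, the add_interface function), then adjust the offsets:

# grep -n 'add_interface' /usr/lib/ocf/resource.d/heartbeat/IPaddr2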


The updated IPaddr2 resource agent should now contain:

[root@node2 ~]# grep -A5 OCI  /usr/lib/ocf/resource.d/heartbeat/IPaddr2
##### OCI vNIC variables
server="`hostname -s`"
node1vnic="<Node1 VNIC OCID>"
node2vnic="<Node2 VNIC OCID>"
vnicip="10.0.0.200"

--
##### OCI/IPaddr Integration
        if [ $server = "node1" ]; then
                /root/bin/oci network vnic assign-private-ip --unassign-if-already-assigned --vnic-id $node1vnic  --ip-address $vnicip
        else
                /root/bin/oci network vnic assign-private-ip --unassign-if-already-assigned --vnic-id $node2vnic  --ip-address $vnicip
        fi


  8. Create the floating IP resource:
[root@node1 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 ip=10.0.0.200 cidr_netmask=32 op monitor interval=30s
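Verify that the resource started and that the address was plumbed on one of the nodes:

[root@node1 ~]# pcs resource show ClusterIP
[root@node1 ~]# ip addr show | grep 10.0.0.200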


  9. Set the default resource stickiness:
# pcs resource defaults resource-stickiness=100
# pcs resource defaults
resource-stickiness: 100


  10. Set up Nginx:
# yum install nginx

[root@node1 ~]# cat /usr/share/nginx/html/index.html
This is NODE1

Status page:
[root@node1 ~]# cat /usr/share/nginx/html/nginx_status
node1 is alive.

# cat /etc/nginx/default.d/status.conf
location ^~ /nginx_status {
    allow 127.0.0.1;
    deny all;
}
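With the allow/deny rules above, the status page answers only from the node itself, which is all the nginx resource agent's monitor needs; remote clients are denied. A quick local test:

[root@node1 ~]# curl http://localhost/nginx_status
node1 is alive.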

[root@node1 ~]# cat /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

#    include /etc/nginx/conf.d/*.conf;

    server {
        listen       80 default_server;
        listen       [::]:80 default_server;
        server_name  _;
        root         /usr/share/nginx/html;

        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;

        location / {
        }

        error_page 404 /404.html;
            location = /40x.html {
        }

        error_page 500 502 503 504 /50x.html;
            location = /50x.html {
        }
    }
}


  11. Create the Nginx resource:
[root@node1 ~]# pcs resource create webserver ocf:heartbeat:nginx configfile=/etc/nginx/nginx.conf status10url="http://localhost/nginx_status" status10regex="node[1-9] is alive." op monitor timeout="30s" interval="10s" op start timeout="40s" interval="0" op stop timeout="60s" interval="0"
[root@node1 ~]#

[root@node1 ~]# pcs resource
ClusterIP    (ocf::heartbeat:IPaddr2):    Started node2
webserver    (ocf::heartbeat:nginx):    Started node1

  12. Create a colocation constraint so that the web server resource runs on the same node as ClusterIP:
[root@node1 ~]# pcs constraint colocation add webserver with ClusterIP INFINITY

  13. Set the resource startup order:
[root@node1 ~]# pcs constraint order ClusterIP then webserver
Adding ClusterIP webserver (kind: Mandatory) (Options: first-action=start then-action=start)
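To test failover, put the node currently running the resources into standby, confirm both resources move together (the colocation constraint guarantees this), then bring the node back:

[root@node1 ~]# pcs cluster standby node2
[root@node1 ~]# pcs resource
[root@node1 ~]# pcs cluster unstandby node2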


[root@node1 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: node2 (version 1.1.18-11.el7_5.2-2b07d5c5a9) - partition with quorum
Last updated: Sat Jun 30 16:23:40 2018
Last change: Sat Jun 30 15:51:20 2018 by root via crm_resource on node2

2 nodes configured
2 resources configured

Online: [ node1 node2 ]

Full list of resources:

ClusterIP    (ocf::heartbeat:IPaddr2):    Started node2
webserver    (ocf::heartbeat:nginx):    Started node2

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

[root@node1 ~]# pcs constraint
Location Constraints:
  Resource: webserver
    Enabled on: node2 (score:INFINITY) (role: Started)
Ordering Constraints:
  start ClusterIP then start webserver (kind:Mandatory)
Colocation Constraints:
  webserver with ClusterIP (score:INFINITY)
Ticket Constraints:
