Quick Guide

Introduction

This is meant as a cheat sheet for quickly setting up a set of appliances. Note that it is only accurate for a 3-node cluster.
If in doubt, take a look at the complete pmsApp guide.

The first part of this cheat sheet must be run on all nodes in the cluster.
Be sure to adjust the names and IP addresses for each node.
Please leave a comment.
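
Per node, only a handful of values change. As a rough sketch (the eth1 and eth2 addresses for nodes 2 and 3 are taken from the iSCSI and hosts sections below; the eth0 addresses for nodes 2 and 3 are assumed to follow the same pattern):

# pmsapp1: HOSTNAME=pmsapp1.pmsapp.org  eth0=192.168.0.21  eth1=172.16.0.21  eth2=10.0.0.21
# pmsapp2: HOSTNAME=pmsapp2.pmsapp.org  eth0=192.168.0.22  eth1=172.16.0.22  eth2=10.0.0.22
# pmsapp3: HOSTNAME=pmsapp3.pmsapp.org  eth0=192.168.0.23  eth1=172.16.0.23  eth2=10.0.0.23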

Disable firewall

chkconfig iptables off
chkconfig ip6tables off
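
chkconfig only affects the next boot. To drop the firewall immediately without rebooting, stopping the running services should also work (standard RHEL/CentOS 6 service names, same as above):

service iptables stop
service ip6tables stop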

Disable SELinux

echo "SELINUX=disabled"      > /etc/selinux/config;\
echo "SELINUXTYPE=targeted" >> /etc/selinux/config

/etc/resolv.conf

echo "domain pmsapp.org"          > /etc/resolv.conf;\
echo "search pmsapp.org"         >> /etc/resolv.conf;\
echo "nameserver 192.168.0.11"   >> /etc/resolv.conf;\
echo "nameserver 208.67.222.222" >> /etc/resolv.conf;\
echo "nameserver 208.67.220.220" >> /etc/resolv.conf

/etc/sysconfig/network

echo "NETWORKING=yes"               > /etc/sysconfig/network;\
echo "HOSTNAME=pmsapp1.pmsapp.org" >> /etc/sysconfig/network

/etc/sysconfig/network-scripts/ifcfg-eth0

echo "# Fist NIC, used for NFS and management traffic"    > /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "# should be reachable by the virtualization hosts" >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "DEVICE=eth0"                                       >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "BOOTPROTO=static"                                  >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "ONBOOT=yes"                                        >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "TYPE=Ethernet"                                     >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "IPADDR=192.168.0.21"                               >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "NETMASK=255.255.255.0"                             >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "BROADCAST=192.168.0.255"                           >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "GATEWAY=192.168.0.1"                               >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "DNS1=192.168.0.11"                                 >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "DNS2=208.67.222.222"                               >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "DNS3=208.67.220.220"                               >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "IPV6INIT=no"                                       >> /etc/sysconfig/network-scripts/ifcfg-eth0;\
echo "USERCTL=no"                                        >> /etc/sysconfig/network-scripts/ifcfg-eth0

/etc/sysconfig/network-scripts/ifcfg-eth1

echo "# 2nd NIC, used for iSCSI traffic."               > /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "# No DNS or routes are necessary, but all nodes" >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "# should be able to communicate on the subnet"   >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "DEVICE=eth1"                                     >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "BOOTPROTO=static"                                >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "ONBOOT=yes"                                      >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "TYPE=Ethernet"                                   >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "IPADDR=172.16.0.21"                              >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "NETMASK=255.255.255.0"                           >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "BROADCAST=172.16.0.255"                          >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "IPV6INIT=no"                                     >> /etc/sysconfig/network-scripts/ifcfg-eth1;\
echo "USERCTL=no"                                      >> /etc/sysconfig/network-scripts/ifcfg-eth1

/etc/sysconfig/network-scripts/ifcfg-eth2

echo "# 3rd NIC, used for heartbeat and cluster traffic." > /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "# No DNS or routes are necessary, but all nodes"   >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "# should be able to communicate on the subnet"     >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "DEVICE=eth2"                                       >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "BOOTPROTO=static"                                  >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "ONBOOT=yes"                                        >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "TYPE=Ethernet"                                     >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "IPADDR=10.0.0.21"                                  >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "NETMASK=255.255.255.0"                             >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "BROADCAST=10.0.0.255"                              >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "IPV6INIT=no"                                       >> /etc/sysconfig/network-scripts/ifcfg-eth2;\
echo "USERCTL=no"                                        >> /etc/sysconfig/network-scripts/ifcfg-eth2

/etc/hosts

echo "127.0.0.1 localhost localhost.localdomain" > /etc/hosts;\
echo "10.0.0.21 pmsapp1 pmsapp1.pmsapp.org"     >> /etc/hosts;\
echo "10.0.0.22 pmsapp2 pmsapp2.pmsapp.org"     >> /etc/hosts;\
echo "10.0.0.23 pmsapp3 pmsapp3.pmsapp.org"     >> /etc/hosts

Installing software

yum -y upgrade
yum -y groupinstall "High Availability"
yum -y install scsi-target-utils iscsi-initiator-utils nfs-utils mdadm
chkconfig iscsi off
chkconfig nfs off
chkconfig tgtd off

iSCSI target configuration

echo "<target iqn.2012-03.org.pmsapp:pmsapp1.disk>" > /etc/tgt/targets.conf;\
echo "    backing-store /dev/vdb"                  >> /etc/tgt/targets.conf;\
echo "</target>"                                   >> /etc/tgt/targets.conf

shutdown -h now

Add the second hard drive to each node, then restart the nodes

service tgtd start
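
To confirm that tgtd is exporting the LUN, listing the configured targets with tgtadm should show one target backed by /dev/vdb on each node:

tgtadm --lld iscsi --op show --mode target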

 

— The above must be done on all nodes —

 

iSCSI initiator configuration

echo "node.conn[0].timeo.login_timeout = 2"     >> /etc/iscsi/iscsid.conf;\
echo "node.session.initial_login_retry_max = 1" >> /etc/iscsi/iscsid.conf
scp /etc/iscsi/iscsid.conf pmsapp2:/etc/iscsi
scp /etc/iscsi/iscsid.conf pmsapp3:/etc/iscsi
iscsiadm -m discovery -t st -p 172.16.0.21
iscsiadm -m discovery -t st -p 172.16.0.22
iscsiadm -m discovery -t st -p 172.16.0.23
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp1.disk -p 172.16.0.21 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp2.disk -p 172.16.0.22 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp3.disk -p 172.16.0.23 --login

fdisk -l 2>/dev/null | grep Disk | grep bytes

RAID configuration

Check the fdisk output above to confirm that the three iSCSI disks really appear as /dev/sda, /dev/sdb and /dev/sdc before creating the array.

mdadm --create /dev/md0 --bitmap=internal --level=5 --raid-devices=3 /dev/sda /dev/sdb /dev/sdc

# Watch the resync; the loop exits once "finish" no longer appears in /proc/mdstat
while [ $? -eq 0 ]; do cat /proc/mdstat; sleep 1; grep finish /proc/mdstat &>/dev/null; done

mdadm --examine --scan > /etc/mdadm.conf
cat /etc/mdadm.conf
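
Once the resync has finished, the array detail should report a clean, active 3-device RAID5 before it is stopped and handed over to the other nodes:

mdadm --detail /dev/md0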

scp /etc/mdadm.conf pmsapp2:/etc
scp /etc/mdadm.conf pmsapp3:/etc

mdadm --stop /dev/md0
service iscsi stop

# ON NODE 2
iscsiadm -m discovery -t st -p 172.16.0.21
iscsiadm -m discovery -t st -p 172.16.0.22
iscsiadm -m discovery -t st -p 172.16.0.23
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp1.disk -p 172.16.0.21 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp2.disk -p 172.16.0.22 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp3.disk -p 172.16.0.23 --login
cat /proc/mdstat
mdadm --stop /dev/md0
service iscsi stop

# ON NODE 3
iscsiadm -m discovery -t st -p 172.16.0.21
iscsiadm -m discovery -t st -p 172.16.0.22
iscsiadm -m discovery -t st -p 172.16.0.23
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp1.disk -p 172.16.0.21 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp2.disk -p 172.16.0.22 --login
iscsiadm -m node -T iqn.2012-03.org.pmsapp:pmsapp3.disk -p 172.16.0.23 --login
cat /proc/mdstat
mdadm --stop /dev/md0
service iscsi stop

Restart iSCSI target

echo -e "\x23\x21/bin/bash"                  > /etc/init.d/iscsitarget;\
echo "case \"\$1\" in"                      >> /etc/init.d/iscsitarget;\
echo "    start)"                           >> /etc/init.d/iscsitarget;\
echo "        mdadm --stop /dev/md0"        >> /etc/init.d/iscsitarget;\
echo "        /etc/init.d/tgtd start"       >> /etc/init.d/iscsitarget;\
echo "        ;;"                           >> /etc/init.d/iscsitarget;\
echo "    *)"                               >> /etc/init.d/iscsitarget;\
echo "        echo $\"Usage: \$0 {start}\"" >> /etc/init.d/iscsitarget;\
echo "        exit 2"                       >> /etc/init.d/iscsitarget;\
echo "esac"                                 >> /etc/init.d/iscsitarget;\
echo "exit \$?"                             >> /etc/init.d/iscsitarget

chmod +x /etc/init.d/iscsitarget
scp /etc/init.d/iscsitarget pmsapp2:/etc/init.d
scp /etc/init.d/iscsitarget pmsapp3:/etc/init.d
ln -s /etc/init.d/iscsitarget /etc/rc3.d/S16iscsitarget
ssh pmsapp2 ln -s /etc/init.d/iscsitarget /etc/rc3.d/S16iscsitarget
ssh pmsapp3 ln -s /etc/init.d/iscsitarget /etc/rc3.d/S16iscsitarget
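
The links above only cover runlevel 3. If your appliances boot into runlevel 5 (check /etc/inittab), an equivalent link under /etc/rc5.d would be needed as well:

ln -s /etc/init.d/iscsitarget /etc/rc5.d/S16iscsitarget
ssh pmsapp2 ln -s /etc/init.d/iscsitarget /etc/rc5.d/S16iscsitarget
ssh pmsapp3 ln -s /etc/init.d/iscsitarget /etc/rc5.d/S16iscsitarget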

Filesystem

service iscsi start
cat /proc/mdstat
mkfs.ext4 /dev/md0
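
Optionally, a quick test mount verifies the new filesystem before the array is stopped again; /mnt is used here because /sharedstorage is only created a few steps further down:

mount /dev/md0 /mnt
df -h /mnt
umount /mnt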

mdadm --stop /dev/md0
service iscsi stop

mkdir /sharedstorage
ssh pmsapp2 mkdir /sharedstorage
ssh pmsapp3 mkdir /sharedstorage

System cluster services

chkconfig cman on; chkconfig rgmanager on; chkconfig modclusterd on; chkconfig ricci on; passwd ricci
ssh pmsapp2 "chkconfig cman on; chkconfig rgmanager on; chkconfig modclusterd on; chkconfig ricci on; passwd ricci"
ssh pmsapp3 "chkconfig cman on; chkconfig rgmanager on; chkconfig modclusterd on; chkconfig ricci on; passwd ricci"

Fence device

This creates a dummy fence agent: an empty script that always exits 0, effectively disabling fencing.

echo -e "\x23\x21/bin/bash" > /usr/sbin/fence_disable
chmod +x /usr/sbin/fence_disable
scp /usr/sbin/fence_disable pmsapp2:/usr/sbin
scp /usr/sbin/fence_disable pmsapp3:/usr/sbin

Software RAID script

echo -e "\x23\x21/bin/bash"                                                                     > /etc/init.d/swraid;\
echo "case \"\$1\" in"                                                                         >> /etc/init.d/swraid;\
echo "    start)"                                                                              >> /etc/init.d/swraid;\
echo "        # Start the iscsi client, it will log in to the allready configured targets."    >> /etc/init.d/swraid;\
echo "        service iscsi start"                                                             >> /etc/init.d/swraid;\
echo                                                                                           >> /etc/init.d/swraid;\
echo "        # Wait for 1 second to allow the software RAID driver to discover the RAID set." >> /etc/init.d/swraid;\
echo "        sleep 1"                                                                         >> /etc/init.d/swraid;\
echo                                                                                           >> /etc/init.d/swraid;\
echo "        # Search /proc/mdstat for \": active\""                                          >> /etc/init.d/swraid;\
echo "        grep \": active\" /proc/mdstat &>/dev/null"                                      >> /etc/init.d/swraid;\
echo                                                                                           >> /etc/init.d/swraid;\
echo "        # If \": active\" is not found, it means that the array is not started."         >> /etc/init.d/swraid;\
echo "        if [ \$? -ne 0 ]; then"                                                          >> /etc/init.d/swraid;\
echo "            # Try to start the array"                                                    >> /etc/init.d/swraid;\
echo "            mdadm --run /dev/md0"                                                        >> /etc/init.d/swraid;\
echo "        fi"                                                                              >> /etc/init.d/swraid;\
echo                                                                                           >> /etc/init.d/swraid;\
echo "        # Check /proc/mdstat again to see if the array is running."                      >> /etc/init.d/swraid;\
echo "        grep \": active\" /proc/mdstat &>/dev/null"                                      >> /etc/init.d/swraid;\
echo                                                                                           >> /etc/init.d/swraid;\
echo "        # If \": active\" is not found, it means that the array is not started"          >> /etc/init.d/swraid;\
echo "        if [ \$? -ne 0 ]; then"                                                          >> /etc/init.d/swraid;\
echo "            # Tell that the array is not started and return with an non-zero exit code"  >> /etc/init.d/swraid;\
echo "            echo \"Array /dev/md0 is not started\""                                      >> /etc/init.d/swraid;\
echo "            exit 1"                                                                      >> /etc/init.d/swraid;\
echo "        fi"                                                                              >> /etc/init.d/swraid;\
echo "        # Tell that the array is started and return with 0 as exit code."                >> /etc/init.d/swraid;\
echo "        echo \"Array /dev/md0 is started\""                                              >> /etc/init.d/swraid;\
echo "        exit 0"                                                                          >> /etc/init.d/swraid;\
echo "        ;;"                                                                              >> /etc/init.d/swraid;\
echo "    stop)"                                                                               >> /etc/init.d/swraid;\
echo "        # Stop the RAID array"                                                           >> /etc/init.d/swraid;\
echo "        mdadm --stop /dev/md0"                                                           >> /etc/init.d/swraid;\
echo "        # Stop the iscsi service and exit without error"                                 >> /etc/init.d/swraid;\
echo "        service iscsi stop"                                                              >> /etc/init.d/swraid;\
echo "        exit 0"                                                                          >> /etc/init.d/swraid;\
echo "        ;;"                                                                              >> /etc/init.d/swraid;\
echo "    status)"                                                                             >> /etc/init.d/swraid;\
echo "        # Check /proc/mdstat for \": active\""                                           >> /etc/init.d/swraid;\
echo "        grep \": active\" /proc/mdstat &>/dev/null"                                      >> /etc/init.d/swraid;\
echo "        # If \": active\" is not found, the array is not running"                        >> /etc/init.d/swraid;\
echo "        if [ \$? -ne 0 ]; then"                                                          >> /etc/init.d/swraid;\
echo "            # Tell that the array is not running and exit with a non-zero exit code."    >> /etc/init.d/swraid;\
echo "            echo \"Array /dev/md0 is not running\""                                      >> /etc/init.d/swraid;\
echo "            exit 1"                                                                      >> /etc/init.d/swraid;\
echo "        fi"                                                                              >> /etc/init.d/swraid;\
echo "        # Tell that the array is running and exit with 0 as exit code."                  >> /etc/init.d/swraid;\
echo "        echo \"Array /dev/md0 is running\""                                              >> /etc/init.d/swraid;\
echo "        exit 0"                                                                          >> /etc/init.d/swraid;\
echo "        ;;"                                                                              >> /etc/init.d/swraid;\
echo "    *)"                                                                                  >> /etc/init.d/swraid;\
echo "        # Tell how to use the script and exit with a non-zero exit code."                >> /etc/init.d/swraid;\
echo "        echo $\"Usage: \$0 { start | stop | status }\""                                  >> /etc/init.d/swraid;\
echo "        exit 2"                                                                          >> /etc/init.d/swraid;\
echo "        ;;"                                                                              >> /etc/init.d/swraid;\
echo "esac"                                                                                    >> /etc/init.d/swraid

chmod +x /etc/init.d/swraid
scp /etc/init.d/swraid pmsapp2:/etc/init.d
scp /etc/init.d/swraid pmsapp3:/etc/init.d
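
Before handing the script over to the cluster, it is worth exercising it by hand on one node; start, status and stop should assemble the array, report it as running and tear it down again:

/etc/init.d/swraid start
/etc/init.d/swraid status
/etc/init.d/swraid stop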

Final cluster configuration

echo "<?xml version=\"1.0\"?>"                                                                                           > /etc/cluster/cluster.conf;\
echo "<cluster config_version=\"2\" name=\"pmsappcluster\">"                                                            >> /etc/cluster/cluster.conf;\
echo "    <clusternodes>"                                                                                               >> /etc/cluster/cluster.conf;\
echo "        <clusternode name=\"pmsapp1\" nodeid=\"1\">"                                                              >> /etc/cluster/cluster.conf;\
echo "            <fence>"                                                                                              >> /etc/cluster/cluster.conf;\
echo "                <method name=\"fence_off\">"                                                                      >> /etc/cluster/cluster.conf;\
echo "                    <device name=\"no_fence\"/>"                                                                  >> /etc/cluster/cluster.conf;\
echo "                </method>"                                                                                        >> /etc/cluster/cluster.conf;\
echo "            </fence>"                                                                                             >> /etc/cluster/cluster.conf;\
echo "        </clusternode>"                                                                                           >> /etc/cluster/cluster.conf;\
echo "        <clusternode name=\"pmsapp2\" nodeid=\"2\">"                                                              >> /etc/cluster/cluster.conf;\
echo "            <fence>"                                                                                              >> /etc/cluster/cluster.conf;\
echo "                <method name=\"fence_off\">"                                                                      >> /etc/cluster/cluster.conf;\
echo "                    <device name=\"no_fence\"/>"                                                                  >> /etc/cluster/cluster.conf;\
echo "                </method>"                                                                                        >> /etc/cluster/cluster.conf;\
echo "            </fence>"                                                                                             >> /etc/cluster/cluster.conf;\
echo "        </clusternode>"                                                                                           >> /etc/cluster/cluster.conf;\
echo "        <clusternode name=\"pmsapp3\" nodeid=\"3\">"                                                              >> /etc/cluster/cluster.conf;\
echo "            <fence>"                                                                                              >> /etc/cluster/cluster.conf;\
echo "                <method name=\"fence_off\">"                                                                      >> /etc/cluster/cluster.conf;\
echo "                    <device name=\"no_fence\"/>"                                                                  >> /etc/cluster/cluster.conf;\
echo "                </method>"                                                                                        >> /etc/cluster/cluster.conf;\
echo "            </fence>"                                                                                             >> /etc/cluster/cluster.conf;\
echo "        </clusternode>"                                                                                           >> /etc/cluster/cluster.conf;\
echo "    </clusternodes>"                                                                                              >> /etc/cluster/cluster.conf;\
echo "    <fencedevices>"                                                                                               >> /etc/cluster/cluster.conf;\
echo "        <fencedevice agent=\"fence_disable\" name=\"no_fence\"/>"                                                 >> /etc/cluster/cluster.conf;\
echo "    </fencedevices>"                                                                                              >> /etc/cluster/cluster.conf;\
echo "    <rm>"                                                                                                         >> /etc/cluster/cluster.conf;\
echo "        <service autostart=\"1\" exclusive=\"0\" name=\"clusvc\" recovery=\"relocate\">"                          >> /etc/cluster/cluster.conf;\
echo "            <script file=\"/etc/init.d/swraid\" name=\"swraid\">"                                                 >> /etc/cluster/cluster.conf;\
echo "                <fs device=\"/dev/md0\" fstype=\"ext4\" mountpoint=\"/sharedstorage\" name=\"sharedvol\">"        >> /etc/cluster/cluster.conf;\
echo "                    <nfsexport name=\"sharednfs\">"                                                               >> /etc/cluster/cluster.conf;\
echo "                        <nfsclient name=\"nfsclients\" options=\"rw,no_root_squash\" target=\"192.168.0.0/24\"/>" >> /etc/cluster/cluster.conf;\
echo "                    </nfsexport>"                                                                                 >> /etc/cluster/cluster.conf;\
echo "                </fs>"                                                                                            >> /etc/cluster/cluster.conf;\
echo "            </script>"                                                                                            >> /etc/cluster/cluster.conf;\
echo "            <ip address=\"192.168.0.20\" monitor_link=\"on\"/>"                                                   >> /etc/cluster/cluster.conf;\
echo "        </service>"                                                                                               >> /etc/cluster/cluster.conf;\
echo "    </rm>"                                                                                                        >> /etc/cluster/cluster.conf;\
echo "</cluster>"                                                                                                       >> /etc/cluster/cluster.conf

ccs_config_validate

scp /etc/cluster/cluster.conf pmsapp2:/etc/cluster
scp /etc/cluster/cluster.conf pmsapp3:/etc/cluster

ssh pmsapp2 reboot
ssh pmsapp3 reboot
reboot

clustat

showmount -e 192.168.0.20
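
From a virtualization host (or any other client on the 192.168.0.0/24 network), a test mount of the export on the floating service address confirms the whole stack end to end; /mnt is just an example mount point:

mount -t nfs 192.168.0.20:/sharedstorage /mnt
df -h /mnt
umount /mnt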
