Vagrant files to build a 3nodes+haproxy cluster
ioguix committed Mar 6, 2020
1 parent 13d46a1 commit a07617e
Showing 26 changed files with 913 additions and 3 deletions.
44 changes: 44 additions & 0 deletions extra/vagrant/3nodes-haproxy/Makefile
@@ -0,0 +1,44 @@
export VAGRANT_BOX_UPDATE_CHECK_DISABLE=1
export VAGRANT_CHECKPOINT_DISABLE=1

.PHONY: all create_vm postgresql pgsql_replicas pacemaker cluster prov clean check validate cts pcmk-stop


all: create_vm postgresql pgsql_replicas pacemaker cluster

create_vm:
	vagrant up

postgresql: pcmk-stop
	vagrant up --provision-with=postgresql

pgsql_replicas: pcmk-stop
	vagrant up --provision-with=pgsql-replicas

pacemaker:
	vagrant up --provision-with=pacemaker

cluster:
	vagrant up --provision-with=cluster-setup

prov:
	vagrant up --provision

clean:
	vagrant destroy -f

check: validate

validate:
	@vagrant validate
	@if which shellcheck >/dev/null ;\
	then shellcheck provision/*bash ;\
	else echo "WARNING: shellcheck is not in PATH, not checking bash syntax" ;\
	fi

cts:
	vagrant up --provision-with=cts

pcmk-stop:
	vagrant ssh -c 'if [ -f "/etc/corosync/corosync.conf" ]; then sudo pcs cluster stop --all --wait; fi'
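
For reference, the whole environment can be built in one shot with "make all", or step by step (a sketch, assuming Vagrant with the libvirt provider is installed on the host):

    make create_vm        # boot srv1..srv3 and the log-sink VM
    make postgresql       # set up PostgreSQL on the nodes (stops Pacemaker first)
    make pgsql_replicas   # build the standbys on srv2 and srv3
    make pacemaker        # provision Pacemaker
    make cluster          # create the cluster and its resources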

152 changes: 152 additions & 0 deletions extra/vagrant/3nodes-haproxy/Vagrantfile
@@ -0,0 +1,152 @@
require 'ipaddr'
require 'yaml'

#ENV['VAGRANT_NO_PARALLEL'] = 'yes' # uncomment to forbid parallel execution
ENV["LANG"] = "C"
ENV["LC_ALL"] = "C"

boxname = 'centos/7' # vagrant box to use
pgver = '11' # pg version to use
hapass = 'hapass' # password for the hacluster system user
ssh_login = 'root' # ssh login to connect to the host when fencing a VM.
# put "./provision/id_rsa.pub" in your "~<ssh_login>/.ssh/authorized_keys"
base_ip = '10.20.30.5' # Base IP address to compute other ones
pg_nodes = 'srv1', 'srv2', 'srv3' # first will be primary
log_node = 'log-sink' # name of the node receiving logs
vm_prefix = 'paf_vm' # VM prefix in libvirt
rhel_user = '' # RHEL user account
rhel_pass = '' # RHEL user account password

if File.file?('vagrant.yml') and ( custom = YAML.load_file('vagrant.yml') )
  boxname = custom['boxname'] if custom.has_key?('boxname')
  pgver = custom['pgver'] if custom.has_key?('pgver')
  hapass = custom['hapass'] if custom.has_key?('hapass')
  ssh_login = custom['ssh_login'] if custom.has_key?('ssh_login')
  base_ip = custom['base_ip'] if custom.has_key?('base_ip')
  pg_nodes = custom['pg_nodes'] if custom.has_key?('pg_nodes')
  log_node = custom['log_node'] if custom.has_key?('log_node')
  vm_prefix = custom['vm_prefix'] if custom.has_key?('vm_prefix')
  rhel_user = custom['rhel_user'] if custom.has_key?('rhel_user')
  rhel_pass = custom['rhel_pass'] if custom.has_key?('rhel_pass')
end
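
# a minimal vagrant.yml overriding some of the defaults above could look
# like this (hypothetical values):
#
#   pgver: '12'
#   hapass: 'secret'
#   base_ip: '10.20.40.5'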

Vagrant.configure(2) do |config|

  ############################################################################
  # computed variables

  pgdata = "/var/lib/pgsql/#{pgver}/data"
  next_ip = IPAddr.new(base_ip).succ
  host_ip = (IPAddr.new(base_ip) & "255.255.255.0").succ.to_s
  nodes_ips = {}

  ( pg_nodes + [ log_node ] ).each do |node|
    nodes_ips[node] = next_ip.to_s
    next_ip = next_ip.succ
  end
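  # e.g. with the default base_ip 10.20.30.5, host_ip computes to 10.20.30.1
  # and the nodes get 10.20.30.6 (srv1) through 10.20.30.9 (log-sink)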

  ############################################################################
  # general vagrant setup

  # don't bother with the insecure default ssh key
  config.ssh.insert_key = false

  # see https://vagrantcloud.com/search
  config.vm.box = boxname

  # hardware and host settings
  config.vm.provider 'libvirt' do |lv|
    lv.cpus = 1
    lv.memory = 512
    lv.watchdog model: 'i6300esb'
    lv.default_prefix = vm_prefix
  end

  # disable default share
  config.vm.synced_folder ".", "/vagrant", disabled: true

  config.vm.synced_folder "../../..", "/vagrant", type: "rsync",
    rsync__exclude: [ ".git/" ]

  ############################################################################
  # system setup for all nodes

  config.vm.define pg_nodes.first, primary: true

  (pg_nodes + [log_node]).each do |node|
    config.vm.define node do |conf|
      conf.vm.network 'private_network', ip: nodes_ips[node]
      conf.vm.provision 'system-setup', type: 'shell',
        path: 'provision/system.bash',
        args: [ node, rhel_user, rhel_pass ] + nodes_ips.keys.map {|n| "#{n}=#{nodes_ips[n]}"},
        preserve_order: true
    end
  end


  ############################################################################
  # setup rsyslog to collect logs
  (pg_nodes + [log_node]).each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'rsyslog-setup', type: 'shell',
        path: 'provision/rsyslog.bash',
        args: [ log_node ],
        preserve_order: true
    end
  end

  ############################################################################
  # setup haproxy
  pg_nodes.each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'haproxy-setup', type: 'shell',
        path: 'provision/haproxy.bash',
        preserve_order: true
    end
  end

  ############################################################################
  # postgresql installation and setup
  pg_nodes.each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'postgresql', type: 'shell',
        path: 'provision/postgresql.bash',
        args: [ pgver, pg_nodes.first, pgdata ],
        preserve_order: true
    end
  end

  # replicas setup. Use "vagrant up --provision-with=pgsql-replicas"
  pg_nodes[1..-1].each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'pgsql-replicas', type: 'shell',
        path: 'provision/pgsql-replicas.bash',
        args: [ pgver, node, pgdata ],
        run: 'never'
    end
  end

  ############################################################################
  # cluster setup. Use "vagrant up --provision-with=pacemaker"
  pg_nodes.each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'pacemaker', type: 'shell',
        path: 'provision/pacemaker.bash',
        args: [ hapass ],
        run: 'never'
    end
  end

  # create the cluster. Use "vagrant up --provision-with=cluster-setup"
  pg_nodes.each do |node|
    config.vm.define node do |conf|
      conf.vm.provision 'cluster-setup', type: 'shell',
        path: 'provision/cluster.bash',
        args: [ pgver, ssh_login, vm_prefix, host_ip, pgdata, hapass ] + pg_nodes,
        run: 'never'
    end
  end

  # cluster test suite setup. Use "vagrant up --provision-with=cts"
  config.vm.provision 'cts', type: 'shell', path: 'provision/cts.bash', run: 'never'
end
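
Note the "pgsql-replicas", "pacemaker", "cluster-setup" and "cts" provisioners are declared with run: 'never', so they only execute on demand, either through the Makefile targets above or directly, e.g.:

    vagrant up --provision-with=pgsql-replicas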
102 changes: 102 additions & 0 deletions extra/vagrant/3nodes-haproxy/provision/cluster.bash
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

PGVER="$1"
SSH_LOGIN="$2"
VM_PREFIX="$3"
HOST_IP="$4"
PGDATA="$5"
HAPASS="$6"
shift 6
NODES=( "$@" )
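# with the default Vagrant settings, this script is invoked roughly as:
#   cluster.bash 11 root paf_vm 10.20.30.1 /var/lib/pgsql/11/data hapass srv1 srv2 srv3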

CUSTOMDIR="${PGDATA}/conf.d"

# pcs authentication
PCMK_VER=$(yum info --quiet pacemaker|grep ^Version)
PCMK_VER="${PCMK_VER#*: }" # extract x.y.z
PCMK_VER="${PCMK_VER:0:1}" # extract x
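# e.g. a line like "Version     : 1.1.21" yields "1.1.21", then major version "1"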

if [ "$PCMK_VER" -ge 2 ]; then
    # if pacemaker version is 2.x, we assume pcs supports it (pcs >= 0.10)
    # since pcs 0.10, "pcs host auth" must be executed on each node
    pcs host auth -u hacluster -p "${HAPASS}" "${NODES[@]}"
else
    # this could be run on one node only, but it doesn't hurt to run it
    # everywhere, so we keep this piece of code with the one dedicated to
    # pacemaker 2.x
    pcs cluster auth -u hacluster -p "${HAPASS}" "${NODES[@]}"
fi

# Stop PostgreSQL everywhere
systemctl --quiet stop "postgresql-${PGVER}"

if [ "$(hostname -s)" != "${NODES[0]}" ]; then
    exit 0
fi

# WARNING:
# Starting from here, everything is executed on first node only!

if [ "$PCMK_VER" -ge 2 ]; then
    pcs cluster setup cluster_pgsql --force "${NODES[@]}"
else
    pcs cluster setup --name cluster_pgsql --wait --force "${NODES[@]}"
fi

# pcs stonith sbd enable

pcs cluster start --all --wait

pcs cluster cib cluster1.xml

pcs -f cluster1.xml resource defaults migration-threshold=5
pcs -f cluster1.xml resource defaults resource-stickiness=10
#pcs -f cluster1.xml property set stonith-watchdog-timeout=10s

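# one fence_virsh stonith device per node; the location constraint keeps
# each fencing device away from the node it is meant to fence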
for VM in "${NODES[@]}"; do
    FENCE_ID="fence_vm_${VM}"
    VM_PORT="${VM_PREFIX}_${VM}"
    pcs -f cluster1.xml stonith create "${FENCE_ID}" fence_virsh \
        pcmk_host_check=static-list "pcmk_host_list=${VM}" \
        "port=${VM_PORT}" "ipaddr=${HOST_IP}" "login=${SSH_LOGIN}" \
        "identity_file=/root/.ssh/id_rsa"
    pcs -f cluster1.xml constraint location "fence_vm_${VM}" \
        avoids "${VM}=INFINITY"
done

PGSQLD_RSC_OPTS=(
    "ocf:heartbeat:pgsqlms"
    "pgport=5434"
    "bindir=/usr/pgsql-${PGVER}/bin"
    "pgdata=${PGDATA}"
    "recovery_template=${CUSTOMDIR}/recovery.conf.pcmk"
    "op" "start" "timeout=60s"
    "op" "stop" "timeout=60s"
    "op" "promote" "timeout=30s"
    "op" "demote" "timeout=120s"
    "op" "monitor" "interval=15s" "timeout=10s" "role=Master"
    "op" "monitor" "interval=16s" "timeout=10s" "role=Slave"
    "op" "notify" "timeout=60s"
)

# NB: pcs 0.10.2 doesn't support setting the id of the clone XML node.
# The id is built from the cloned resource id as "<rsc-id>-clone".
# For consistency and code simplicity, we use the same convention to create
# the master resource with pcs 0.9.x for Pacemaker 1.1.
if [ "$PCMK_VER" -ge 2 ]; then
    PGSQLD_RSC_OPTS+=( "promotable" "notify=true" )
fi

pcs -f cluster1.xml resource create pgsqld "${PGSQLD_RSC_OPTS[@]}"

if [ "$PCMK_VER" -eq 1 ]; then
    pcs -f cluster1.xml resource master pgsqld-clone pgsqld notify=true
fi

pcs cluster cib-push scope=configuration cluster1.xml --wait

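# one-shot view of the resulting cluster state, grouped by node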
crm_mon -Dn1
File renamed without changes.
78 changes: 78 additions & 0 deletions extra/vagrant/3nodes-haproxy/provision/haproxy.bash
@@ -0,0 +1,78 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"

$YUM_INSTALL haproxy

systemctl --quiet --now disable haproxy

cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg-dist
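# NB: both the "prd" and "stb" frontends below health-check TCP port 5431
# and expect the answer "production" or "standby"; a small role-reporting
# service on that port is assumed to be set up by another provision step.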
cat <<'EOF' > /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1:514 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode tcp
    log global
    option tcplog
    retries 3
    timeout connect 10s
    timeout client 10m
    timeout server 10m
    timeout check 1s
    maxconn 300

listen stats
    mode http
    bind *:7000
    stats enable
    stats uri /
    timeout connect 15s
    timeout client 15s
    timeout server 15s

listen prd
    bind *:5432
    option tcp-check
    tcp-check connect port 5431
    tcp-check expect string production
    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
    server srv1 srv1:5434 check
    server srv2 srv2:5434 check
    server srv3 srv3:5434 check

listen stb
    bind *:5433
    balance leastconn
    option tcp-check
    tcp-check connect port 5431
    tcp-check expect string standby
    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
    server srv1 srv1:5434 check
    server srv2 srv2:5434 check
    server srv3 srv3:5434 check
EOF

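# allow haproxy to connect to any port under SELinux (needed for the
# tcp-checks against port 5431 and the 5434 backends)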
setsebool -P haproxy_connect_any=1

systemctl --quiet --now enable haproxy

if ! firewall-cmd --get-services|grep -q haproxy-stats; then
    firewall-cmd --quiet --permanent --new-service="haproxy-stats"
    firewall-cmd --quiet --permanent --service="haproxy-stats" --set-description="HAProxy statistics"
    firewall-cmd --quiet --permanent --service="haproxy-stats" --add-port="7000/tcp"
fi
firewall-cmd --quiet --permanent --add-service="haproxy-stats"
firewall-cmd --quiet --reload
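
As a quick smoke test once provisioning is done, the HAProxy stats page should answer on port 7000 of any PostgreSQL node (a sketch, assuming the default addressing where srv1 is 10.20.30.6):

    curl -s http://10.20.30.6:7000/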
File renamed without changes.
File renamed without changes.