diff --git a/extra/vagrant/3nodes-haproxy/Makefile b/extra/vagrant/3nodes-haproxy/Makefile
index 5b2a70c..5374e43 100644
--- a/extra/vagrant/3nodes-haproxy/Makefile
+++ b/extra/vagrant/3nodes-haproxy/Makefile
@@ -1,28 +1,18 @@
 export VAGRANT_BOX_UPDATE_CHECK_DISABLE=1
 export VAGRANT_CHECKPOINT_DISABLE=1
 
-.PHONY: all create_vm postgresql pgsql_replicas pacemaker prov clean check validate cts pcmk-stop
+.PHONY: all up pgsql pacemaker cts clean check validate pcmk-stop
 
+all: up
 
-all: create_vm postgresql pgsql_replicas pacemaker cluster
-
-create_vm:
+up:
 	vagrant up
 
-postgresql: pcmk-stop
-	vagrant up --provision-with=postgresql
-
-pgsql_replicas: pcmk-stop
-	vagrant up --provision-with=pgsql-replicas
+pgsql: pcmk-stop
+	vagrant provision --provision-with=pgsql
 
 pacemaker:
-	vagrant up --provision-with=pacemaker
-
-cluster:
-	vagrant up --provision-with=cluster-setup
-
-prov:
-	vagrant up --provision
+	vagrant provision --provision-with=pacemaker
 
 clean:
 	vagrant destroy -f
@@ -37,7 +27,7 @@ validate:
 	fi
 
 cts:
-	vagrant up --provision-with=cts
+	vagrant provision --provision-with=cts
 
 pcmk-stop:
 	vagrant ssh -c 'if [ -f "/etc/corosync/corosync.conf" ]; then sudo pcs cluster stop --all --wait; fi'
diff --git a/extra/vagrant/3nodes-haproxy/Vagrantfile b/extra/vagrant/3nodes-haproxy/Vagrantfile
index 8063cc9..7e2c369 100644
--- a/extra/vagrant/3nodes-haproxy/Vagrantfile
+++ b/extra/vagrant/3nodes-haproxy/Vagrantfile
@@ -5,17 +5,18 @@ require 'yaml'
 ENV["LANG"] = "C"
 ENV["LC_ALL"] = "C"
 
-boxname = 'centos/7'              # vagrant box to use
-pgver = '11'                      # pg version to use
-hapass = 'hapass'                 # password for sys user hacluster
-ssh_login = 'root'                # ssh login to connect to the host when fencing a VM.
-                                  # put "./provision/id_rsa.pub" in your "~/.ssh/authorized_keys"
-base_ip = '10.20.30.5'            # Base IP address to compute other ones
-pg_nodes = 'srv1', 'srv2', 'srv3' # first will be primary
-log_node = 'log-sink'             # name of the node receiving logs
-vm_prefix = 'paf_vm'              # VM prefix in libvrit
-rhel_user = ''                    # RHEL user account
-rhel_pass = ''                    # RHEL user account password
+boxname = 'centos/7'              # vagrant box to use
+pgver = '12'                      # pg version to use
+hapass = 'hapass'                 # password for sys user hacluster
+ssh_login = 'root'                # ssh login to connect to the host when fencing a VM.
+                                  # put "./provision/id_rsa.pub" in your "~/.ssh/authorized_keys"
+base_ip = '10.20.30.50'           # Base IP address to compute other ones
+vm_prefix = 'paf_3nHP'            # VM prefix in libvirt
+rhel_user = ''                    # RHEL user account
+rhel_pass = ''                    # RHEL user account password
+pg_nodes = 'srv1', 'srv2', 'srv3' # first will be primary
+log_node = 'log-sink'             # name of the node receiving logs
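+
+# NB (illustrative values): node IPs are derived from base_ip by successive
+# increments, e.g. with 10.20.30.50 above: srv1=10.20.30.51, srv2=10.20.30.52,
+# srv3=10.20.30.53, log-sink=10.20.30.54 (see "computes IPs" below)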
 
 if File.file?('vagrant.yml') and ( custom = YAML.load_file('vagrant.yml') )
   boxname = custom['boxname'] if custom.has_key?('boxname')
@@ -33,7 +34,7 @@ end
 Vagrant.configure(2) do |config|
 
   ############################################################################
-  # computes variables
+  # computes IPs
 
   pgdata = "/var/lib/pgsql/#{pgver}/data"
 
   next_ip  = IPAddr.new(base_ip).succ
@@ -48,6 +49,13 @@ Vagrant.configure(2) do |config|
   ############################################################################
   # general vagrant setup
 
+  # RHEL registration when needed
+  if Vagrant.has_plugin?('vagrant-registration')
+    config.registration.unregister_on_halt = false
+    config.registration.username = rhel_user
+    config.registration.password = rhel_pass
+  end
+
   # don't mind about insecure ssh key
   config.ssh.insert_key = false
 
@@ -63,91 +71,56 @@ Vagrant.configure(2) do |config|
     lv.qemu_use_session = false
   end
 
-  # disable default share
+  # sync the root of sources
   config.vm.synced_folder ".", "/vagrant", disabled: true
+  config.vm.synced_folder "../../..", "/vagrant", type: "rsync", rsync__exclude: [ ".git/" ]
+
+  config.vm.define pg_nodes.first, primary: true
+
   ############################################################################
   # system setup for all nodes
 
-  config.vm.define pg_nodes.first, primary: true
+  config.vm.provision 'file', source: 'provision/id_rsa', destination: '/home/vagrant/.ssh/id_rsa'
+  config.vm.provision 'file', source: 'provision/id_rsa.pub', destination: '/home/vagrant/.ssh/id_rsa.pub'
 
   (pg_nodes + [log_node]).each do |node|
     config.vm.define node do |conf|
       conf.vm.network 'private_network', ip: nodes_ips[node]
-      conf.vm.provision 'system-setup', type: 'shell',
+      conf.vm.provision 'system', type: 'shell',
         path: 'provision/system.bash',
-        args: [ node, rhel_user, rhel_pass ] + nodes_ips.keys.map {|n| "#{n}=#{nodes_ips[n]}"},
+        args: [ node, pgver, hapass, log_node ] +
+              nodes_ips.keys.map {|n| "#{n}=#{nodes_ips[n]}"},
         preserve_order: true
     end
   end
 
-  ############################################################################
-  # setup rsyslog to collect logs
-  (pg_nodes + [log_node]).each do |node|
-    config.vm.define node do |conf|
-      conf.vm.provision 'rsyslog-setup', type: 'shell',
-        path: 'provision/rsyslog.bash',
-        args: [ log_node ],
-        preserve_order: true
-    end
-  end
-
-  ############################################################################
-  # setup haproxy
+  # build pgsql instances
   pg_nodes.each do |node|
     config.vm.define node do |conf|
-      conf.vm.provision 'haproxy-setup', type: 'shell',
-        path: 'provision/haproxy.bash',
+      conf.vm.provision 'pgsql', type: 'shell',
+        path: 'provision/pgsql.bash',
+        args: [ node, pgver, pgdata, pg_nodes.first ],
         preserve_order: true
     end
  end
 
   ############################################################################
-  # postgresql installation and setup
-  pg_nodes.each do |node|
-    config.vm.define node do |conf|
-      conf.vm.provision 'postgresql', type: 'shell',
-        path: 'provision/postgresql.bash',
-        args: [ pgver, pg_nodes.first, pgdata ],
-        preserve_order: true
-    end
-  end
-
-  # replicas setup. Use "vagrant up --provision-with=pgsql-replicas"
Use "vagrant up --provision-with=pgsql-replicas" - pg_nodes[1..-1].each do |node| - config.vm.define node do |conf| - conf.vm.provision 'pgsql-replicas', type: 'shell', - path: 'provision/pgsql-replicas.bash', - args: [ pgver, node, pgdata ], - run: 'never' - end - end - - ############################################################################ - # cluster setup. Use "vagrant up --provision-with=pacemaker" + # cluster setup pg_nodes.each do |node| config.vm.define node do |conf| conf.vm.provision 'pacemaker', type: 'shell', path: 'provision/pacemaker.bash', - args: [ hapass ], - run: 'never' - end - end - - # create the cluster. Use "vagrant up --provision-with=cluster-setup" - pg_nodes.each do |node| - config.vm.define node do |conf| - conf.vm.provision 'cluster-setup', type: 'shell', - path: 'provision/cluster.bash', - args: [ pgver, ssh_login, vm_prefix, host_ip, pgdata, hapass ] + pg_nodes, - run: 'never' + args: [ pgver, hapass, ssh_login, + vm_prefix, host_ip, pgdata ] + pg_nodes, + preserve_order: true end end - # cluster test suite setup. Use "vagrant up --provision-with=cts" - config.vm.provision 'cts', type: 'shell', path: 'provision/cts.bash', run: 'never' + ############################################################################ + # cluster test suite setup. Use "vagrant provision --provision-with=cts" + config.vm.provision 'cts', type: 'shell', + path: 'provision/cts.bash', + run: 'never' end diff --git a/extra/vagrant/3nodes-haproxy/provision/cluster.bash b/extra/vagrant/3nodes-haproxy/provision/cluster.bash deleted file mode 100755 index a25a309..0000000 --- a/extra/vagrant/3nodes-haproxy/provision/cluster.bash +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -PGVER="$1" -SSH_LOGIN="$2" -VM_PREFIX="$3" -HOST_IP="$4" -PGDATA="$5" -HAPASS="$6" -shift 6 -NODES=( "$@" ) - -CUSTOMDIR="${PGDATA}/conf.d" - -# psd authent -PCMK_VER=$(yum info --quiet pacemaker|grep ^Version) -PCMK_VER="${PCMK_VER#*: }" # extract x.y.z -PCMK_VER="${PCMK_VER:0:1}" # extract x - -if [ "$PCMK_VER" -ge 2 ]; then - # if pacemaker version is 2.x, we suppose pcs support it (pcs >= 0.10) - # from pcs 0.10, pcs host auth must be exec'ed on each node - pcs host auth -u hacluster -p "${HAPASS}" "${NODES[@]}" -else - # this could be run on one node, but it doesn't hurt if it runs everywhere, - # so we keep this piece of code with the one dedicated to pacemaker 2.x - pcs cluster auth -u hacluster -p "${HAPASS}" "${NODES[@]}" -fi - -# Stop PostgreSQL everywhere -systemctl --quiet stop "postgresql-${PGVER}" - -if [ "$(hostname -s)" != "${NODES[0]}" ]; then - exit 0 -fi - -# WARNING: -# Starting from here, everything is executed on first node only! 
-
-if [ "$PCMK_VER" -ge 2 ]; then
-    pcs cluster setup cluster_pgsql --force "${NODES[@]}"
-else
-    pcs cluster setup --name cluster_pgsql --wait --force "${NODES[@]}"
-fi
-
-# pcs stonith sbd enable
-
-pcs cluster start --all --wait
-
-pcs cluster cib cluster1.xml
-
-pcs -f cluster1.xml resource defaults migration-threshold=5
-pcs -f cluster1.xml resource defaults resource-stickiness=10
-#pcs -f cluster1.xml property set stonith-watchdog-timeout=10s
-
-for VM in "${NODES[@]}"; do
-    FENCE_ID="fence_vm_${VM}"
-    VM_PORT="${VM_PREFIX}_${VM}"
-    pcs -f cluster1.xml stonith create "${FENCE_ID}" fence_virsh \
-        pcmk_host_check=static-list "pcmk_host_list=${VM}" \
-        "port=${VM_PORT}" "ipaddr=${HOST_IP}" "login=${SSH_LOGIN}" \
-        "identity_file=/root/.ssh/id_rsa"
-    pcs -f cluster1.xml constraint location "fence_vm_${VM}" \
-        avoids "${VM}=INFINITY"
-done
-
-PGSQLD_RSC_OPTS=(
-    "ocf:heartbeat:pgsqlms"
-    "pgport=5434"
-    "bindir=/usr/pgsql-${PGVER}/bin"
-    "pgdata=${PGDATA}"
-    "recovery_template=${CUSTOMDIR}/recovery.conf.pcmk"
-    "op" "start" "timeout=60s"
-    "op" "stop" "timeout=60s"
-    "op" "promote" "timeout=30s"
-    "op" "demote" "timeout=120s"
-    "op" "monitor" "interval=15s" "timeout=10s" "role=Master"
-    "op" "monitor" "interval=16s" "timeout=10s" "role=Slave"
-    "op" "notify" "timeout=60s"
-)
-
-# NB: pcs 0.10.2 doesn't support to set the id of the clone XML node
-# the id is built from the rsc id to clone using "-clone"
-# As a matter of cohesion and code simplicity, we use the same
-# convention to create the master resource with pcs 0.9.x for
-# Pacemaker 1.1
-if [ "$PCMK_VER" -ge 2 ]; then
-    PGSQLD_RSC_OPTS+=( "promotable" "notify=true" )
-fi
-
-pcs -f cluster1.xml resource create pgsqld "${PGSQLD_RSC_OPTS[@]}"
-
-if [ "$PCMK_VER" -eq 1 ]; then
-    pcs -f cluster1.xml resource master pgsqld-clone pgsqld notify=true
-fi
-
-pcs cluster cib-push scope=configuration cluster1.xml --wait
-
-crm_mon -Dn1
diff --git a/extra/vagrant/3nodes-haproxy/provision/haproxy.bash b/extra/vagrant/3nodes-haproxy/provision/haproxy.bash
deleted file mode 100644
index edf6539..0000000
--- a/extra/vagrant/3nodes-haproxy/provision/haproxy.bash
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"
-
-$YUM_INSTALL haproxy
-
-systemctl --quiet --now disable haproxy
-
-cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg-dist
-cat <<'EOF' > /etc/haproxy/haproxy.cfg
-global
-    log 127.0.0.1:514 local2
-    chroot /var/lib/haproxy
-    pidfile /var/run/haproxy.pid
-    maxconn 4000
-    user haproxy
-    group haproxy
-    daemon
-
-    stats socket /var/lib/haproxy/stats
-
-defaults
-    mode tcp
-    log global
-    option tcplog
-    retries 3
-    timeout connect 10s
-    timeout client 10m
-    timeout server 10m
-    timeout check 1s
-    maxconn 300
-
-listen stats
-    mode http
-    bind *:7000
-    stats enable
-    stats uri /
-    timeout connect 15s
-    timeout client 15s
-    timeout server 15s
-
-listen prd
-    bind *:5432
-    option tcp-check
-    tcp-check connect port 5431
-    tcp-check expect string production
-    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
-    server srv1 srv1:5434 check
-    server srv2 srv2:5434 check
-    server srv3 srv3:5434 check
-
-listen stb
-    bind *:5433
-    balance leastconn
-    option tcp-check
-    tcp-check connect port 5431
-    tcp-check expect string standby
-    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
-    server srv1 srv1:5434 check
-    server srv2 srv2:5434 check
-    server srv3 srv3:5434 check
-EOF
-
-setsebool -P haproxy_connect_any=1
-
-systemctl --quiet --now enable haproxy
-
-if ! firewall-cmd --get-services|grep -q haproxy-stats; then
-    firewall-cmd --quiet --permanent --new-service="haproxy-stats"
-    firewall-cmd --quiet --permanent --service="haproxy-stats" --set-description="HAProxy statistics"
-    firewall-cmd --quiet --permanent --service="haproxy-stats" --add-port="7000/tcp"
-fi
-firewall-cmd --quiet --permanent --add-service="haproxy-stats"
-firewall-cmd --quiet --reload
diff --git a/extra/vagrant/3nodes-haproxy/provision/pacemaker.bash b/extra/vagrant/3nodes-haproxy/provision/pacemaker.bash
index 4d4d57a..4248f2b 100755
--- a/extra/vagrant/3nodes-haproxy/provision/pacemaker.bash
+++ b/extra/vagrant/3nodes-haproxy/provision/pacemaker.bash
@@ -4,43 +4,99 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-HAPASS="$1"
+declare PCMK_VER
+declare -a PGSQLD_RSC_OPTS
+declare -r PGVER="$1"
+declare -r HAPASS="$2"
 
-# shellcheck disable=SC1091
-source "/etc/os-release"
-OS_ID="$ID"
-YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"
+declare -r SSH_LOGIN="$3"
+declare -r VM_PREFIX="$4"
+declare -r HOST_IP="$5"
+declare -r PGDATA="$6"
+shift 6
+declare -r -a NODES=( "$@" )
 
-# install required packages
-if [ "$OS_ID" = "rhel" ]; then
-    # use yum instead of dnf for compatibility between EL 7 and 8
-    yum-config-manager --enable "*highavailability-rpms"
+declare -r CUSTOMDIR="${PGDATA}/conf.d"
+
+# extract pacemaker major version
+PCMK_VER=$(yum info --quiet pacemaker|grep ^Version)
+PCMK_VER="${PCMK_VER#*: }" # extract x.y.z
+PCMK_VER="${PCMK_VER:0:1}" # extract x
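+# e.g. "Version     : 2.0.4" yields PCMK_VER=2; note that keeping a single
+# character assumes a one-digit major version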
+
+if [ "$PCMK_VER" -ge 2 ]; then
+    # if pacemaker version is 2.x, we assume pcs supports it (pcs >= 0.10)
+    # from pcs 0.10, pcs host auth must be exec'ed on each node
+    pcs host auth -u hacluster -p "${HAPASS}" "${NODES[@]}"
+else
+    # this could be run on one node, but it doesn't hurt if it runs everywhere,
+    # so we keep this piece of code with the one dedicated to pacemaker 2.x
+    pcs cluster auth -u hacluster -p "${HAPASS}" "${NODES[@]}"
 fi
 
-PACKAGES=(
-    pacemaker pcs resource-agents fence-agents-virsh sbd perl-Module-Build
+if [ "$(hostname -s)" != "${NODES[0]}" ]; then
+    exit 0
+fi
+
+# WARNING:
+# Starting from here, everything is executed on first node only!
+
+if [ "$PCMK_VER" -ge 2 ]; then
+    pcs cluster setup cluster_pgsql --force "${NODES[@]}"
+else
+    pcs cluster setup --name cluster_pgsql --wait --force "${NODES[@]}"
+fi
+
+pcs stonith sbd enable
+
+pcs cluster start --all --wait
+
+pcs cluster cib cluster1.xml
+
+pcs -f cluster1.xml resource defaults migration-threshold=5
+pcs -f cluster1.xml resource defaults resource-stickiness=10
+pcs -f cluster1.xml property set stonith-watchdog-timeout=10s
+
+for VM in "${NODES[@]}"; do
+    FENCE_ID="fence_vm_${VM}"
+    VM_PORT="${VM_PREFIX}_${VM}"
+    pcs -f cluster1.xml stonith create "${FENCE_ID}" fence_virsh \
+        pcmk_host_check=static-list "pcmk_host_list=${VM}" \
+        "port=${VM_PORT}" "ipaddr=${HOST_IP}" "login=${SSH_LOGIN}" \
+        "identity_file=/root/.ssh/id_rsa"
+    pcs -f cluster1.xml constraint location "fence_vm_${VM}" \
+        avoids "${VM}=INFINITY"
+done
+
+PGSQLD_RSC_OPTS=(
+    "ocf:heartbeat:pgsqlms"
+    "pgport=5434"
+    "bindir=/usr/pgsql-${PGVER}/bin"
+    "pgdata=${PGDATA}"
+    "recovery_template=${CUSTOMDIR}/recovery.conf.pcmk"
+    "op" "start" "timeout=60s"
+    "op" "stop" "timeout=60s"
+    "op" "promote" "timeout=30s"
+    "op" "demote" "timeout=120s"
+    "op" "monitor" "interval=15s" "timeout=10s" "role=Master"
+    "op" "monitor" "interval=16s" "timeout=10s" "role=Slave"
+    "op" "notify" "timeout=60s"
 )
 
-$YUM_INSTALL "${PACKAGES[@]}"
-
-# install PAF
-cd /vagrant
-[ -f Build ] && perl Build distclean
-sudo -u vagrant perl Build.PL --quiet >/dev/null 2>&1
-sudo -u vagrant perl Build --quiet
-perl Build --quiet install
-
-# firewall setup
-firewall-cmd --quiet --permanent --add-service=high-availability
-firewall-cmd --quiet --reload
-
-# pcsd setup
-systemctl --quiet --now enable pcsd
-echo "${HAPASS}"|passwd --stdin hacluster > /dev/null 2>&1
-
-# Pacemaker setup
-cp /etc/sysconfig/pacemaker /etc/sysconfig/pacemaker.dist
-cat<<'EOF' > /etc/sysconfig/pacemaker
-PCMK_debug=yes
-PCMK_logpriority=debug
-EOF
+# NB: pcs 0.10.2 doesn't support setting the id of the clone XML node;
+# the id is built from the rsc id to clone using "-clone".
+# As a matter of cohesion and code simplicity, we use the same
+# convention to create the master resource with pcs 0.9.x for
+# Pacemaker 1.1
+if [ "$PCMK_VER" -ge 2 ]; then
+    PGSQLD_RSC_OPTS+=( "promotable" "notify=true" )
+fi
+
+pcs -f cluster1.xml resource create pgsqld "${PGSQLD_RSC_OPTS[@]}"
+
+if [ "$PCMK_VER" -eq 1 ]; then
+    pcs -f cluster1.xml resource master pgsqld-clone pgsqld notify=true
+fi
+
+pcs cluster cib-push scope=configuration cluster1.xml --wait
+
+crm_mon -Dn1
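+# pgsqld-clone should eventually show one promoted instance and two standbys;
+# re-check with "pcs status" if the agents are still starting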
[ "${PGVER%%.*}" -lt 12 ]; then - # recovery.conf setup - cat<<-EOC > "${CUSTOMDIR}/recovery.conf.pcmk" - standby_mode = on - primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' - recovery_target_timeline = 'latest' - EOC - cp "${CUSTOMDIR}/recovery.conf.pcmk" "${PGDATA}/recovery.conf" -else - cat <<-EOC > "${CUSTOMDIR}/repli.conf" - primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' - EOC - - # standby_mode disappear in v12 - # no need to add recovery_target_timeline as its default is 'latest' since v12 - touch "${PGDATA}/standby.signal" -fi - -# backing up files -cp "${PGDATA}/pg_hba.conf" "${PGDATA}/.." -cp "${PGDATA}/postgresql.conf" "${PGDATA}/.." -cp "${CUSTOMDIR}"/* "${PGDATA}/.." - -chown -R "postgres:postgres" "${PGDATA}/.." - -# start -systemctl --quiet start "postgresql-${PGVER}" diff --git a/extra/vagrant/3nodes-haproxy/provision/pgsql.bash b/extra/vagrant/3nodes-haproxy/provision/pgsql.bash new file mode 100755 index 0000000..be7c36a --- /dev/null +++ b/extra/vagrant/3nodes-haproxy/provision/pgsql.bash @@ -0,0 +1,149 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +declare -r NODENAME="$1" +declare -r PGVER="$2" +declare -r PGDATA="$3" + +declare -r PRIM_NODE="$4" + +declare -r CUSTOMDIR="${PGDATA}/conf.d" + +# cleanup +systemctl --quiet --now disable "postgresql-${PGVER}" +rm -rf "${PGDATA}" + +if [ "$NODENAME" == "$PRIM_NODE" ]; then + # init instance + "/usr/pgsql-${PGVER}/bin/postgresql-${PGVER}-setup" initdb + + # pg_hba setup + cat<<-EOC > "${PGDATA}/pg_hba.conf" + local all all trust + host all all 0.0.0.0/0 trust + + # forbid self-replication + local replication all reject + host replication all ${NODENAME} reject + host replication all 127.0.0.1/32 reject + host replication all ::1/128 reject + + + # allow any standby connection + host replication postgres 0.0.0.0/0 trust + EOC + + # postgresql.conf setup + mkdir -p "$CUSTOMDIR" + echo "include_dir = 'conf.d'" >> "${PGDATA}/postgresql.conf" + + cat <<-EOC > "${CUSTOMDIR}/cluster_name.conf" + cluster_name = 'pgsql-$NODENAME' + EOC + + cat <<-'EOC' > "${CUSTOMDIR}/custom.conf" + listen_addresses = '*' + port = 5434 + wal_level = replica + max_wal_senders = 10 + hot_standby = on + hot_standby_feedback = on + wal_keep_segments = 256 + log_destination = 'syslog,stderr' + log_checkpoints = on + log_min_duration_statement = 0 + log_autovacuum_min_duration = 0 + log_replication_commands = on + EOC + + if [ "${PGVER%%.*}" -lt 12 ]; then + # recovery.conf setup + cat<<-EOC > "${CUSTOMDIR}/recovery.conf.pcmk" + standby_mode = on + primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' + recovery_target_timeline = 'latest' + EOC + else + cat <<-EOC > "${CUSTOMDIR}/repli.conf" + primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' + EOC + + # standby_mode disappear in v12 + # no need to add recovery_target_timeline as its default is 'latest' + # since v12 + fi + + # backing up files + cp "${PGDATA}/pg_hba.conf" "${PGDATA}/.." + cp "${PGDATA}/postgresql.conf" "${PGDATA}/.." + cp "${CUSTOMDIR}"/* "${PGDATA}/.." + + chown -R postgres:postgres "$PGDATA" + + # restart master pgsql + systemctl --quiet start "postgresql-${PGVER}" + + exit +fi + +# building standby + +# wait for the primary to listen +while ! 
"/usr/pgsql-${PGVER}/bin/pg_isready" -qh "127.0.0.1"; do sleep 1 ; done + + +# build standby +"/usr/pgsql-${PGVER}/bin/pg_basebackup" -h 127.0.0.1 -U postgres \ + -D "${PGDATA}" -X stream + +# set pg_hba +cat< "${PGDATA}/pg_hba.conf" +local all all trust +host all all 0.0.0.0/0 trust + +# forbid self-replication +local replication all reject +host replication all ${NODENAME} reject +host replication all 127.0.0.1/32 reject +host replication all ::1/128 reject + + +# allow any standby connection +host replication postgres 0.0.0.0/0 trust +EOC + +cat < "${CUSTOMDIR}/cluster_name.conf" +cluster_name = 'pgsql-$NODENAME' +EOC + +if [ "${PGVER%%.*}" -lt 12 ]; then + # recovery.conf setup + cat<<-EOC > "${CUSTOMDIR}/recovery.conf.pcmk" + standby_mode = on + primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' + recovery_target_timeline = 'latest' + EOC + + cp "${CUSTOMDIR}/recovery.conf.pcmk" "${PGDATA}/recovery.conf" +else + cat <<-EOC > "${CUSTOMDIR}/repli.conf" + primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}' + EOC + + # standby_mode disappear in v12 + # no need to add recovery_target_timeline as its default is 'latest' since v12 + touch "${PGDATA}/standby.signal" +fi + +# backing up files +cp "${PGDATA}/pg_hba.conf" "${PGDATA}/.." +cp "${PGDATA}/postgresql.conf" "${PGDATA}/.." +cp "${CUSTOMDIR}"/* "${PGDATA}/.." + +chown -R "postgres:postgres" "${PGDATA}/.." + +# start +systemctl --quiet start "postgresql-${PGVER}" diff --git a/extra/vagrant/3nodes-haproxy/provision/postgresql.bash b/extra/vagrant/3nodes-haproxy/provision/postgresql.bash deleted file mode 100755 index 9aecaa5..0000000 --- a/extra/vagrant/3nodes-haproxy/provision/postgresql.bash +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -PGVER="$1" -NODENAME="$2" -PGDATA="$3" - -# shellcheck disable=SC1091 -source "/etc/os-release" -OS_VER="$VERSION_ID" -YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0" - -if ! rpm --quiet -q "pgdg-redhat-repo"; then - if [ "${OS_VER:0:2}" = "8." ]; then - $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm" - else - $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm" - fi -fi - -# disable postgresql upstream module conflicting with pgdg packages in RHEL8 -if [ "${OS_VER:0:2}" = "8." ]; then - yum -qy module disable postgresql -fi - -PACKAGES=( - "postgresql${PGVER}" - "postgresql${PGVER}-server" - "postgresql${PGVER}-contrib" -) - -$YUM_INSTALL "${PACKAGES[@]}" - -# PostgreSQL state -cat<<'EOF' > /etc/systemd/system/pgsql-state@.service -[Unit] -Description=Local PostgreSQL state - -[Service] -User=postgres -Group=postgres -ExecStart=/usr/pgsql-12/bin/psql -d postgres -U postgres -p 5434 -Atc "select CASE pg_is_in_recovery() WHEN true THEN 'standby' ELSE 'production' END" -StandardOutput=socket -EOF - -cat<<'EOF' > /etc/systemd/system/pgsql-state.socket -[Unit] -Description=Local PostgreSQL state - -[Socket] -ListenStream=5431 -Accept=yes - -[Install] -WantedBy=sockets.target -EOF - -systemctl --quiet --now enable pgsql-state.socket - -# firewall setup -firewall-cmd --quiet --permanent --service=postgresql --add-port="5433/tcp" -firewall-cmd --quiet --permanent --service=postgresql --add-port="5434/tcp" -firewall-cmd --quiet --permanent --remove-service=postgresql -firewall-cmd --quiet --permanent --add-service=postgresql -if ! 
+
+# start
+systemctl --quiet start "postgresql-${PGVER}"
diff --git a/extra/vagrant/3nodes-haproxy/provision/postgresql.bash b/extra/vagrant/3nodes-haproxy/provision/postgresql.bash
deleted file mode 100755
index 9aecaa5..0000000
--- a/extra/vagrant/3nodes-haproxy/provision/postgresql.bash
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-PGVER="$1"
-NODENAME="$2"
-PGDATA="$3"
-
-# shellcheck disable=SC1091
-source "/etc/os-release"
-OS_VER="$VERSION_ID"
-YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"
-
-if ! rpm --quiet -q "pgdg-redhat-repo"; then
-    if [ "${OS_VER:0:2}" = "8." ]; then
-        $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm"
-    else
-        $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm"
-    fi
-fi
-
-# disable postgresql upstream module conflicting with pgdg packages in RHEL8
-if [ "${OS_VER:0:2}" = "8." ]; then
-    yum -qy module disable postgresql
-fi
-
-PACKAGES=(
-    "postgresql${PGVER}"
-    "postgresql${PGVER}-server"
-    "postgresql${PGVER}-contrib"
-)
-
-$YUM_INSTALL "${PACKAGES[@]}"
-
-# PostgreSQL state
-cat<<'EOF' > /etc/systemd/system/pgsql-state@.service
-[Unit]
-Description=Local PostgreSQL state
-
-[Service]
-User=postgres
-Group=postgres
-ExecStart=/usr/pgsql-12/bin/psql -d postgres -U postgres -p 5434 -Atc "select CASE pg_is_in_recovery() WHEN true THEN 'standby' ELSE 'production' END"
-StandardOutput=socket
-EOF
-
-cat<<'EOF' > /etc/systemd/system/pgsql-state.socket
-[Unit]
-Description=Local PostgreSQL state
-
-[Socket]
-ListenStream=5431
-Accept=yes
-
-[Install]
-WantedBy=sockets.target
-EOF
-
-systemctl --quiet --now enable pgsql-state.socket
-
-# firewall setup
-firewall-cmd --quiet --permanent --service=postgresql --add-port="5433/tcp"
-firewall-cmd --quiet --permanent --service=postgresql --add-port="5434/tcp"
-firewall-cmd --quiet --permanent --remove-service=postgresql
-firewall-cmd --quiet --permanent --add-service=postgresql
-if ! firewall-cmd --get-services|grep -q pgsql-state; then
-    firewall-cmd --quiet --permanent --new-service="pgsql-state"
-    firewall-cmd --quiet --permanent --service="pgsql-state" --set-description="Local PostgreSQL state"
-    firewall-cmd --quiet --permanent --service="pgsql-state" --add-port="5431/tcp"
-fi
-firewall-cmd --quiet --permanent --add-service="pgsql-state"
-firewall-cmd --quiet --reload
-
-if [ "$(hostname -s)" != "$NODENAME" ]; then
-    exit 0
-fi
-
-# Build the primary
-CUSTOMDIR="${PGDATA}/conf.d"
-
-# cleanup
-systemctl --quiet --now disable "postgresql-${PGVER}"
-rm -rf "${PGDATA}"
-
-# init instance
-"/usr/pgsql-${PGVER}/bin/postgresql-${PGVER}-setup" initdb
-
-# pg_hba setup
-cat <<EOC > "${PGDATA}/pg_hba.conf"
-local all all trust
-host all all 0.0.0.0/0 trust
-
-local replication all reject
-host replication all $NODENAME reject
-host replication all 127.0.0.1/32 reject
-host replication all ::1/128 reject
-# allow any standby connection
-host replication postgres 0.0.0.0/0 trust
-EOC
-
-# postgresql.conf setup
-mkdir -p "$CUSTOMDIR"
-echo "include_dir = 'conf.d'" >> "${PGDATA}/postgresql.conf"
-
-cat <<EOC > "${CUSTOMDIR}/cluster_name.conf"
-cluster_name = 'pgsql-$NODENAME'
-EOC
-
-cat <<'EOC' > "${CUSTOMDIR}/custom.conf"
-listen_addresses = '*'
-port = 5434
-wal_level = replica
-max_wal_senders = 10
-hot_standby = on
-hot_standby_feedback = on
-wal_keep_segments = 256
-log_destination = 'syslog,stderr'
-log_checkpoints = on
-log_min_duration_statement = 0
-log_autovacuum_min_duration = 0
-log_replication_commands = on
-log_line_prefix = '%m [%p] host=%h '
-EOC
-
-if [ "${PGVER%%.*}" -lt 12 ]; then
-    # recovery.conf setup
-    cat<<-EOC > "${CUSTOMDIR}/recovery.conf.pcmk"
-		standby_mode = on
-		primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}'
-		recovery_target_timeline = 'latest'
-	EOC
-else
-    cat <<-EOC > "${CUSTOMDIR}/repli.conf"
-		primary_conninfo = 'host=127.0.0.1 application_name=${NODENAME}'
-	EOC
-
-    # standby_mode disappear in v12
-    # no need to add recovery_target_timeline as its default is 'latest' since v12
-fi
-
-# backing up files
-cp "${PGDATA}/pg_hba.conf" "${PGDATA}/.."
-cp "${PGDATA}/postgresql.conf" "${PGDATA}/.."
-cp "${CUSTOMDIR}"/* "${PGDATA}/.."
-
-chown -R postgres:postgres "$PGDATA"
-
-# restart master pgsql
-systemctl --quiet start "postgresql-${PGVER}"
diff --git a/extra/vagrant/3nodes-haproxy/provision/rsyslog.bash b/extra/vagrant/3nodes-haproxy/provision/rsyslog.bash
deleted file mode 100755
index 024055d..0000000
--- a/extra/vagrant/3nodes-haproxy/provision/rsyslog.bash
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-LOG_SINK="$1"
-
-if [ "$(hostname -s)" == "$LOG_SINK" ]; then
-    # setup log sink
-    cat <<-'EOF' > /etc/rsyslog.d/log_sink.conf
-		$ModLoad imtcp
-		$InputTCPServerRun 514
-
-		$template RemoteLogsMerged,"/var/log/%HOSTNAME%/messages.log"
-		*.* ?RemoteLogsMerged
-
-		$template RemoteLogs,"/var/log/%HOSTNAME%/%PROGRAMNAME%.log"
-		*.* ?RemoteLogs
-		#& ~
-	EOF
-
-    if ! firewall-cmd --get-services|grep -q rsyslog-tcp; then
-        firewall-cmd --quiet --permanent --new-service="rsyslog-tcp"
-        firewall-cmd --quiet --permanent --service="rsyslog-tcp" --set-description="RSyslog TCP port"
-        firewall-cmd --quiet --permanent --service="rsyslog-tcp" --add-port="514/tcp"
-    fi
-    firewall-cmd --quiet --permanent --add-service="rsyslog-tcp"
-    firewall-cmd --quiet --reload
-
-    semanage port -m -t syslogd_port_t -p tcp 514
-else
-    # send logs to log-sinks
-    cat <<-'EOF' >/etc/rsyslog.d/20-fwd_log_sink.conf
-		*.* action(type="omfwd"
-		queue.type="LinkedList"
-		queue.filename="log_sink_fwd"
-		action.resumeRetryCount="-1"
-		queue.saveonshutdown="on"
-		target="log-sink" Port="514" Protocol="tcp")
-	EOF
-
-    # listen for haproxy logs locally
-    cat <<-'EOF' >/etc/rsyslog.d/10-haproxy.conf
-		$ModLoad imudp
-		$UDPServerAddress 127.0.0.1
-		$UDPServerRun 514
-	EOF
-fi
-
-systemctl --quiet restart rsyslog
diff --git a/extra/vagrant/3nodes-haproxy/provision/system.bash b/extra/vagrant/3nodes-haproxy/provision/system.bash
index 9ab5d68..fac9927 100755
--- a/extra/vagrant/3nodes-haproxy/provision/system.bash
+++ b/extra/vagrant/3nodes-haproxy/provision/system.bash
@@ -4,46 +4,57 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-NODENAME="$1"
-RHEL_USER="$2"
-RHEL_PASS="$3"
-shift 3
-NODES=( "$@" )
+declare -r NODENAME="$1"
+declare -r PGVER="$2"
 
-hostnamectl set-hostname "${NODENAME}"
+declare -r HAPASS="$3"
+declare -r LOGNODE="$4"
 
-for N in "${NODES[@]}"; do
-    NG=$(sed -n "/${N%=*}\$/p" /etc/hosts|wc -l)
-    if [ "$NG" -eq 0 ]; then
-        echo "${N##*=} ${N%=*}" >> /etc/hosts
-    fi
-done
+shift 4
+declare -r -a NODES=( "$@" )
 
+declare -r YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"
+
+# detect operating system provider and version
 # shellcheck disable=SC1091
 source "/etc/os-release"
 OS_ID="$ID"
-YUM_INSTALL="yum install --nogpgcheck --quiet -y -e 0"
+OS_VER="$VERSION_ID"
 
-PACKAGES=( vim bash-completion yum-utils policycoreutils policycoreutils-python )
+# set hostname
+hostnamectl set-hostname "${NODENAME}"
 
+# fill /etc/hosts
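+# (NODES holds "name=ip" pairs built by the Vagrantfile, e.g. "srv1=10.20.30.51")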
+for N in "${NODES[@]}"; do
+    declare HNAME="${N%=*}"
+    declare HIP="${N##*=}"
+    if ! grep -Eq "${HNAME}\$" /etc/hosts; then
+        echo "${HIP} ${HNAME}" >> /etc/hosts
+    fi
+done
+
+# enable required repository
 if [ "$OS_ID" = "rhel" ]; then
-    subscription-manager register --force --username "${RHEL_USER:?}" --password "${RHEL_PASS:?}" --auto-attach
-    PACKAGES+=("tmux")
-else
-    PACKAGES+=("screen")
+    # use yum instead of dnf for compatibility between EL 7 and 8
+    yum-config-manager --enable "*highavailability-rpms"
+elif [ "$OS_ID" = "centos" ] && [ "${OS_VER:0:1}" = "8" ]; then
+    yum-config-manager --enable "HighAvailability"
 fi
 
-$YUM_INSTALL "${PACKAGES[@]}"
+# install essential packages
+if [ "${OS_VER:0:1}" = "8" ]; then
+    $YUM_INSTALL yum-utils tmux vim policycoreutils-python-utils.noarch
+else
+    $YUM_INSTALL yum-utils tmux vim policycoreutils-python
+fi
 
+# SSH setup
 cat <<'EOF' > "/home/vagrant/.ssh/config"
 Host *
     CheckHostIP no
     StrictHostKeyChecking no
 EOF
 
-cp "/vagrant/extra/vagrant/3nodes-haproxy/provision/id_rsa" "/home/vagrant/.ssh"
-cp "/vagrant/extra/vagrant/3nodes-haproxy/provision/id_rsa.pub" "/home/vagrant/.ssh"
-
 chown -R "vagrant:" "/home/vagrant/.ssh"
 chmod 0700 "/home/vagrant/.ssh"
 chmod 0600 "/home/vagrant/.ssh/id_rsa"
@@ -63,3 +74,199 @@ chmod 0600 "/root/.ssh/authorized_keys"
 
 # enable firewall
 systemctl --quiet --now enable firewalld
+
+# setup log sink
+if [ "$NODENAME" == "$LOGNODE" ]; then
+
+    cat <<-'EOF' > /etc/rsyslog.d/log_sink.conf
+		$ModLoad imtcp
+		$InputTCPServerRun 514
+
+		$template RemoteLogsMerged,"/var/log/%HOSTNAME%/messages.log"
+		*.* ?RemoteLogsMerged
+
+		$template RemoteLogs,"/var/log/%HOSTNAME%/%PROGRAMNAME%.log"
+		*.* ?RemoteLogs
+		#& ~
+	EOF
+
+    if ! firewall-cmd --get-services|grep -q rsyslog-tcp; then
+        firewall-cmd --quiet --permanent --new-service="rsyslog-tcp"
+        firewall-cmd --quiet --permanent --service="rsyslog-tcp" --set-description="RSyslog TCP port"
+        firewall-cmd --quiet --permanent --service="rsyslog-tcp" --add-port="514/tcp"
+    fi
+    firewall-cmd --quiet --permanent --add-service="rsyslog-tcp"
+    firewall-cmd --quiet --reload
+
+    semanage port -m -t syslogd_port_t -p tcp 514
+
+    systemctl --quiet restart rsyslog
+
+    exit
+fi
+
+# setting up pgsql nodes
+
+# send logs to log-sinks
+cat <<'EOF' >/etc/rsyslog.d/20-fwd_log_sink.conf
+*.* action(type="omfwd"
+queue.type="LinkedList"
+queue.filename="log_sink_fwd"
+action.resumeRetryCount="-1"
+queue.saveonshutdown="on"
+target="log-sink" Port="514" Protocol="tcp")
+EOF
+
+# listen for haproxy logs locally
+cat <<'EOF' >/etc/rsyslog.d/10-haproxy.conf
+$ModLoad imudp
+$UDPServerAddress 127.0.0.1
+$UDPServerRun 514
+EOF
+
+systemctl --quiet restart rsyslog
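+# (haproxy is configured below to log to 127.0.0.1:514/udp; rsyslog picks it
+# up locally and forwards everything to the log-sink node over TCP)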
+
+# PGDG repo
+if ! rpm --quiet -q "pgdg-redhat-repo"; then
+    if [ "${OS_VER:0:1}" = "8" ]; then
+        $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm"
+    else
+        $YUM_INSTALL "https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm"
+    fi
+fi
+
+# disable postgresql upstream module conflicting with pgdg packages in RHEL8
+if [ "${OS_VER:0:1}" = "8" ]; then
+    yum -qy module disable postgresql
+fi
+
+$YUM_INSTALL pacemaker pcs haproxy \
+    resource-agents fence-agents-virsh sbd \
+    perl-Module-Build \
+    "postgresql${PGVER}" \
+    "postgresql${PGVER}-server" \
+    "postgresql${PGVER}-contrib"
+
+# setting up pcs
+systemctl --quiet --now enable pcsd.service
+echo "$HAPASS"|passwd --stdin hacluster > /dev/null 2>&1
+
+# setting up haproxy
+cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg-dist
+cat <<'EOF' > /etc/haproxy/haproxy.cfg
+global
+    log 127.0.0.1:514 local2
+    chroot /var/lib/haproxy
+    pidfile /var/run/haproxy.pid
+    maxconn 4000
+    user haproxy
+    group haproxy
+    daemon
+
+    stats socket /var/lib/haproxy/stats
+
+defaults
+    mode tcp
+    log global
+    option tcplog
+    retries 3
+    timeout connect 10s
+    timeout client 10m
+    timeout server 10m
+    timeout check 1s
+    maxconn 300
+
+listen stats
+    mode http
+    bind *:7000
+    stats enable
+    stats uri /
+    timeout connect 15s
+    timeout client 15s
+    timeout server 15s
+
+listen prd
+    bind *:5432
+    option tcp-check
+    tcp-check connect port 5431
+    tcp-check expect string production
+    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
+    server srv1 srv1:5434 check
+    server srv2 srv2:5434 check
+    server srv3 srv3:5434 check
+
+listen stb
+    bind *:5433
+    balance leastconn
+    option tcp-check
+    tcp-check connect port 5431
+    tcp-check expect string standby
+    default-server inter 2s fastinter 1s rise 2 fall 1 on-marked-down shutdown-sessions
+    server srv1 srv1:5434 check
+    server srv2 srv2:5434 check
+    server srv3 srv3:5434 check
+EOF
+
+setsebool -P haproxy_connect_any=1
+
+systemctl --quiet --now enable haproxy
+
+# PostgreSQL state
+cat<<'EOF' > /etc/systemd/system/pgsql-state@.service
+[Unit]
+Description=Local PostgreSQL state
+
+[Service]
+User=postgres
+Group=postgres
+ExecStart=/usr/pgsql-12/bin/psql -d postgres -U postgres -p 5434 -Atc "select CASE pg_is_in_recovery() WHEN true THEN 'standby' ELSE 'production' END"
+StandardOutput=socket
+EOF
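+# NB: the psql path above hardcodes /usr/pgsql-12; keep it in sync with
+# ${PGVER} when testing another PostgreSQL release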
+
+cat<<'EOF' > /etc/systemd/system/pgsql-state.socket
+[Unit]
+Description=Local PostgreSQL state
+
+[Socket]
+ListenStream=5431
+Accept=yes
+
+[Install]
+WantedBy=sockets.target
+EOF
+
+systemctl --quiet --now enable pgsql-state.socket
+
+# firewall setup
+firewall-cmd --quiet --permanent --service=postgresql --add-port="5433/tcp"
+firewall-cmd --quiet --permanent --service=postgresql --add-port="5434/tcp"
+firewall-cmd --quiet --permanent --remove-service=postgresql
+firewall-cmd --quiet --permanent --add-service=postgresql
+firewall-cmd --quiet --permanent --add-service=high-availability
+if ! firewall-cmd --get-services|grep -q pgsql-state; then
+    firewall-cmd --quiet --permanent --new-service="pgsql-state"
+    firewall-cmd --quiet --permanent --service="pgsql-state" --set-description="Local PostgreSQL state"
+    firewall-cmd --quiet --permanent --service="pgsql-state" --add-port="5431/tcp"
+fi
+firewall-cmd --quiet --permanent --add-service="pgsql-state"
+if ! firewall-cmd --get-services|grep -q haproxy-stats; then
+    firewall-cmd --quiet --permanent --new-service="haproxy-stats"
+    firewall-cmd --quiet --permanent --service="haproxy-stats" --set-description="HAProxy statistics"
+    firewall-cmd --quiet --permanent --service="haproxy-stats" --add-port="7000/tcp"
+fi
+firewall-cmd --quiet --permanent --add-service="haproxy-stats"
+firewall-cmd --quiet --reload
+
+# install PAF
+cd /vagrant
+[ -f Build ] && perl Build distclean
+sudo -u vagrant perl Build.PL --quiet >/dev/null 2>&1
+sudo -u vagrant perl Build --quiet
+perl Build --quiet install
+
+# Pcmk setup
+cp /etc/sysconfig/pacemaker /etc/sysconfig/pacemaker.dist
+cat<<'EOF' > /etc/sysconfig/pacemaker
+PCMK_debug=yes
+PCMK_logpriority=debug
+EOF
diff --git a/extra/vagrant/3nodes-haproxy/vagrant.yml-dist b/extra/vagrant/3nodes-haproxy/vagrant.yml-dist
index fd19591..63be98e 100644
--- a/extra/vagrant/3nodes-haproxy/vagrant.yml-dist
+++ b/extra/vagrant/3nodes-haproxy/vagrant.yml-dist
@@ -4,11 +4,11 @@
 # ssh_login: "user"       # ssh login to connect to the host when fencing a VM.
 #                         # put "./provision/id_rsa.pub" in your "~/.ssh/authorized_keys"
 # base_ip: "10.20.30.5"   # Base IP address to compute other ones
-# pg_nodes:              # servers to create.
-#   - "srv1"             # First one will be master
+# pg_nodes:               # servers to create.
+#   - "srv1"              # First one will be primary
 #   - "srv2"
 #   - "srv3"
 # log_node: "log-sink"
-# vm_prefix: "paf_vm"
+# vm_prefix: "paf_3nHP"
 # rhel_user: ""           # RHEL user account
 # rhel_pass: ""           # RHEL user account password