diff --git a/script/pgsqlms b/script/pgsqlms
index ada9a21..0d00958 100755
--- a/script/pgsqlms
+++ b/script/pgsqlms
@@ -228,9 +228,9 @@ sub _get_lag_scores {
     # "pg_stat_replication" view.
     # The row_number applies on the result set ordered on write_location ASC so
     # the highest row_number should be given to the closest node from the
-    # master, then the lowest node name (alphanumeric sort) in case of equality.
-    # The result set itself is order by priority DESC to process best known
-    # candidate first.
+    # primary, then the lowest node name (alphanumeric sort) in case of
+    # equality. The result set itself is ordered by priority DESC to process
+    # the best known candidate first.
     $query = qq{
       SELECT application_name, priority, location, state, current_lag
       FROM (
@@ -407,8 +407,8 @@ sub _master_score_exists {
     return 0;
 }
 
-# Check if the current transiation is a recover of a master clone on given node.
-sub _is_master_recover {
+# Check if the current transition is a recovery of the primary on the given node.
+sub _is_primary_recover {
     my ( $n ) = @_;
 
     return (
@@ -417,8 +417,8 @@ sub _is_master_recover {
     );
 }
 
-# Check if the current transition is a recover of a slave clone on given node.
-sub _is_slave_recover {
+# Check if the current transition is a recovery of a standby clone on the given node.
+sub _is_standby_recover {
     my ( $n ) = @_;
 
     return (
@@ -427,7 +427,7 @@ sub _is_slave_recover {
     );
 }
 
-# check if th current transition is a switchover to the given node.
+# check if the current transition is a switchover to the given node.
 sub _is_switchover {
     my ( $n ) = @_;
     my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
@@ -707,18 +707,18 @@ sub _controldata_to_ocf {
 }
 
 # Check the write_location of all secondaries, and adapt their master score so
-# that the instance closest to the master will be the selected candidate should
-# a promotion be triggered.
+# that the instance closest to the primary will be the selected candidate
+# should a promotion be triggered.
 # NOTE: This is only a hint to pacemaker! The selected candidate to promotion
 # actually re-check it is the best candidate and force a re-election by failing
 # if a better one exists. This avoid a race condition between the call of the
-# monitor action and the promotion where another slave might have catchup faster
-# with the master.
+# monitor action and the promotion where another standby might catch up
+# faster with the primary.
 # NOTE: we cannot directly use the write_location, neither a lsn_diff value as
 # promotion score as Pacemaker considers any value greater than 1,000,000 as
 # INFINITY.
 #
-# This sub must be executed from a master monitor action.
+# This sub must be executed from a Master-role monitor action.
 #
 sub _check_locations {
     my $partition_nodes;
@@ -740,7 +740,7 @@ sub _check_locations {
 
     # If no lag are reported at this point, it means that there is no
     # secondary instance connected.
-    ocf_log( 'warning', 'No secondary connected to the master' )
+    ocf_log( 'warning', 'No secondary connected to the primary' )
         if $row_num == 0;
 
     # For each standby connected, set their master score based on the following
@@ -820,10 +820,10 @@ sub _check_locations {
 
 # _check_switchover
 # check if the pgsql switchover to the localnode is safe.
-# This is supposed to be called **after** the master has been stopped or demoted.
-# This sub checks if the local standby received the shutdown checkpoint from the
-# old master to make sure it can take over the master role and the old master
-# will be able to catchup as a standby after.
+# This is supposed to be called **after** the primary has been stopped or
+# demoted. It checks if the local standby received the shutdown checkpoint
+# from the old primary to make sure it can promote safely and the old
+# primary will be able to catch up as a standby after.
 #
 # Returns 0 if switchover is safe
 # Returns 1 if swithcover is not safe
@@ -843,20 +843,20 @@ sub _check_switchover {
         .' Need to check the last record in WAL',
         $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
 
-    # check if we received the shutdown checkpoint of the master during its
+    # check if we received the shutdown checkpoint of the primary during its
    # demote process.
     # We need the last local checkpoint LSN and the last received LSN from
-    # master to check in the WAL between these adresses if we have a
+    # primary to check in the WAL between these addresses if we have a
     # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
     #
     # Force a checkpoint to make sure the controldata shows the very last TL
-    # and the master's shutdown checkpoint
+    # and the primary's shutdown checkpoint
     _query( q{ CHECKPOINT }, {} );
     %cdata     = _get_controldata();
     $tl        = $cdata{'tl'};
     $last_redo = $cdata{'redo'};
 
-    # Get the last received LSN from master
+    # Get the last received LSN from primary
     $last_lsn = _get_last_received_lsn();
 
     unless ( defined $last_lsn ) {
@@ -877,12 +877,12 @@ sub _check_switchover {
     if ( $rc == 0 and
         $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
     ) {
-        ocf_log( 'info', 'Slave received the shutdown checkpoint' );
+        ocf_log( 'info', 'Standby received the shutdown checkpoint' );
         return 0;
     }
 
     ocf_exit_reason(
-        'Did not receive the shutdown checkpoint from the old master!' );
+        'Did not receive the shutdown checkpoint from the old primary!' );
 
     return 1;
 }
@@ -909,7 +909,7 @@ sub _confirm_role {
     elsif ( $is_in_recovery eq 'f' ) {
         # The instance is a primary.
         ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
-        # Check lsn diff with current slaves if any
+        # Check lsn diff with current standbys if any
         _check_locations() if $__OCF_ACTION eq 'monitor';
         return $OCF_RUNNING_MASTER;
     }
@@ -985,9 +985,10 @@ sub _confirm_stopped {
     elsif ( $controldata_rc == $OCF_SUCCESS ) {
         # The controldata has not been updated to "shutdown in recovery".
         # It should mean we had a crash on a secondary instance.
-        # There is no "FAILED_SLAVE" return code, so we return a generic error.
+        # There is no "FAILED_STANDBY" return code, so we return a generic
+        # error.
         ocf_exit_reason(
-            'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed',
+            'Instance "%s" controldata indicates a running standby instance, the instance has probably crashed',
             $OCF_RESOURCE_INSTANCE );
         return $OCF_ERR_GENERIC;
     }
@@ -1061,9 +1062,9 @@ parameter is set or a template file is found.
 
 =item B<maximum_lag>
 
-Maximum lag allowed on a standby before we set a negative master score on it.
+Maximum lag allowed on a standby before forbidding any promotion to it.
 The calculation is based on the difference between the current xlog location on
-the master and the write location on the standby.
+the primary and the write location on the standby.
 
 (optional, integer, default "0" disables this feature)
 
@@ -1155,8 +1156,8 @@ sub ocf_meta_data {
-        Maximum lag allowed on a standby before we set a negative master score on it. The calculation
-        is based on the difference between the current LSN on the master and the LSN
+        Maximum lag allowed on a standby before forbidding any promotion to it. The calculation
+        is based on the difference between the current LSN on the primary and the LSN
         written on the standby. This parameter must be a valid positive number as described
         in PostgreSQL documentation. See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC
@@ -1168,7 +1169,7 @@ sub ocf_meta_data {
         Path to the recovery.conf template. This file is simply copied to \$PGDATA
-        before starting the instance as slave.
+        before starting the instance as standby.
         ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for
         PostgreSQL 12 and higher. The cluster will refuse to start if a template
         file is found.
@@ -1195,7 +1196,6 @@ sub ocf_meta_data {
-
@@ -1229,11 +1229,11 @@ Suggested minimum timeout: 20.
 
 =item B<promote>
 
-Promotes the resource to the Master role. Suggested minimum timeout: 30.
+Promotes the resource to the primary role. Suggested minimum timeout: 30.
 
 =item B<demote>
 
-Demotes the resource to the Slave role. Suggested minimum timeout: 120.
+Demotes the resource to the standby role. Suggested minimum timeout: 120.
 
 =item B
 
@@ -1306,7 +1306,7 @@ sub pgsql_validate_all {
     unless ( defined $ENV{'OCF_RESKEY_CRM_meta_notify'}
        and lc($ENV{'OCF_RESKEY_CRM_meta_notify'}) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
         ocf_exit_reason(
-            'You must set meta parameter notify=true for your master resource'
+            'You must set meta parameter notify=true for your "master" resource'
         );
         return $OCF_ERR_INSTALLED;
     }
@@ -1317,7 +1317,7 @@ sub pgsql_validate_all {
         and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
     ) {
         ocf_exit_reason(
-            'You must set meta parameter master-max=1 for your master resource'
+            'You must set meta parameter master-max=1 for your "master" resource'
        );
         return $OCF_ERR_INSTALLED;
     }
@@ -1478,14 +1478,14 @@ sub pgsql_start {
         # Check if a master score exists in the cluster.
         # During the very first start of the cluster, no master score will
-        # exists on any of the existing slaves, unless an admin designated
-        # one of them using crm_master. If no master exists the cluster will
-        # not promote a master among the slaves.
+        # exists on any of the existing standbys, unless an admin designated
+        # one of them using crm_master. If no master score exists, the
+        # cluster can not pick a standby to promote.
         # To solve this situation, we check if there is at least one master
         # score existing on one node in the cluster. Do nothing if at least
-        # one master score is found among the clones of the resource. If no
-        # master score exists, set a score of 1 only if the resource was a
-        # shut downed master before the start.
+        # one master score is found among the clones of the resource.
+        # If no master score exists, set a score of 1 only if the resource
+        # was a cleanly shut down primary before the start.
         if ( $prev_state eq "shut down" and not _master_score_exists() ) {
             ocf_log( 'info', 'No master score around. Set mine to 1' );
@@ -1496,7 +1496,7 @@ sub pgsql_start {
     }
 
     ocf_exit_reason(
-        'Instance "%s" is not running as a slave (returned %d)',
+        'Instance "%s" is not running as a standby (returned %d)',
         $OCF_RESOURCE_INSTANCE, $rc );
 
     return $OCF_ERR_GENERIC;
@@ -1703,7 +1703,7 @@ sub pgsql_demote {
         return $OCF_SUCCESS;
     }
     elsif ( $rc == $OCF_NOT_RUNNING ) {
-        # Instance is stopped. Nothing to do.
+        # Instance is stopped. Need to start as standby.
         ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
             $OCF_RESOURCE_INSTANCE );
     }
@@ -1717,12 +1717,12 @@ sub pgsql_demote {
         return $OCF_ERR_GENERIC;
     }
 
-    # TODO we need to make sure at least one slave is connected!!
+    # TODO Do we need to make sure at least one standby is connected?
 
-    # WARNING if the resource state is stopped instead of master, the ocf ra dev
-    # rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop where
-    # it computes transitions of demote(failing)->stop->start->promote actions
-    # until failcount == migration-threshold.
+    # WARNING if the resource state is stopped instead of primary, the ocf ra
+    # dev rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop
+    # where it computes transitions of demote(failing)->stop->start->promote
+    # actions until failcount == migration-threshold.
     # This is a really ugly trick to keep going with the demode action if the
     # rsc is already stopped gracefully.
     # See discussion "CRM trying to demote a stopped resource" on
@@ -1790,12 +1790,12 @@ sub pgsql_promote {
     $rc = pgsql_monitor();
 
     if ( $rc == $OCF_SUCCESS ) {
-        # Running as slave. Normal, expected behavior.
+        # Running as standby. Normal, expected behavior.
         ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
             $OCF_RESOURCE_INSTANCE );
     }
     elsif ( $rc == $OCF_RUNNING_MASTER ) {
-        # Already a master. Unexpected, but not a problem.
+        # Already a primary. Unexpected, but not a problem.
         ocf_log( 'info', '"%s" already running as a primary',
             $OCF_RESOURCE_INSTANCE );
         return $OCF_SUCCESS;
@@ -1835,19 +1835,20 @@ sub pgsql_promote {
        # internal error during _check_switchover
     }
 
-    # Do not check for a better candidate if we try to recover the master
-    # Recover of a master is detected during the pre-promote action. It sets the
-    # private attribute 'recover_master' to '1' if this is a master recover.
-    if ( _get_priv_attr( 'recover_master' ) eq '1' ) {
-        ocf_log( 'info', 'Recovering old master, no election needed');
+    # Do not check for a better candidate if we try to recover the primary.
+    # Recovery of a primary is detected during the pre-promote action. It sets
+    # the private attribute 'recover_primary' to '1' if this is a primary
+    # recovery.
+    if ( _get_priv_attr( 'recover_primary' ) eq '1' ) {
+        ocf_log( 'info', 'Recovering old primary, no election needed');
     }
     else {
         # The promotion is occurring on the best known candidate (highest
         # master score), as chosen by pacemaker during the last working monitor
-        # on previous master (see pgsql_monitor/_check_locations subs).
+        # on previous primary (see pgsql_monitor/_check_locations subs).
         # To avoid any race condition between the last monitor action on the
-        # previous master and the **real** most up-to-date standby, we
+        # previous primary and the **real** most up-to-date standby, we
         # set each standby location during the "pre-promote" action, and stored
         # them using the "lsn_location" resource attribute.
         #
@@ -1970,8 +1971,8 @@ sub pgsql_promote {
     return $OCF_SUCCESS;
 }
 
-# This action is called **before** the actual promotion when a failing master is
-# considered unreclaimable, recoverable or a new master must be promoted
+# This action is called **before** the actual promotion when a failing primary
+# is considered unreclaimable, recoverable or a new primary must be promoted
 # (switchover or first start).
 # As every "notify" action, it is executed almost simultaneously on all
 # available nodes.
@@ -1986,11 +1987,11 @@ sub pgsql_notify_pre_promote {
     ocf_log( 'info', 'Promoting instance on node "%s"',
         $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
 
-    # No need to do an election between slaves if this is recovery of the master
-    if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
-        ocf_log( 'warning', 'This is a master recovery!' );
+    # No need to do an election if this is a recovery of the primary
+    if ( _is_primary_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
+        ocf_log( 'warning', 'This is a primary recovery!' );
 
-        _set_priv_attr( 'recover_master', '1' )
+        _set_priv_attr( 'recover_primary', '1' )
            if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
 
         return $OCF_SUCCESS;
@@ -1998,7 +1999,7 @@ sub pgsql_notify_pre_promote {
     # Environment cleanup!
     _delete_priv_attr( 'lsn_location' );
-    _delete_priv_attr( 'recover_master' );
+    _delete_priv_attr( 'recover_primary' );
     _delete_priv_attr( 'nodes' );
     _delete_priv_attr( 'cancel_switchover' );
 
@@ -2022,19 +2023,19 @@ sub pgsql_notify_pre_promote {
         # FIXME: should we allow a switchover to a lagging slave?
     }
 
-    # We need to trigger an election between existing slaves to promote the best
-    # one based on its current LSN location. Each node set a private attribute
-    # "lsn_location" with its TL and LSN location.
+    # We need to trigger an election between existing standbys to promote the
+    # best one based on its current LSN location. Each node sets a private
+    # attribute "lsn_location" with its TL and LSN location.
     #
     # During the following promote action, The designated standby for
     # promotion use these attributes to check if the instance to be promoted
     # is the best one, so we can avoid a race condition between the last
-    # successful monitor on the previous master and the current promotion.
+    # successful monitor on the previous primary and the current promotion.
     # As we can not break the transition from a notification action, we check
     # during the promotion if each node TL and LSN are valid.
 
-    # Force a checpoint to make sure the controldata shows the very last TL
+    # Force a checkpoint to make sure the controldata shows the very last TL
     _query( q{ CHECKPOINT }, {} );
     %cdata    = _get_controldata();
     $node_lsn = _get_last_received_lsn( 'in decimal' );
@@ -2056,12 +2057,12 @@ sub pgsql_notify_pre_promote {
     ocf_log( 'warning', 'Could not set the current node LSN' )
         if $? != 0 ;
 
-    # If this node is the future master, keep track of the slaves that
+    # If this node is the future primary, keep track of the standbys that
     # received the same notification to compare our LSN with them during
     # promotion
     if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
         # Build the list of active nodes:
-        #   master + slave + start - stop
+        #   primary + standby + start - stop
         # FIXME: Deal with rsc started during the same transaction but **after**
         # the promotion ?
         $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
@@ -2074,19 +2075,21 @@ sub pgsql_notify_pre_promote {
         _set_priv_attr( 'nodes', $attr_nodes );
     }
 
+    # whatever the result, it is ignored by pacemaker.
     return $OCF_SUCCESS;
 }
 
 # This action is called after a promote action.
 sub pgsql_notify_post_promote {
 
-    # We have a new master (or the previous one recovered).
+    # We have a new primary (or the previous one recovered).
 
     # Environment cleanup!
     _delete_priv_attr( 'lsn_location' );
-    _delete_priv_attr( 'recover_master' );
+    _delete_priv_attr( 'recover_primary' );
     _delete_priv_attr( 'nodes' );
     _delete_priv_attr( 'cancel_switchover' );
 
+    # whatever the result, it is ignored by pacemaker.
     return $OCF_SUCCESS;
 }
@@ -2101,12 +2104,12 @@ sub pgsql_notify_pre_demote {
     $rc = pgsql_monitor();
 
-    # do nothing if this is not a master recovery
-    return $OCF_SUCCESS unless _is_master_recover( $nodename )
+    # do nothing if this is not a primary recovery
+    return $OCF_SUCCESS unless _is_primary_recover( $nodename )
         and $rc == $OCF_FAILED_MASTER;
 
-    # in case of master crash, we need to detect if the CRM tries to recover
-    # the master clone. The usual transition is to do:
+    # in case of primary crash, we need to detect if the CRM tries to recover
+    # the primary. The usual transition is to do:
     #   demote->stop->start->promote
     #
     # There are multiple flaws with this transition:
@@ -2119,11 +2122,11 @@ sub pgsql_notify_pre_demote {
     # If it success, at least it will be demoted correctly with a normal
     # status. If it fails, it will be catched up in next steps.
 
-    ocf_log( 'info', 'Trying to start failing master "%s"...',
+    ocf_log( 'info', 'Trying to start failing primary "%s"...',
         $OCF_RESOURCE_INSTANCE );
 
     # Either the instance managed to start or it couldn't.
-    # We rely on the pg_ctk '-w' switch to take care of this. If it couldn't
+    # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
     # start, this error will be catched up later during the various checks
     _pg_ctl_start();
@@ -2131,6 +2134,7 @@ sub pgsql_notify_pre_demote {
     ocf_log( 'info', 'State is "%s" after recovery attempt',
         $cdata{'state'} );
 
+    # whatever the result, it is ignored by pacemaker.
     return $OCF_SUCCESS;
 }
@@ -2145,14 +2149,14 @@ sub pgsql_notify_pre_stop {
     $rc = _controldata_to_ocf();
 
-    # do nothing if this is not a slave recovery
-    return $OCF_SUCCESS unless _is_slave_recover( $nodename )
+    # do nothing if this is not a standby recovery
+    return $OCF_SUCCESS unless _is_standby_recover( $nodename )
        and $rc == $OCF_RUNNING_SLAVE;
 
-    # in case of slave crash, we need to detect if the CRM tries to recover
-    # the slaveclone. The usual transition is to do: stop->start
+    # in case of standby crash, we need to detect if the CRM tries to recover
+    # it. The usual transition is to do: stop->start
     #
-    # This transition can no twork because the instance is in
+    # This transition can not work because the instance is in
     # OCF_ERR_GENERIC step. So the stop action will fail, leading most
     # probably to fencing action.
     #
@@ -2160,7 +2164,7 @@ sub pgsql_notify_pre_stop {
     # If it success, at least it will be stopped correctly with a normal
     # status. If it fails, it will be catched up in next steps.
 
-    ocf_log( 'info', 'Trying to start failing slave "%s"...',
+    ocf_log( 'info', 'Trying to start failing standby "%s"...',
        $OCF_RESOURCE_INSTANCE );
 
     # Either the instance managed to start or it couldn't.
@@ -2172,6 +2176,7 @@ sub pgsql_notify_pre_stop {
     ocf_log( 'info', 'State is "%s" after recovery attempt',
         $cdata{'state'} );
 
+    # whatever the result, it is ignored by pacemaker.
     return $OCF_SUCCESS;
 }
@@ -2195,6 +2200,7 @@ sub pgsql_notify {
         elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() }
     }
 
+    # whatever the result, it is ignored by pacemaker.
     return $OCF_SUCCESS;
 }
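
Notes for reviewers: the sketches below illustrate the mechanisms this patch touches. They are not part of the patch itself, and any identifier that does not appear in the hunks above is an assumption made for illustration.

The first hunk documents how _get_lag_scores ranks connected standbys: ordered on write_location ascending so the closest standby to the primary gets the highest row_number, ties going to the lowest node name, with the outer result set ordered so the best known candidate is processed first. A query of roughly this shape expresses that ordering, using the pre-PostgreSQL 10 names (write_location, pg_xlog_location_diff) the comment refers to; it is a sketch, not the script's verbatim SQL:

# Sketch only: rank standbys from pg_stat_replication so the least lagging
# one comes out on top, ties broken by the lowest application_name.
my $query = qq{
    SELECT application_name, priority, location, state, current_lag
    FROM (
        SELECT application_name,
               row_number() OVER (ORDER BY write_location ASC, application_name DESC) AS priority,
               write_location AS location, state,
               pg_xlog_location_diff( pg_current_xlog_location(), write_location ) AS current_lag
        FROM pg_stat_replication
    ) AS ranked_standbys
    ORDER BY priority DESC
};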
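
The _check_switchover hunks describe looking for the old primary's shutdown checkpoint in the WAL received by the local standby, between the last local redo location and the last LSN received from the primary. A minimal sketch of that check, assuming $datadir points at the instance's data directory, pg_waldump is in the PATH, and reusing the script's ocf_log/ocf_exit_reason helpers (the regular expression is the one from the hunk):

# Sketch only: $tl, $last_redo and $last_lsn come from the controldata and
# _get_last_received_lsn(), as in the hunks above.
my $ans = qx{ pg_waldump --timeline="$tl" --start="$last_redo" --end="$last_lsn" --path="$datadir/pg_wal" 2>&1 };
my $rc  = $? >> 8;

if ( $rc == 0 and
    $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
) {
    # the standby saw the primary's shutdown checkpoint: switchover is safe
    ocf_log( 'info', 'Standby received the shutdown checkpoint' );
}
else {
    ocf_exit_reason( 'Did not receive the shutdown checkpoint from the old primary!' );
}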
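
The pre-promote and promote hunks rely on an election: every standby publishes its timeline and last received LSN in the "lsn_location" private attribute, the future primary records the list of active nodes, and the promote action re-checks that it is still the best candidate, failing on purpose to force a re-election otherwise. A sketch of that comparison as it could run inside pgsql_promote, assuming a "tl#lsn" layout for lsn_location and a two-argument form of _get_priv_attr taking a node name (both assumptions for illustration):

# Sketch only: fail the promotion if any other active node reports a higher
# timeline or a higher received LSN than ours.
my ( $my_tl, $my_lsn ) = split /#/, _get_priv_attr( 'lsn_location', $nodename );

foreach my $node ( split /\s+/, _get_priv_attr( 'nodes' ) ) {
    next if $node eq $nodename;

    my ( $tl, $lsn ) = split /#/, _get_priv_attr( 'lsn_location', $node );

    if ( $tl > $my_tl or ( $tl == $my_tl and $lsn > $my_lsn ) ) {
        ocf_exit_reason( 'Node "%s" is a better candidate to promote (%s#%s vs. %s#%s)',
            $node, $tl, $lsn, $my_tl, $my_lsn );
        return $OCF_ERR_GENERIC;
    }
}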