From 533beda99457fa95dd69f01075a828612973986a Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 15 Nov 2022 01:25:02 -0500 Subject: [PATCH] import resource-agents-4.10.0-23.el9 --- .gitignore | 1 + .resource-agents.metadata | 1 + SOURCES/bz1952005-pgsqlms-new-ra.patch | 3338 +++++++++++++++++ .../bz2021125-gcp-ilb-1-fix-log_enable.patch | 29 + ...-2-only-check-log_cmd-if-log-enabled.patch | 51 + ...9704-1-db2-crm_attribute-use-forever.patch | 32 + SOURCES/bz2029704-2-db2-fixes.patch | 32 + ...9753-podman-remove-anonymous-volumes.patch | 22 + ...return-OCF_NOT_RUNNING-missing-route.patch | 43 + .../bz2055016-1-IPsrcaddr-dhcp-warning.patch | 41 + ...rcaddr-error-message-route-not-found.patch | 49 + ...z2055016-3-IPsrcaddr-fix-indentation.patch | 56 + SOURCES/bz2055016-4-IPsrcaddr-fixes.patch | 117 + ...3877-all-agents-use-promotable-terms.patch | 543 +++ ...2065125-LVM-activate-fix-fence-issue.patch | 102 + ...able-more-control-for-IPv6-addresses.patch | 312 ++ SOURCES/bz2069270-corosync-qnetd-new-ra.patch | 401 ++ ...m-1-fix-uuid-label-device-whitespace.patch | 44 + ...em-2-improve-uuid-label-device-logic.patch | 38 + ...cuate-add-user_domain-project_domain.patch | 61 + ...agents-set-domain-parameters-default.patch | 55 + ...k-agents-warn-when-openstackcli-slow.patch | 282 ++ ...90-bz2083092-update-openstack-agents.patch | 770 ++++ ...086-bz2083092-openstack-agents-fixes.patch | 72 + ...bz2083090-openstack-info-fix-bashism.patch | 26 + ...-move-ip-add-interface-label-support.patch | 82 + ...lockd-fail-when-use_lvmlockd-not-set.patch | 25 + SOURCES/bz2103374-ocf-tester-1-update.patch | 39 + ...remove-deprecated-lrmd-lrmadmin-code.patch | 166 + ...ovsmonitor-pgsql-fix-attrd_updater-q.patch | 75 + SOURCES/ha-cloud-support-aliyun.patch | 12 + SOURCES/ha-cloud-support-aws.patch | 48 + SOURCES/ha-cloud-support-gcloud.patch | 33 + SOURCES/nova-compute-wait-NovaEvacuate.patch | 787 ++++ SPECS/resource-agents.spec | 1212 ++++++ 35 files changed, 8997 insertions(+) create mode 100644 .gitignore create mode 100644 .resource-agents.metadata create mode 100644 SOURCES/bz1952005-pgsqlms-new-ra.patch create mode 100644 SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch create mode 100644 SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch create mode 100644 SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch create mode 100644 SOURCES/bz2029704-2-db2-fixes.patch create mode 100644 SOURCES/bz2029753-podman-remove-anonymous-volumes.patch create mode 100644 SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch create mode 100644 SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch create mode 100644 SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch create mode 100644 SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch create mode 100644 SOURCES/bz2055016-4-IPsrcaddr-fixes.patch create mode 100644 SOURCES/bz2063877-all-agents-use-promotable-terms.patch create mode 100644 SOURCES/bz2065125-LVM-activate-fix-fence-issue.patch create mode 100644 SOURCES/bz2065138-IPaddr2-enable-more-control-for-IPv6-addresses.patch create mode 100644 SOURCES/bz2069270-corosync-qnetd-new-ra.patch create mode 100644 SOURCES/bz2072371-Filesystem-1-fix-uuid-label-device-whitespace.patch create mode 100644 SOURCES/bz2072371-Filesystem-2-improve-uuid-label-device-logic.patch create mode 100644 SOURCES/bz2081585-NovaEvacuate-add-user_domain-project_domain.patch create mode 100644 SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-set-domain-parameters-default.patch create 
mode 100644 SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-warn-when-openstackcli-slow.patch create mode 100644 SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-update-openstack-agents.patch create mode 100644 SOURCES/bz2083081-bz2083086-bz2083092-openstack-agents-fixes.patch create mode 100644 SOURCES/bz2083090-openstack-info-fix-bashism.patch create mode 100644 SOURCES/bz2093213-aws-vpc-move-ip-add-interface-label-support.patch create mode 100644 SOURCES/bz2094828-lvmlockd-fail-when-use_lvmlockd-not-set.patch create mode 100644 SOURCES/bz2103374-ocf-tester-1-update.patch create mode 100644 SOURCES/bz2103374-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch create mode 100644 SOURCES/bz2110452-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch create mode 100644 SOURCES/ha-cloud-support-aliyun.patch create mode 100644 SOURCES/ha-cloud-support-aws.patch create mode 100644 SOURCES/ha-cloud-support-gcloud.patch create mode 100644 SOURCES/nova-compute-wait-NovaEvacuate.patch create mode 100644 SPECS/resource-agents.spec diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..11e610e --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz diff --git a/.resource-agents.metadata b/.resource-agents.metadata new file mode 100644 index 0000000..d893801 --- /dev/null +++ b/.resource-agents.metadata @@ -0,0 +1 @@ +3b517ecdbe2103df77813050e5c998e102c5de7e SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz diff --git a/SOURCES/bz1952005-pgsqlms-new-ra.patch b/SOURCES/bz1952005-pgsqlms-new-ra.patch new file mode 100644 index 0000000..b3b314e --- /dev/null +++ b/SOURCES/bz1952005-pgsqlms-new-ra.patch @@ -0,0 +1,3338 @@ +diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2021-04-12 12:51:56.831835953 +0200 ++++ b/doc/man/Makefile.am 2021-04-13 13:38:14.198361848 +0200 +@@ -154,6 +154,7 @@ + ocf_heartbeat_ovsmonitor.7 \ + ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ ++ ocf_heartbeat_pgsqlms.7 \ + ocf_heartbeat_pingd.7 \ + ocf_heartbeat_podman.7 \ + ocf_heartbeat_portblock.7 \ +diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2021-04-12 12:51:56.831835953 +0200 ++++ b/heartbeat/Makefile.am 2021-04-13 13:37:45.741292178 +0200 +@@ -149,6 +149,7 @@ + ovsmonitor \ + pgagent \ + pgsql \ ++ pgsqlms \ + pingd \ + podman \ + portblock \ +@@ -209,7 +210,10 @@ + mysql-common.sh \ + nfsserver-redhat.sh \ + findif.sh \ +- ocf.py ++ ocf.py \ ++ OCF_Directories.pm \ ++ OCF_Functions.pm \ ++ OCF_ReturnCodes.pm + + # Legacy locations + hbdir = $(sysconfdir)/ha.d +diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm +--- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/OCF_Directories.pm 2021-04-13 13:37:35.621267404 +0200 +@@ -0,0 +1,139 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++OCF_Directories - Binaries and binary options for use in Resource Agents ++ ++=head1 SYNOPSIS ++ ++ use FindBin; ++ use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++ use OCF_Directories; ++ ++=head1 DESCRIPTION ++ ++This module has been ported from the ocf-directories shell script of the ++resource-agents project. See L. 
++ ++=head1 VARIABLES ++ ++Here are the variables exported by this module: ++ ++=over ++ ++=item $INITDIR ++ ++=item $HA_DIR ++ ++=item $HA_RCDIR ++ ++=item $HA_CONFDIR ++ ++=item $HA_CF ++ ++=item $HA_VARLIB ++ ++=item $HA_RSCTMP ++ ++=item $HA_RSCTMP_OLD ++ ++=item $HA_FIFO ++ ++=item $HA_BIN ++ ++=item $HA_SBIN_DIR ++ ++=item $HA_DATEFMT ++ ++=item $HA_DEBUGLOG ++ ++=item $HA_RESOURCEDIR ++ ++=item $HA_DOCDIR ++ ++=item $__SCRIPT_NAME ++ ++=item $HA_VARRUN ++ ++=item $HA_VARLOCK ++ ++=item $ocf_prefix ++ ++=item $ocf_exec_prefix ++ ++=back ++ ++=cut ++ ++package OCF_Directories; ++ ++use strict; ++use warnings; ++use 5.008; ++use File::Basename; ++ ++BEGIN { ++ use Exporter; ++ ++ ++ our $VERSION = 'v2.3.0'; ++ our @ISA = ('Exporter'); ++ our @EXPORT = qw( ++ $INITDIR ++ $HA_DIR ++ $HA_RCDIR ++ $HA_CONFDIR ++ $HA_CF ++ $HA_VARLIB ++ $HA_RSCTMP ++ $HA_RSCTMP_OLD ++ $HA_FIFO ++ $HA_BIN ++ $HA_SBIN_DIR ++ $HA_DATEFMT ++ $HA_DEBUGLOG ++ $HA_RESOURCEDIR ++ $HA_DOCDIR ++ $__SCRIPT_NAME ++ $HA_VARRUN ++ $HA_VARLOCK ++ $ocf_prefix ++ $ocf_exec_prefix ++ ); ++ our @EXPORT_OK = ( @EXPORT ); ++} ++ ++our $INITDIR = ( $ENV{'INITDIR'} || '/etc/init.d' ); ++our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' ); ++our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' ); ++our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' ); ++our $HA_CF = ( $ENV{'HA_CF'} || '/etc/ha.d/ha.cf' ); ++our $HA_VARLIB = ( $ENV{'HA_VARLIB'} || '/var/lib/heartbeat' ); ++our $HA_RSCTMP = ( $ENV{'HA_RSCTMP'} || '/run/resource-agents' ); ++our $HA_RSCTMP_OLD = ( $ENV{'HA_RSCTMP_OLD'} || '/var/run/heartbeat/rsctmp' ); ++our $HA_FIFO = ( $ENV{'HA_FIFO'} || '/var/lib/heartbeat/fifo' ); ++our $HA_BIN = ( $ENV{'HA_BIN'} || '/usr/libexec/heartbeat' ); ++our $HA_SBIN_DIR = ( $ENV{'HA_SBIN_DIR'} || '/usr/sbin' ); ++our $HA_DATEFMT = ( $ENV{'HA_DATEFMT'} || '%b %d %T ' ); ++our $HA_DEBUGLOG = ( $ENV{'HA_DEBUGLOG'} || '/dev/null' ); ++our $HA_RESOURCEDIR = ( $ENV{'HA_RESOURCEDIR'}|| '/etc/ha.d/resource.d' ); ++our $HA_DOCDIR = ( $ENV{'HA_DOCDIR'} || '/usr/share/doc/heartbeat' ); ++our $__SCRIPT_NAME = ( $ENV{'__SCRIPT_NAME'} || fileparse($0) ); ++our $HA_VARRUN = ( $ENV{'HA_VARRUN'} || '/var/run' ); ++our $HA_VARLOCK = ( $ENV{'HA_VARLOCK'} || '/var/lock/subsys' ); ++our $ocf_prefix = '/usr'; ++our $ocf_exec_prefix = '/usr'; ++ ++1; ++ ++=head1 COPYRIGHT AND LICENSE ++ ++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault. ++ ++Licensed under the PostgreSQL License. ++ +diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm +--- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/OCF_Functions.pm 2021-04-13 13:37:35.621267404 +0200 +@@ -0,0 +1,631 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++OCF_Functions - helper subroutines for OCF agent ++ ++=head1 SYNOPSIS ++ ++ use FindBin; ++ use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++ use OCF_Functions; ++ ++=head1 DESCRIPTION ++ ++This module has been ported from the ocf-shellfuncs shell script of the ++resource-agents project. See L. ++ ++=head1 VARIABLE ++ ++The only variable exported by this module is C<__OCF_ACTION>. 
++ ++=head1 SUBROUTINES ++ ++Here are the subroutines ported from ocf-shellfuncs and exported by this module: ++ ++=over ++ ++=item ha_debug ++ ++=item ha_log ++ ++=item hadate ++ ++=item ocf_is_clone ++ ++=item ocf_is_ms ++ ++=item ocf_is_probe ++ ++=item ocf_is_root ++ ++=item ocf_is_true ++ ++=item ocf_is_ver ++ ++=item ocf_local_nodename ++ ++=item ocf_log ++ ++=item ocf_exit_reason ++ ++=item ocf_maybe_random ++ ++=item ocf_ver2num ++ ++=item ocf_ver_complete_level ++ ++=item ocf_ver_level ++ ++=item ocf_version_cmp ++ ++=item set_logtag ++ ++=back ++ ++Here are the subroutines only existing in the perl module but not in the ++ocf-shellfuncs script: ++ ++=over ++ ++=item ocf_notify_env ++ ++=back ++ ++=cut ++ ++package OCF_Functions; ++ ++use strict; ++use warnings; ++use 5.008; ++use POSIX qw( strftime setlocale LC_ALL ); ++use English; ++ ++use FindBin; ++use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++use OCF_ReturnCodes; ++use OCF_Directories; ++ ++BEGIN { ++ use Exporter; ++ ++ our $VERSION = 'v2.3.0'; ++ our @ISA = ('Exporter'); ++ our @EXPORT = qw( ++ $__OCF_ACTION ++ ocf_is_root ++ ocf_maybe_random ++ ocf_is_true ++ hadate ++ set_logtag ++ ha_log ++ ha_debug ++ ocf_log ++ ocf_exit_reason ++ ocf_is_probe ++ ocf_is_clone ++ ocf_is_ms ++ ocf_is_ver ++ ocf_ver2num ++ ocf_ver_level ++ ocf_ver_complete_level ++ ocf_version_cmp ++ ocf_local_nodename ++ ocf_notify_env ++ ); ++ our @EXPORT_OK = ( @EXPORT ); ++} ++ ++our $__OCF_ACTION; ++ ++sub ocf_is_root { ++ return $EUID == 0; ++} ++ ++sub ocf_maybe_random { ++ return int( rand( 32767 ) ); ++} ++ ++sub ocf_is_true { ++ my $v = shift; ++ return ( defined $v and $v =~ /^(?:yes|true|1|YES|TRUE|ja|on|ON)$/ ); ++} ++ ++sub hadate { ++ return strftime( $HA_DATEFMT, localtime ); ++} ++ ++sub set_logtag { ++ ++ return if defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne ''; ++ ++ if ( defined $ENV{'OCF_RESOURCE_INSTANCE'} and $ENV{'OCF_RESOURCE_INSTANCE'} ne '' ) { ++ $ENV{'HA_LOGTAG'} = "$__SCRIPT_NAME($ENV{'OCF_RESOURCE_INSTANCE'})[$PID]"; ++ } ++ else { ++ $ENV{'HA_LOGTAG'}="${__SCRIPT_NAME}[$PID]"; ++ } ++} ++ ++sub __ha_log { ++ my $ignore_stderr = 0; ++ my $loglevel = ''; ++ ++ if ( $_[0] eq '--ignore-stderr' ) { ++ $ignore_stderr = 1; ++ shift; ++ } ++ ++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'} ++ or $ENV{'HA_LOGFACILITY'} eq 'none'; ++ ++ # if we're connected to a tty, then output to stderr ++ if ( -t STDERR ) { ++ # FIXME ++ # T.N.: this was ported with the bug on $loglevel being empty ++ # and never set before the test here... ++ if ( defined $ENV{'HA_debug'} ++ and $ENV{'HA_debug'} == 0 ++ and $loglevel eq 'debug' ++ ) { ++ return 0; ++ } ++ elsif ( $ignore_stderr ) { ++ # something already printed this error to stderr, so ignore ++ return 0; ++ } ++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) { ++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG; ++ } ++ else { ++ printf STDERR "%s\n", join ' ', @ARG; ++ } ++ return 0; ++ } ++ ++ set_logtag(); ++ ++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) { ++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, @ARG; ++ return 0 if ( $? 
>> 8 ) == 0; ++ } ++ ++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) { ++ # logging through syslog ++ # loglevel is unknown, use 'notice' for now ++ $loglevel = 'notice'; ++ for ( "@ARG" ) { ++ if ( /ERROR/ ) { ++ $loglevel = 'err'; ++ } ++ elsif ( /WARN/ ) { ++ $loglevel = 'warning'; ++ } ++ elsif (/INFO|info/ ) { ++ $loglevel = 'info'; ++ } ++ } ++ ++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p', ++ "$ENV{'HA_LOGFACILITY'}.$loglevel", @ARG; ++ } ++ ++ if ( defined $ENV{'HA_LOGFILE'} and $ENV{'HA_LOGFILE'} ne '' ) { ++ # appending to $HA_LOGFILE ++ open my $logfile, '>>', $ENV{'HA_LOGFILE'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++ ++ # appending to stderr ++ printf STDERR "%s %s\n", hadate(), join ' ', @ARG ++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '') ++ and (not defined $ENV{'HA_LOGFILE'} or $ENV{'HA_LOGFILE'} eq '' ) ++ and not $ignore_stderr; ++ ++ if ( defined $ENV{'HA_DEBUGLOG'} and $ENV{'HA_DEBUGLOG'} ne '' ++ and $ENV{'HA_LOGFILE'} ne $ENV{'HA_DEBUGLOG'} ++ ) { ++ # appending to $HA_DEBUGLOG ++ open my $logfile, '>>', $ENV{'HA_DEBUGLOG'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++} ++ ++sub ha_log { ++ return __ha_log( @ARG ); ++} ++ ++sub ha_debug { ++ ++ return 0 if defined $ENV{'HA_debug'} and $ENV{'HA_debug'} == 0; ++ ++ if ( -t STDERR ) { ++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) { ++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG; ++ } ++ else { ++ printf STDERR "%s\n", join ' ', @ARG; ++ } ++ ++ return 0; ++ } ++ ++ set_logtag(); ++ ++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) { ++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, '-D', 'ha-debug', @ARG; ++ return 0 if ( $? >> 8 ) == 0; ++ } ++ ++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'} ++ or $ENV{'HA_LOGFACILITY'} eq 'none'; ++ ++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) { ++ # logging through syslog ++ ++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p', ++ "$ENV{'HA_LOGFACILITY'}.debug", @ARG; ++ } ++ ++ if ( defined $ENV{'HA_DEBUGLOG'} and -f $ENV{'HA_DEBUGLOG'} ) { ++ my $logfile; ++ # appending to $HA_DEBUGLOG ++ open $logfile, '>>', $ENV{'HA_DEBUGLOG'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++ ++ # appending to stderr ++ printf STDERR "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), join ' ', @ARG ++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '') ++ and (not defined $ENV{'HA_DEBUGLOG'} or $ENV{'HA_DEBUGLOG'} eq '' ); ++} ++ ++# ++# ocf_log: log messages from the resource agent ++# This function is slightly different from its equivalent in ocf-shellfuncs.in ++# as it behaves like printf. ++# Arguments: ++# * __OCF_PRIO: log level ++# * __OCF_MSG: printf-like format string ++# * all other arguments are values for the printf-like format string ++# ++sub ocf_log { ++ my $__OCF_PRIO; ++ my $__OCF_MSG; ++ ++ # TODO: Revisit and implement internally. 
++ if ( scalar @ARG < 2 ) { ++ ocf_log ( 'err', "Not enough arguments [%d] to ocf_log", scalar @ARG ); ++ } ++ ++ $__OCF_PRIO = shift; ++ $__OCF_MSG = shift; ++ $__OCF_MSG = sprintf $__OCF_MSG, @ARG; ++ ++ for ( $__OCF_PRIO ) { ++ if ( /crit/ ) { $__OCF_PRIO = 'CRIT' } ++ elsif ( /err/ ) { $__OCF_PRIO = 'ERROR' } ++ elsif ( /warn/ ) { $__OCF_PRIO = 'WARNING' } ++ elsif ( /info/ ) { $__OCF_PRIO = 'INFO' } ++ elsif ( /debug/ ) { $__OCF_PRIO = 'DEBUG' } ++ else { $__OCF_PRIO =~ tr/[a-z]/[A-Z]/ } ++ } ++ ++ if ( $__OCF_PRIO eq 'DEBUG' ) { ++ ha_debug( "$__OCF_PRIO: $__OCF_MSG"); ++ } ++ else { ++ ha_log( "$__OCF_PRIO: $__OCF_MSG"); ++ } ++} ++ ++ ++# ++# ocf_exit_reason: print exit error string to stderr and log ++# Usage: Allows the OCF script to provide a string ++# describing why the exit code was returned. ++# Arguments: reason - required, The string that represents ++# why the error occured. ++# ++sub ocf_exit_reason { ++ my $cookie = $ENV{'OCF_EXIT_REASON_PREFIX'} || 'ocf-exit-reason:'; ++ my $fmt; ++ my $msg; ++ ++ # No argument is likely not intentional. ++ # Just one argument implies a printf format string of just "%s". ++ # "Least surprise" in case some interpolated string from variable ++ # expansion or other contains a percent sign. ++ # More than one argument: first argument is going to be the format string. ++ ocf_log ( 'err', 'Not enough arguments [%d] to ocf_exit_reason', ++ scalar @ARG ) if scalar @ARG < 1; ++ ++ $fmt = shift; ++ $msg = sprintf $fmt, @ARG; ++ ++ print STDERR "$cookie$msg\n"; ++ __ha_log( '--ignore-stderr', "ERROR: $msg" ); ++} ++ ++# returns true if the CRM is currently running a probe. A probe is ++# defined as a monitor operation with a monitoring interval of zero. ++sub ocf_is_probe { ++ return ( $__OCF_ACTION eq 'monitor' ++ and $ENV{'OCF_RESKEY_CRM_meta_interval'} == 0 ); ++} ++ ++# returns true if the resource is configured as a clone. This is ++# defined as a resource where the clone-max meta attribute is present, ++# and set to greater than zero. ++sub ocf_is_clone { ++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_clone_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_clone_max'} > 0 ); ++} ++ ++# returns true if the resource is configured as a multistate ++# (master/slave) resource. This is defined as a resource where the ++# master-max meta attribute is present, and set to greater than zero. ++sub ocf_is_ms { ++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} > 0 ); ++} ++ ++# version check functions ++# allow . 
and - to delimit version numbers ++# max version number is 999 ++# letters and such are effectively ignored ++# ++sub ocf_is_ver { ++ return $ARG[0] =~ /^[0-9][0-9.-]*[0-9]$/; ++} ++ ++sub ocf_ver2num { ++ my $v = 0; ++ ++ $v = $v * 1000 + $1 while $ARG[0] =~ /(\d+)/g; ++ ++ return $v; ++} ++ ++sub ocf_ver_level { ++ my $v = () = $ARG[0] =~ /(\d+)/g; ++ return $v; ++} ++ ++sub ocf_ver_complete_level { ++ my $ver = shift; ++ my $level = shift; ++ my $i = 0; ++ ++ for ( my $i = 0; $i < $level; $i++ ) { ++ $ver .= "$ver.0"; ++ } ++ ++ return $ver; ++} ++ ++# usage: ocf_version_cmp VER1 VER2 ++# version strings can contain digits, dots, and dashes ++# must start and end with a digit ++# returns: ++# 0: VER1 smaller (older) than VER2 ++# 1: versions equal ++# 2: VER1 greater (newer) than VER2 ++# 3: bad format ++sub ocf_version_cmp { ++ my $v1 = shift; ++ my $v2 = shift; ++ my $v1_level; ++ my $v2_level; ++ my $level_diff; ++ ++ return 3 unless ocf_is_ver( $v1 ); ++ return 3 unless ocf_is_ver( $v2 ); ++ ++ $v1_level = ocf_ver_level( $v1 ); ++ $v2_level = ocf_ver_level( $v2 ); ++ ++ if ( $v1_level < $v2_level ) { ++ $level_diff = $v2_level - $v1_level; ++ $v1 = ocf_ver_complete_level( $v1, $level_diff ); ++ } ++ elsif ( $v1_level > $v2_level ) { ++ $level_diff = $v1_level - $v2_level; ++ $v2 = ocf_ver_complete_level( $v2, $level_diff ); ++ } ++ ++ $v1 = ocf_ver2num( $v1 ); ++ $v2 = ocf_ver2num( $v2 ); ++ ++ if ( $v1 == $v2 ) { return 1; } ++ elsif ( $v1 < $v2 ) { return 0; } ++ ++ return 2; # -1 would look funny in shell ;-) ( T.N. not in perl ;) ) ++} ++ ++sub ocf_local_nodename { ++ # use crm_node -n for pacemaker > 1.1.8 ++ my $nodename; ++ ++ qx{ which pacemakerd > /dev/null 2>&1 }; ++ if ( $? == 0 ) { ++ my $version; ++ my $ret = qx{ pacemakerd -\$ }; ++ ++ $ret =~ /Pacemaker ([\d.]+)/; ++ $version = $1; ++ ++ if ( ocf_version_cmp( $version, '1.1.8' ) == 2 ) { ++ qx{ which crm_node > /dev/null 2>&1 }; ++ $nodename = qx{ crm_node -n } if $? == 0; ++ } ++ } ++ else { ++ # otherwise use uname -n ++ $nodename = qx { uname -n }; ++ } ++ ++ chomp $nodename; ++ return $nodename; ++} ++ ++# Parse and returns the notify environment variables in a convenient structure ++# Returns undef if the action is not a notify ++# Returns undef if the resource is neither a clone or a multistate one ++sub ocf_notify_env { ++ my $i; ++ my %notify_env; ++ ++ return undef unless $__OCF_ACTION eq 'notify'; ++ ++ return undef unless ocf_is_clone() or ocf_is_ms(); ++ ++ %notify_env = ( ++ 'type' => $ENV{'OCF_RESKEY_CRM_meta_notify_type'} || '', ++ 'operation' => $ENV{'OCF_RESKEY_CRM_meta_notify_operation'} || '', ++ 'active' => [ ], ++ 'inactive' => [ ], ++ 'start' => [ ], ++ 'stop' => [ ], ++ ); ++ ++ for my $action ( qw{ active start stop } ) { ++ next unless ++ defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"} ++ and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ } ++ ++ # notify_nactive_uname doesn't exists. 
++ See: ++ # http://lists.clusterlabs.org/pipermail/developers/2017-January/000406.html ++ if ( defined $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"} ) { ++ $i = 0; ++ $notify_env{'inactive'}[$i++]{'rsc'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"}; ++ } ++ ++ # exit if the resource is not a multistate one ++ return %notify_env unless ocf_is_ms(); ++ ++ for my $action ( qw{ master slave promote demote } ) { ++ $notify_env{ $action } = [ ]; ++ ++ next unless ++ defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"} ++ and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ } ++ ++ # Fix active and inactive fields for Pacemaker version < 1.1.16 ++ # ie. crm_feature_set < 3.0.11 ++ # See http://lists.clusterlabs.org/pipermail/developers/2016-August/000265.html ++ # and git commit a6713c5d40327eff8549e7f596501ab1785b8765 ++ if ( ++ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.11' ) == 0 ++ ) { ++ $notify_env{ 'active' } = [ ++ @{ $notify_env{ 'master' } }, ++ @{ $notify_env{ 'slave' } } ++ ]; ++ } ++ ++ return %notify_env; ++} ++ ++$__OCF_ACTION = $ARGV[0]; ++ ++# Return to sanity for the agents... ++ ++undef $ENV{'LC_ALL'}; ++$ENV{'LC_ALL'} = 'C'; ++setlocale( LC_ALL, 'C' ); ++undef $ENV{'LANG'}; ++undef $ENV{'LANGUAGE'}; ++ ++$ENV{'OCF_ROOT'} = '/usr/lib/ocf' ++ unless defined $ENV{'OCF_ROOT'} and $ENV{'OCF_ROOT'} ne ''; ++ ++# old ++undef $ENV{'OCF_FUNCTIONS_DIR'} ++ if defined $ENV{'OCF_FUNCTIONS_DIR'} ++ and $ENV{'OCF_FUNCTIONS_DIR'} eq "$ENV{'OCF_ROOT'}/resource.d/heartbeat"; ++ ++# Define OCF_RESKEY_CRM_meta_interval in case it isn't already set, ++# to make sure that ocf_is_probe() always works ++$ENV{'OCF_RESKEY_CRM_meta_interval'} = 0 ++ unless defined $ENV{'OCF_RESKEY_CRM_meta_interval'}; ++ ++# Strip the OCF_RESKEY_ prefix from this particular parameter ++unless ( defined $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'} ++ and $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'} ne '' ++) { ++ $ENV{'OCF_CHECK_LEVEL'} = 0; ++} ++else { ++ $ENV{'OCF_CHECK_LEVEL'} = $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'}; ++} ++ ++unless ( -d $ENV{'OCF_ROOT'} ) { ++ ha_log( "ERROR: OCF_ROOT points to non-directory $ENV{'OCF_ROOT'}." ); ++ $! = $OCF_ERR_GENERIC; ++ die; ++} ++ ++$ENV{'OCF_RESOURCE_TYPE'} = $__SCRIPT_NAME ++ unless defined $ENV{'OCF_RESOURCE_TYPE'} ++ and $ENV{'OCF_RESOURCE_TYPE'} ne ''; ++ ++unless ( defined $ENV{'OCF_RA_VERSION_MAJOR'} ++ and $ENV{'OCF_RA_VERSION_MAJOR'} ne '' ++) { ++ # We are being invoked as an init script. ++ # Fill in some things with reasonable values. ++ $ENV{'OCF_RESOURCE_INSTANCE'} = 'default'; ++ return 1; ++} ++ ++$ENV{'OCF_RESOURCE_INSTANCE'} = "undef" if $__OCF_ACTION eq 'meta-data'; ++ ++unless ( defined $ENV{'OCF_RESOURCE_INSTANCE'} ++ and $ENV{'OCF_RESOURCE_INSTANCE'} ne '' ++) { ++ ha_log( "ERROR: Need to tell us our resource instance name." ); ++ $! = $OCF_ERR_ARGS; ++ die; ++} ++ ++1; ++ ++ ++=head1 COPYRIGHT AND LICENSE ++ ++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault. ++ ++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm +--- a/heartbeat/OCF_ReturnCodes.pm 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/OCF_ReturnCodes.pm 2021-04-13 13:37:35.621267404 +0200 +@@ -0,0 +1,97 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++OCF_ReturnCodes - Common varibales for the OCF Resource Agents supplied by ++heartbeat. ++ ++=head1 SYNOPSIS ++ ++ use FindBin; ++ use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++ use OCF_ReturnCodes; ++ ++=head1 DESCRIPTION ++ ++This module has been ported from the ocf-retrurncodes shell script of the ++resource-agents project. See L. ++ ++=head1 VARIABLES ++ ++Here are the variables exported by this module: ++ ++=over ++ ++=item $OCF_SUCCESS ++ ++=item $OCF_ERR_GENERIC ++ ++=item $OCF_ERR_ARGS ++ ++=item $OCF_ERR_UNIMPLEMENTED ++ ++=item $OCF_ERR_PERM ++ ++=item $OCF_ERR_INSTALLED ++ ++=item $OCF_ERR_CONFIGURED ++ ++=item $OCF_NOT_RUNNING ++ ++=item $OCF_RUNNING_MASTER ++ ++=item $OCF_FAILED_MASTER ++ ++=back ++ ++=cut ++ ++package OCF_ReturnCodes; ++ ++use strict; ++use warnings; ++use 5.008; ++ ++BEGIN { ++ use Exporter; ++ ++ our $VERSION = 'v2.3.0'; ++ our @ISA = ('Exporter'); ++ our @EXPORT = qw( ++ $OCF_SUCCESS ++ $OCF_ERR_GENERIC ++ $OCF_ERR_ARGS ++ $OCF_ERR_UNIMPLEMENTED ++ $OCF_ERR_PERM ++ $OCF_ERR_INSTALLED ++ $OCF_ERR_CONFIGURED ++ $OCF_NOT_RUNNING ++ $OCF_RUNNING_MASTER ++ $OCF_FAILED_MASTER ++ ); ++ our @EXPORT_OK = ( @EXPORT ); ++} ++ ++our $OCF_SUCCESS = 0; ++our $OCF_ERR_GENERIC = 1; ++our $OCF_ERR_ARGS = 2; ++our $OCF_ERR_UNIMPLEMENTED = 3; ++our $OCF_ERR_PERM = 4; ++our $OCF_ERR_INSTALLED = 5; ++our $OCF_ERR_CONFIGURED = 6; ++our $OCF_NOT_RUNNING = 7; ++our $OCF_RUNNING_MASTER = 8; ++our $OCF_FAILED_MASTER = 9; ++ ++1; ++ ++=head1 COPYRIGHT AND LICENSE ++ ++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault. ++ ++Licensed under the PostgreSQL License. +diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms +--- a/heartbeat/pgsqlms 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/pgsqlms 2021-04-13 13:37:40.934280411 +0200 +@@ -0,0 +1,2308 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++ocf_heartbeat_pgsqlms - A PostgreSQL multi-state resource agent for Pacemaker ++ ++=head1 SYNOPSIS ++ ++B [start | stop | monitor | promote | demote | notify | reload | methods | meta-data | validate-all] ++ ++=head1 DESCRIPTION ++ ++Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource. ++ ++=cut ++ ++use strict; ++use warnings; ++use 5.008; ++ ++use POSIX qw(locale_h); ++use Scalar::Util qw(looks_like_number); ++use File::Spec; ++use File::Temp; ++use Data::Dumper; ++ ++my $OCF_FUNCTIONS_DIR; ++BEGIN { ++ $OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ? 
"$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat"; ++} ++use lib "$OCF_FUNCTIONS_DIR"; ++ ++use OCF_ReturnCodes; ++use OCF_Directories; ++use OCF_Functions; ++ ++our $VERSION = 'v2.3.0'; ++our $PROGRAM = 'pgsqlms'; ++ ++# OCF environment ++my $OCF_RESOURCE_INSTANCE = $ENV{'OCF_RESOURCE_INSTANCE'}; ++my $OCF_RUNNING_SLAVE = $OCF_SUCCESS; ++my %OCF_NOTIFY_ENV = ocf_notify_env() if $__OCF_ACTION eq 'notify'; ++ ++# Default parameters values ++my $system_user_default = "postgres"; ++my $bindir_default = "/usr/bin"; ++my $pgdata_default = "/var/lib/pgsql/data"; ++my $pghost_default = "/tmp"; ++my $pgport_default = 5432; ++my $start_opts_default = ""; ++my $maxlag_default = "0"; ++ ++# Set default values if not found in environment ++my $system_user = $ENV{'OCF_RESKEY_system_user'} || $system_user_default; ++my $bindir = $ENV{'OCF_RESKEY_bindir'} || $bindir_default; ++my $pgdata = $ENV{'OCF_RESKEY_pgdata'} || $pgdata_default; ++my $datadir = $ENV{'OCF_RESKEY_datadir'} || $pgdata; ++my $pghost = $ENV{'OCF_RESKEY_pghost'} || $pghost_default; ++my $pgport = $ENV{'OCF_RESKEY_pgport'} || $pgport_default; ++my $start_opts = $ENV{'OCF_RESKEY_start_opts'} || $start_opts_default; ++my $maxlag = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default; ++my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'} ++ || "$pgdata/recovery.conf.pcmk"; ++ ++ ++# PostgreSQL commands path ++my $POSTGRES = "$bindir/postgres"; ++my $PGCTL = "$bindir/pg_ctl"; ++my $PGPSQL = "$bindir/psql"; ++my $PGCTRLDATA = "$bindir/pg_controldata"; ++my $PGISREADY = "$bindir/pg_isready"; ++my $PGWALDUMP = "$bindir/pg_waldump"; ++ ++# pacemaker commands path ++my $CRM_MASTER = "$HA_SBIN_DIR/crm_master --lifetime forever"; ++my $CRM_NODE = "$HA_SBIN_DIR/crm_node"; ++my $CRM_RESOURCE = "$HA_SBIN_DIR/crm_resource"; ++my $ATTRD_PRIV = "$HA_SBIN_DIR/attrd_updater --private --lifetime reboot"; ++ ++# Global vars ++my $nodename; ++my $exit_code = 0; ++# numeric pgsql versions ++my $PGVERNUM; ++my $PGVER_93 = 90300; ++my $PGVER_10 = 100000; ++my $PGVER_12 = 120000; ++ ++# Run a query using psql. ++# ++# This function returns an array with psql return code as first element and ++# the result as second one. ++# ++sub _query { ++ my $query = shift; ++ my $res = shift; ++ my $connstr = "dbname=postgres"; ++ my $RS = chr(30); # ASCII RS (record separator) ++ my $FS = chr(3); # ASCII ETX (end of text) ++ my $postgres_uid = getpwnam( $system_user ); ++ my $oldeuid = $>; ++ my $tmpfile; ++ my @res; ++ my $ans; ++ my $pid; ++ my $rc; ++ ++ unless ( defined $res and defined $query and $query ne '' ) { ++ ocf_log( 'debug', '_query: wrong parameters!' ); ++ return -1; ++ } ++ ++ unless ( $tmpfile = File::Temp->new( ++ TEMPLATE => 'pgsqlms-XXXXXXXX', ++ DIR => $HA_RSCTMP ++ ) ) ++ { ++ ocf_exit_reason( 'Could not create or write in a temp file' ); ++ exit $OCF_ERR_INSTALLED; ++ } ++ ++ print $tmpfile $query; ++ chmod 0644, $tmpfile; ++ ++ ocf_log( 'debug', '_query: %s', $query ); ++ ++ # Change the effective user to the given system_user so after forking ++ # the given uid to the process should allow psql to connect w/o password ++ $> = $postgres_uid; ++ ++ # Forking + piping ++ $pid = open(my $KID, "-|"); ++ ++ if ( $pid == 0 ) { # child ++ exec $PGPSQL, '--set', 'ON_ERROR_STOP=1', '-qXAtf', $tmpfile, ++ '-R', $RS, '-F', $FS, '--port', $pgport, '--host', $pghost, ++ $connstr; ++ } ++ ++ # parent ++ $> = $oldeuid; ++ ++ { ++ local $/; ++ $ans = <$KID>; ++ } ++ ++ close $KID; ++ $rc = $? 
>> 8; ++ ++ ocf_log( 'debug', '_query: psql return code: %d', $rc ); ++ ++ if ( defined $ans ) { ++ chop $ans; ++ ++ push @{ $res }, [ split(chr(3) => $_, -1) ] ++ foreach split (chr(30) => $ans, -1); ++ ++ ocf_log( 'debug', '_query: @res: %s', ++ Data::Dumper->new( [ $res ] )->Terse(1)->Dump ); ++ } ++ ++ # Possible return codes: ++ # -1: wrong parameters ++ # 0: OK ++ # 1: failed to get resources (memory, missing file, ...) ++ # 2: unable to connect ++ # 3: query failed ++ return $rc; ++} ++ ++# Get the last received location on a standby ++# if the first argument is true, returns the value as decimal ++# if the first argument is false, returns the value as LSN ++# Returns undef if query failed ++sub _get_last_received_lsn { ++ my ( $dec ) = @_; ++ my $pg_last_wal_receive_lsn = 'pg_last_wal_receive_lsn()'; ++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff'; ++ my $query; ++ my $rc; ++ my @rs; ++ ++ if ( $PGVERNUM < $PGVER_10 ) { ++ $pg_last_wal_receive_lsn = 'pg_last_xlog_receive_location()'; ++ $pg_wal_lsn_diff = 'pg_xlog_location_diff'; ++ } ++ ++ if ( $dec ) { ++ $query = "SELECT $pg_wal_lsn_diff( $pg_last_wal_receive_lsn, '0/0' )"; ++ } ++ else { ++ $query = "SELECT $pg_last_wal_receive_lsn"; ++ } ++ ++ $rc = _query( $query, \@rs ); ++ ++ return $rs[0][0] if $rc == 0 and $rs[0][0]; ++ ++ ocf_log( 'err', 'Could not query last received LSN (%s)', $rc ) if $rc != 0; ++ ocf_log( 'err', 'No values for last received LSN' ) ++ if $rc == 0 and not $rs[0][0]; ++ ++ return undef; ++} ++ ++# Get the master score for each connected standby ++# Returns directly the result set of the query or exit with an error. ++# Exits with OCF_ERR_GENERIC if the query failed ++sub _get_lag_scores { ++ my $pg_current_wal_lsn = 'pg_current_wal_lsn()'; ++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff'; ++ my $write_lsn = 'write_lsn'; ++ my $query; ++ my $rc; ++ my @rs; ++ ++ if ( $PGVERNUM < $PGVER_10 ) { ++ $pg_current_wal_lsn = 'pg_current_xlog_location()'; ++ $pg_wal_lsn_diff = 'pg_xlog_location_diff'; ++ $write_lsn = 'write_location'; ++ } ++ ++ # We check locations of connected standbies by querying the ++ # "pg_stat_replication" view. ++ # The row_number applies on the result set ordered on write_location ASC so ++ # the highest row_number should be given to the closest node from the ++ # master, then the lowest node name (alphanumeric sort) in case of equality. ++ # The result set itself is order by priority DESC to process best known ++ # candidate first. 
++ $query = qq{ ++ SELECT application_name, priority, location, state, current_lag ++ FROM ( ++ SELECT application_name, ++ (1000 - ( ++ row_number() OVER ( ++ PARTITION BY state IN ('startup', 'backup') ++ ORDER BY location ASC, application_name ASC ++ ) - 1 ++ ) * 10 ++ ) * CASE WHEN ( $maxlag > 0 ++ AND current_lag > $maxlag) ++ THEN -1 ++ ELSE 1 ++ END AS priority, ++ location, state, current_lag ++ FROM ( ++ SELECT application_name, $write_lsn AS location, state, ++ $pg_wal_lsn_diff($pg_current_wal_lsn, $write_lsn) AS current_lag ++ FROM pg_stat_replication ++ ) AS s2 ++ ) AS s1 ++ ORDER BY priority DESC ++ }; ++ ++ $rc = _query( $query, \@rs ); ++ ++ if ( $rc != 0 ) { ++ ocf_exit_reason( 'Query to get standby locations failed (%d)', $rc ); ++ exit $OCF_ERR_GENERIC; ++ } ++ ++ return \@rs; ++} ++ ++# get the timeout for the current action given from environment var ++# Returns timeout as integer ++# undef if unknown ++sub _get_action_timeout { ++ my $timeout = $ENV{'OCF_RESKEY_CRM_meta_timeout'} / 1000; ++ ++ ocf_log( 'debug', '_get_action_timeout: known timeout: %s', ++ defined $timeout ? $timeout : 'undef' ); ++ ++ return $timeout if defined $timeout and $timeout =~ /^\d+$/; ++ ++ return undef; ++} ++ ++# Get, parse and return the value of the given private attribute name ++# Returns an empty string if not found. ++sub _get_priv_attr { ++ my ( $name, $node ) = @_; ++ my $val = ''; ++ my $node_arg = ''; ++ my $ans; ++ ++ $node = '' unless defined $node; ++ $name = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ $node_arg= "--node $node" if $node ne ''; ++ ++ $ans = qx{ $ATTRD_PRIV --name "$name" --query $node_arg }; ++ ++ $ans =~ m/^name=".*" host=".*" value="(.*)"$/; ++ ++ $val = $1 if defined $1; ++ ++ ocf_log( 'debug', '_get_priv_attr: value of "%s"%s is "%s"', $name, ++ ( $node ? " on \"$node\"": ""), ++ $val ); ++ ++ return $val; ++} ++ ++# Set the given private attribute name to the given value ++# As setting an attribute is asynchronous, this will return as soon as the ++# attribute is really set by attrd and available. ++sub _set_priv_attr { ++ my ( $name, $val ) = @_; ++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ ocf_log( 'debug', '_set_priv_attr: set "%s=%s"...', $name_instance, $val ); ++ ++ qx{ $ATTRD_PRIV --name "$name_instance" --update "$val" }; ++ ++ # give attr name without the resource instance name as _get_priv_attr adds ++ # it as well ++ while ( _get_priv_attr( $name ) ne $val ) { ++ ocf_log( 'debug', '_set_priv_attr: waiting attrd ack for "%s"...', $name_instance ); ++ select( undef, undef, undef, 0.1 ); ++ } ++ ++ return; ++} ++ ++# Delete the given private attribute. ++# As setting an attribute is asynchronous, this will return as soon as the ++# attribute is really deleted by attrd. ++sub _delete_priv_attr { ++ my ( $name ) = @_; ++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ ocf_log( 'debug', '_delete_priv_attr: delete "%s"...', $name_instance ); ++ ++ qx{ $ATTRD_PRIV --name "$name_instance" --delete }; ++ ++ # give attr name without the resource instance name as _get_priv_attr adds ++ # it as well ++ while ( _get_priv_attr( $name ) ne '' ) { ++ ocf_log( 'debug', '_delete_priv_attr: waiting attrd ack for "%s"...', ++ $name_instance ); ++ select( undef, undef, undef, 0.1 ); ++ } ++ ++ return; ++} ++ ++# Get, parse and return the resource master score on given node. ++# Returns an empty string if not found. 
++# Returns undef on crm_master call on error ++sub _get_master_score { ++ my ( $node ) = @_; ++ my $node_arg = ''; ++ my $score; ++ ++ $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne ''; ++ ++ $score = qx{ $CRM_MASTER --quiet --get-value $node_arg 2> /dev/null }; ++ ++ return '' unless $? == 0 and defined $score; ++ ++ chomp $score; ++ ++ return $score; ++} ++ ++# Set the master score of the local node or the optionally given node. ++# As setting an attribute is asynchronous, this will return as soon as the ++# attribute is really set by attrd and available everywhere. ++sub _set_master_score { ++ my ( $score, $node ) = @_; ++ my $node_arg = ''; ++ my $tmp; ++ ++ $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne ''; ++ ++ qx{ $CRM_MASTER $node_arg --quiet --update "$score" }; ++ ++ while ( ( $tmp = _get_master_score( $node ) ) ne $score ) { ++ ocf_log( 'debug', ++ '_set_master_score: waiting to set score to "%s" (currently "%s")...', ++ $score, $tmp ); ++ select(undef, undef, undef, 0.1); ++ } ++ ++ return; ++} ++ ++# _master_score_exists ++# This subroutine checks if a master score is set for one of the relative clones ++# in the cluster and the score is greater or equal of 0. ++# Returns 1 if at least one master score >= 0 is found. ++# Returns 0 otherwise ++sub _master_score_exists { ++ my @partition_nodes = split /\s+/ => qx{ $CRM_NODE --partition }; ++ ++ foreach my $node ( @partition_nodes ) { ++ my $score = _get_master_score( $node ); ++ ++ return 1 if defined $score and $score ne '' and $score > -1; ++ } ++ ++ return 0; ++} ++ ++# Check if the current transiation is a recover of a master clone on given node. ++sub _is_master_recover { ++ my ( $n ) = @_; ++ ++ return ( ++ scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'master'} } ++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} } ++ ); ++} ++ ++# Check if the current transition is a recover of a slave clone on given node. ++sub _is_slave_recover { ++ my ( $n ) = @_; ++ ++ return ( ++ scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} } ++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'start'} } ++ ); ++} ++ ++# check if th current transition is a switchover to the given node. ++sub _is_switchover { ++ my ( $n ) = @_; ++ my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'}; ++ ++ return 0 if scalar @{ $OCF_NOTIFY_ENV{'master'} } != 1 ++ or scalar @{ $OCF_NOTIFY_ENV{'demote'} } != 1 ++ or scalar @{ $OCF_NOTIFY_ENV{'promote'} } != 1; ++ ++ return ( ++ scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'demote'} } ++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} } ++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} } ++ and not scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'stop'} } ++ ); ++} ++ ++# Run the given command as the "system_user" given as parameter. ++# It basically forks and seteuid/setuid away from root. 
++# ++sub _runas { ++ my $rc; ++ my $pid; ++ my @cmd = @_; ++ my (undef, undef, $postgres_uid, $postgres_gid ) = getpwnam( $system_user ); ++ ++ $pid = fork; ++ ++ if ( $pid == 0 ) { # in child ++ $) = "$postgres_gid $postgres_gid"; ++ while ( my ( undef, undef, $gid, $members ) = getgrent ) { ++ $) .= " $gid" if grep { $system_user eq $_ } split /\s+/, $members ++ } ++ $( = $postgres_gid; ++ ++ $< = $> = $postgres_uid; ++ ++ exec @cmd; ++ } ++ ++ ocf_log( 'debug', '_runas: launching as "%s" command "%s"', $system_user, ++ join(' ', @cmd) ); ++ ++ waitpid $pid, 0; ++ $rc = $? >> 8; ++ ++ return $rc; ++} ++ ++# Check if instance is listening on the given host/port. ++# ++sub _pg_isready { ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ my $rc = _runas( $PGISREADY, '-h', $pghost, '-p', $pgport, '-d', 'postgres', '-t', $timeout ); ++ ++ # Possible error codes: ++ # 1: ping rejected (usually when instance is in startup, in crash ++ # recovery, in warm standby, or when a shutdown is in progress) ++ # 2: no response, usually means the instance is down ++ # 3: no attempt, probably a syntax error, should not happen ++ return $rc; ++} ++ ++# Check the postmaster.pid file and the postmaster process. ++# WARNING: we do not distinguish the scenario where postmaster.pid does not ++# exist from the scenario where the process is still alive. It should be ok ++# though, as this is considered a hard error from monitor. ++# ++sub _pg_ctl_status { ++ my $rc = _runas( $PGCTL, '--pgdata', $pgdata, 'status' ); ++ ++ # Possible error codes: ++ # 3: postmaster.pid file does not exist OR it does but the process ++ # with the PID found in the file is not alive ++ return $rc; ++} ++ ++# Start the local instance using pg_ctl ++# ++sub _pg_ctl_start { ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ ++ my @cmd = ( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, 'start' ); ++ ++ push @cmd => ( '-o', $start_opts ) if $start_opts ne ''; ++ ++ return _runas( @cmd ); ++} ++ ++# Enable the Standby mode. ++# ++# Up to v11, creates the recovery.conf file based on the given template. ++# Since v12, creates standby.signal. ++sub _enable_recovery { ++ my $fh; ++ my $content = ''; ++ my $standby_file = "$datadir/standby.signal"; ++ my (undef, undef, $uid, $gid) = getpwnam($system_user); ++ ++ if ( $PGVERNUM < $PGVER_12 ) { ++ $standby_file = "$datadir/recovery.conf"; ++ ++ ocf_log( 'debug', ++ '_enable_recovery: get replication configuration from the template file "%s"', ++ $recovery_tpl ); ++ ++ # Create the recovery.conf file to start the instance as a secondary. ++ # NOTE: the recovery.conf is supposed to be set up so the secondary can ++ # connect to the primary instance, eg. using a virtual IP address. ++ # As there is no primary instance available at startup, secondaries will ++ # complain about failing to connect. ++ # As we can not reload a recovery.conf file on a standby without restarting ++ # it, we will leave with this. ++ # FIXME how would the reload help us in this case ? ++ unless ( defined open( $fh, '<', $recovery_tpl ) ) { ++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! 
); ++ exit $OCF_ERR_CONFIGURED; ++ } ++ ++ # Copy all parameters from the template file ++ while (my $line = <$fh>) { ++ chomp $line; ++ $content .= "$line\n"; ++ } ++ close $fh; ++ } ++ ++ ocf_log( 'debug', '_enable_recovery: write the standby file "%s"', $standby_file ); ++ ++ unless ( open( $fh, '>', $standby_file ) ) { ++ ocf_exit_reason( 'Could not open file "%s": %s', $standby_file, $! ); ++ exit $OCF_ERR_CONFIGURED; ++ } ++ ++ # Write the recovery.conf file using configuration from the template file ++ print $fh $content; ++ ++ close $fh; ++ ++ unless ( chown $uid, $gid, $standby_file ) { ++ ocf_exit_reason( 'Could not set owner of "%s"', $standby_file ); ++ exit $OCF_ERR_CONFIGURED; ++ }; ++} ++ ++# Parse and return various information about the local PostgreSQL instance as ++# reported by its controldata file. ++# ++# WARNING: the status is NOT updated in case of crash. ++# ++# This sub exits the script with an error on failure ++sub _get_controldata { ++ my %controldata; ++ my $ans; ++ ++ $ans = qx{ $PGCTRLDATA "$datadir" 2>/dev/null }; ++ ++ # Parse the output of pg_controldata. ++ # This output is quite stable between pg versions, but we might need to sort ++ # it at some point if things are moving in there... ++ $ans =~ m{ ++ # get the current state ++ ^\QDatabase cluster state\E:\s+(.*?)\s*$ ++ .* ++ # Get the latest known REDO location ++ ^\QLatest checkpoint's REDO location\E:\s+([/0-9A-F]+)\s*$ ++ .* ++ # Get the latest known TL ++ ^\QLatest checkpoint's TimeLineID\E:\s+(\d+)\s*$ ++ .* ++ # Get the wal level ++ # NOTE: pg_controldata output changed with PostgreSQL 9.5, so we need to ++ # account for both syntaxes ++ ^(?:\QCurrent \E)?\Qwal_level setting\E:\s+(.*?)\s*$ ++ }smx; ++ ++ $controldata{'state'} = $1 if defined $1; ++ $controldata{'redo'} = $2 if defined $2; ++ $controldata{'tl'} = $3 if defined $3; ++ $controldata{'wal_level'} = $4 if defined $4; ++ ++ ocf_log( 'debug', ++ "_get_controldata: found: %s", ++ Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump ); ++ ++ return %controldata if defined $controldata{'state'} ++ and defined $controldata{'tl'} ++ and defined $controldata{'redo'} ++ and defined $controldata{'wal_level'}; ++ ++ ocf_exit_reason( 'Could not read all data from the controldata file for "%s"', ++ $datadir ); ++ ++ ocf_log( 'debug', ++ "_get_controldata: controldata file: %s", ++ Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump, $ans ); ++ ++ exit $OCF_ERR_ARGS; ++} ++ ++# Read the major version from datadir/PG_VERSION and return it as a numeric version ++sub _get_pg_version { ++ my $fh; ++ my $PGVERSION; ++ my $PGVERNUM; ++ ++ # check PG_VERSION ++ if ( ! -s "$datadir/PG_VERSION" ) { ++ ocf_exit_reason( 'PG_VERSION does not exist in "%s"', $datadir ); ++ exit $OCF_ERR_ARGS; ++ } ++ ++ unless ( open( $fh, '<', "$datadir/PG_VERSION" ) ) { ++ ocf_exit_reason( "Could not open file \"$datadir/PG_VERSION\": $!" ); ++ exit $OCF_ERR_ARGS; ++ } ++ ++ read( $fh, $PGVERSION, 32 ); ++ close $fh; ++ ++ chomp $PGVERSION; ++ ++ $PGVERSION =~ /^(\d+)(?:\.(\d+))?$/; ++ $PGVERNUM = $1 * 10000; ++ $PGVERNUM += $2 * 100 if $1 < 10; # no 2nd num in the major version from v10 ++ ++ return $PGVERNUM; ++} ++ ++# Use pg_controldata to check the state of the PostgreSQL server. This ++# function returns codes depending on this state, so we can find whether the ++# instance is a primary or a secondary, or use it to detect any inconsistency ++# that could indicate the instance has crashed.
++# ++sub _controldata_to_ocf { ++ my %cdata = _get_controldata(); ++ ++ while ( 1 ) { ++ ocf_log( 'debug', '_controldata: instance "%s" state is "%s"', ++ $OCF_RESOURCE_INSTANCE, $cdata{'state'} ); ++ ++ # Instance should be running as a primary. ++ return $OCF_RUNNING_MASTER if $cdata{'state'} eq "in production"; ++ ++ # Instance should be running as a secondary. ++ # This state includes warm standby (rejects connections attempts, ++ # including pg_isready) ++ return $OCF_SUCCESS if $cdata{'state'} eq "in archive recovery"; ++ ++ ++ # The instance should be stopped. ++ # We don't care if it was a primary or secondary before, because we ++ # always start instances as secondaries, and then promote if necessary. ++ return $OCF_NOT_RUNNING if $cdata{'state'} eq "shut down" ++ or $cdata{'state'} eq "shut down in recovery"; ++ ++ # The state is "in crash recovery", "starting up" or "shutting down". ++ # This state should be transitional, so we wait and loop to check if ++ # it changes. ++ # If it does not, pacemaker will eventually abort with a timeout. ++ ocf_log( 'debug', ++ '_controldata: waiting for transitionnal state "%s" to finish', ++ $cdata{'state'} ); ++ sleep 1; ++ %cdata = _get_controldata(); ++ } ++ ++ # If we reach this point, something went really wrong with this code or ++ # pg_controldata. ++ ocf_exit_reason( 'Unable get instance "%s" state using pg_controldata', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ return $OCF_ERR_INSTALLED ; ++} ++ ++# Check the write_location of all secondaries, and adapt their master score so ++# that the instance closest to the master will be the selected candidate should ++# a promotion be triggered. ++# NOTE: This is only a hint to pacemaker! The selected candidate to promotion ++# actually re-check it is the best candidate and force a re-election by failing ++# if a better one exists. This avoid a race condition between the call of the ++# monitor action and the promotion where another slave might have catchup faster ++# with the master. ++# NOTE: we cannot directly use the write_location, neither a lsn_diff value as ++# promotion score as Pacemaker considers any value greater than 1,000,000 as ++# INFINITY. ++# ++# This sub must be executed from a master monitor action. ++# ++sub _check_locations { ++ my $partition_nodes; ++ my $node_score; ++ my $row_num; ++ my $row; ++ my @rs; ++ ++ # Set the master score if not already done ++ $node_score = _get_master_score(); ++ _set_master_score( '1001' ) unless $node_score eq '1001'; ++ ++ # Ask crm_node what nodes are present in our current cluster partition ++ $partition_nodes = qx{ $CRM_NODE --partition }; ++ ++ @rs = @{ _get_lag_scores() }; ++ ++ $row_num = scalar @rs; ++ ++ # If no lag are reported at this point, it means that there is no ++ # secondary instance connected. ++ ocf_log( 'warning', 'No secondary connected to the master' ) ++ if $row_num == 0; ++ ++ # For each standby connected, set their master score based on the following ++ # rule: the first known node/application, with the highest priority and ++ # an acceptable state. ++ while ( $row = shift @rs ) { ++ ++ if ( $partition_nodes !~ /$row->[0]/ ) { ++ ocf_log( 'info', 'Ignoring unknown application_name/node "%s"', ++ $row->[0] ); ++ next; ++ } ++ ++ if ( $row->[0] eq $nodename ) { ++ ocf_log( 'warning', 'Streaming replication with myself!' 
); ++ next; ++ } ++ ++ $node_score = _get_master_score( $row->[0] ); ++ ++ if ( $row->[3] =~ /^\s*(?:startup|backup)\s*$/ ) { ++ # We exclude any standby being in state backup (pg_basebackup) or ++ # startup (new standby or failing standby) ++ ocf_log( 'info', 'Forbidding promotion on "%s" in state "%s"', ++ $row->[0], $row->[3] ); ++ ++ _set_master_score( '-1', $row->[0] ) unless $node_score eq '-1'; ++ } ++ else { ++ ocf_log( 'debug', ++ '_check_locations: checking "%s" promotion ability (current_score: %s, priority: %s, location: %s, lag: %s)', ++ $row->[0], $node_score, $row->[1], $row->[2], $row->[4] ); ++ ++ if ( $node_score ne $row->[1] ) { ++ if ( $row->[1] < -1 ) { ++ ocf_log( 'info', 'Update score of "%s" from %s to %s because replication lag (%s) is higher than given maxlag (%s).', ++ $row->[0], $node_score, $row->[1], $row->[4], $maxlag ); ++ } ++ else { ++ ocf_log( 'info', 'Update score of "%s" from %s to %s because of a change in the replication lag (%s).', ++ $row->[0], $node_score, $row->[1], $row->[4] ); ++ } ++ _set_master_score( $row->[1], $row->[0] ); ++ } ++ else { ++ ocf_log( 'debug', ++ '_check_locations: "%s" keeps its current score of %s', ++ $row->[0], $row->[1] ); ++ } ++ } ++ ++ # Remove this node from the known nodes list. ++ $partition_nodes =~ s/(?:^|\s)$row->[0](?:\s|$)/ /g; ++ } ++ ++ $partition_nodes =~ s/(?:^\s+)|(?:\s+$)//g; ++ ++ # If there are still nodes in "partition_nodes", it means there is no ++ # corresponding line in "pg_stat_replication". ++ # Exclude these nodes that are not part of the cluster at this ++ # point. ++ foreach my $node (split /\s+/ => $partition_nodes) { ++ # Exclude the current node. ++ next if $node eq $nodename; ++ ++ # do not warn if the master score is already set to -1000. ++ # this avoid log flooding (gh #138) ++ $node_score = _get_master_score( $node ); ++ next if $node_score eq '-1000'; ++ ++ ocf_log( 'warning', '"%s" is not connected to the primary', $node ); ++ _set_master_score( '-1000', $node ); ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++# _check_switchover ++# check if the pgsql switchover to the localnode is safe. ++# This is supposed to be called **after** the master has been stopped or demoted. ++# This sub checks if the local standby received the shutdown checkpoint from the ++# old master to make sure it can take over the master role and the old master ++# will be able to catchup as a standby after. ++# ++# Returns 0 if switchover is safe ++# Returns 1 if swithcover is not safe ++# Returns 2 for internal error ++sub _check_switchover { ++ my $has_sht_chk = 0; ++ my $last_redo; ++ my $last_lsn; ++ my $ans; ++ my $rc; ++ my $tl; ++ my %cdata; ++ ++ $PGWALDUMP = "$bindir/pg_xlogdump" if $PGVERNUM < $PGVER_10; ++ ++ ocf_log( 'info', 'Switchover in progress from "%s" to "%s".' ++ .' Need to check the last record in WAL', ++ $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename ); ++ ++ # check if we received the shutdown checkpoint of the master during its ++ # demote process. ++ # We need the last local checkpoint LSN and the last received LSN from ++ # master to check in the WAL between these adresses if we have a ++ # "checkpoint shutdown" using pg_xlogdump/pg_waldump. 
++ # ++ # Force a checkpoint to make sure the controldata shows the very last TL ++ # and the master's shutdown checkpoint ++ _query( q{ CHECKPOINT }, {} ); ++ %cdata = _get_controldata(); ++ $tl = $cdata{'tl'}; ++ $last_redo = $cdata{'redo'}; ++ ++ # Get the last received LSN from master ++ $last_lsn = _get_last_received_lsn(); ++ ++ unless ( defined $last_lsn ) { ++ ocf_exit_reason( 'Could not fetch last received LSN!' ); ++ ++ return 2; ++ } ++ ++ $ans = qx{ $PGWALDUMP --path "$datadir" --timeline "$tl" \\ ++ --start "$last_redo" --end "$last_lsn" 2>&1 }; ++ $rc = $?; ++ ++ ocf_log( 'debug', ++ '_check_switchover: %s rc: "%s", tl: "%s", last_chk: %s, last_lsn: %s, output: "%s"', ++ $PGWALDUMP, $rc, $tl, $last_redo, $last_lsn, $ans ++ ); ++ ++ if ( $rc == 0 and ++ $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m ++ ) { ++ ocf_log( 'info', 'Slave received the shutdown checkpoint' ); ++ return 0; ++ } ++ ++ ocf_exit_reason( ++ 'Did not receive the shutdown checkpoint from the old master!' ); ++ ++ return 1; ++} ++ ++# Check to confirm if the instance is really started as _pg_isready stated and ++# check if the instance is primary or secondary. ++# ++sub _confirm_role { ++ my $is_in_recovery; ++ my $rc; ++ my @rs; ++ ++ $rc = _query( "SELECT pg_is_in_recovery()", \@rs ); ++ ++ $is_in_recovery = $rs[0][0]; ++ ++ if ( $rc == 0 ) { ++ # The query was executed, check the result. ++ if ( $is_in_recovery eq 't' ) { ++ # The instance is a secondary. ++ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a secondary"); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $is_in_recovery eq 'f' ) { ++ # The instance is a primary. ++ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary"); ++ # Check lsn diff with current slaves if any ++ _check_locations() if $__OCF_ACTION eq 'monitor'; ++ return $OCF_RUNNING_MASTER; ++ } ++ ++ # This should not happen, raise a hard configuration error. ++ ocf_exit_reason( ++ 'Unexpected result from query to check if "%s" is a primary or a secondary: "%s"', ++ $OCF_RESOURCE_INSTANCE, $is_in_recovery ); ++ ++ return $OCF_ERR_CONFIGURED; ++ } ++ elsif ( $rc == 1 or $rc == 2 ) { ++ # psql cound not connect to the instance. ++ # As pg_isready reported the instance was listening, this error ++ # could be a max_connection saturation. Just report a soft error. ++ ocf_exit_reason( 'psql could not connect to instance "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # The query failed (rc: 3) or bad parameters (rc: -1). ++ # This should not happen, raise a hard configuration error. ++ ocf_exit_reason( ++ 'The query to check if instance "%s" is a primary or a secondary failed (rc: %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ ++ return $OCF_ERR_CONFIGURED; ++} ++ ++ ++# Check to confirm if the instance is really stopped as _pg_isready stated ++# and if it was propertly shut down. ++# ++sub _confirm_stopped { ++ my $pgctlstatus_rc; ++ my $controldata_rc; ++ ++ # Check the postmaster process status. ++ $pgctlstatus_rc = _pg_ctl_status(); ++ ++ if ( $pgctlstatus_rc == 0 ) { ++ # The PID file exists and the process is available. ++ # That should not be the case, return an error. ++ ocf_exit_reason( ++ 'Instance "%s" is not listening, but the process referenced in postmaster.pid exists', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # The PID file does not exist or the process is not available. 
++ ocf_log( 'debug', ++ '_confirm_stopped: no postmaster process found for instance "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ if ( -f "$datadir/backup_label" ) { ++ # We are probably on a freshly built secondary that was not started yet. ++ ocf_log( 'debug', ++ '_confirm_stopped: backup_label file exists: probably on a never started secondary', ++ ); ++ return $OCF_NOT_RUNNING; ++ } ++ ++ # Continue the check with pg_controldata. ++ $controldata_rc = _controldata_to_ocf(); ++ if ( $controldata_rc == $OCF_RUNNING_MASTER ) { ++ # The controldata has not been updated to "shutdown". ++ # It should mean we had a crash on a primary instance. ++ ocf_exit_reason( ++ 'Instance "%s" controldata indicates a running primary instance, the instance has probably crashed', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_FAILED_MASTER; ++ } ++ elsif ( $controldata_rc == $OCF_SUCCESS ) { ++ # The controldata has not been updated to "shutdown in recovery". ++ # It should mean we had a crash on a secondary instance. ++ # There is no "FAILED_SLAVE" return code, so we return a generic error. ++ ocf_exit_reason( ++ 'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ elsif ( $controldata_rc == $OCF_NOT_RUNNING ) { ++ # The controldata state is consistent, the instance was probably ++ # propertly shut down. ++ ocf_log( 'debug', ++ '_confirm_stopped: instance "%s" controldata indicates that the instance was propertly shut down', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_NOT_RUNNING; ++ } ++ ++ # Something went wrong with the controldata check. ++ ocf_exit_reason( ++ 'Could not get instance "%s" status from controldata (returned: %d)', ++ $OCF_RESOURCE_INSTANCE, $controldata_rc ); ++ ++ return $OCF_ERR_GENERIC; ++} ++ ++############################################################ ++#### OCF FUNCS ++ ++ ++ ++=head1 SUPPORTED PARAMETERS ++ ++=over ++ ++=item B ++ ++Location of the PGDATA of your instance ++ ++(optional, string, default "/var/lib/pgsql/data") ++ ++=item B ++ ++The socket directory or IP address to use to connect to the local instance ++ ++(optional, string, default "/tmp") ++ ++=item B ++ ++The port to connect to the local instance ++ ++(optional, integer, default "5432") ++ ++=item B ++ ++Location of the PostgreSQL binaries. ++ ++(optional, string, default "/usr/bin") ++ ++=item B ++ ++The system owner of your instance's process ++ ++(optional, string, default "postgres") ++ ++=item B ++ ++B for PostgreSQL 11 and bellow. ++ ++The local template that will be copied as the C file. ++This template file must exists on all node. ++ ++With PostgreSQL 12 and higher, the cluster will refuse to start if this ++parameter is set or a template file is found. ++ ++(optional, string, default "$PGDATA/recovery.conf.pcmk") ++ ++=item B ++ ++Maximum lag allowed on a standby before we set a negative master score on it. ++The calculation is based on the difference between the current xlog location on ++the master and the write location on the standby. ++ ++(optional, integer, default "0" disables this feature) ++ ++=item B ++ ++Path to the directory set in C from your postgresql.conf file. ++This parameter has same default than PostgreSQL itself: the C parameter ++value. ++ ++Unless you have a special PostgreSQL setup and you understand this parameter, ++B ++ ++(optional, string, default to the value of C) ++ ++=item B ++ ++Additional arguments given to the postgres process on startup. 
See ++"postgres --help" for available options. Useful when the postgresql.conf file ++is not in the data directory (PGDATA), eg.: ++ ++ -c config_file=/etc/postgresql/9.3/main/postgresql.conf ++ ++(optinal, string, default "") ++ ++=back ++ ++=cut ++ ++sub ocf_meta_data { ++ print qq{ ++ ++ ++ 1.0 ++ ++ ++ Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource. ++ ++ Manages PostgreSQL servers in replication ++ ++ ++ ++ System user account used to run the PostgreSQL server ++ ++ PostgreSQL system User ++ ++ ++ ++ ++ ++ Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl. ++ ++ Path to the PostgreSQL binaries ++ ++ ++ ++ ++ ++ Path to the data directory, e.g. PGDATA ++ ++ Path to the data directory ++ ++ ++ ++ ++ ++ Path to the directory set in data_directory from your postgresql.conf file. This parameter ++ has the same default than PostgreSQL itself: the pgdata parameter value. Unless you have a ++ special PostgreSQL setup and you understand this parameter, ignore it. ++ ++ Path to the directory set in data_directory from your postgresql.conf file ++ ++ ++ ++ ++ ++ Host IP address or unix socket folder the instance is listening on. ++ ++ Instance IP or unix socket folder ++ ++ ++ ++ ++ ++ Port the instance is listening on. ++ ++ Instance port ++ ++ ++ ++ ++ ++ Maximum lag allowed on a standby before we set a negative master score on it. The calculation ++ is based on the difference between the current LSN on the master and the LSN ++ written on the standby. ++ This parameter must be a valid positive number as described in PostgreSQL documentation. ++ See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC ++ ++ Maximum write lag before we mark a standby as inappropriate to promote ++ ++ ++ ++ ++ ++ Path to the recovery.conf template. This file is simply copied to \$PGDATA ++ before starting the instance as slave. ++ ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for ++ PostgreSQL 12 and higher. The cluster will refuse to start if a template ++ file is found. ++ ++ Path to the recovery.conf template for PostgreSQL 11 and older. ++ ++ ++ ++ ++ ++ Additionnal arguments given to the postgres process on startup. ++ See "postgres --help" for available options. Usefull when the ++ postgresql.conf file is not in the data directory (PGDATA), eg.: ++ "-c config_file=/etc/postgresql/9.3/main/postgresql.conf". ++ ++ Additionnal arguments given to the postgres process on startup. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ }; ++ return $OCF_SUCCESS; ++} ++ ++ ++=head1 SUPPORTED ACTIONS ++ ++This resource agent supports the following actions (operations): ++ ++=over ++ ++=item B ++ ++Starts the resource. Suggested minimum timeout: 60. ++ ++=item B ++ ++Stops the resource. Suggested minimum timeout: 60. ++ ++=item B ++ ++Suggested minimum timeout: 20. ++ ++=item B ++ ++Promotes the resource to the Master role. Suggested minimum timeout: 30. ++ ++=item B ++ ++Demotes the resource to the Slave role. Suggested minimum timeout: 120. ++ ++=item B ++ ++Performs a detailed status check. Suggested minimum timeout: 10. ++Suggested interval: 15. ++ ++=item B ++ ++Performs a detailed status check. Suggested minimum timeout: 10. ++Suggested interval: 16. ++ ++=item B ++ ++Suggested minimum timeout: 60 ++ ++=item B ++ ++Retrieves resource agent metadata (internal use only). ++Suggested minimum timeout: 5. 
++ ++=item B ++ ++Suggested minimum timeout: 5. ++ ++=item B ++ ++Performs a validation of the resource configuration. ++Suggested minimum timeout: 5. ++ ++=back ++ ++=cut ++ ++sub ocf_methods { ++ print q{ ++ start ++ stop ++ reload ++ promote ++ demote ++ monitor ++ notify ++ methods ++ meta-data ++ validate-all ++ }; ++ ++ return $OCF_SUCCESS; ++} ++ ++############################################################ ++#### RA FUNCS ++ ++sub pgsql_validate_all { ++ my $fh; ++ my $ans = ''; ++ my %cdata; ++ ++ unless ( ++ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2 ++ ) { ++ ocf_exit_reason( ++ 'PAF %s is compatible with Pacemaker 1.1.13 and greater', ++ $VERSION ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check notify=true ++ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\ ++ --meta --get-parameter notify 2>/dev/null }; ++ chomp $ans; ++ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { ++ ocf_exit_reason( ++ 'You must set meta parameter notify=true for your master resource' ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check master-max=1 ++ unless ( ++ defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1' ++ ) { ++ ocf_exit_reason( ++ 'You must set meta parameter master-max=1 for your master resource' ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ if ( $PGVERNUM >= $PGVER_12 ) { ++ # check PostgreSQL setup: checks related to v12 and after ++ my $guc; ++ ++ # recovery.conf template must not exists ++ if ( -f $recovery_tpl ) { ++ ocf_exit_reason( ++ 'Recovery template file "%s" is forbidden for PostgreSQL 12 and above', ++ $recovery_tpl ); ++ exit $OCF_ERR_ARGS; ++ } ++ ++ # WARNING: you MUST put -C as first argument to bypass the root check ++ $guc = qx{ $POSTGRES -C recovery_target_timeline -D "$pgdata" $start_opts}; ++ chomp $guc; ++ unless ( $guc eq 'latest' ) { ++ ocf_exit_reason( ++ q{Parameter "recovery_target_timeline" MUST be set to 'latest'. } . ++ q{It is currently set to '%s'}, $guc ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts}; ++ unless ($guc =~ /\bapplication_name='?$nodename'?\b/) { ++ ocf_exit_reason( ++ q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }. ++ q{It is currently set to '%s'}, $nodename, $guc ); ++ return $OCF_ERR_ARGS; ++ } ++ } ++ else { ++ my @content; ++ ++ # check recovery template ++ if ( ! -f $recovery_tpl ) { ++ ocf_exit_reason( 'Recovery template file "%s" does not exist', ++ $recovery_tpl ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # check content of the recovery template file ++ unless ( open( $fh, '<', $recovery_tpl ) ) { ++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! 
); ++ return $OCF_ERR_ARGS; ++ } ++ @content = <$fh>; ++ close $fh; ++ ++ ++ unless ( grep /^\s*standby_mode\s*=\s*'?on'?\s*$/, @content ) { ++ ocf_exit_reason( ++ 'Recovery template file must contain "standby_mode = on"' ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ unless ( grep /^\s*recovery_target_timeline\s*=\s*'?latest'?\s*$/, @content ) { ++ ocf_exit_reason( ++ "Recovery template file must contain \"recovery_target_timeline = 'latest'\"" ++ ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ unless ( ++ grep /^\s*primary_conninfo\s*=.*['\s]application_name=$nodename['\s]/, ++ @content ++ ) { ++ ocf_exit_reason( ++ 'Recovery template file must contain in primary_conninfo parameter "application_name=%s"', ++ $nodename ); ++ return $OCF_ERR_ARGS; ++ } ++ } ++ ++ unless ( looks_like_number($maxlag) ) { ++ ocf_exit_reason( 'maxlag is not a number: "%s"', $maxlag ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check system user ++ unless ( defined getpwnam $system_user ) { ++ ocf_exit_reason( 'System user "%s" does not exist', $system_user ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # require 9.3 minimum ++ if ( $PGVERNUM < $PGVER_93 ) { ++ ocf_exit_reason( "Require 9.3 and more" ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check binaries ++ unless ( -x $PGCTL and -x $PGPSQL and -x $PGCTRLDATA and -x $PGISREADY ++ and ( -x $PGWALDUMP or -x "$bindir/pg_xlogdump") ++ ) { ++ ocf_exit_reason( ++ "Missing one or more binary. Check following path: %s, %s, %s, %s, %s or %s", ++ $PGCTL, $PGPSQL, $PGCTRLDATA, $PGISREADY, $PGWALDUMP, "$bindir/pg_xlogdump" ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # require wal_level >= hot_standby ++ %cdata = _get_controldata(); ++ unless ( $cdata{'wal_level'} =~ m{hot_standby|logical|replica} ) { ++ ocf_exit_reason( ++ 'wal_level must be one of "hot_standby", "logical" or "replica"' ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++ ++# Start the PostgreSQL instance as a *secondary* ++# ++sub pgsql_start { ++ my $rc = pgsql_monitor(); ++ my %cdata = _get_controldata(); ++ my $prev_state = $cdata{'state'}; ++ ++ # Instance must be running as secondary or being stopped. ++ # Anything else is an error. ++ if ( $rc == $OCF_SUCCESS ) { ++ ocf_log( 'info', 'Instance "%s" already started', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc != $OCF_NOT_RUNNING ) { ++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # From here, the instance is NOT running for sure. ++ # ++ ++ ocf_log( 'debug', ++ 'pgsql_start: instance "%s" is not running, starting it as a secondary', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Must start as a standby, so enable recovery. ++ _enable_recovery(); ++ ++ # Start the instance as a secondary. ++ $rc = _pg_ctl_start(); ++ ++ if ( $rc == 0 ) { ++ ++ # Wait for the start to finish. ++ sleep 1 while ( $rc = pgsql_monitor() ) == $OCF_NOT_RUNNING; ++ ++ if ( $rc == $OCF_SUCCESS ) { ++ ocf_log( 'info', 'Instance "%s" started', $OCF_RESOURCE_INSTANCE ); ++ ++ # Check if a master score exists in the cluster. ++ # During the very first start of the cluster, no master score will ++ # exists on any of the existing slaves, unless an admin designated ++ # one of them using crm_master. If no master exists the cluster will ++ # not promote a master among the slaves. ++ # To solve this situation, we check if there is at least one master ++ # score existing on one node in the cluster. 
Do nothing if at least ++ # one master score is found among the clones of the resource. If no ++ # master score exists, set a score of 1 only if the resource was a ++ # shut downed master before the start. ++ if ( $prev_state eq "shut down" and not _master_score_exists() ) { ++ ocf_log( 'info', 'No master score around. Set mine to 1' ); ++ ++ _set_master_score( '1' ); ++ } ++ ++ return $OCF_SUCCESS; ++ } ++ ++ ocf_exit_reason( ++ 'Instance "%s" is not running as a slave (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ ++ return $OCF_ERR_GENERIC; ++ } ++ ++ ocf_exit_reason( 'Instance "%s" failed to start (rc: %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ ++ return $OCF_ERR_GENERIC; ++} ++ ++# Stop the PostgreSQL instance ++# ++sub pgsql_stop { ++ my $rc; ++ my $state; ++ my $pidfile = "$datadir/postmaster.pid"; ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ ++ # Instance must be running as secondary or primary or being stopped. ++ # Anything else is an error. ++ $rc = pgsql_monitor(); ++ if ( $rc == $OCF_NOT_RUNNING ) { ++ ocf_log( 'info', 'Instance "%s" already stopped', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc != $OCF_SUCCESS and $rc != $OCF_RUNNING_MASTER ) { ++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # From here, the instance is running for sure. ++ # ++ ++ ocf_log( 'debug', 'pgsql_stop: instance "%s" is running, stopping it', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Try to quit with proper shutdown. ++ ++ ++ $rc = _runas( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, ++ '-m', 'fast', 'stop' ); ++ ++ if ( $rc == 0 ) { ++ # Wait for the stop to finish. ++ sleep 1 while ( $rc = pgsql_monitor() ) != $OCF_NOT_RUNNING ; ++ ++ ocf_log( 'info', 'Instance "%s" stopped', $OCF_RESOURCE_INSTANCE ); ++ ++ return $OCF_SUCCESS; ++ } ++ ++ ocf_exit_reason( 'Instance "%s" failed to stop', $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++} ++ ++# Monitor the PostgreSQL instance ++# ++sub pgsql_monitor { ++ my $pgisready_rc; ++ my $controldata_rc; ++ ++ ocf_log( 'debug', 'pgsql_monitor: monitor is a probe' ) if ocf_is_probe(); ++ ++ # First check, verify if the instance is listening. ++ $pgisready_rc = _pg_isready(); ++ ++ if ( $pgisready_rc == 0 ) { ++ # The instance is listening. ++ # We confirm that the instance is up and return if it is a primary or a ++ # secondary ++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening', ++ $OCF_RESOURCE_INSTANCE ); ++ return _confirm_role(); ++ } ++ ++ if ( $pgisready_rc == 1 ) { ++ # The attempt was rejected. ++ # This could happen in several cases: ++ # - at startup ++ # - during shutdown ++ # - during crash recovery ++ # - if instance is a warm standby ++ # Except for the warm standby case, this should be a transitional state. ++ # We try to confirm using pg_controldata. ++ ocf_log( 'debug', ++ 'pgsql_monitor: instance "%s" rejects connections - checking again...', ++ $OCF_RESOURCE_INSTANCE ); ++ $controldata_rc = _controldata_to_ocf(); ++ ++ if ( $controldata_rc == $OCF_RUNNING_MASTER ++ or $controldata_rc == $OCF_SUCCESS ++ ) { ++ # This state indicates that pg_isready check should succeed. ++ # We check again. 
++ ocf_log( 'debug', ++ 'pgsql_monitor: instance "%s" controldata shows a running status', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ $pgisready_rc = _pg_isready(); ++ if ( $pgisready_rc == 0 ) { ++ # Consistent with pg_controdata output. ++ # We can check if the instance is primary or secondary ++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening', ++ $OCF_RESOURCE_INSTANCE ); ++ return _confirm_role(); ++ } ++ ++ # Still not consistent, raise an error. ++ # NOTE: if the instance is a warm standby, we end here. ++ # TODO raise an hard error here ? ++ ocf_exit_reason( ++ 'Instance "%s" controldata is not consistent with pg_isready (returned: %d)', ++ $OCF_RESOURCE_INSTANCE, $pgisready_rc ); ++ ocf_log( 'info', ++ 'If this instance is in warm standby, this resource agent only supports hot standby', ++ $OCF_RESOURCE_INSTANCE, $pgisready_rc ); ++ ++ return $OCF_ERR_GENERIC; ++ } ++ ++ if ( $controldata_rc == $OCF_NOT_RUNNING ) { ++ # This state indicates that pg_isready check should fail with rc 2. ++ # We check again. ++ $pgisready_rc = _pg_isready(); ++ if ( $pgisready_rc == 2 ) { ++ # Consistent with pg_controdata output. ++ # We check the process status using pg_ctl status and check ++ # if it was propertly shut down using pg_controldata. ++ ocf_log( 'debug', ++ 'pgsql_monitor: instance "%s" is not listening', ++ $OCF_RESOURCE_INSTANCE ); ++ return _confirm_stopped(); ++ } ++ # Still not consistent, raise an error. ++ # TODO raise an hard error here ? ++ ocf_exit_reason( ++ 'Instance "%s" controldata is not consistent with pg_isready (returned: %d)', ++ $OCF_RESOURCE_INSTANCE, $pgisready_rc ); ++ ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # Something went wrong with the controldata check, hard fail. ++ ocf_exit_reason( ++ 'Could not get instance "%s" status from controldata (returned: %d)', ++ $OCF_RESOURCE_INSTANCE, $controldata_rc ); ++ ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ elsif ( $pgisready_rc == 2 ) { ++ # The instance is not listening. ++ # We check the process status using pg_ctl status and check ++ # if it was propertly shut down using pg_controldata. ++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is not listening', ++ $OCF_RESOURCE_INSTANCE ); ++ return _confirm_stopped(); ++ } ++ ++ elsif ( $pgisready_rc == 3 ) { ++ # No attempt was done, probably a syntax error. ++ # Hard configuration error, we don't want to retry or failover here. ++ ocf_exit_reason( ++ 'Unknown error while checking if instance "%s" is listening (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $pgisready_rc ); ++ ++ return $OCF_ERR_CONFIGURED; ++ } ++ ++ ocf_exit_reason( 'Unexpected result when checking instance "%s" status', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ return $OCF_ERR_GENERIC; ++} ++ ++ ++# Demote the PostgreSQL instance from primary to secondary ++# To demote a PostgreSQL instance, we must: ++# * stop it gracefully ++# * create recovery.conf with standby_mode = on ++# * start it ++# ++sub pgsql_demote { ++ my $rc; ++ ++ $rc = pgsql_monitor(); ++ ++ # Running as primary. Normal, expected behavior. ++ if ( $rc == $OCF_RUNNING_MASTER ) { ++ ocf_log( 'debug', 'pgsql_demote: "%s" currently running as a primary', ++ $OCF_RESOURCE_INSTANCE ) ; ++ } ++ elsif ( $rc == $OCF_SUCCESS ) { ++ # Already running as secondary. Nothing to do. ++ ocf_log( 'debug', ++ 'pgsql_demote: "%s" currently running as a secondary', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc == $OCF_NOT_RUNNING ) { ++ # Instance is stopped. Nothing to do. 
++ ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down', ++ $OCF_RESOURCE_INSTANCE ); ++ } ++ elsif ( $rc == $OCF_ERR_CONFIGURED ) { ++ # We actually prefer raising a hard or fatal error instead of leaving ++ # the CRM abording its transition for a new one because of a soft error. ++ # The hard error will force the CRM to move the resource immediately. ++ return $OCF_ERR_CONFIGURED; ++ } ++ else { ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # TODO we need to make sure at least one slave is connected!! ++ ++ # WARNING if the resource state is stopped instead of master, the ocf ra dev ++ # rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop where ++ # it computes transitions of demote(failing)->stop->start->promote actions ++ # until failcount == migration-threshold. ++ # This is a really ugly trick to keep going with the demode action if the ++ # rsc is already stopped gracefully. ++ # See discussion "CRM trying to demote a stopped resource" on ++ # developers@clusterlabs.org ++ unless ( $rc == $OCF_NOT_RUNNING ) { ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ ++ # WARNING the instance **MUST** be stopped gracefully. ++ # Do **not** use pg_stop() or service or systemctl here as these ++ # commands might force-stop the PostgreSQL instance using immediate ++ # after some timeout and return success, which is misleading. ++ ++ $rc = _runas( $PGCTL, '--pgdata', $pgdata, '--mode', 'fast', '-w', ++ '--timeout', $timeout , 'stop' ); ++ ++ # No need to wait for stop to complete, this is handled in pg_ctl ++ # using -w option. ++ unless ( $rc == 0 ) { ++ ocf_exit_reason( 'Failed to stop "%s" using pg_ctl (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # Double check that the instance is stopped correctly. ++ $rc = pgsql_monitor(); ++ unless ( $rc == $OCF_NOT_RUNNING ) { ++ ocf_exit_reason( ++ 'Unexpected "%s" state: monitor status (%d) disagree with pg_ctl return code', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ } ++ ++ # ++ # At this point, the instance **MUST** be stopped gracefully. ++ # ++ ++ # Note: We do not need to handle the recovery.conf file here as pgsql_start ++ # deal with that itself. Equally, no need to wait for the start to complete ++ # here, handled in pgsql_start. ++ $rc = pgsql_start(); ++ if ( $rc == $OCF_SUCCESS ) { ++ ocf_log( 'info', 'pgsql_demote: "%s" started as a secondary', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ ++ # NOTE: No need to double check the instance state as pgsql_start already use ++ # pgsql_monitor to check the state before returning. ++ ++ ocf_exit_reason( 'Starting "%s" as a standby failed (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++} ++ ++ ++# Promote the secondary instance to primary ++# ++sub pgsql_promote { ++ my $rc; ++ my $cancel_switchover; ++ ++ $rc = pgsql_monitor(); ++ ++ if ( $rc == $OCF_SUCCESS ) { ++ # Running as slave. Normal, expected behavior. ++ ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby', ++ $OCF_RESOURCE_INSTANCE ); ++ } ++ elsif ( $rc == $OCF_RUNNING_MASTER ) { ++ # Already a master. Unexpected, but not a problem. ++ ocf_log( 'info', '"%s" already running as a primary', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc == $OCF_NOT_RUNNING ) { # INFO this is not supposed to happen. 
++ # Currently not running. Need to start before promoting. ++ ocf_log( 'info', '"%s" currently not running, starting it', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ $rc = pgsql_start(); ++ if ( $rc != $OCF_SUCCESS ) { ++ ocf_exit_reason( 'Failed to start the instance "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ } ++ else { ++ ocf_exit_reason( 'Unexpected error, cannot promote "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # At this point, the instance **MUST** be started as a secondary. ++ # ++ ++ # Cancel the switchover if it has been considered not safe during the ++ # pre-promote action ++ $cancel_switchover = _get_priv_attr('cancel_switchover'); ++ if ( $cancel_switchover ) { # if not empty or not 0 ++ ocf_exit_reason( 'Switchover has been canceled from pre-promote action' ); ++ ++ _delete_priv_attr( 'cancel_switchover' ); ++ ++ return $OCF_ERR_GENERIC if $cancel_switchover eq '1'; ++ return $OCF_ERR_ARGS; # ban the resource from the node if we have an ++ # internal error during _check_switchover ++ } ++ ++ # Do not check for a better candidate if we try to recover the master ++ # Recover of a master is detected during the pre-promote action. It sets the ++ # private attribute 'recover_master' to '1' if this is a master recover. ++ if ( _get_priv_attr( 'recover_master' ) eq '1' ) { ++ ocf_log( 'info', 'Recovering old master, no election needed'); ++ } ++ else { ++ ++ # The promotion is occurring on the best known candidate (highest ++ # master score), as chosen by pacemaker during the last working monitor ++ # on previous master (see pgsql_monitor/_check_locations subs). ++ # To avoid any race condition between the last monitor action on the ++ # previous master and the **real** most up-to-date standby, we ++ # set each standby location during the "pre-promote" action, and stored ++ # them using the "lsn_location" resource attribute. ++ # ++ # The best standby to promote would have the highest known LSN. If the ++ # current resource is not the best one, we need to modify the master ++ # scores accordingly, and abort the current promotion. ++ ocf_log( 'debug', ++ 'pgsql_promote: checking if current node is the best candidate for promotion' ); ++ ++ # Exclude nodes that are known to be unavailable (not in the current ++ # partition) using the "crm_node" command ++ my @active_nodes = split /\s+/ => _get_priv_attr( 'nodes' ); ++ my $node_to_promote = ''; ++ my $ans; ++ my $max_tl; ++ my $max_lsn; ++ my $node_tl; ++ my $node_lsn; ++ my $wal_num; ++ my $wal_off; ++ ++ # Get the "lsn_location" attribute value for the current node, as set ++ # during the "pre-promote" action. ++ # It should be the greatest among the secondary instances. ++ $ans = _get_priv_attr( 'lsn_location' ); ++ ++ if ( $ans eq '' ) { ++ # This should not happen as the "lsn_location" attribute should have ++ # been updated during the "pre-promote" action. ++ ocf_exit_reason( 'Can not get current node LSN location' ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ chomp $ans; ++ ( $max_tl, $max_lsn ) = split /#/, $ans; ++ ++ ocf_log( 'debug', 'pgsql_promote: current node TL#LSN location: %s#%s', ++ $max_tl, $max_lsn ); ++ ++ # Now we compare with the other available nodes. ++ foreach my $node ( @active_nodes ) { ++ # We exclude the current node from the check. ++ next if $node eq $nodename; ++ ++ # Get the "lsn_location" attribute value for the node, as set during ++ # the "pre-promote" action. 
++ # This is implemented as a loop as private attributes are asynchronously ++ # available from other nodes. ++ # see: https://github.com/ClusterLabs/PAF/issues/131 ++ # NOTE: if a node did not set its lsn_location for some reason, this will end ++ # with a timeout and the whole promotion will start again. ++ WAIT_FOR_LSN: { ++ $ans = _get_priv_attr( 'lsn_location', $node ); ++ if ( $ans eq '' ) { ++ ocf_log( 'info', 'pgsql_promote: waiting for LSN from %s', $node ); ++ select( undef, undef, undef, 0.1 ); ++ redo WAIT_FOR_LSN; ++ } ++ } ++ ++ chomp $ans; ++ ( $node_tl, $node_lsn ) = split /#/, $ans; ++ ++ ocf_log( 'debug', ++ 'pgsql_promote: comparing with "%s": TL#LSN is %s#%s', ++ $node, $node_tl, $node_lsn ); ++ ++ # If the node has a higher LSN, select it as a best candidate to ++ # promotion and keep looping to check the TL/LSN of other nodes. ++ if ( $node_tl > $max_tl ++ or ( $node_tl == $max_tl and $node_lsn > $max_lsn ) ++ ) { ++ ocf_log( 'debug', ++ 'pgsql_promote: "%s" is a better candidate to promote (%s#%s > %s#%s)', ++ $node, $node_tl, $node_lsn, $max_tl, $max_lsn ); ++ $node_to_promote = $node; ++ $max_tl = $node_tl; ++ $max_lsn = $node_lsn; ++ } ++ } ++ ++ # If any node has been selected, we adapt the master scores accordingly ++ # and break the current promotion. ++ if ( $node_to_promote ne '' ) { ++ ocf_exit_reason( ++ '%s is the best candidate to promote, aborting current promotion', ++ $node_to_promote ); ++ ++ # Reset current node master score. ++ _set_master_score( '1' ); ++ ++ # Set promotion candidate master score. ++ _set_master_score( '1000', $node_to_promote ); ++ ++ # We fail the promotion to trigger another promotion transition ++ # with the new scores. ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # Else, we will keep on promoting the current node. ++ } ++ ++ unless ( ++ # Promote the instance on the current node. ++ _runas( $PGCTL, '--pgdata', $pgdata, '-w', 'promote' ) == 0 ) ++ { ++ ocf_exit_reason( 'Error during promotion command' ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # The instance promotion is asynchronous, so we need to wait for this ++ # process to complete. ++ while ( pgsql_monitor() != $OCF_RUNNING_MASTER ) { ++ ocf_log( 'info', 'Waiting for the promote to complete' ); ++ sleep 1; ++ } ++ ++ ocf_log( 'info', 'Promote complete' ); ++ ++ return $OCF_SUCCESS; ++} ++ ++# This action is called **before** the actual promotion when a failing master is ++# considered unreclaimable, recoverable or a new master must be promoted ++# (switchover or first start). ++# As every "notify" action, it is executed almost simultaneously on all ++# available nodes. ++sub pgsql_notify_pre_promote { ++ my $rc; ++ my $node_tl; ++ my $node_lsn; ++ my %cdata; ++ my %active_nodes; ++ my $attr_nodes; ++ ++ ocf_log( 'info', 'Promoting instance on node "%s"', ++ $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ); ++ ++ # No need to do an election between slaves if this is recovery of the master ++ if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) { ++ ocf_log( 'warning', 'This is a master recovery!' ); ++ ++ _set_priv_attr( 'recover_master', '1' ) ++ if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename; ++ ++ return $OCF_SUCCESS; ++ } ++ ++ # Environment cleanup! 
++ _delete_priv_attr( 'lsn_location' ); ++ _delete_priv_attr( 'recover_master' ); ++ _delete_priv_attr( 'nodes' ); ++ _delete_priv_attr( 'cancel_switchover' ); ++ ++ # check for the last received entry of WAL from the master if we are ++ # the designated slave to promote ++ if ( _is_switchover( $nodename ) and scalar ++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'promote'} } ++ ) { ++ $rc = _check_switchover(); ++ ++ unless ( $rc == 0 ) { ++ # Shortcut the election process as the switchover will be ++ # canceled ++ _set_priv_attr( 'cancel_switchover', $rc ); ++ return $OCF_SUCCESS; # return code is ignored during notify ++ } ++ ++ # If the sub keeps going, that means the switchover is safe. ++ # Keep going with the election process in case the switchover was ++ # instruct to the wrong node. ++ # FIXME: should we allow a switchover to a lagging slave? ++ } ++ ++ # We need to trigger an election between existing slaves to promote the best ++ # one based on its current LSN location. Each node set a private attribute ++ # "lsn_location" with its TL and LSN location. ++ # ++ # During the following promote action, The designated standby for ++ # promotion use these attributes to check if the instance to be promoted ++ # is the best one, so we can avoid a race condition between the last ++ # successful monitor on the previous master and the current promotion. ++ ++ # As we can not break the transition from a notification action, we check ++ # during the promotion if each node TL and LSN are valid. ++ ++ # Force a checpoint to make sure the controldata shows the very last TL ++ _query( q{ CHECKPOINT }, {} ); ++ %cdata = _get_controldata(); ++ $node_lsn = _get_last_received_lsn( 'in decimal' ); ++ ++ unless ( defined $node_lsn ) { ++ ocf_log( 'warning', 'Unknown current node LSN' ); ++ # Return code are ignored during notifications... ++ return $OCF_SUCCESS; ++ } ++ ++ $node_lsn = "$cdata{'tl'}#$node_lsn"; ++ ++ ocf_log( 'info', 'Current node TL#LSN: %s', $node_lsn ); ++ ++ # Set the "lsn_location" attribute value for this node so we can use it ++ # during the following "promote" action. ++ _set_priv_attr( 'lsn_location', $node_lsn ); ++ ++ ocf_log( 'warning', 'Could not set the current node LSN' ) ++ if $? != 0 ; ++ ++ # If this node is the future master, keep track of the slaves that ++ # received the same notification to compare our LSN with them during ++ # promotion ++ if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) { ++ # Build the list of active nodes: ++ # master + slave + start - stop ++ # FIXME: Deal with rsc started during the same transaction but **after** ++ # the promotion ? ++ $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} }, ++ @{ $OCF_NOTIFY_ENV{'start'} }; ++ $active_nodes{ $_->{'uname'} }-- foreach @{ $OCF_NOTIFY_ENV{'stop'} }; ++ ++ $attr_nodes = join " " ++ => grep { $active_nodes{$_} > 0 } keys %active_nodes; ++ ++ _set_priv_attr( 'nodes', $attr_nodes ); ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++# This action is called after a promote action. ++sub pgsql_notify_post_promote { ++ ++ # We have a new master (or the previous one recovered). ++ # Environment cleanup! ++ _delete_priv_attr( 'lsn_location' ); ++ _delete_priv_attr( 'recover_master' ); ++ _delete_priv_attr( 'nodes' ); ++ _delete_priv_attr( 'cancel_switchover' ); ++ ++ return $OCF_SUCCESS; ++} ++ ++# This is called before a demote occurs. 
++sub pgsql_notify_pre_demote { ++ my $rc; ++ my %cdata; ++ ++ # do nothing if the local node will not be demoted ++ return $OCF_SUCCESS unless scalar ++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'demote'} }; ++ ++ $rc = pgsql_monitor(); ++ ++ # do nothing if this is not a master recovery ++ return $OCF_SUCCESS unless _is_master_recover( $nodename ) ++ and $rc == $OCF_FAILED_MASTER; ++ ++ # in case of master crash, we need to detect if the CRM tries to recover ++ # the master clone. The usual transition is to do: ++ # demote->stop->start->promote ++ # ++ # There are multiple flaws with this transition: ++ # * the 1st and 2nd actions will fail because the instance is in ++ # OCF_FAILED_MASTER step ++ # * the usual start action is dangerous as the instance will start with ++ # a recovery.conf instead of entering a normal recovery process ++ # ++ # To avoid this, we try to start the instance in recovery from here. ++ # If it success, at least it will be demoted correctly with a normal ++ # status. If it fails, it will be catched up in next steps. ++ ++ ocf_log( 'info', 'Trying to start failing master "%s"...', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Either the instance managed to start or it couldn't. ++ # We rely on the pg_ctk '-w' switch to take care of this. If it couldn't ++ # start, this error will be catched up later during the various checks ++ _pg_ctl_start(); ++ ++ %cdata = _get_controldata(); ++ ++ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} ); ++ ++ return $OCF_SUCCESS; ++} ++ ++# This is called before a stop occurs. ++sub pgsql_notify_pre_stop { ++ my $rc; ++ my %cdata; ++ ++ # do nothing if the local node will not be stopped ++ return $OCF_SUCCESS unless scalar ++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'stop'} }; ++ ++ $rc = _controldata_to_ocf(); ++ ++ # do nothing if this is not a slave recovery ++ return $OCF_SUCCESS unless _is_slave_recover( $nodename ) ++ and $rc == $OCF_RUNNING_SLAVE; ++ ++ # in case of slave crash, we need to detect if the CRM tries to recover ++ # the slaveclone. The usual transition is to do: stop->start ++ # ++ # This transition can no twork because the instance is in ++ # OCF_ERR_GENERIC step. So the stop action will fail, leading most ++ # probably to fencing action. ++ # ++ # To avoid this, we try to start the instance in recovery from here. ++ # If it success, at least it will be stopped correctly with a normal ++ # status. If it fails, it will be catched up in next steps. ++ ++ ocf_log( 'info', 'Trying to start failing slave "%s"...', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Either the instance managed to start or it couldn't. ++ # We rely on the pg_ctk '-w' switch to take care of this. If it couldn't ++ # start, this error will be catched up later during the various checks ++ _pg_ctl_start(); ++ ++ %cdata = _get_controldata(); ++ ++ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} ); ++ ++ return $OCF_SUCCESS; ++} ++ ++# Notify type actions, called on all available nodes before (pre) and after ++# (post) other actions, like promote, start, ... 
++# ++sub pgsql_notify { ++ my $type_op; ++ ++ ocf_log( 'debug', "pgsql_notify: environment variables: %s", ++ Data::Dumper->new( [ \%OCF_NOTIFY_ENV ] )->Sortkeys(1)->Terse(1)->Dump ); ++ ++ return unless %OCF_NOTIFY_ENV; ++ ++ $type_op = "$OCF_NOTIFY_ENV{'type'}-$OCF_NOTIFY_ENV{'operation'}"; ++ ++ for ( $type_op ) { ++ if ( /^pre-promote$/ ) { return pgsql_notify_pre_promote() } ++ elsif ( /^post-promote$/ ) { return pgsql_notify_post_promote() } ++ elsif ( /^pre-demote$/ ) { return pgsql_notify_pre_demote() } ++ elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() } ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++# Action used to allow for online modification of resource parameters value. ++# ++sub pgsql_reload { ++ ++ # No action necessary, the action declaration is enough to inform pacemaker ++ # that the modification of any non-unique parameter can be applied without ++ # having to restart the resource. ++ ocf_log( 'info', 'Instance "%s" reloaded', $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ ++} ++ ++############################################################ ++#### MAIN ++ ++exit ocf_meta_data() if $__OCF_ACTION eq 'meta-data'; ++exit ocf_methods() if $__OCF_ACTION eq 'methods'; ++ ++# Avoid "could not change directory" when executing commands as "system-user". ++chdir File::Spec->tmpdir(); ++ ++# mandatory sanity checks ++# check pgdata ++if ( ! -d $pgdata ) { ++ ocf_exit_reason( 'PGDATA "%s" does not exist', $pgdata ); ++ exit $OCF_ERR_ARGS; ++} ++ ++# check datadir ++if ( ! -d $datadir ) { ++ ocf_exit_reason( 'data_directory "%s" does not exist', $datadir ); ++ exit $OCF_ERR_ARGS; ++} ++ ++# Set PostgreSQL version ++$PGVERNUM = _get_pg_version(); ++ ++# Set current node name. ++$nodename = ocf_local_nodename(); ++ ++$exit_code = pgsql_validate_all(); ++ ++exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all'; ++ ++# Run action ++for ( $__OCF_ACTION ) { ++ if ( /^start$/ ) { $exit_code = pgsql_start() } ++ elsif ( /^stop$/ ) { $exit_code = pgsql_stop() } ++ elsif ( /^monitor$/ ) { $exit_code = pgsql_monitor() } ++ elsif ( /^promote$/ ) { $exit_code = pgsql_promote() } ++ elsif ( /^demote$/ ) { $exit_code = pgsql_demote() } ++ elsif ( /^notify$/ ) { $exit_code = pgsql_notify() } ++ elsif ( /^reload$/ ) { $exit_code = pgsql_reload() } ++ else { $exit_code = $OCF_ERR_UNIMPLEMENTED } ++} ++ ++exit $exit_code; ++ ++ ++=head1 EXAMPLE CRM SHELL ++ ++The following is an example configuration for a pgsqlms resource using the ++crm(8) shell: ++ ++ primitive pgsqld pgsqlms \ ++ params pgdata="/var/lib/postgresql/9.6/main" \ ++ bindir="/usr/lib/postgresql/9.6/bin" \ ++ pghost="/var/run/postgresql" \ ++ recovery_template="/etc/postgresql/9.6/main/recovery.conf.pcmk" \ ++ start_opts="-c config_file=/etc/postgresql/9.6/main/postgresql.conf" \ ++ op start timeout=60s \ ++ op stop timeout=60s \ ++ op promote timeout=30s \ ++ op demote timeout=120s \ ++ op monitor interval=15s timeout=10s role="Master" \ ++ op monitor interval=16s timeout=10s role="Slave" \ ++ op notify timeout=60s ++ ++ ms pgsql-ha pgsqld meta notify=true ++ ++ ++=head1 EXAMPLE PCS ++ ++The following is an example configuration for a pgsqlms resource using pcs(8): ++ ++ pcs resource create pgsqld ocf:heartbeat:pgsqlms \ ++ bindir=/usr/pgsql-9.6/bin pgdata=/var/lib/pgsql/9.6/data \ ++ op start timeout=60s \ ++ op stop timeout=60s \ ++ op promote timeout=30s \ ++ op demote timeout=120s \ ++ op monitor interval=15s timeout=10s role="Master" \ ++ op monitor interval=16s timeout=10s role="Slave" \ ++ op 
notify timeout=60s --master notify=true ++ ++=head1 SEE ALSO ++ ++http://clusterlabs.org/ ++ ++=head1 AUTHOR ++ ++Jehan-Guillaume de Rorthais and Mael Rimbault. ++ ++=cut +diff --color -uNr a/paf_LICENSE b/paf_LICENSE +--- a/paf_LICENSE 1970-01-01 01:00:00.000000000 +0100 ++++ b/paf_LICENSE 2021-04-14 09:16:39.083555835 +0200 +@@ -0,0 +1,19 @@ ++Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault. ++ ++Permission to use, copy, modify, and distribute this software and its ++documentation for any purpose, without fee, and without a written agreement ++is hereby granted, provided that the above copyright notice and this ++paragraph and the following two paragraphs appear in all copies. ++ ++IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR ++DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING ++LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS ++DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE ++POSSIBILITY OF SUCH DAMAGE. ++ ++THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES, ++INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY ++AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ++ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO ++PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. ++ +diff --color -uNr a/paf_README.md b/paf_README.md +--- a/paf_README.md 1970-01-01 01:00:00.000000000 +0100 ++++ b/paf_README.md 2021-04-14 09:18:57.450968048 +0200 +@@ -0,0 +1,86 @@ ++# PostgreSQL Automatic Failover ++ ++High-Availibility for Postgres, based on industry references Pacemaker and ++Corosync. ++ ++## Description ++ ++Pacemaker is nowadays the industry reference for High Availability. In the same ++fashion than for Systemd, all Linux distributions moved (or are moving) to this ++unique Pacemaker+Corosync stack, removing all other existing high availability ++stacks (CMAN, RGManager, OpenAIS, ...). It is able to detect failure on various ++services and automatically decide to failover the failing resource to another ++node when possible. ++ ++To be able to manage a specific service resource, Pacemaker interact with it ++through a so-called "Resource Agent". Resource agents must comply to the OCF ++specification which define what they must implement (start, stop, promote, ++etc), how they should behave and inform Pacemaker of their results. ++ ++PostgreSQL Automatic Failover is a new OCF resource Agent dedicated to ++PostgreSQL. Its original wish is to keep a clear limit between the Pacemaker ++administration and the PostgreSQL one, to keep things simple, documented and ++yet powerful. ++ ++Once your PostgreSQL cluster built using internal streaming replication, PAF is ++able to expose to Pacemaker what is the current status of the PostgreSQL ++instance on each node: master, slave, stopped, catching up, etc. Should a ++failure occurs on the master, Pacemaker will try to recover it by default. ++Should the failure be non-recoverable, PAF allows the slaves to be able to ++elect the best of them (the closest one to the old master) and promote it as ++the new master. All of this thanks to the robust, feature-full and most ++importantly experienced project: Pacemaker. ++ ++For information about how to install this agent, see `INSTALL.md`. ++ ++## Setup and requirements ++ ++PAF supports PostgreSQL 9.3 and higher. It has been extensively tested under ++CentOS 6 and 7 in various scenario. 
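A quick way to confirm that an existing standby already satisfies the hot_standby prerequisite listed below is to query it directly. This is only a sketch; the socket directory, port and user are assumptions to adjust for your installation:

    # 't' is expected on a standby, 'f' on the primary
    psql -h /var/run/postgresql -p 5432 -U postgres -Atc "SELECT pg_is_in_recovery()"
    # must report 'on' so the standby accepts read-only connections
    psql -h /var/run/postgresql -p 5432 -U postgres -Atc "SHOW hot_standby"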
++ ++PAF has been written to give to the administrator the maximum control ++over their PostgreSQL configuration and architecture. Thus, you are 100% ++responsible for the master/slave creations and their setup. The agent ++will NOT edit your setup. It only requires you to follow these pre-requisites: ++ ++ * slave __must__ be in hot_standby (accept read-only connections) ; ++ * the following parameters __must__ be configured in the appropriate place : ++ * `standby_mode = on` (for PostgreSQL 11 and before) ++ * `recovery_target_timeline = 'latest'` ++ * `primary_conninfo` wih `application_name` set to the node name as seen ++ in Pacemaker. ++ * these last parameters has been merged inside the instance configuration ++ file with PostgreSQL 12. For PostgreSQL 11 and before, you __must__ ++ provide a `recovery.conf` template file. ++ ++When setting up the resource in Pacemaker, here are the available parameters you ++can set: ++ ++ * `bindir`: location of the PostgreSQL binaries (default: `/usr/bin`) ++ * `pgdata`: location of the PGDATA of your instance (default: ++ `/var/lib/pgsql/data`) ++ * `datadir`: path to the directory set in `data_directory` from your ++ postgresql.conf file. This parameter has same default than PostgreSQL ++ itself: the `pgdata` parameter value. Unless you have a special PostgreSQL ++ setup and you understand this parameter, __ignore it__ ++ * `pghost`: the socket directory or IP address to use to connect to the ++ local instance (default: `/tmp` or `/var/run/postgresql` for DEBIAN) ++ * `pgport`: the port to connect to the local instance (default: `5432`) ++ * `recovery_template`: __only__ for PostgreSQL 11 and before. The local ++ template that will be copied as the `PGDATA/recovery.conf` file. This ++ file must not exist on any node for PostgreSQL 12 and after. ++ (default: `$PGDATA/recovery.conf.pcmk`) ++ * `start_opts`: Additional arguments given to the postgres process on startup. ++ See "postgres --help" for available options. Useful when the postgresql.conf ++ file is not in the data directory (PGDATA), eg.: ++ `-c config_file=/etc/postgresql/9.3/main/postgresql.conf` ++ * `system_user`: the system owner of your instance's process (default: ++ `postgres`) ++ * `maxlag`: maximum lag allowed on a standby before we set a negative master ++ score on it. The calculation is based on the difference between the current ++ xlog location on the master and the write location on the standby. ++ (default: 0, which disables this feature) ++ ++For a demonstration about how to setup a cluster, see ++[http://clusterlabs.github.io/PAF/documentation.html](http://clusterlabs.github.io/PAF/documentation.html). 
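For PostgreSQL 11 and earlier, the `recovery_template` prerequisite above can be satisfied with a small per-node template file. A minimal sketch, assuming the default template location, a node named `srv1` in Pacemaker and a primary reached through the floating IP `192.0.2.50` (all hypothetical values):

    # run on each node, with application_name set to that node's own name
    cat > /var/lib/pgsql/data/recovery.conf.pcmk <<'EOF'
    standby_mode = on
    recovery_target_timeline = 'latest'
    primary_conninfo = 'host=192.0.2.50 port=5432 user=replication application_name=srv1'
    EOF

These are the three settings the agent's `validate-all` action checks for in the template before it will start an instance as a standby.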
++ diff --git a/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch b/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch new file mode 100644 index 0000000..b72f3e3 --- /dev/null +++ b/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch @@ -0,0 +1,29 @@ +From 9a7b47f1838e9d6e3c807e9db5312097adb5c499 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 5 Nov 2021 10:30:49 +0100 +Subject: [PATCH] gcp-ilb/Squid: fix issues detected by CI + +--- + heartbeat/Squid.in | 2 +- + heartbeat/gcp-ilb | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb +index 28484b241..48dc3ac4e 100755 +--- a/heartbeat/gcp-ilb ++++ b/heartbeat/gcp-ilb +@@ -53,12 +53,12 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid" + + + #Validate command for logging +-if $OCF_RESKEY_log_enable = "true"; then ++if [ $OCF_RESKEY_log_enable = "true" ]; then + if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then + logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params" + ocf_log debug "Logging command is: \'$logging_cmd\' " + else +- $OCF_RESKEY_log_enable = "false" ++ OCF_RESKEY_log_enable="false" + ocf_log err "\'$logging_cmd\' is invalid. External logging disabled." + + fi; diff --git a/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch b/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch new file mode 100644 index 0000000..8a7df42 --- /dev/null +++ b/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch @@ -0,0 +1,51 @@ +From 14576f7ca02fb0abff188238ac019e88ab06e878 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 9 Nov 2021 11:49:36 +0100 +Subject: [PATCH] gcp-ilb: only check if log_cmd binary is available if + log_enable is true + +--- + heartbeat/gcp-ilb | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb +index 48dc3ac4e..f84f373b7 100755 +--- a/heartbeat/gcp-ilb ++++ b/heartbeat/gcp-ilb +@@ -37,7 +37,7 @@ if type "socat" > /dev/null 2>&1; then + OCF_RESKEY_cat_default="socat" + else + OCF_RESKEY_cat_default="nc" +-fi; ++fi + + + : ${OCF_RESKEY_cat=${OCF_RESKEY_cat_default}} +@@ -53,7 +53,7 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid" + + + #Validate command for logging +-if [ $OCF_RESKEY_log_enable = "true" ]; then ++if ocf_is_true "$OCF_RESKEY_log_enable"; then + if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then + logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params" + ocf_log debug "Logging command is: \'$logging_cmd\' " +@@ -61,7 +61,7 @@ if [ $OCF_RESKEY_log_enable = "true" ]; then + OCF_RESKEY_log_enable="false" + ocf_log err "\'$logging_cmd\' is invalid. External logging disabled." + +- fi; ++ fi + fi + + +@@ -285,7 +285,8 @@ ilb_stop() { + + ilb_validate() { + check_binary "$OCF_RESKEY_cat" +- check_binary "$OCF_RESKEY_log_cmd" ++ ++ ocf_is_true "$OCF_RESKEY_log_enable" && check_binary "$OCF_RESKEY_log_cmd" + + if ! 
ocf_is_decimal "$OCF_RESKEY_port"; then + ocf_exit_reason "$OCF_RESKEY_port is not a valid port" diff --git a/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch b/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch new file mode 100644 index 0000000..17f26c8 --- /dev/null +++ b/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch @@ -0,0 +1,32 @@ +From 925180da2f41feddc5aac3c249563eb179b34029 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 22 Nov 2021 16:44:48 +0100 +Subject: [PATCH] db2: use -l forever instead of -t nodes -l reboot, as they + conflict with eachother + +--- + heartbeat/db2 | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index 03146a957..fa2a45a5d 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -274,7 +274,7 @@ db2_fal_attrib() { + while read id node member + do + [ "$member" = member -a "$node" != "$me" ] || continue +- crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3" ++ crm_attribute -l forever --node=$node -n $attr -v "$3" + rc=$? + ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node" + [ $rc != 0 ] && break +@@ -282,7 +282,7 @@ db2_fal_attrib() { + ;; + + get) +- crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1 ++ crm_attribute -l forever -n $attr -G --quiet 2>&1 + rc=$? + if [ $rc != 0 ] + then diff --git a/SOURCES/bz2029704-2-db2-fixes.patch b/SOURCES/bz2029704-2-db2-fixes.patch new file mode 100644 index 0000000..c9fe8c3 --- /dev/null +++ b/SOURCES/bz2029704-2-db2-fixes.patch @@ -0,0 +1,32 @@ +From 75eaf06eea8957aa3941823955d1c8fa7933ab1d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 23 Feb 2022 16:32:21 +0100 +Subject: [PATCH] db2: only warn when notify isnt set, and use + ocf_local_nodename() to get node name + +--- + heartbeat/db2 | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index fa2a45a5d..ea24d33fc 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -267,7 +267,7 @@ db2_fal_attrib() { + + case "$2" in + set) +- me=$(uname -n) ++ me=$(ocf_local_nodename) + + # loop over all member nodes and set attribute + crm_node -l | +@@ -284,7 +284,7 @@ db2_fal_attrib() { + get) + crm_attribute -l forever -n $attr -G --quiet 2>&1 + rc=$? +- if [ $rc != 0 ] ++ if ! ocf_is_true "$OCF_RESKEY_CRM_meta_notify" && [ $rc != 0 ] + then + ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?" + fi diff --git a/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch b/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch new file mode 100644 index 0000000..f5ad417 --- /dev/null +++ b/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch @@ -0,0 +1,22 @@ +From c6338011cf9ea69324f44c8c31a4ca2478aab35a Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 7 Dec 2021 08:59:50 +0100 +Subject: [PATCH] podman: remove anonymous volumes + +--- + heartbeat/podman | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index fd901c968..2b73857f1 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -251,7 +251,7 @@ remove_container() + return 0 + fi + ocf_log notice "Cleaning up inactive container, ${CONTAINER}." +- ocf_run podman rm $CONTAINER ++ ocf_run podman rm -v $CONTAINER + rc=$? 
+ if [ $rc -ne 0 ]; then + # due to a podman bug (rhbz#1841485), sometimes a stopped diff --git a/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch b/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch new file mode 100644 index 0000000..53d3299 --- /dev/null +++ b/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch @@ -0,0 +1,43 @@ +From 7c54e4ecda33c90a1046c0688774f5b847ab10fe Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 7 Dec 2021 10:37:24 +0100 +Subject: [PATCH] Route: return OCF_NOT_RUNNING for probe action when interface + or route doesnt exist + +--- + heartbeat/Route | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index 8b390615a..7db41d0ae 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -227,15 +227,6 @@ route_stop() { + } + + route_status() { +- if [ -n "${OCF_RESKEY_device}" ]; then +- # Must check if device exists or is gone. +- # If device is gone, route is also unconfigured. +- ip link show dev ${OCF_RESKEY_device} >/dev/null 2>&1 +- if [ $? -ne 0 ]; then +- # Assume device does not exist, and short-circuit here. +- return $OCF_NOT_RUNNING +- fi +- fi + show_output="$(ip $addr_family route show $(create_route_spec) 2>/dev/null)" + if [ $? -eq 0 ]; then + if [ -n "$show_output" ]; then +@@ -251,7 +242,11 @@ route_status() { + else + # "ip route show" returned an error code. Assume something + # went wrong. +- return $OCF_ERR_GENERIC ++ if ocf_is_probe; then ++ return $OCF_NOT_RUNNING ++ else ++ return $OCF_ERR_GENERIC ++ fi + fi + } + diff --git a/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch b/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch new file mode 100644 index 0000000..34bad14 --- /dev/null +++ b/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch @@ -0,0 +1,41 @@ +From 6d2ed7615614ede093f097189876d0f08553a43e Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Mon, 14 Feb 2022 22:23:39 -0800 +Subject: [PATCH] IPsrcaddr: Add warning about DHCP + +If DHCP is enabled for the interface that serves OCF_RESKEY_ipaddress, +then NetworkManager (and possibly dhclient in systems without NM; +unsure) may later re-add a route that the IPsrcaddr resource replaced. +This may cause the resource to fail or cause other unexpected behavior. + +So far this has been observed with a default route, albeit with an edge +case of a configuration (OCF_RESKEY_ipaddress on a different subnet) +that may not be totally valid. There are likely to be other situations +as well where DHCP can cause conflicts with IPsrcaddr's manual updates +via iproute. The safest option is to use only static configuration for +the involved interface. + +Resolves: RHBZ#1654862 + +Signed-off-by: Reid Wahl +--- + heartbeat/IPsrcaddr | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index ec868409f..fd7b6f68d 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -99,6 +99,12 @@ meta_data() { + + Resource script for IPsrcaddr. It manages the preferred source address + modification. ++ ++Note: DHCP should not be enabled for the interface serving the preferred ++source address. Enabling DHCP may result in unexpected behavior, such as ++the automatic addition of duplicate or conflicting routes. This may ++cause the IPsrcaddr resource to fail, or it may produce undesired ++behavior while the resource continues to run. 
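One way to follow the advice above and keep the involved interface fully static is to switch it to manual addressing, for example with NetworkManager. This is only a sketch; the connection name and addresses are hypothetical and must match your environment:

    # pin the connection to a static configuration so DHCP cannot re-add routes
    nmcli connection modify eth0 ipv4.method manual \
        ipv4.addresses 192.0.2.10/24 ipv4.gateway 192.0.2.1 ipv4.dns 192.0.2.53
    nmcli connection up eth0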
+ + Manages the preferred source address for outgoing IP packets + diff --git a/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch b/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch new file mode 100644 index 0000000..8a4a6fc --- /dev/null +++ b/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch @@ -0,0 +1,49 @@ +From 5a65f66ff803ad7ed15af958cc1efdde4d53dcb7 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 17 Feb 2022 03:53:21 -0800 +Subject: [PATCH] IPsrcaddr: Better error message when no matching route found + +If OCF_RESKEY_destination is not explicitly set and `ip route list` +can't find a route matching the specifications, the NETWORK variable +doesn't get set. This causes a certain failure of the start operation, +because there is no PREFIX argument to `ip route replace` (syntax +error). It may also cause unexpected behavior for stop operations (but +not in all cases). During a monitor, this event can only happen if +something has changed outside the cluster's control, and so is cause +for warning there. + +Exit OCF_ERR_ARGS for start, log debug for probe, log warning for all +other ops. + +Resolves: RHBZ#1654862 + +Signed-off-by: Reid Wahl +--- + heartbeat/IPsrcaddr | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index fd7b6f68d..f0216722d 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -549,6 +549,20 @@ rc=$? + INTERFACE=`echo $findif_out | awk '{print $1}'` + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++ ++ if [ -z "$NETWORK" ]; then ++ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" ++ err_str="$err_str match $ipaddress' failed to find a matching route" ++ ++ if [ "$__OCF_ACTION" = "start" ]; then ++ ocf_exit_reason "$err_str" ++ exit $OCF_ERR_ARGS ++ elif ! ocf_is_probe; then ++ ocf_log warn "$err_str" ++ else ++ ocf_log debug "$err_str" ++ fi ++ fi + else + NETWORK="$OCF_RESKEY_destination" + fi diff --git a/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch b/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch new file mode 100644 index 0000000..337943d --- /dev/null +++ b/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch @@ -0,0 +1,56 @@ +From 0a197f1cd227e768837dff778a0c56fc1085d434 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 21 Feb 2022 13:54:04 +0100 +Subject: [PATCH] IPsrcaddr: fix indentation in better error message code + +--- + heartbeat/IPsrcaddr | 30 +++++++++++++++--------------- + 1 file changed, 15 insertions(+), 15 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index f0216722d..c82adc0e9 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -542,27 +542,27 @@ fi + findif_out=`$FINDIF -C` + rc=$? + [ $rc -ne 0 ] && { +- ocf_exit_reason "[$FINDIF -C] failed" +- exit $rc ++ ocf_exit_reason "[$FINDIF -C] failed" ++ exit $rc + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` + +- if [ -z "$NETWORK" ]; then +- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" +- err_str="$err_str match $ipaddress' failed to find a matching route" +- +- if [ "$__OCF_ACTION" = "start" ]; then +- ocf_exit_reason "$err_str" +- exit $OCF_ERR_ARGS +- elif ! 
ocf_is_probe; then +- ocf_log warn "$err_str" +- else +- ocf_log debug "$err_str" +- fi +- fi ++ if [ -z "$NETWORK" ]; then ++ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" ++ err_str="$err_str match $ipaddress' failed to find a matching route" ++ ++ if [ "$__OCF_ACTION" = "start" ]; then ++ ocf_exit_reason "$err_str" ++ exit $OCF_ERR_ARGS ++ elif ! ocf_is_probe; then ++ ocf_log warn "$err_str" ++ else ++ ocf_log debug "$err_str" ++ fi ++ fi + else + NETWORK="$OCF_RESKEY_destination" + fi diff --git a/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch b/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch new file mode 100644 index 0000000..c099fa5 --- /dev/null +++ b/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch @@ -0,0 +1,117 @@ +From 50a596bfb977b18902dc62b99145bbd1a087690a Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 1 Mar 2022 11:06:07 +0100 +Subject: [PATCH] IPsrcaddr: fixes + +- use findif.sh to detect secondary interfaces +- get metric and proto to update the correct route/update it correctly +- match route using interface to fail when trying to update secondary + interfaces without specifying destination (would update default route + before) +- also use PRIMARY_IP/OPTS during stop-action for default routes (to get + back to the exact routes we started with) +- dont fail during stop-action if route doesnt exist +- use [[:blank:]] for WS to follow POSIX standard (suggested by nrwahl) +--- + heartbeat/IPsrcaddr | 35 +++++++++++++++++++---------------- + 1 file changed, 19 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index c82adc0e9..7dbf65ff5 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -52,6 +52,7 @@ + # Initialization: + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. ${OCF_FUNCTIONS_DIR}/findif.sh + + # Defaults + OCF_RESKEY_ipaddress_default="" +@@ -181,19 +182,21 @@ errorexit() { + # + # where the src clause "src Y.Y.Y.Y" may or may not be present + +-WS="[`echo -en ' \t'`]" ++WS="[[:blank:]]" + OCTET="[0-9]\{1,3\}" + IPADDR="\($OCTET\.\)\{3\}$OCTET" + SRCCLAUSE="src$WS$WS*\($IPADDR\)" + MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)" +-FINDIF=$HA_BIN/findif ++METRICCLAUSE=".*\(metric$WS[^ ]\+\)" ++PROTOCLAUSE=".*\(proto$WS[^ ]\+\)" ++FINDIF=findif + + # findif needs that to be set + export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress + + srca_read() { + # Capture matching route - doublequotes prevent word splitting... +- ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" ++ ROUTE="`$CMDSHOW dev $INTERFACE 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" + + # ... so we can make sure there is only 1 matching route + [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ +@@ -201,7 +204,7 @@ srca_read() { + + # But there might still be no matching route + [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \ +- ! ocf_is_probe && errorexit "no matching route exists" ++ ! 
ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists" + + # Sed out the source ip address if it exists + SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"` +@@ -232,8 +235,8 @@ srca_start() { + rc=$OCF_SUCCESS + ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" + else +- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \ +- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed" + + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + $CMDCHANGE $ROUTE_WO_SRC src $1 || \ +@@ -266,14 +269,11 @@ srca_stop() { + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- OPTS="" +- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then +- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" +- OPTS="proto kernel scope host src $PRIMARY_IP" +- fi ++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" ++ OPTS="proto kernel scope link src $PRIMARY_IP" + +- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \ +- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed" + + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + $CMDCHANGE $ROUTE_WO_SRC || \ +@@ -539,16 +539,19 @@ if [ $rc -ne $OCF_SUCCESS ]; then + esac + fi + +-findif_out=`$FINDIF -C` ++findif_out=`$FINDIF` + rc=$? 
+ [ $rc -ne 0 ] && { +- ocf_exit_reason "[$FINDIF -C] failed" ++ ocf_exit_reason "[$FINDIF] failed" + exit $rc + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` ++LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress` ++METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"` ++[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"` + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then +- NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++ NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'` + + if [ -z "$NETWORK" ]; then + err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" diff --git a/SOURCES/bz2063877-all-agents-use-promotable-terms.patch b/SOURCES/bz2063877-all-agents-use-promotable-terms.patch new file mode 100644 index 0000000..60ae3f0 --- /dev/null +++ b/SOURCES/bz2063877-all-agents-use-promotable-terms.patch @@ -0,0 +1,543 @@ +From 3e469239e8c853725b28a9c6b509152aacc2c5cc Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 13 Jun 2022 11:24:05 +0200 +Subject: [PATCH 1/2] all agents: update to promotable terms + +--- + heartbeat/SAPInstance | 22 +++++++++++----------- + heartbeat/conntrackd.in | 6 +++--- + heartbeat/db2 | 12 ++++++------ + heartbeat/dnsupdate.in | 2 +- + heartbeat/galera.in | 26 +++++++++++++------------- + heartbeat/iface-bridge | 6 +++--- + heartbeat/mariadb.in | 30 +++++++++++++++--------------- + heartbeat/mpathpersist.in | 24 ++++++++++++------------ + heartbeat/mysql | 4 ++-- + heartbeat/mysql-proxy | 2 +- + heartbeat/pgsql | 2 +- + heartbeat/redis.in | 4 ++-- + heartbeat/sg_persist.in | 4 ++-- + 14 files changed, 74 insertions(+), 74 deletions(-) + +diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance +index 016f59aff..e3fe788ae 100755 +--- a/heartbeat/SAPInstance ++++ b/heartbeat/SAPInstance +@@ -25,8 +25,8 @@ + # OCF_RESKEY_AUTOMATIC_RECOVER (optional, automatic startup recovery using cleanipc, default is false) + # OCF_RESKEY_MONITOR_SERVICES (optional, default is to monitor critical services only) + # OCF_RESKEY_SHUTDOWN_METHOD (optional, defaults to NORMAL, KILL: terminate the SAP instance with OS commands - faster, at your own risk) +-# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Master/Slave configuration) +-# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Master/Slave configuration) ++# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Promotable configuration) ++# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Promotable configuration) + # OCF_RESKEY_PRE_START_USEREXIT (optional, lists a script which can be executed before the resource is started) + # OCF_RESKEY_POST_START_USEREXIT (optional, lists a script which can be executed after the resource is started) + # OCF_RESKEY_PRE_STOP_USEREXIT (optional, lists a script which can be executed before the resource is stopped) +@@ -92,11 +92,11 @@ sapinstance_usage() { + + $0 manages a SAP Instance as an HA resource. 
+ +- The 'start' operation starts the instance or the ERS instance in a Master/Slave configuration ++ The 'start' operation starts the instance or the ERS instance in a Promotable configuration + The 'stop' operation stops the instance + The 'status' operation reports whether the instance is running + The 'monitor' operation reports whether the instance seems to be working +- The 'promote' operation starts the primary instance in a Master/Slave configuration ++ The 'promote' operation starts the primary instance in a Promotable configuration + The 'demote' operation stops the primary instance and starts the ERS instance + The 'reload' operation allows changed parameters (non-unique only) without restarting the service + The 'notify' operation always returns SUCCESS +@@ -201,11 +201,11 @@ You may specify multiple services separated by a | (pipe) sign in this parameter + + + +- Only used in a Master/Slave resource configuration: ++ Only used in a Promotable resource configuration: + The full qualified SAP enqueue replication instance name. e.g. P01_ERS02_sapp01ers. Usually this is the name of the SAP instance profile. +-The enqueue replication instance must be installed, before you want to configure a master-slave cluster resource. ++The enqueue replication instance must be installed, before you want to configure a promotable cluster resource. + +-The master-slave configuration in the cluster must use this properties: ++The promotable configuration in the cluster must use this properties: + clone_max = 2 + clone_node_max = 1 + master_node_max = 1 +@@ -215,7 +215,7 @@ master_max = 1 + + + +- Only used in a Master/Slave resource configuration: ++ Only used in a Promotable resource configuration: + The parameter ERS_InstanceName must also be set in this configuration. + The name of the SAP START profile. Specify this parameter, if you have changed the name of the SAP START profile after the default SAP installation. As SAP release 7.10 does not have a START profile anymore, you need to specify the Instance Profile than. + +@@ -243,7 +243,7 @@ The name of the SAP START profile. Specify this parameter, if you have changed t + + + +- Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to ++ Only used for ASCS/ERS SAP Netweaver installations without implementing a promotable resource to + allow the ASCS to 'find' the ERS running on another cluster node after a resource failure. This parameter should be set + to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This includes also + systems for NetWeaver less than 7.40, if you like to implement the NW-HA-CLU-740 scenario. +@@ -266,8 +266,8 @@ The name of the SAP START profile. 
Specify this parameter, if you have changed t + + + +- +- ++ ++ + + + +diff --git a/heartbeat/conntrackd.in b/heartbeat/conntrackd.in +index f115250d6..1c2ee955b 100644 +--- a/heartbeat/conntrackd.in ++++ b/heartbeat/conntrackd.in +@@ -50,7 +50,7 @@ meta_data() { + 1.0 + + +-Master/Slave OCF Resource Agent for conntrackd ++Promotable OCF Resource Agent for conntrackd + + + This resource agent manages conntrackd +@@ -81,8 +81,8 @@ For example "/packages/conntrackd-0.9.14/etc/conntrackd/conntrackd.conf" + + +- +- ++ ++ + + + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index 4a4b2f477..620b89583 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -3,7 +3,7 @@ + # db2 + # + # Resource agent that manages a DB2 LUW database in Standard role +-# or HADR configuration in master/slave configuration. ++# or HADR configuration in promotable configuration. + # Multi partition is supported as well. + # + # Copyright (c) 2011 Holger Teutsch +@@ -61,7 +61,7 @@ cat < + 1.0 + +-Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in master/slave configuration. Multiple partitions are supported. ++Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in promotable configuration. Multiple partitions are supported. + + Standard mode: + +@@ -71,8 +71,8 @@ Configure each partition as a separate primitive resource. + HADR mode: + + A single database in HADR configuration is made highly available by automating takeover operations. +-Configure a master / slave resource with notifications enabled and an +-additional monitoring operation with role "Master". ++Configure a promotable resource with notifications enabled and an ++additional monitoring operation with role "Promoted". + + In case of HADR be very deliberate in specifying intervals/timeouts. The detection of a failure including promote must complete within HADR_PEER_WINDOW. + +@@ -84,7 +84,7 @@ In addition to honoring requirements for crash recovery etc. for your specific d + + For further information and examples consult http://www.linux-ha.org/wiki/db2_(resource_agent) + +-Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as master/slave configuration. Multiple partitions are supported. ++Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as promotable configuration. Multiple partitions are supported. + + + +@@ -125,7 +125,7 @@ The number of the partition (DBPARTITIONNUM) to be managed. + + + +- ++ + + + +diff --git a/heartbeat/dnsupdate.in b/heartbeat/dnsupdate.in +index 35b7c99bb..b54822cd8 100755 +--- a/heartbeat/dnsupdate.in ++++ b/heartbeat/dnsupdate.in +@@ -119,7 +119,7 @@ the exact syntax. + + + Which DNS server to send these updates for. When no +-server is provided, this defaults to the master server ++server is provided, this defaults to the promoted server + for the correct zone. + + DNS server to contact +diff --git a/heartbeat/galera.in b/heartbeat/galera.in +index c363eb254..546b1a853 100755 +--- a/heartbeat/galera.in ++++ b/heartbeat/galera.in +@@ -26,31 +26,31 @@ + ## + # README. + # +-# This agent only supports being configured as a multistate Master ++# This agent only supports being configured as a multistate Promoted + # resource. 
+ # +-# Slave vs Master role: ++# Unpromoted vs Promoted role: + # +-# During the 'Slave' role, galera instances are in read-only mode and ++# During the 'Unpromoted' role, galera instances are in read-only mode and + # will not attempt to connect to the cluster. This role exists only as + # a means to determine which galera instance is the most up-to-date. The + # most up-to-date node will be used to bootstrap a galera cluster that + # has no current members. + # +-# The galera instances will only begin to be promoted to the Master role ++# The galera instances will only begin to be promoted to the Promoted role + # once all the nodes in the 'wsrep_cluster_address' connection address + # have entered read-only mode. At that point the node containing the +-# database that is most current will be promoted to Master. Once the first +-# Master instance bootstraps the galera cluster, the other nodes will be +-# promoted to Master as well. ++# database that is most current will be promoted to Promoted. Once the first ++# Promoted instance bootstraps the galera cluster, the other nodes will be ++# promoted to Promoted as well. + # + # Example: Create a galera cluster using nodes rhel7-node1 rhel7-node2 rhel7-node3 + # + # pcs resource create db galera enable_creation=true \ +-# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta master-max=3 --master ++# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta promoted-max=3 --promoted + # + # By setting the 'enable_creation' option, the database will be automatically +-# generated at startup. The meta attribute 'master-max=3' means that all 3 ++# generated at startup. The meta attribute 'promoted-max=3' means that all 3 + # nodes listed in the wsrep_cluster_address list will be allowed to connect + # to the galera cluster and perform replication. + # +@@ -114,8 +114,8 @@ The 'start' operation starts the database. + The 'stop' operation stops the database. + The 'status' operation reports whether the database is running + The 'monitor' operation reports whether the database seems to be working +-The 'promote' operation makes this mysql server run as master +-The 'demote' operation makes this mysql server run as slave ++The 'promote' operation makes this mysql server run as promoted ++The 'demote' operation makes this mysql server run as unpromoted + The 'validate-all' operation reports whether the parameters are valid + + UEND +@@ -298,8 +298,8 @@ Use it with caution! (and fencing) + + + +- +- ++ ++ + + + +diff --git a/heartbeat/iface-bridge b/heartbeat/iface-bridge +index 75d5371dd..a4e50adb9 100755 +--- a/heartbeat/iface-bridge ++++ b/heartbeat/iface-bridge +@@ -211,7 +211,7 @@ bridge_meta_data() { + + Set the port cost. This is a dimensionless metric. + A list of port/cost can be specified using the following +- format: slave cost slave cost. ++ format: unpromoted cost unpromoted cost. + Example: eth0 100 eth1 1000 + + +@@ -228,7 +228,7 @@ bridge_meta_data() { + This metric is used in the designated port and root port + selection algorithms. + A list of port/priority can be specified using the following +- format: slave cost slave cost. ++ format: unpromoted cost unpromoted cost. + Example: eth0 10 eth1 60 + + +@@ -262,7 +262,7 @@ bridge_meta_data() { + Enable or disable a port from the multicast router. + Kernel enables all port by default. + A list of port can be specified using the following +- format: slave 0|1 slave 0|1. ++ format: unpromoted 0|1 unpromoted 0|1. 
+ Example: eth0 1 eth1 0 + + +diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in +index 39ad191bb..5a39ccb66 100644 +--- a/heartbeat/mariadb.in ++++ b/heartbeat/mariadb.in +@@ -3,7 +3,7 @@ + # + # MariaDB + # +-# Description: Manages a MariaDB Master/Slave database as Linux-HA resource ++# Description: Manages a MariaDB Promotable database as Linux-HA resource + # + # Authors: Alan Robertson: DB2 Script + # Jakub Janczak: rewrite as MySQL +@@ -61,8 +61,8 @@ The 'start' operation starts the database. + The 'stop' operation stops the database. + The 'status' operation reports whether the database is running + The 'monitor' operation reports whether the database seems to be working +-The 'promote' operation makes this mysql server run as master +-The 'demote' operation makes this mysql server run as slave ++The 'promote' operation makes this mysql server run as promoted ++The 'demote' operation makes this mysql server run as unpromoted + The 'validate-all' operation reports whether the parameters are valid + + UEND +@@ -78,20 +78,20 @@ meta_data() { + + Resource script for MariaDB. + +-Manages a complete master/slave replication setup with GTID, for simpler ++Manages a complete promotable replication setup with GTID, for simpler + uses look at the mysql resource agent which supports older replication + forms which mysql and mariadb have in common. + + The resource must be setup to use notifications. Set 'notify=true' in the metadata +-attributes when defining a MariaDB master/slave instance. ++attributes when defining a MariaDB promotable instance. + +-The default behavior is to use uname -n values in the change master to command. ++The default behavior is to use uname -n values in the change promoted to command. + Other IPs can be specified manually by adding a node attribute + \${INSTANCE_ATTR_NAME}_mysql_master_IP giving the IP to use for replication. + For example, if the mariadb primitive you are using is p_mariadb, the + attribute to set will be p_mariadb_mysql_master_IP. + +-Manages a MariaDB master/slave instance ++Manages a MariaDB promotable instance + + + +@@ -154,7 +154,7 @@ The logfile to be used for mysqld. + + All node names of nodes that will execute mariadb. + Please separate each node name with a space. +-This is required for the master selection to function. ++This is required for the promoted selection to function. + + node list + +@@ -220,11 +220,11 @@ Additional parameters which are passed to the mysqld on startup. + + + MariaDB replication user. This user is used for starting and stopping +-MariaDB replication, for setting and resetting the master host, and for ++MariaDB replication, for setting and resetting the promoted host, and for + setting and unsetting read-only mode. Because of that, this user must + have SUPER, REPLICATION SLAVE, REPLICATION CLIENT, PROCESS and RELOAD + privileges on all nodes within the cluster. Mandatory if you define a +-master-slave resource. ++promotable resource. + + MariaDB replication user + +@@ -232,8 +232,8 @@ master-slave resource. + + + +-MariaDB replication password. Used for replication client and slave. +-Mandatory if you define a master-slave resource. ++MariaDB replication password. Used for replication client and unpromoted. ++Mandatory if you define a promotable resource. + + MariaDB replication user password + +@@ -241,7 +241,7 @@ Mandatory if you define a master-slave resource. + + + +-The port on which the Master MariaDB instance is listening. ++The port on which the Promoted MariaDB instance is listening. 
+ + MariaDB replication port + +@@ -254,8 +254,8 @@ The port on which the Master MariaDB instance is listening. + + + +- +- ++ ++ + + + +diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in +index fcf1b3a4b..e47fef4bd 100644 +--- a/heartbeat/mpathpersist.in ++++ b/heartbeat/mpathpersist.in +@@ -80,9 +80,9 @@ meta_data() { + + This resource agent manages SCSI persistent reservations on multipath devices. + "mpathpersist" from multipath-tools is used, please see its documentation. +-Should be used as multistate (Master/Slave) resource +-Slave registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list. +-Master reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter. ++Should be used as multistate (Promotable) resource ++Unpromoted registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list. ++Promoted reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter. + Please see man sg_persist(8) and mpathpersist(8) for reservation_type details. + + Manages SCSI persistent reservations on multipath devices +@@ -132,7 +132,7 @@ reservation type + master_score_base value + "master_score_base" value is used in "master_score" calculation: + master_score = master_score_base + master_score_dev_factor * working_devs +-if set to bigger value in mpathpersist resource configuration on some node, this node will be "preferred" for master role. ++if set to bigger value in mpathpersist resource configuration on some node, this node will be "preferred" for promoted role. + + base master_score value + +@@ -140,9 +140,9 @@ if set to bigger value in mpathpersist resource configuration on some node, this + + + +-Working device factor in master_score calculation ++Working device factor in promoted calculation + each "working" device provides additional value to "master_score", +-so the node that sees more devices will be preferred for the "Master"-role ++so the node that sees more devices will be preferred for the "Promoted"-role + Setting it to 0 will disable this behavior. + + working device factor in master_score calculation +@@ -151,10 +151,10 @@ Setting it to 0 will disable this behavior. + + + +-master/slave decreases/increases its master_score after delay of "master_score_delay" seconds +-so if some device gets inaccessible, the slave decreases its master_score first and the resource will no be watched +-and after this device reappears again the master increases its master_score first +-this can work only if the master_score_delay is bigger then monitor interval on both master and slave ++promoted/unpromoted decreases/increases its master_score after delay of "master_score_delay" seconds ++so if some device gets inaccessible, the unpromoted decreases its promoted first and the resource will no be watched ++and after this device reappears again the promoted increases its master_score first ++this can work only if the master_score_delay is bigger then monitor interval on both promoted and unpromoted + Setting it to 0 will disable this behavior. + + master_score decrease/increase delay time +@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior. 
+ + + +- +- ++ ++ + + + +diff --git a/heartbeat/mysql b/heartbeat/mysql +index 720de8c1a..aec44fe5e 100755 +--- a/heartbeat/mysql ++++ b/heartbeat/mysql +@@ -321,8 +321,8 @@ whether a node is usable for clients to read from. + + + +- +- ++ ++ + + + +diff --git a/heartbeat/mysql-proxy b/heartbeat/mysql-proxy +index e34396d9a..fdf2fa230 100755 +--- a/heartbeat/mysql-proxy ++++ b/heartbeat/mysql-proxy +@@ -162,7 +162,7 @@ Address:port of the remote back-end servers (default: 127.0.0.1:3306). + + + +-Address:port of the remote (read only) slave-server (default: ). ++Address:port of the remote (read only) unpromoted-server (default: ). + + MySql Proxy read only back-end servers + +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index e3a39038f..94aceb324 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -458,7 +458,7 @@ wal receiver is not running in the master and the attribute shows status as + + + +- ++ + + + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 7f886c7ea..6429477e1 100755 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -220,8 +220,8 @@ is in use. + + + +- +- ++ ++ + + + +diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in +index 678762f40..0497cc469 100644 +--- a/heartbeat/sg_persist.in ++++ b/heartbeat/sg_persist.in +@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior. + + + +- +- ++ ++ + + + + +From 14e5cb71e3749d311745f110f90cc1139f9cedaf Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 16 Jun 2022 15:54:39 +0200 +Subject: [PATCH 2/2] metadata: update to promoted roles + +--- + heartbeat/metadata.rng | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/metadata.rng b/heartbeat/metadata.rng +index 3dd735547..909efc284 100644 +--- a/heartbeat/metadata.rng ++++ b/heartbeat/metadata.rng +@@ -85,8 +85,8 @@ + + + +- Master +- Slave ++ Promoted ++ Unpromoted + + + diff --git a/SOURCES/bz2065125-LVM-activate-fix-fence-issue.patch b/SOURCES/bz2065125-LVM-activate-fix-fence-issue.patch new file mode 100644 index 0000000..03727c8 --- /dev/null +++ b/SOURCES/bz2065125-LVM-activate-fix-fence-issue.patch @@ -0,0 +1,102 @@ +From e651576c1b5c1ffbe0fd1b78f209be9a3f9764e7 Mon Sep 17 00:00:00 2001 +From: XingWei-Liu +Date: Thu, 10 Mar 2022 10:38:11 +0800 +Subject: [PATCH 1/4] change lvm_status return value from ocf_not_running to + ocf_err_generic + +--- + heartbeat/LVM-activate | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index aed672ea3..0aef76706 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -790,7 +790,7 @@ lvm_status() { + fi + + if [ $dm_count -eq 0 ]; then +- return $OCF_NOT_RUNNING ++ return $OCF_ERR_GENERIC + fi + + case "$OCF_CHECK_LEVEL" in + +From 540ae56436a4f9547bb17aa206fe0e8c7a7fea87 Mon Sep 17 00:00:00 2001 +From: XingWei-Liu +Date: Thu, 10 Mar 2022 16:44:25 +0800 +Subject: [PATCH 2/4] add if ocf_is_probe in monitor func + +--- + heartbeat/LVM-activate | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 0aef76706..c86606637 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -790,7 +790,11 @@ lvm_status() { + fi + + if [ $dm_count -eq 0 ]; then +- return $OCF_ERR_GENERIC ++ if ocf_is_probe ;then ++ return $OCF_NOT_RUNNING ++ else ++ return $OCF_ERR_GENERIC ++ fi + fi + + case "$OCF_CHECK_LEVEL" in + +From ae3f35d4f671f3288034a257c6dd8eff9a83447a Mon Sep 17 00:00:00 2001 +From: XingWei-Liu 
+Date: Thu, 10 Mar 2022 16:50:04 +0800 +Subject: [PATCH 3/4] add if ocf_is_probe in monitor func + +--- + heartbeat/LVM-activate | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index c86606637..f345f73a9 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -791,9 +791,9 @@ lvm_status() { + + if [ $dm_count -eq 0 ]; then + if ocf_is_probe ;then +- return $OCF_NOT_RUNNING +- else + return $OCF_ERR_GENERIC ++ else ++ return $OCF_NOT_RUNNING + fi + fi + + +From 1072c0490ef936a1a7dfd8411da434dce1569457 Mon Sep 17 00:00:00 2001 +From: XingWei-Liu +Date: Thu, 10 Mar 2022 18:10:21 +0800 +Subject: [PATCH 4/4] reverse return value in monitor func + +--- + heartbeat/LVM-activate | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index f345f73a9..c86606637 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -791,9 +791,9 @@ lvm_status() { + + if [ $dm_count -eq 0 ]; then + if ocf_is_probe ;then +- return $OCF_ERR_GENERIC +- else + return $OCF_NOT_RUNNING ++ else ++ return $OCF_ERR_GENERIC + fi + fi + diff --git a/SOURCES/bz2065138-IPaddr2-enable-more-control-for-IPv6-addresses.patch b/SOURCES/bz2065138-IPaddr2-enable-more-control-for-IPv6-addresses.patch new file mode 100644 index 0000000..5118514 --- /dev/null +++ b/SOURCES/bz2065138-IPaddr2-enable-more-control-for-IPv6-addresses.patch @@ -0,0 +1,312 @@ +From fd1d6426a2d05f521207c305d10b49fedd92c2df Mon Sep 17 00:00:00 2001 +From: Petr Pavlu +Date: Mon, 28 Feb 2022 09:27:42 +0100 +Subject: [PATCH 1/4] IPaddr2: Allow to disable Duplicate Address Detection for + IPv6 + +"Starting" an IPv6 address with IPaddr2 involves performing Duplicate +Address Detection which typically takes at least 1000 ms. Allow the user +to disable DAD if they can guarantee that the configured address is not +duplicate and they wish to start the resource faster. +--- + heartbeat/IPaddr2 | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 735dd7779..650392b70 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -88,6 +88,7 @@ OCF_RESKEY_arp_sender_default="" + OCF_RESKEY_send_arp_opts_default="" + OCF_RESKEY_flush_routes_default="false" + OCF_RESKEY_run_arping_default=false ++OCF_RESKEY_nodad_default=false + OCF_RESKEY_noprefixroute_default="false" + OCF_RESKEY_preferred_lft_default="forever" + OCF_RESKEY_network_namespace_default="" +@@ -110,6 +111,7 @@ OCF_RESKEY_network_namespace_default="" + : ${OCF_RESKEY_send_arp_opts=${OCF_RESKEY_send_arp_opts_default}} + : ${OCF_RESKEY_flush_routes=${OCF_RESKEY_flush_routes_default}} + : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}} ++: ${OCF_RESKEY_nodad=${OCF_RESKEY_nodad_default}} + : ${OCF_RESKEY_noprefixroute=${OCF_RESKEY_noprefixroute_default}} + : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}} + : ${OCF_RESKEY_network_namespace=${OCF_RESKEY_network_namespace_default}} +@@ -391,6 +393,14 @@ Whether or not to run arping for IPv4 collision detection check. + + + ++ ++ ++For IPv6, do not perform Duplicate Address Detection when adding the address. ++ ++Use nodad flag ++ ++ ++ + + + Use noprefixroute flag (see 'man ip-address'). 
+@@ -662,6 +672,11 @@ add_interface () { + msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface" + fi + ++ if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then ++ cmd="$cmd nodad" ++ msg="${msg} (with nodad)" ++ fi ++ + if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then + cmd="$cmd noprefixroute" + msg="${msg} (with noprefixroute)" + +From f4a9e3281d48c5d37f5df593d014706c46ddb3a7 Mon Sep 17 00:00:00 2001 +From: Petr Pavlu +Date: Mon, 7 Mar 2022 17:21:59 +0100 +Subject: [PATCH 2/4] IPaddr2: Allow to send IPv6 Neighbor Advertisements in + background + +"Starting" an IPv6 address with IPaddr2 involves sending Neighbor +Advertisement packets to inform neighboring machines about the new +IP+MAC translation. By default, 5x packets with 200 ms sleep after each +are sent which delays the start by 1000 ms. Allow the user to run this +operation in background, similarly as is possible with GARP for IPv4. +--- + heartbeat/IPaddr2 | 33 +++++++++++++++++++++++++++++---- + 1 file changed, 29 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 650392b70..e243a642d 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -83,7 +83,7 @@ OCF_RESKEY_unique_clone_address_default=false + OCF_RESKEY_arp_interval_default=200 + OCF_RESKEY_arp_count_default=5 + OCF_RESKEY_arp_count_refresh_default=0 +-OCF_RESKEY_arp_bg_default=true ++OCF_RESKEY_arp_bg_default="" + OCF_RESKEY_arp_sender_default="" + OCF_RESKEY_send_arp_opts_default="" + OCF_RESKEY_flush_routes_default="false" +@@ -336,9 +336,10 @@ situations. + + + +-Whether or not to send the ARP packets in the background. ++Whether or not to send the ARP (IPv4) or NA (IPv6) packets in the background. ++The default is true for IPv4 and false for IPv6. + +-ARP from background ++ARP/NA from background + + + +@@ -507,6 +508,9 @@ ip_init() { + ocf_exit_reason "IPv4 does not support lvs_ipv6_addrlabel" + exit $OCF_ERR_CONFIGURED + fi ++ if [ -z "$OCF_RESKEY_arp_bg" ]; then ++ OCF_RESKEY_arp_bg=true ++ fi + else + FAMILY=inet6 + # address sanitization defined in RFC5952 +@@ -527,6 +531,9 @@ ip_init() { + exit $OCF_ERR_CONFIGURED + fi + fi ++ if [ -z "$OCF_RESKEY_arp_bg" ]; then ++ OCF_RESKEY_arp_bg=false ++ fi + fi + + # support nic:iflabel format in nic parameter +@@ -893,6 +900,20 @@ run_arp_sender() { + fi + } + ++log_send_ua() { ++ local cmdline ++ local output ++ local rc ++ ++ cmdline="$@" ++ output=$($cmdline 2>&1) ++ rc=$? ++ if [ $rc -ne 0 ] ; then ++ ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements: rc=$rc" ++ fi ++ ocf_log info "$output" ++ return $rc ++} + + # + # Run send_ua to note send ICMPv6 Unsolicited Neighbor Advertisements. +@@ -930,7 +951,11 @@ run_send_ua() { + + ARGS="-i $OCF_RESKEY_arp_interval -c $OCF_RESKEY_arp_count $OCF_RESKEY_ip $NETMASK $NIC" + ocf_log info "$SENDUA $ARGS" +- $SENDUA $ARGS || ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements." ++ if ocf_is_true $OCF_RESKEY_arp_bg; then ++ log_send_ua $SENDUA $ARGS & ++ else ++ log_send_ua $SENDUA $ARGS ++ fi + } + + # Do we already serve this IP address on the given $NIC? + +From c8afb43012c264f3ee24013a92b2a2f3566db2fd Mon Sep 17 00:00:00 2001 +From: Petr Pavlu +Date: Tue, 8 Mar 2022 12:35:56 +0100 +Subject: [PATCH 3/4] IPaddr2: Log 'ip addr add' options together + +Change the log message in add_interface() from +"Adding ... (with ) (with )" +to +"Adding ... (with )". 
+--- + heartbeat/IPaddr2 | 19 ++++++++++--------- + 1 file changed, 10 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index e243a642d..dca1b6f5b 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -651,7 +651,7 @@ delete_interface () { + # Add an interface + # + add_interface () { +- local cmd msg ipaddr netmask broadcast iface label ++ local cmd msg extra_opts ipaddr netmask broadcast iface label + + ipaddr="$1" + netmask="$2" +@@ -679,23 +679,24 @@ add_interface () { + msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface" + fi + ++ extra_opts="" + if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then +- cmd="$cmd nodad" +- msg="${msg} (with nodad)" ++ extra_opts="$extra_opts nodad" + fi + + if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then +- cmd="$cmd noprefixroute" +- msg="${msg} (with noprefixroute)" ++ extra_opts="$extra_opts noprefixroute" + fi + + if [ ! -z "$label" ]; then +- cmd="$cmd label $label" +- msg="${msg} (with label $label)" ++ extra_opts="$extra_opts label $label" + fi + if [ "$FAMILY" = "inet6" ] ;then +- cmd="$cmd preferred_lft $OCF_RESKEY_preferred_lft" +- msg="${msg} (with preferred_lft $OCF_RESKEY_preferred_lft)" ++ extra_opts="$extra_opts preferred_lft $OCF_RESKEY_preferred_lft" ++ fi ++ if [ -n "$extra_opts" ]; then ++ cmd="$cmd$extra_opts" ++ msg="$msg (with$extra_opts)" + fi + + ocf_log info "$msg" + +From cb4d52ead694718282a40eab24e04b6d85bcc802 Mon Sep 17 00:00:00 2001 +From: Petr Pavlu +Date: Mon, 7 Mar 2022 17:25:02 +0100 +Subject: [PATCH 4/4] IPaddr2: Clarify behavior of 'arp_*' parameters for IPv4 + and IPv6 + +* Mention that 'arp_*' parameters are shared by the IPv4 and IPv6 code. +* Clarify description of these parameters and mark which of them apply + only to IPv4. +--- + heartbeat/IPaddr2 | 26 +++++++++++++++++--------- + 1 file changed, 17 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index dca1b6f5b..97a7431a2 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -157,6 +157,12 @@ and/or clone-max < number of nodes. In case of node failure, + clone instances need to be re-allocated on surviving nodes. + This would not be possible if there is already an instance + on those nodes, and clone-node-max=1 (which is the default). ++ ++When the specified IP address gets assigned to a respective interface, the ++resource agent sends unsolicited ARP (Address Resolution Protocol, IPv4) or NA ++(Neighbor Advertisement, IPv6) packets to inform neighboring machines about the ++change. This functionality is controlled for both IPv4 and IPv6 by shared ++'arp_*' parameters. + + + Manages virtual IPv4 and IPv6 addresses (Linux specific version) +@@ -306,28 +312,30 @@ a unique address to manage + + + +-Specify the interval between unsolicited ARP packets in milliseconds. ++Specify the interval between unsolicited ARP (IPv4) or NA (IPv6) packets in ++milliseconds. + + This parameter is deprecated and used for the backward compatibility only. + It is effective only for the send_arp binary which is built with libnet, + and send_ua for IPv6. It has no effect for other arp_sender. + +-ARP packet interval in ms (deprecated) ++ARP/NA packet interval in ms (deprecated) + + + + + +-Number of unsolicited ARP packets to send at resource initialization. ++Number of unsolicited ARP (IPv4) or NA (IPv6) packets to send at resource ++initialization. 
+ +-ARP packet count sent during initialization ++ARP/NA packet count sent during initialization + + + + + +-Number of unsolicited ARP packets to send during resource monitoring. Doing +-so helps mitigate issues of stuck ARP caches resulting from split-brain ++For IPv4, number of unsolicited ARP packets to send during resource monitoring. ++Doing so helps mitigate issues of stuck ARP caches resulting from split-brain + situations. + + ARP packet count sent during monitoring +@@ -345,7 +353,7 @@ The default is true for IPv4 and false for IPv6. + + + +-The program to send ARP packets with on start. Available options are: ++For IPv4, the program to send ARP packets with on start. Available options are: + - send_arp: default + - ipoibarping: default for infiniband interfaces if ipoibarping is available + - iputils_arping: use arping in iputils package +@@ -357,7 +365,7 @@ The program to send ARP packets with on start. Available options are: + + + +-Extra options to pass to the arp_sender program. ++For IPv4, extra options to pass to the arp_sender program. + Available options are vary depending on which arp_sender is used. + + A typical use case is specifying '-A' for iputils_arping to use +@@ -388,7 +396,7 @@ IP address goes away. + + + +-Whether or not to run arping for IPv4 collision detection check. ++For IPv4, whether or not to run arping for collision detection check. + + Run arping for IPv4 collision detection check + diff --git a/SOURCES/bz2069270-corosync-qnetd-new-ra.patch b/SOURCES/bz2069270-corosync-qnetd-new-ra.patch new file mode 100644 index 0000000..fdaa6bf --- /dev/null +++ b/SOURCES/bz2069270-corosync-qnetd-new-ra.patch @@ -0,0 +1,401 @@ +From d59a000da2766476538bb82d1889f5c0f3882f9f Mon Sep 17 00:00:00 2001 +From: Jan Friesse +Date: Wed, 2 Mar 2022 18:43:31 +0100 +Subject: [PATCH] corosync-qnetd: Add resource agent + +Mostly for better monitor operation using corosync-qnetd-tool. As qnetd +is (almost) stateless only directory which has to be copied (once) +across the nodes is nss db directory (usually +/etc/corosync/qnetd/nssdb). + +Signed-off-by: Jan Friesse +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/corosync-qnetd | 353 +++++++++++++++++++++++++++++++++++++++ + 3 files changed, 355 insertions(+) + create mode 100755 heartbeat/corosync-qnetd + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 1093717fe..013aa392d 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -127,6 +127,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_azure-lb.7 \ + ocf_heartbeat_clvm.7 \ + ocf_heartbeat_conntrackd.7 \ ++ ocf_heartbeat_corosync-qnetd.7 \ + ocf_heartbeat_crypt.7 \ + ocf_heartbeat_db2.7 \ + ocf_heartbeat_dhcpd.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 67b400679..38154e2da 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -101,6 +101,7 @@ ocf_SCRIPTS = AoEtarget \ + azure-lb \ + clvm \ + conntrackd \ ++ corosync-qnetd \ + crypt \ + db2 \ + dhcpd \ +diff --git a/heartbeat/corosync-qnetd b/heartbeat/corosync-qnetd +new file mode 100755 +index 000000000..6b9777711 +--- /dev/null ++++ b/heartbeat/corosync-qnetd +@@ -0,0 +1,353 @@ ++#!/bin/sh ++# ++# Copyright (C) 2022 Red Hat, Inc. All rights reserved. ++# ++# Authors: Jan Friesse ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. 
++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++# Initialization: ++: "${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}" ++. "${OCF_FUNCTIONS_DIR}/ocf-shellfuncs" ++ ++# Use runuser if available for SELinux. ++if [ -x "/sbin/runuser" ]; then ++ SU="runuser" ++else ++ SU="su" ++fi ++ ++# Attempt to detect a default binary ++OCF_RESKEY_binary_default=$(which corosync-qnetd 2> /dev/null) ++if [ "${OCF_RESKEY_binary_default}" = "" ]; then ++ OCF_RESKEY_binary_default="/usr/bin/corosync-qnetd" ++fi ++ ++# Defaults ++OCF_RESKEY_qnetd_opts_default="" ++OCF_RESKEY_qnetd_tool_binary_default="/usr/bin/corosync-qnetd-tool" ++OCF_RESKEY_ip_default="" ++OCF_RESKEY_port_default="" ++OCF_RESKEY_nss_db_dir_default="" ++OCF_RESKEY_pid_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.pid" ++OCF_RESKEY_ipc_sock_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.sock" ++OCF_RESKEY_user_default="coroqnetd" ++OCF_RESKEY_group_default="${OCF_RESKEY_user_default}" ++ ++: "${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}" ++: "${OCF_RESKEY_qnetd_opts=${OCF_RESKEY_qnetd_opts_default}}" ++: "${OCF_RESKEY_qnetd_tool_binary=${OCF_RESKEY_qnetd_tool_binary_default}}" ++: "${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}}" ++: "${OCF_RESKEY_port=${OCF_RESKEY_port_default}}" ++: "${OCF_RESKEY_nss_db_dir=${OCF_RESKEY_nss_db_dir_default}}" ++: "${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}" ++: "${OCF_RESKEY_ipc_sock=${OCF_RESKEY_ipc_sock_default}}" ++: "${OCF_RESKEY_user=${OCF_RESKEY_user_default}}" ++: "${OCF_RESKEY_group=${OCF_RESKEY_group_default}}" ++ ++corosync_qnetd_usage() { ++ cat < ++ ++ ++1.0 ++ ++OCF Resource script for corosync-qnetd. It manages a corosync-qnetd ++instance as a HA resource. It is required to copy nss db directory (usually /etc/corosync/qnetd/nssdb) ++across all nodes (only once - after database is initialized). ++Corosync QNet daemon resource agent ++ ++ ++ ++ ++ Location of the corosync-qnetd binary ++ corosync-qnetd binary ++ ++ ++ ++ ++ ++ Additional options for corosync-qnetd binary. "-4" for example. ++ ++ corosync-qnetd extra options ++ ++ ++ ++ ++ ++ The absolute path to the corosync-qnetd-tool for monitoring with OCF_CHECK_LEVEL greater zero. ++ ++ The absolute path to the corosync-qnetd-tool binary ++ ++ ++ ++ ++ ++ IP address to listen on. By default the daemon listens on all addresses (wildcard). ++ ++ IP address to listen on ++ ++ ++ ++ ++ ++ TCP port to listen on. Default port is 5403. 
++ ++ TCP port to listen on ++ ++ ++ ++ ++ ++ Location of the corosync-qnetd nss db directory (empty for default - usually /etc/corosync/qnetd/nssdb) ++ ++ corosync-qnetd nss db directory ++ ++ ++ ++ ++ ++ Location of the corosync-qnetd pid/lock ++ ++ corosync-qnetd pid file ++ ++ ++ ++ ++ ++ Location of the corosync-qnetd ipc socket ++ ++ corosync-qnetd ipc socket file ++ ++ ++ ++ ++ User running corosync-qnetd ++ corosync-qnetd user ++ ++ ++ ++ ++ Group running corosync-qnetd ++ corosync-qnetd group ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++corosync_qnetd_status() { ++ ocf_pidfile_status "${OCF_RESKEY_pid}" > /dev/null 2>&1 ++ case "$?" in ++ 0) ++ rc="$OCF_SUCCESS" ++ ;; ++ 1|2) ++ rc="$OCF_NOT_RUNNING" ++ ;; ++ *) ++ rc="$OCF_ERR_GENERIC" ++ ;; ++ esac ++ ++ return "$rc" ++} ++ ++corosync_qnetd_start() { ++ corosync_qnetd_validate_all ++ rc="$?" ++ ++ if [ "$rc" -ne 0 ]; then ++ return "$rc" ++ fi ++ ++ # if resource is already running,no need to continue code after this. ++ if corosync_qnetd_status; then ++ ocf_log info "corosync-qnetd is already running" ++ return "${OCF_SUCCESS}" ++ fi ++ ++ pid_dir=$(dirname "${OCF_RESKEY_pid}") ++ sock_dir=$(dirname "${OCF_RESKEY_ipc_sock}") ++ ++ for d in "$pid_dir" "$sock_dir";do ++ if [ ! -d "$d" ];then ++ mkdir -p "$d" ++ chmod 0770 "$d" ++ chown "${OCF_RESKEY_user}:${OCF_RESKEY_group}" "$d" ++ fi ++ done ++ ++ params="-S \"local_socket_file=${OCF_RESKEY_ipc_sock}\" -S \"lock_file=${OCF_RESKEY_pid}\"" ++ ++ if [ -n "${OCF_RESKEY_nss_db_dir}" ];then ++ params="$params -S \"nss_db_dir=${OCF_RESKEY_nss_db_dir}\"" ++ fi ++ ++ if [ -n "${OCF_RESKEY_ip}" ];then ++ params="$params -l \"${OCF_RESKEY_ip}\"" ++ fi ++ ++ if [ -n "${OCF_RESKEY_port}" ];then ++ params="$params -p \"${OCF_RESKEY_port}\"" ++ fi ++ ++ params="$params ${OCF_RESKEY_qnetd_opts}" ++ ++ ocf_run "$SU" -s "/bin/sh" "${OCF_RESKEY_user}" -c "${OCF_RESKEY_binary} $params" ++ ++ while :; do ++ corosync_qnetd_monitor "debug" ++ rc="$?" ++ ++ if [ "$rc" -eq "${OCF_SUCCESS}" ]; then ++ break ++ fi ++ sleep 1 ++ ++ ocf_log debug "corosync-qnetd still hasn't started yet. Waiting..." ++ done ++ ++ ocf_log info "corosync-qnetd started" ++ return "${OCF_SUCCESS}" ++} ++ ++corosync_qnetd_stop() { ++ corosync_qnetd_status ++ ++ if [ "$?" -ne "$OCF_SUCCESS" ]; then ++ # Currently not running. Nothing to do. ++ ocf_log info "corosync-qnetd is already stopped" ++ ++ return "$OCF_SUCCESS" ++ fi ++ ++ pid=$(cat "${OCF_RESKEY_pid}") ++ kill "$pid" ++ ++ # Wait for process to stop ++ while corosync_qnetd_monitor "debug"; do ++ sleep 1 ++ done ++ ++ ocf_log info "corosync-qnetd stopped" ++ return "$OCF_SUCCESS" ++} ++ ++corosync_qnetd_monitor() { ++ loglevel=${1:-err} ++ ++ corosync_qnetd_status ++ rc="$?" ++ ++ if [ "$rc" -ne "$OCF_SUCCESS" ];then ++ return "$rc" ++ fi ++ ++ out=$("${OCF_RESKEY_qnetd_tool_binary}" -s -p "${OCF_RESKEY_ipc_sock}" 2>&1 >/dev/null) ++ rc="$?" 
++ ++ if [ "$rc" != 0 ];then ++ ocf_log "$loglevel" "$out" ++ fi ++ ++ case "$rc" in ++ "0") rc="$OCF_SUCCESS" ;; ++ "3") rc="$OCF_NOT_RUNNING" ;; ++ *) rc="$OCF_ERR_GENERIC" ;; ++ esac ++ ++ return "$rc" ++} ++ ++corosync_qnetd_validate_all() { ++ check_binary "${OCF_RESKEY_binary}" ++ ++ check_binary "${OCF_RESKEY_qnetd_tool_binary}" ++} ++ ++ ++# **************************** MAIN SCRIPT ************************************ ++ ++# Make sure meta-data and usage always succeed ++case "$__OCF_ACTION" in ++ meta-data) ++ corosync_qnetd_meta_data ++ exit "$OCF_SUCCESS" ++ ;; ++ usage|help) ++ corosync_qnetd_usage ++ exit "$OCF_SUCCESS" ++ ;; ++esac ++ ++# This OCF agent script need to be run as root user. ++if ! ocf_is_root; then ++ echo "$0 agent script need to be run as root user." ++ ocf_log debug "$0 agent script need to be run as root user." ++ exit "$OCF_ERR_GENERIC" ++fi ++ ++# Translate each action into the appropriate function call ++case "$__OCF_ACTION" in ++ start) ++ corosync_qnetd_start ++ ;; ++ stop) ++ corosync_qnetd_stop ++ ;; ++ status) ++ corosync_qnetd_status ++ ;; ++ monitor) ++ corosync_qnetd_monitor ++ ;; ++ validate-all) ++ corosync_qnetd_validate_all ++ ;; ++ *) ++ corosync_qnetd_usage ++ exit "$OCF_ERR_UNIMPLEMENTED" ++ ;; ++esac ++ ++rc="$?" ++exit "$rc" ++# End of this script diff --git a/SOURCES/bz2072371-Filesystem-1-fix-uuid-label-device-whitespace.patch b/SOURCES/bz2072371-Filesystem-1-fix-uuid-label-device-whitespace.patch new file mode 100644 index 0000000..09960f0 --- /dev/null +++ b/SOURCES/bz2072371-Filesystem-1-fix-uuid-label-device-whitespace.patch @@ -0,0 +1,44 @@ +From 26de0ad2f0f975166fe79ef72ab08e2c03519eea Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 28 Mar 2022 13:25:35 +0200 +Subject: [PATCH] Filesystem: fix logic for UUID/label devices with space + between parameter and UUID/label + +--- + heartbeat/Filesystem | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 1a90d6a42..72a1b8623 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -596,11 +596,11 @@ Filesystem_start() + flushbufs "$DEVICE" + # Mount the filesystem. + case "$FSTYPE" in +- none) $MOUNT $options "$DEVICE" "$MOUNTPOINT" && ++ none) $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" && + bind_mount + ;; +- "") $MOUNT $options "$DEVICE" "$MOUNTPOINT" ;; +- *) $MOUNT -t "$FSTYPE" $options "$DEVICE" "$MOUNTPOINT" ;; ++ "") $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" ;; ++ *) $MOUNT -t "$FSTYPE" $options $device_opt "$DEVICE" "$MOUNTPOINT" ;; + esac + + if [ $? -ne 0 ]; then +@@ -902,7 +902,13 @@ set_blockdevice_var() { + fi + + case "$DEVICE" in +- -*) # Oh... An option to mount instead... Typically -U or -L ++ --*) # Typically --uuid or --label ++ device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//") ++ DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//") ++ ;; ++ -*) # Oh... An option to mount instead... 
Typically -U or -L ++ device_opt=$(echo $DEVICE | cut -c1-2) ++ DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//") + ;; + /dev/null) # Special case for BSC + blockdevice=yes diff --git a/SOURCES/bz2072371-Filesystem-2-improve-uuid-label-device-logic.patch b/SOURCES/bz2072371-Filesystem-2-improve-uuid-label-device-logic.patch new file mode 100644 index 0000000..844772a --- /dev/null +++ b/SOURCES/bz2072371-Filesystem-2-improve-uuid-label-device-logic.patch @@ -0,0 +1,38 @@ +From d9b46474fc19d9c57e2cfb752d60319017da8410 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 6 Apr 2022 14:14:19 +0200 +Subject: [PATCH] Filesystem: improve logic for UUID/label and add note that + /dev/disk/by-{uuid,label}/ are preferred on Linux + +--- + heartbeat/Filesystem | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 72a1b8623..44270ad98 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -163,6 +163,8 @@ directory where the status file is to be placed. + + + The name of block device for the filesystem, or -U, -L options for mount, or NFS mount specification. ++ ++NOTE: On Linux /dev/disk/by-{uuid,label}/ are preferred to -U/-L. + + block device + +@@ -902,11 +904,11 @@ set_blockdevice_var() { + fi + + case "$DEVICE" in +- --*) # Typically --uuid or --label +- device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//") ++ --uuid=*|--uuid\ *|--label=*|--label\ *) ++ device_opt=$(echo $DEVICE | sed "s/\([[:blank:]]\|=\).*//") + DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//") + ;; +- -*) # Oh... An option to mount instead... Typically -U or -L ++ -U*|-L*) # short versions of --uuid/--label + device_opt=$(echo $DEVICE | cut -c1-2) + DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//") + ;; diff --git a/SOURCES/bz2081585-NovaEvacuate-add-user_domain-project_domain.patch b/SOURCES/bz2081585-NovaEvacuate-add-user_domain-project_domain.patch new file mode 100644 index 0000000..f187bbb --- /dev/null +++ b/SOURCES/bz2081585-NovaEvacuate-add-user_domain-project_domain.patch @@ -0,0 +1,61 @@ +From 340e12c0d457d244d375c2d805e78033c9dbdf78 Mon Sep 17 00:00:00 2001 +From: Takashi Kajinami +Date: Wed, 04 May 2022 23:13:35 +0900 +Subject: [PATCH] NovaCompute/Evacuate: Make user/project domain configurable + +... so that we can use a user or a project in a non-default keystone +domain. + +Change-Id: I6e2175adca08fd97942cb83b8f8094e980b60c9d +--- + +diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +index 596f520..4565766 100644 +--- a/heartbeat/NovaEvacuate ++++ b/heartbeat/NovaEvacuate +@@ -63,13 +63,29 @@ + + + +-Tenant name for connecting to keystone in admin context. ++Tenant(Project) name for connecting to keystone in admin context. + Note that with Keystone V3 tenant names are only unique within a domain. 
+ + Tenant name + + + ++ ++ ++Keystone domain the user belongs to ++ ++Keystone v3 User Domain ++ ++ ++ ++ ++ ++Keystone domain the tenant(project) belongs to ++ ++Keystone v3 Project Domain ++ ++ ++ + + + DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN +@@ -319,6 +335,14 @@ + + fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}" + ++ if [ -n "${OCF_RESKEY_user_domain}" ]; then ++ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}" ++ fi ++ ++ if [ -n "${OCF_RESKEY_project_domain}" ]; then ++ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}" ++ fi ++ + if [ -n "${OCF_RESKEY_domain}" ]; then + fence_options="${fence_options} -d ${OCF_RESKEY_domain}" + fi diff --git a/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-set-domain-parameters-default.patch b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-set-domain-parameters-default.patch new file mode 100644 index 0000000..8ee70e5 --- /dev/null +++ b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-set-domain-parameters-default.patch @@ -0,0 +1,55 @@ +From bb5cfa172ca58cd8adcedcaca92bde54d0645661 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 14 Jul 2022 10:55:19 +0200 +Subject: [PATCH] openstack-agents: set domain parameter's default to Default + and fix missing parameter name in ocf_exit_reason + +--- + heartbeat/openstack-common.sh | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh +index b6eec09c..14d290bd 100644 +--- a/heartbeat/openstack-common.sh ++++ b/heartbeat/openstack-common.sh +@@ -1,6 +1,10 @@ ++OCF_RESKEY_user_domain_name_default="Default" ++OCF_RESKEY_project_domain_name_default="Default" + OCF_RESKEY_openstackcli_default="/usr/bin/openstack" + OCF_RESKEY_insecure_default="false" + ++: ${OCF_RESKEY_user_domain_name=${OCF_RESKEY_user_domain_name_default}} ++: ${OCF_RESKEY_project_domain_name=${OCF_RESKEY_project_domain_name_default}} + : ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} + : ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}} + +@@ -64,7 +68,7 @@ Keystone Project. + Keystone User Domain Name. + + Keystone User Domain Name +- ++ + + + +@@ -72,7 +76,7 @@ Keystone User Domain Name. + Keystone Project Domain Name. 
+ + Keystone Project Domain Name +- ++ + + + +@@ -133,7 +137,7 @@ get_config() { + exit $OCF_ERR_CONFIGURED + fi + if [ -z "$OCF_RESKEY_project_domain_name" ]; then +- ocf_exit_reason " not set" ++ ocf_exit_reason "project_domain_name not set" + exit $OCF_ERR_CONFIGURED + fi + +-- +2.36.1 + diff --git a/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-warn-when-openstackcli-slow.patch b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-warn-when-openstackcli-slow.patch new file mode 100644 index 0000000..3f8bf0c --- /dev/null +++ b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-warn-when-openstackcli-slow.patch @@ -0,0 +1,282 @@ +From ebea4c3620261c529cad908c0e52064df84b0c61 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 11 Jul 2022 10:28:11 +0200 +Subject: [PATCH] openstack-agents: warn when openstackcli is slow + +--- + heartbeat/openstack-cinder-volume | 19 +++++++++++-------- + heartbeat/openstack-common.sh | 22 ++++++++++++++++++++++ + heartbeat/openstack-floating-ip | 17 ++++++++++------- + heartbeat/openstack-info.in | 20 ++++++++++---------- + heartbeat/openstack-virtual-ip | 20 ++++++++++---------- + 5 files changed, 63 insertions(+), 35 deletions(-) + +diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume +index 19bf04faf..116442c41 100755 +--- a/heartbeat/openstack-cinder-volume ++++ b/heartbeat/openstack-cinder-volume +@@ -113,11 +113,14 @@ _get_node_id() { + } + + osvol_validate() { ++ local result ++ + check_binary "$OCF_RESKEY_openstackcli" + + get_config + +- if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then ++ result=$(run_openstackcli "volume list") ++ if ! echo "$result" | grep -q $OCF_RESKEY_volume_id; then + ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found" + return $OCF_ERR_CONFIGURED + fi +@@ -156,17 +159,17 @@ osvol_monitor() { + # Is the volue attached? + # We use the API + # +- result=$($OCF_RESKEY_openstackcli volume show \ ++ result=$(run_openstackcli "volume show \ + --column status \ + --column attachments \ + --format value \ +- $OCF_RESKEY_volume_id) ++ $OCF_RESKEY_volume_id") + +- if echo "$result" | grep -q available ; then ++ if echo "$result" | grep -q available; then + ocf_log warn "$OCF_RESKEY_volume_id is not attached to any instance" + return $OCF_NOT_RUNNING + else +- export attached_server_id=$(echo $result|head -n1| ++ export attached_server_id=$(echo "$result"|head -n1| + grep -P -o "'server_id': '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}'"| + grep -P -o "[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") + ocf_log info "$OCF_RESKEY_volume_id is attached to instance $attached_server_id" +@@ -199,7 +202,7 @@ osvol_stop() { + # + # Detach the volume + # +- if ! $OCF_RESKEY_openstackcli server remove volume $node_id $OCF_RESKEY_volume_id ; then ++ if ! run_openstackcli "server remove volume $node_id $OCF_RESKEY_volume_id"; then + ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $node_id" + return $OCF_ERR_GENERIC + fi +@@ -225,7 +228,7 @@ osvol_start() { + # TODO: make it optional in case multi-attachment is allowed by Cinder + # + if [ ! -z $attached_server_id ] ; then +- if ! $OCF_RESKEY_openstackcli server remove volume $attached_server_id $OCF_RESKEY_volume_id ; then ++ if ! 
run_openstackcli "server remove volume $attached_server_id $OCF_RESKEY_volume_id"; then + ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $attached_server_id" + return $OCF_ERR_GENERIC + fi +@@ -238,7 +241,7 @@ osvol_start() { + # + # Attach the volume + # +- $OCF_RESKEY_openstackcli server add volume $node_id $OCF_RESKEY_volume_id ++ run_openstackcli "server add volume $node_id $OCF_RESKEY_volume_id" + if [ $? != $OCF_SUCCESS ]; then + ocf_log error "Couldn't add volume $OCF_RESKEY_volume_id to instance $node_id" + return $OCF_ERR_GENERIC +diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh +index 4763c90db..b6eec09c2 100644 +--- a/heartbeat/openstack-common.sh ++++ b/heartbeat/openstack-common.sh +@@ -145,3 +145,25 @@ get_config() { + OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name" + fi + } ++ ++run_openstackcli() { ++ local cmd="${OCF_RESKEY_openstackcli} $1" ++ local result ++ local rc ++ local start_time=$(date +%s) ++ local end_time ++ local elapsed_time ++ ++ result=$($cmd) ++ rc=$? ++ end_time=$(date +%s) ++ elapsed_time=$(expr $end_time - $start_time) ++ ++ if [ $elapsed_time -gt 20 ]; then ++ ocf_log warn "$cmd took ${elapsed_time}s to complete" ++ fi ++ ++ echo "$result" ++ ++ return $rc ++} +diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip +index 6e2895654..7317f19a8 100755 +--- a/heartbeat/openstack-floating-ip ++++ b/heartbeat/openstack-floating-ip +@@ -101,11 +101,14 @@ END + } + + osflip_validate() { ++ local result ++ + check_binary "$OCF_RESKEY_openstackcli" + + get_config + +- if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then ++ result=$(run_openstackcli "floating ip list") ++ if ! echo "$result" | grep -q $OCF_RESKEY_ip_id; then + ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found" + return $OCF_ERR_CONFIGURED + fi +@@ -132,14 +135,14 @@ osflip_monitor() { + | awk '{gsub("[^ ]*:", "");print}') + + # Is the IP active and attached? +- result=$($OCF_RESKEY_openstackcli floating ip show \ ++ result=$(run_openstackcli "floating ip show \ + --column port_id --column floating_ip_address \ + --format yaml \ +- $OCF_RESKEY_ip_id) ++ $OCF_RESKEY_ip_id") + + for port in $node_port_ids ; do +- if echo $result | grep -q $port ; then +- floating_ip=$(echo $result | awk '/floating_ip_address/ {print $2}') ++ if echo "$result" | grep -q $port ; then ++ floating_ip=$(echo "$result" | awk '/floating_ip_address/ {print $2}') + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_floating_ip -v $floating_ip + + return $OCF_SUCCESS +@@ -160,7 +163,7 @@ osflip_stop() { + return $OCF_SUCCESS + fi + +- if ! $OCF_RESKEY_openstackcli floating ip unset --port $OCF_RESKEY_ip_id ; then ++ if ! run_openstackcli "floating ip unset --port $OCF_RESKEY_ip_id"; then + return $OCF_ERR_GENERIC + fi + +@@ -194,7 +197,7 @@ osflip_start() { + + ocf_log info "Moving IP address $OCF_RESKEY_ip_id to port ID $node_port_id" + +- $OCF_RESKEY_openstackcli floating ip set --port $node_port_id $OCF_RESKEY_ip_id ++ run_openstackcli "floating ip set --port $node_port_id $OCF_RESKEY_ip_id" + if [ $? 
!= $OCF_SUCCESS ]; then + ocf_log error "$OCF_RESKEY_ip_id Cannot be set to port $node_port_id" + return $OCF_ERR_GENERIC +diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in +index f3a59fc7a..6502f1df1 100755 +--- a/heartbeat/openstack-info.in ++++ b/heartbeat/openstack-info.in +@@ -119,9 +119,7 @@ END + ####################################################################### + + OSInfoStats() { +- local result + local value +- local node + local node_id + + get_config +@@ -141,31 +139,33 @@ OSInfoStats() { + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id" + + # Nova data: flavor +- value=$($OCF_RESKEY_openstackcli server show \ ++ value=$(run_openstackcli "server show \ + --format value \ + --column flavor \ +- $node_id) ++ $node_id") + + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value" + + # Nova data: availability zone +- value=$($OCF_RESKEY_openstackcli server show \ ++ value=$(run_openstackcli "server show \ + --format value \ + --column OS-EXT-AZ:availability_zone \ +- $node_id) ++ $node_id") + + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value" + + # Network data: ports + value="" +- for port_id in $($OCF_RESKEY_openstackcli port list \ ++ for port_id in $(run_openstackcli "port list \ + --format value \ + --column id \ +- --server $node_id); do +- subnet_id=$($OCF_RESKEY_openstackcli port show \ ++ --server $node_id"); do ++ subnet_result=$(run_openstackcli "port show \ + --format json \ + --column fixed_ips \ +- ${port_id} | grep -P '\"subnet_id\": \".*\",$' | ++ ${port_id}") ++ subnet_id=$(echo "$subnet_result" | ++ grep -P '\"subnet_id\": \".*\",$' | + grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') + value="${value}${subnet_id}:${port_id}," + done +diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip +index c654d980a..361357d55 100755 +--- a/heartbeat/openstack-virtual-ip ++++ b/heartbeat/openstack-virtual-ip +@@ -132,11 +132,11 @@ osvip_monitor() { + + node_port_id=$(osvip_port_id) + +- result=$($OCF_RESKEY_openstackcli port show \ ++ result=$(run_openstackcli "port show \ + --format value \ + --column allowed_address_pairs \ +- ${node_port_id}) +- if echo $result | grep -q "$OCF_RESKEY_ip"; then ++ ${node_port_id}") ++ if echo "$result" | grep -q "$OCF_RESKEY_ip"; then + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip + + return $OCF_SUCCESS +@@ -158,20 +158,20 @@ osvip_stop() { + return $OCF_SUCCESS + fi + +- mac_address=$($OCF_RESKEY_openstackcli port show \ ++ mac_address=$(run_openstackcli "port show \ + --format value \ + --column mac_address \ +- $node_port_id) +- echo ${mac_address} | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$" ++ $node_port_id") ++ echo "${mac_address}" | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$" + if [ $? -ne 0 ]; then + ocf_log error "MAC address '${mac_address}' is not valid." + return $OCF_ERR_GENERIC + fi + +- if ! $OCF_RESKEY_openstackcli port unset \ ++ if ! run_openstackcli "port unset \ + --allowed-address \ + ip-address=$OCF_RESKEY_ip,mac-address=${mac_address} \ +- $node_port_id; then ++ $node_port_id"; then + return $OCF_ERR_GENERIC + fi + +@@ -196,9 +196,9 @@ osvip_start() { + + ocf_log info "Moving IP address $OCF_RESKEY_ip to port ID $node_port_id" + +- $OCF_RESKEY_openstackcli port set \ ++ run_openstackcli "port set \ + --allowed-address ip-address=$OCF_RESKEY_ip \ +- $node_port_id ++ $node_port_id" + if [ $? 
!= $OCF_SUCCESS ]; then + ocf_log error "$OCF_RESKEY_ip Cannot be set to port $node_port_id" + return $OCF_ERR_GENERIC diff --git a/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-update-openstack-agents.patch b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-update-openstack-agents.patch new file mode 100644 index 0000000..7b1a6e8 --- /dev/null +++ b/SOURCES/bz2083081-bz2083086-bz2083090-bz2083092-update-openstack-agents.patch @@ -0,0 +1,770 @@ +diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2022-03-15 16:14:29.355209012 +0100 ++++ b/heartbeat/Makefile.am 2022-03-15 16:18:35.917048467 +0100 +@@ -217,6 +217,7 @@ + lvm-clvm.sh \ + lvm-plain.sh \ + lvm-tag.sh \ ++ openstack-common.sh \ + ora-common.sh \ + mysql-common.sh \ + nfsserver-redhat.sh \ +diff --color -uNr a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume +--- a/heartbeat/openstack-cinder-volume 2022-03-15 16:14:29.370209063 +0100 ++++ b/heartbeat/openstack-cinder-volume 2022-03-15 16:17:36.231840008 +0100 +@@ -34,11 +34,11 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++. ${OCF_FUNCTIONS_DIR}/openstack-common.sh ++ + # Defaults +-OCF_RESKEY_openstackcli_default="/usr/bin/openstack" + OCF_RESKEY_volume_local_check_default="true" + +-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} + : ${OCF_RESKEY_volume_local_check=${OCF_RESKEY_volume_local_check_default}} + + ####################################################################### +@@ -68,14 +68,11 @@ + Attach a cinder volume + + +- +- +-Path to command line tools for openstack. +- +-Path to Openstack CLI tool +- +- ++END + ++common_meta_data ++ ++cat < + + This option allows the cluster to monitor the cinder volume presence without +@@ -85,28 +82,19 @@ + + + +- +- +-Valid Openstack credentials as openrc file from api_access/openrc. +- +-openrc file +- +- +- + + +-Cinder volume identifier to use to attach the bloc storage. ++Cinder volume identifier to use to attach the block storage. + + Volume ID + + +- + + + + + +- ++ + + + +@@ -127,17 +115,7 @@ + osvol_validate() { + check_binary "$OCF_RESKEY_openstackcli" + +- if [ -z "$OCF_RESKEY_openrc" ]; then +- ocf_exit_reason "openrc parameter not set" +- return $OCF_ERR_CONFIGURED +- fi +- +- if [ ! -f "$OCF_RESKEY_openrc" ] ; then +- ocf_exit_reason "openrc file not found" +- return $OCF_ERR_CONFIGURED +- fi +- +- . $OCF_RESKEY_openrc ++ get_config + + if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then + ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found" +diff --color -uNr a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh +--- a/heartbeat/openstack-common.sh 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/openstack-common.sh 2022-03-15 16:17:36.232840011 +0100 +@@ -0,0 +1,147 @@ ++OCF_RESKEY_openstackcli_default="/usr/bin/openstack" ++OCF_RESKEY_insecure_default="false" ++ ++: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} ++: ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}} ++ ++if ocf_is_true "${OCF_RESKEY_insecure}"; then ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --insecure" ++fi ++ ++common_meta_data() { ++ cat < ++ ++Openstack cloud (from ~/.config/openstack/clouds.yaml or /etc/openstack/clouds.yaml). ++ ++Cloud from clouds.yaml ++ ++ ++ ++ ++ ++Openstack credentials as openrc file from api_access/openrc. ++ ++openrc file ++ ++ ++ ++ ++ ++Keystone Auth URL ++ ++Keystone Auth URL ++ ++ ++ ++ ++ ++Username. 
++ ++Username ++ ++ ++ ++ ++ ++Password. ++ ++Password ++ ++ ++ ++ ++ ++Keystone Project. ++ ++Keystone Project ++ ++ ++ ++ ++ ++Keystone User Domain Name. ++ ++Keystone User Domain Name ++ ++ ++ ++ ++ ++Keystone Project Domain Name. ++ ++Keystone Project Domain Name ++ ++ ++ ++ ++ ++Path to command line tools for openstack. ++ ++Path to Openstack CLI tool ++ ++ ++ ++ ++ ++Allow insecure connections ++ ++Allow insecure connections ++ ++ ++END ++} ++ ++get_config() { ++ if [ -n "$OCF_RESKEY_cloud" ]; then ++ TILDE=$(echo ~) ++ clouds_yaml="$TILDE/.config/openstack/clouds.yaml" ++ if [ ! -f "$clouds_yaml" ]; then ++ clouds_yaml="/etc/openstack/clouds.yaml" ++ fi ++ if [ ! -f "$clouds_yaml" ]; then ++ ocf_exit_reason "~/.config/openstack/clouds.yaml and /etc/openstack/clouds.yaml does not exist" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-cloud $OCF_RESKEY_cloud" ++ elif [ -n "$OCF_RESKEY_openrc" ]; then ++ if [ ! -f "$OCF_RESKEY_openrc" ]; then ++ ocf_exit_reason "$OCF_RESKEY_openrc does not exist" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ . $OCF_RESKEY_openrc ++ else ++ if [ -z "$OCF_RESKEY_auth_url" ]; then ++ ocf_exit_reason "auth_url not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -z "$OCF_RESKEY_username" ]; then ++ ocf_exit_reason "username not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -z "$OCF_RESKEY_password" ]; then ++ ocf_exit_reason "password not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -z "$OCF_RESKEY_project_name" ]; then ++ ocf_exit_reason "project_name not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -z "$OCF_RESKEY_user_domain_name" ]; then ++ ocf_exit_reason "user_domain_name not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -z "$OCF_RESKEY_project_domain_name" ]; then ++ ocf_exit_reason " not set" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-auth-url $OCF_RESKEY_auth_url" ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-username $OCF_RESKEY_username" ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-password $OCF_RESKEY_password" ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-name $OCF_RESKEY_project_name" ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-user-domain-name $OCF_RESKEY_user_domain_name" ++ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name" ++ fi ++} +diff --color -uNr a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip +--- a/heartbeat/openstack-floating-ip 2022-03-15 16:14:29.370209063 +0100 ++++ b/heartbeat/openstack-floating-ip 2022-03-15 16:17:36.233840014 +0100 +@@ -34,10 +34,9 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + +-# Defaults +-OCF_RESKEY_openstackcli_default="/usr/bin/openstack" ++. ${OCF_FUNCTIONS_DIR}/openstack-common.sh + +-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} ++# Defaults + + ####################################################################### + +@@ -67,22 +66,11 @@ + Move a floating IP + + +- +- +-Path to command line tools for openstack. +- +-Path to Openstack CLI tool +- +- ++END + +- +- +-Valid Openstack credentials as openrc file from api_access/openrc. +- +-openrc file +- +- ++common_meta_data + ++cat < + + Floating IP Identifier. 
+@@ -104,7 +92,7 @@ + + + +- ++ + + + +@@ -115,17 +103,7 @@ + osflip_validate() { + check_binary "$OCF_RESKEY_openstackcli" + +- if [ -z "$OCF_RESKEY_openrc" ]; then +- ocf_exit_reason "openrc parameter not set" +- return $OCF_ERR_CONFIGURED +- fi +- +- if [ ! -f "$OCF_RESKEY_openrc" ] ; then +- ocf_exit_reason "openrc file not found" +- return $OCF_ERR_CONFIGURED +- fi +- +- . $OCF_RESKEY_openrc ++ get_config + + if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then + ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found" +diff --color -uNr a/heartbeat/openstack-info b/heartbeat/openstack-info +--- a/heartbeat/openstack-info 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/openstack-info 2022-03-15 16:17:36.234840018 +0100 +@@ -0,0 +1,270 @@ ++#!/bin/sh ++# ++# ++# OCF resource agent to set attributes from Openstack instance details. ++# It records (in the CIB) various attributes of a node ++# ++# Copyright (c) 2018 Mathieu Grzybek ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++. ${OCF_FUNCTIONS_DIR}/openstack-common.sh ++ ++# Defaults ++OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}" ++OCF_RESKEY_delay_default="0" ++OCF_RESKEY_clone_default="0" ++OCF_RESKEY_curlcli_default="/usr/bin/curl" ++OCF_RESKEY_pythoncli_default="/usr/bin/python" ++ ++: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}} ++: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}} ++: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}} ++: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}} ++: ${OCF_RESKEY_clone=${OCF_RESKEY_clone_default}} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++OCF resource agent to set attributes from Openstack instance details. ++It records (in the CIB) various attributes of a node. ++Sample output: ++ openstack_az : nova ++ openstack_flavor : c1.small ++ openstack_id : 60ac4343-5828-49b1-8aac-7c69b1417f31 ++ openstack_ports : 7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6 ++ ++The layout of openstack_ports is a comma-separated list of tuples "subnet_id:port_id". 
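For illustration only (this sketch is not part of the agent's metadata or code), a consumer can split such an openstack_ports value back into bare port IDs with the same tr/awk idiom the openstack-floating-ip and openstack-virtual-ip agents use; the UUIDs are the sample values shown above:

    ports="7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6"
    # drop every "subnet_id:" prefix, keeping only the port IDs
    echo "$ports" | tr ',' ' ' | awk '{gsub("[^ ]*:", ""); print}'
    # prints: 96530d18-57a3-4718-af32-30f2a74c22a2 a55ae917-8016-4b1e-8ffa-04311b9dc7d6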
++ ++Records various node attributes in the CIB ++ ++ ++END ++ ++common_meta_data ++ ++ cat < ++PID file ++PID file ++ ++ ++ ++ ++Interval to allow values to stabilize ++Dampening Delay ++ ++ ++ ++ ++ ++Path to command line cURL binary. ++ ++Path to cURL binary ++ ++ ++ ++ ++ ++Path to command line Python interpreter. ++ ++Path to Python interpreter ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++OSInfoStats() { ++ local result ++ local value ++ local node ++ local node_id ++ ++ get_config ++ ++ # Nova data: server ID ++ node_id=$($OCF_RESKEY_curlcli \ ++ -s http://169.254.169.254/openstack/latest/meta_data.json | ++ $OCF_RESKEY_pythoncli -m json.tool | ++ grep -P '\"uuid\": \".*\",$' | ++ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') ++ ++ if [ $? -ne 0 ] ; then ++ ocf_exit_reason "Cannot find server ID" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id" ++ ++ # Nova data: flavor ++ value=$($OCF_RESKEY_openstackcli server show \ ++ --format value \ ++ --column flavor \ ++ $node_id) ++ ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value" ++ ++ # Nova data: availability zone ++ value=$($OCF_RESKEY_openstackcli server show \ ++ --format value \ ++ --column OS-EXT-AZ:availability_zone \ ++ $node_id) ++ ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value" ++ ++ # Network data: ports ++ value="" ++ for port_id in $($OCF_RESKEY_openstackcli port list \ ++ --format value \ ++ --column id \ ++ --server $node_id); do ++ subnet_id=$($OCF_RESKEY_openstackcli port show \ ++ --format json \ ++ --column fixed_ips \ ++ ${port_id} | grep -P '\"subnet_id\": \".*\",$' | ++ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') ++ value+="${subnet_id}:${port_id}," ++ done ++ value=$(echo ${value} | sed -e 's/,$//g') ++ ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value" ++ ++ if [ ! -z "$OS_REGION_NAME" ] ; then ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_region -v "$OS_REGION_NAME" ++ fi ++ ++ if [ ! -z "$OS_TENANT_ID" ] ; then ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_id -v "$OS_TENANT_ID" ++ ++ if [ ! -z "$OS_TENANT_NAME" ] ; then ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_name -v "$OS_TENANT_NAME" ++ fi ++ else ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_id -v "$OS_PROJECT_ID" ++ ++ if [ ! 
-z "$OS_PROJECT_NAME" ] ; then ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_name -v "$OS_PROJECT_NAME" ++ fi ++ fi ++ ++} ++ ++OSInfo_usage() { ++ cat < $OCF_RESKEY_pidfile ++ OSInfoStats ++ exit $OCF_SUCCESS ++} ++ ++OSInfo_stop() { ++ rm -f $OCF_RESKEY_pidfile ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_id ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_flavor ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_az ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_ports ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_region ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_id ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_name ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_id ++ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_name ++ exit $OCF_SUCCESS ++} ++ ++OSInfo_monitor() { ++ if [ -f "$OCF_RESKEY_pidfile" ] ; then ++ OSInfoStats ++ exit $OCF_RUNNING ++ fi ++ exit $OCF_NOT_RUNNING ++} ++ ++OSInfo_validate() { ++ check_binary "$OCF_RESKEY_curlcli" ++ check_binary "$OCF_RESKEY_openstackcli" ++ check_binary "$OCF_RESKEY_pythoncli" ++ ++ return $OCF_SUCCESS ++} ++ ++if [ $# -ne 1 ]; then ++ OSInfo_usage ++ exit $OCF_ERR_ARGS ++fi ++ ++if [ x != x${OCF_RESKEY_delay} ]; then ++ OCF_RESKEY_delay="-d ${OCF_RESKEY_delay}" ++fi ++ ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS ++ ;; ++start) OSInfo_validate || exit $? ++ OSInfo_start ++ ;; ++stop) OSInfo_stop ++ ;; ++monitor) OSInfo_monitor ++ ;; ++validate-all) OSInfo_validate ++ ;; ++usage|help) OSInfo_usage ++ exit $OCF_SUCCESS ++ ;; ++*) OSInfo_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++ ++exit $? +diff --color -uNr a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in +--- a/heartbeat/openstack-info.in 2022-03-15 16:14:29.370209063 +0100 ++++ b/heartbeat/openstack-info.in 2022-03-15 16:17:36.234840018 +0100 +@@ -32,16 +32,16 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++. ${OCF_FUNCTIONS_DIR}/openstack-common.sh ++ + # Defaults + OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}" + OCF_RESKEY_delay_default="0" + OCF_RESKEY_clone_default="0" + OCF_RESKEY_curlcli_default="/usr/bin/curl" +-OCF_RESKEY_openstackcli_default="/usr/bin/openstack" + OCF_RESKEY_pythoncli_default="@PYTHON@" + + : ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}} +-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} + : ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}} + : ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}} + : ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}} +@@ -70,25 +70,23 @@ + Records various node attributes in the CIB + + ++END ++ ++common_meta_data ++ ++ cat < + PID file + PID file + + ++ + + Interval to allow values to stabilize + Dampening Delay + + + +- +- +-Valid Openstack credentials as openrc file from api_access/openrc. +- +-openrc file +- +- +- + + + Path to command line cURL binary. +@@ -97,14 +95,6 @@ + + + +- +- +-Path to command line tools for openstack. +- +-Path to Openstack CLI tool +- +- +- + + + Path to command line Python interpreter. +@@ -116,9 +106,9 @@ + + + +- +- +- ++ ++ ++ + + + +@@ -134,7 +124,7 @@ + local node + local node_id + +- . 
$OCF_RESKEY_openrc ++ get_config + + # Nova data: server ID + node_id=$($OCF_RESKEY_curlcli \ +@@ -244,16 +234,6 @@ + check_binary "$OCF_RESKEY_openstackcli" + check_binary "$OCF_RESKEY_pythoncli" + +- if [ -z "$OCF_RESKEY_openrc" ]; then +- ocf_exit_reason "openrc parameter not set" +- return $OCF_ERR_CONFIGURED +- fi +- +- if [ ! -f "$OCF_RESKEY_openrc" ] ; then +- ocf_exit_reason "openrc file not found" +- return $OCF_ERR_CONFIGURED +- fi +- + return $OCF_SUCCESS + } + +diff --color -uNr a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip +--- a/heartbeat/openstack-virtual-ip 2022-03-15 16:14:29.370209063 +0100 ++++ b/heartbeat/openstack-virtual-ip 2022-03-15 16:17:36.235840021 +0100 +@@ -34,10 +34,9 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + +-# Defaults +-OCF_RESKEY_openstackcli_default="/usr/bin/openstack" ++. ${OCF_FUNCTIONS_DIR}/openstack-common.sh + +-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} ++# Defaults + + ####################################################################### + +@@ -68,22 +67,11 @@ + Move a virtual IP + + +- +- +-Path to command line tools for openstack. +- +-Path to Openstack CLI tool +- +- ++END + +- +- +-Valid Openstack credentials as openrc file from api_access/openrc. +- +-openrc file +- +- ++common_meta_data + ++cat < + + Virtual IP Address. +@@ -105,7 +93,7 @@ + + + +- ++ + + + +@@ -128,17 +116,7 @@ + osvip_validate() { + check_binary "$OCF_RESKEY_openstackcli" + +- if [ -z "$OCF_RESKEY_openrc" ]; then +- ocf_exit_reason "openrc parameter not set" +- return $OCF_ERR_CONFIGURED +- fi +- +- if [ ! -f "$OCF_RESKEY_openrc" ] ; then +- ocf_exit_reason "openrc file not found" +- return $OCF_ERR_CONFIGURED +- fi +- +- . $OCF_RESKEY_openrc ++ get_config + + ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 + if [ $? -ne 0 ] ; then diff --git a/SOURCES/bz2083081-bz2083086-bz2083092-openstack-agents-fixes.patch b/SOURCES/bz2083081-bz2083086-bz2083092-openstack-agents-fixes.patch new file mode 100644 index 0000000..451fba7 --- /dev/null +++ b/SOURCES/bz2083081-bz2083086-bz2083092-openstack-agents-fixes.patch @@ -0,0 +1,72 @@ +From 64f434014bc198055478a139532c7cc133967c5d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 8 Jul 2022 15:41:34 +0200 +Subject: [PATCH] openstack-agents: fixes + +- openstack-cinder-volume: dont do volume_local_check during start/stop-action +- openstack-floating-ip/openstack-virtual-ip: dont fail in validate() + during probe-calls +- openstack-floating-ip: fix awk only catching last id for node_port_ids +--- + heartbeat/openstack-cinder-volume | 2 +- + heartbeat/openstack-floating-ip | 4 ++-- + heartbeat/openstack-virtual-ip | 4 ++-- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume +index cc12e58ae..19bf04faf 100755 +--- a/heartbeat/openstack-cinder-volume ++++ b/heartbeat/openstack-cinder-volume +@@ -138,7 +138,7 @@ osvol_monitor() { + + node_id=$(_get_node_id) + +- if ocf_is_true $OCF_RESKEY_volume_local_check ; then ++ if [ "$__OCF_ACTION" = "monitor" ] && ocf_is_true $OCF_RESKEY_volume_local_check ; then + # + # Is the volue attached? 
+ # We check the local devices +diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip +index 8c135cc24..6e2895654 100755 +--- a/heartbeat/openstack-floating-ip ++++ b/heartbeat/openstack-floating-ip +@@ -111,7 +111,7 @@ osflip_validate() { + fi + + ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 +- if [ $? -ne 0 ] ; then ++ if [ $? -ne 0 ] && ! ocf_is_probe; then + ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE" + return $OCF_ERR_GENERIC + fi +@@ -129,7 +129,7 @@ osflip_monitor() { + node_port_ids=$(${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) \ + | awk -F= '{gsub("\"","");print $NF}' \ + | tr ',' ' ' \ +- | awk -F: '{print $NF}') ++ | awk '{gsub("[^ ]*:", "");print}') + + # Is the IP active and attached? + result=$($OCF_RESKEY_openstackcli floating ip show \ +diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip +index a1084c420..c654d980a 100755 +--- a/heartbeat/openstack-virtual-ip ++++ b/heartbeat/openstack-virtual-ip +@@ -119,7 +119,7 @@ osvip_validate() { + get_config + + ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 +- if [ $? -ne 0 ] ; then ++ if [ $? -ne 0 ] && ! ocf_is_probe; then + ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE" + return $OCF_ERR_GENERIC + fi +@@ -136,7 +136,7 @@ osvip_monitor() { + --format value \ + --column allowed_address_pairs \ + ${node_port_id}) +- if echo $result | grep -q $OCF_RESKEY_ip ; then ++ if echo $result | grep -q "$OCF_RESKEY_ip"; then + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip + + return $OCF_SUCCESS diff --git a/SOURCES/bz2083090-openstack-info-fix-bashism.patch b/SOURCES/bz2083090-openstack-info-fix-bashism.patch new file mode 100644 index 0000000..4f78d54 --- /dev/null +++ b/SOURCES/bz2083090-openstack-info-fix-bashism.patch @@ -0,0 +1,26 @@ +From 8b1d3257e5176a2f50a843a21888c4b4f51f370b Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Sun, 3 Apr 2022 20:31:50 +0200 +Subject: [PATCH] openstack-info: fix bashism + +Also simplify striping of trailing comma. 
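For reference, the two portable idioms the patch below switches to can be exercised in isolation; this is an illustrative POSIX-sh sketch with made-up tuple values, not code taken from the agent:

    value=""
    for pair in subnetA:portA subnetB:portB; do   # example tuples, not real IDs
        value="${value}${pair},"                  # plain expansion instead of the bash-only value+=
    done
    value=${value%,}                              # strip the trailing comma, replacing: sed -e 's/,$//g'
    echo "$value"                                 # -> subnetA:portA,subnetB:portB

Both constructs are plain POSIX parameter expansion, so the agent keeps working when /bin/sh is dash or another non-bash shell.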
+--- + heartbeat/openstack-info.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in +index f6dc1ee4d..f3a59fc7a 100755 +--- a/heartbeat/openstack-info.in ++++ b/heartbeat/openstack-info.in +@@ -167,9 +167,9 @@ OSInfoStats() { + --column fixed_ips \ + ${port_id} | grep -P '\"subnet_id\": \".*\",$' | + grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') +- value+="${subnet_id}:${port_id}," ++ value="${value}${subnet_id}:${port_id}," + done +- value=$(echo ${value} | sed -e 's/,$//g') ++ value=${value%,} + + ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value" + diff --git a/SOURCES/bz2093213-aws-vpc-move-ip-add-interface-label-support.patch b/SOURCES/bz2093213-aws-vpc-move-ip-add-interface-label-support.patch new file mode 100644 index 0000000..d1a611c --- /dev/null +++ b/SOURCES/bz2093213-aws-vpc-move-ip-add-interface-label-support.patch @@ -0,0 +1,82 @@ +From 4420ef84f3172c67fc7b8b6ae41ea173de017bf4 Mon Sep 17 00:00:00 2001 +From: Petr Pavlu +Date: Wed, 25 May 2022 15:12:33 +0200 +Subject: [PATCH] aws-vpc-move-ip: Allow to set the interface label + +Add a parameter to specify an interface label to distinguish the IP +address managed by aws-vpc-move-ip, similarly as can be done with +IPaddr2. This allows to easily recognize the address from other +addresses assigned to a given interface. +--- + heartbeat/aws-vpc-move-ip | 30 +++++++++++++++++++++++++++++- + 1 file changed, 29 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 5d5204080..dee040300 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -43,6 +43,7 @@ OCF_RESKEY_address_default="" + OCF_RESKEY_routing_table_default="" + OCF_RESKEY_routing_table_role_default="" + OCF_RESKEY_interface_default="eth0" ++OCF_RESKEY_iflabel_default="" + OCF_RESKEY_monapi_default="false" + OCF_RESKEY_lookup_type_default="InstanceId" + +@@ -54,6 +55,7 @@ OCF_RESKEY_lookup_type_default="InstanceId" + : ${OCF_RESKEY_routing_table=${OCF_RESKEY_routing_table_default}} + : ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}} + : ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} ++: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} + : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} + : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} + +@@ -149,6 +151,18 @@ Name of the network interface, i.e. eth0 + + + ++ ++ ++You can specify an additional label for your IP address here. ++This label is appended to your interface name. ++ ++The kernel allows alphanumeric labels up to a maximum length of 15 ++characters including the interface name and colon (e.g. 
eth0:foobar1234) ++ ++Interface label ++ ++ ++ + + + Enable enhanced monitoring using AWS API calls to check route table entry +@@ -215,6 +229,14 @@ ec2ip_validate() { + return $OCF_ERR_CONFIGURED + fi + ++ if [ -n "$OCF_RESKEY_iflabel" ]; then ++ label=${OCF_RESKEY_interface}:${OFC_RESKEY_iflabel} ++ if [ ${#label} -gt 15 ]; then ++ ocf_exit_reason "Interface label [$label] exceeds maximum character limit of 15" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ fi ++ + TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + +@@ -363,7 +385,13 @@ ec2ip_get_and_configure() { + + # Reconfigure the local ip address + ec2ip_drop +- cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface" ++ ++ extra_opts="" ++ if [ -n "$OCF_RESKEY_iflabel" ]; then ++ extra_opts="$extra_opts label $OCF_RESKEY_interface:$OCF_RESKEY_iflabel" ++ fi ++ ++ cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface $extra_opts" + ocf_log debug "executing command: $cmd" + $cmd + rc=$? diff --git a/SOURCES/bz2094828-lvmlockd-fail-when-use_lvmlockd-not-set.patch b/SOURCES/bz2094828-lvmlockd-fail-when-use_lvmlockd-not-set.patch new file mode 100644 index 0000000..8400437 --- /dev/null +++ b/SOURCES/bz2094828-lvmlockd-fail-when-use_lvmlockd-not-set.patch @@ -0,0 +1,25 @@ +From b3885f7d95fe390371f806c7f3debb3ec8ad012d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 7 Jun 2022 15:20:11 +0200 +Subject: [PATCH] lvmlockd: fail when use_lvmlockd has not been set + +--- + heartbeat/lvmlockd | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd +index 05bb0a2e5..dc7bd2d7e 100755 +--- a/heartbeat/lvmlockd ++++ b/heartbeat/lvmlockd +@@ -179,6 +179,11 @@ setup_lvm_config() + out=$(lvmconfig 'global/locking_type' 2> /dev/null) + lock_type=$(echo "$out" | cut -d'=' -f2) + ++ if [ -z "$use_lvmlockd" ]; then ++ ocf_exit_reason "\"use_lvmlockd\" not set in /etc/lvm/lvm.conf ..." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ + if [ -n "$use_lvmlockd" ] && [ "$use_lvmlockd" != 1 ] ; then + ocf_log info "setting \"use_lvmlockd=1\" in /etc/lvm/lvm.conf ..." + sed -i 's,^[[:blank:]]*use_lvmlockd[[:blank:]]*=.*,\ \ \ \ use_lvmlockd = 1,g' /etc/lvm/lvm.conf diff --git a/SOURCES/bz2103374-ocf-tester-1-update.patch b/SOURCES/bz2103374-ocf-tester-1-update.patch new file mode 100644 index 0000000..0e32ed2 --- /dev/null +++ b/SOURCES/bz2103374-ocf-tester-1-update.patch @@ -0,0 +1,39 @@ +From 46e8d346ca4803245f51a157591c4df1126d3b49 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 12 Jul 2022 12:45:52 +0200 +Subject: [PATCH] ocf-tester: use promotable terms + +--- + tools/ocf-tester.in | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in +index 10822a5a06..f1703ba1b7 100755 +--- a/tools/ocf-tester.in ++++ b/tools/ocf-tester.in +@@ -295,10 +295,10 @@ if [ $rc -eq 3 ]; then + + elif [ $rc -eq 8 ]; then + test_command demote "Cleanup, demote" +- assert $? 0 "Your agent was a master and could not be demoted" 1 ++ assert $? 0 "Your agent was promoted and could not be demoted" 1 + + test_command stop "Cleanup, stop" +- assert $? 0 "Your agent was a master and could not be stopped" 1 ++ assert $? 
0 "Your agent was promoted and could not be stopped" 1 + + elif [ $rc -ne 7 ]; then + test_command stop +@@ -370,10 +370,10 @@ if [ $has_promote -eq 1 -a $has_demote -eq 1 ]; then + assert $? 0 "Demote failed" 1 + + elif [ $has_promote -eq 0 -a $has_demote -eq 0 ]; then +- info "* Your agent does not support master/slave (optional)" ++ info "* Your agent does not support promotable clones (optional)" + + else +- echo "* Your agent partially supports master/slave" ++ echo "* Your agent partially supports promotable clones" + num_errors=`expr $num_errors + 1` + fi + diff --git a/SOURCES/bz2103374-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch b/SOURCES/bz2103374-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch new file mode 100644 index 0000000..a932397 --- /dev/null +++ b/SOURCES/bz2103374-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch @@ -0,0 +1,166 @@ +From 687aa646852d5fd5d4e811b2ec562ebffa15e23d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 14 Jul 2022 14:52:07 +0200 +Subject: [PATCH] ocf-tester: remove deprecated lrmd/lrmadmin code that hasnt + worked since pre-pacemaker days + +--- + tools/ocf-tester.8 | 12 ++----- + tools/ocf-tester.in | 81 --------------------------------------------- + 2 files changed, 2 insertions(+), 91 deletions(-) + +diff --git a/tools/ocf-tester.8 b/tools/ocf-tester.8 +index 850ec0be04..3f398282d2 100644 +--- a/tools/ocf-tester.8 ++++ b/tools/ocf-tester.8 +@@ -1,9 +1,9 @@ +-.TH OCF-TESTER "8" "January 2012" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities" ++.TH OCF-TESTER "8" "July 2022" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities" + .SH NAME + ocf-tester \- Part of the Linux-HA project + .SH SYNOPSIS + .B ocf-tester +-[\fI-LhvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR ++[\fI-hvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR + .SH DESCRIPTION + Tool for testing if a cluster resource is OCF compliant + .SH OPTIONS +@@ -26,11 +26,6 @@ Name of the resource + \fB\-o\fR name=value + Name and value of any parameters required by the agent + .TP +-\fB\-L\fR +-Use lrmadmin/lrmd for tests +-.PP +-Usage: ocf\-tester [\-Lh] \fB\-n\fR resource_name [\-o name=value]* /full/path/to/resource/agent +-.TP + \fB\-h\fR + This text + .TP +@@ -51,6 +46,3 @@ Name of the resource + .TP + \fB\-o\fR name=value + Name and value of any parameters required by the agent +-.TP +-\fB\-L\fR +-Use lrmadmin/lrmd for tests +diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in +index 10822a5a06..15b14e51ea 100755 +--- a/tools/ocf-tester.in ++++ b/tools/ocf-tester.in +@@ -25,8 +25,6 @@ + # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ # + +-LRMD=@libdir@/heartbeat/lrmd +-LRMADMIN=@sbindir@/lrmadmin + DATADIR=@datadir@ + METADATA_LINT="xmllint --noout --valid -" + +@@ -61,7 +59,6 @@ usage() { + echo " -X Turn on RA tracing (expect large output)" + echo " -n name Name of the resource" + echo " -o name=value Name and value of any parameters required by the agent" +- echo " -L Use lrmadmin/lrmd for tests" + exit $1 + } + +@@ -104,7 +101,6 @@ while test "$done" = "0"; do + -o) name=${2%%=*}; value=${2#*=}; + lrm_ra_args="$lrm_ra_args $2"; + ra_args="$ra_args OCF_RESKEY_$name='$value'"; shift; shift;; +- -L) use_lrmd=1; shift;; + -v) verbose=1; shift;; + -d) export HA_debug=1; shift;; + -X) export OCF_TRACE_RA=1; verbose=1; shift;; +@@ -140,79 +136,6 @@ stopped_rc=7 + has_demote=1 + has_promote=1 + +-start_lrmd() { +- lrmd_timeout=0 +- lrmd_interval=0 +- lrmd_target_rc=EVERYTIME +- lrmd_started="" +- $LRMD -s 2>/dev/null +- rc=$? +- if [ $rc -eq 3 ]; then +- lrmd_started=1 +- $LRMD & +- sleep 1 +- $LRMD -s 2>/dev/null +- else +- return $rc +- fi +-} +-add_resource() { +- $LRMADMIN -A $OCF_RESOURCE_INSTANCE \ +- ocf \ +- `basename $agent` \ +- $(basename `dirname $agent`) \ +- $lrm_ra_args > /dev/null +-} +-del_resource() { +- $LRMADMIN -D $OCF_RESOURCE_INSTANCE +-} +-parse_lrmadmin_output() { +- awk ' +-BEGIN{ rc=1; } +-/Waiting for lrmd to callback.../ { n=1; next; } +-n==1 && /----------------operation--------------/ { n++; next; } +-n==2 && /return code:/ { rc=$0; sub("return code: *","",rc); next } +-n==2 && /---------------------------------------/ { +- n++; +- next; +-} +-END{ +- if( n!=3 ) exit 1; +- else exit rc; +-} +-' +-} +-exec_resource() { +- op="$1" +- args="$2" +- $LRMADMIN -E $OCF_RESOURCE_INSTANCE \ +- $op $lrmd_timeout $lrmd_interval \ +- $lrmd_target_rc \ +- $args | parse_lrmadmin_output +-} +- +-if [ "$use_lrmd" = 1 ]; then +- echo "Using lrmd/lrmadmin for all tests" +- start_lrmd || { +- echo "could not start lrmd" >&2 +- exit 1 +- } +- trap ' +- [ "$lrmd_started" = 1 ] && $LRMD -k +- ' EXIT +- add_resource || { +- echo "failed to add resource to lrmd" >&2 +- exit 1 +- } +-fi +- +-lrm_test_command() { +- action="$1" +- msg="$2" +- debug "$msg" +- exec_resource $action "$lrm_ra_args" +-} +- + test_permissions() { + action=meta-data + debug ${1:-"Testing permissions with uid nobody"} +@@ -233,10 +156,6 @@ test_command() { + action=$1; shift + export __OCF_ACTION=$action + msg=${1:-"Testing: $action"} +- if [ "$use_lrmd" = 1 ]; then +- lrm_test_command $action "$msg" +- return $? +- fi + #echo Running: "export $ra_args; $agent $action 2>&1 > /dev/null" + if [ $verbose -eq 0 ]; then + command_output=`$agent $action 2>&1` diff --git a/SOURCES/bz2110452-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch b/SOURCES/bz2110452-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch new file mode 100644 index 0000000..f4c46f8 --- /dev/null +++ b/SOURCES/bz2110452-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch @@ -0,0 +1,75 @@ +From 0063164d72bbaca68f12a2f0a7dbae9ccb41fa39 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 26 Jul 2022 09:08:26 +0200 +Subject: [PATCH] ethmonitor/ovsmonitor/pgsql: remove ignored attrd_updater + "-q" parameter + +attrd_updater in 2.1.3 no longer ignores the -q parameter, which makes +these agents break. It never did anything in attrd_updater, and is +probably left-over from copy/paste crm_attribute code that got changed +to attrd_updater. 
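The resulting call pattern is simply the same attrd_updater invocations minus the now-rejected -q option; a minimal sketch of how such an agent sets and later removes its node attribute (the attribute name and score below are made-up examples, not taken from the patch):

    ATTRNAME="monitor-eth0"                       # hypothetical attribute name
    score=100                                     # e.g. interface status times a score multiplier
    attrd_updater -n "$ATTRNAME" -v "$score"      # set/update the transient attribute (no -q)
    if [ $? -ne 0 ]; then
        echo "attrd_updater: Could not update $ATTRNAME = $score" >&2
    fi
    attrd_updater -n "$ATTRNAME" -D               # delete it again, e.g. from the stop action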
+--- + heartbeat/ethmonitor | 2 +- + heartbeat/ovsmonitor | 2 +- + heartbeat/pgsql | 8 ++++---- + 3 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor +index ba8574131..451738a0b 100755 +--- a/heartbeat/ethmonitor ++++ b/heartbeat/ethmonitor +@@ -464,7 +464,7 @@ END + + set_cib_value() { + local score=`expr $1 \* $OCF_RESKEY_multiplier` +- attrd_updater -n $ATTRNAME -v $score -q ++ attrd_updater -n $ATTRNAME -v $score + local rc=$? + case $rc in + 0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;; +diff --git a/heartbeat/ovsmonitor b/heartbeat/ovsmonitor +index 872ce86eb..6765da4b9 100755 +--- a/heartbeat/ovsmonitor ++++ b/heartbeat/ovsmonitor +@@ -355,7 +355,7 @@ END + + set_cib_value() { + local score=`expr $1 \* $OCF_RESKEY_multiplier` +- attrd_updater -n $ATTRNAME -v $score -q ++ attrd_updater -n $ATTRNAME -v $score + local rc=$? + case $rc in + 0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;; +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index 94aceb324..e93d66855 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -808,7 +808,7 @@ pgsql_real_stop() { + local stop_escalate + + if ocf_is_true ${OCF_RESKEY_check_wal_receiver}; then +- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D -q ++ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D + fi + + if ! pgsql_status +@@ -937,16 +937,16 @@ pgsql_wal_receiver_status() { + receiver_parent_pids=`ps -ef | tr -s " " | grep "[w]al\s*receiver" | cut -d " " -f 3` + + if echo "$receiver_parent_pids" | grep -q -w "$PID" ; then +- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" -q ++ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" + return 0 + fi + + if [ $pgsql_real_monitor_status -eq "$OCF_RUNNING_MASTER" ]; then +- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" -q ++ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" + return 0 + fi + +- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" -q ++ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" + ocf_log warn "wal receiver process is not running" + return 1 + } diff --git a/SOURCES/ha-cloud-support-aliyun.patch b/SOURCES/ha-cloud-support-aliyun.patch new file mode 100644 index 0000000..93d78aa --- /dev/null +++ b/SOURCES/ha-cloud-support-aliyun.patch @@ -0,0 +1,12 @@ +diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +--- a/heartbeat/aliyun-vpc-move-ip 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2021-08-25 13:38:26.786626079 +0200 +@@ -17,7 +17,7 @@ + OCF_RESKEY_interface_default="eth0" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_endpoint_default="vpc.aliyuncs.com" +-OCF_RESKEY_aliyuncli_default="detect" ++OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/bin/aliyuncli" + + + : ${OCF_RESKEY_address=${OCF_RESKEY_address_default}} diff --git a/SOURCES/ha-cloud-support-aws.patch b/SOURCES/ha-cloud-support-aws.patch new file mode 100644 index 0000000..fbda111 --- /dev/null +++ b/SOURCES/ha-cloud-support-aws.patch @@ -0,0 +1,48 @@ +diff --color -uNr a/heartbeat/awseip b/heartbeat/awseip +--- a/heartbeat/awseip 2020-12-03 14:31:17.000000000 +0100 ++++ b/heartbeat/awseip 2021-02-15 16:47:36.624610378 +0100 +@@ -43,7 +43,7 @@ + # + # Defaults + # +-OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws" + OCF_RESKEY_profile_default="default" + 
OCF_RESKEY_api_delay_default="3" + +diff --color -uNr a/heartbeat/awsvip b/heartbeat/awsvip +--- a/heartbeat/awsvip 2020-12-03 14:31:17.000000000 +0100 ++++ b/heartbeat/awsvip 2021-02-15 16:47:48.960632484 +0100 +@@ -42,7 +42,7 @@ + # + # Defaults + # +-OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_api_delay_default="3" + +diff --color -uNr a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +--- a/heartbeat/aws-vpc-move-ip 2020-12-03 14:31:17.000000000 +0100 ++++ b/heartbeat/aws-vpc-move-ip 2021-02-15 16:47:55.484644118 +0100 +@@ -35,7 +35,7 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults +-OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_region_default="" + OCF_RESKEY_ip_default="" +diff --color -uNr a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +--- a/heartbeat/aws-vpc-route53.in 2020-12-03 14:31:17.000000000 +0100 ++++ b/heartbeat/aws-vpc-route53.in 2021-02-15 16:47:59.808651828 +0100 +@@ -45,7 +45,7 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults +-OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_hostedzoneid_default="" + OCF_RESKEY_fullname_default="" diff --git a/SOURCES/ha-cloud-support-gcloud.patch b/SOURCES/ha-cloud-support-gcloud.patch new file mode 100644 index 0000000..95b0d7a --- /dev/null +++ b/SOURCES/ha-cloud-support-gcloud.patch @@ -0,0 +1,33 @@ +diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +--- a/heartbeat/gcp-pd-move.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-pd-move.in 2021-08-25 13:50:54.461732967 +0200 +@@ -32,6 +32,7 @@ + from ocf import logger + + try: ++ sys.path.insert(0, '/usr/lib/fence-agents/support/google') + import googleapiclient.discovery + except ImportError: + pass +diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2021-08-25 13:51:17.489797999 +0200 +@@ -45,6 +45,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/fence-agents/support/google') + import googleapiclient.discovery + import pyroute2 + try: +diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2021-08-25 13:51:35.012847487 +0200 +@@ -29,6 +29,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/fence-agents/support/google') + import googleapiclient.discovery + try: + from google.oauth2.service_account import Credentials as ServiceAccountCredentials diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch new file mode 100644 index 0000000..0e7b605 --- /dev/null +++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch @@ -0,0 +1,787 @@ +diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2021-08-25 09:51:53.037906134 +0200 ++++ b/doc/man/Makefile.am 2021-08-25 09:48:44.578408475 +0200 +@@ -97,6 +97,8 @@ + ocf_heartbeat_ManageRAID.7 \ + ocf_heartbeat_ManageVE.7 \ + ocf_heartbeat_NodeUtilization.7 \ ++ ocf_heartbeat_nova-compute-wait.7 \ ++ ocf_heartbeat_NovaEvacuate.7 \ 
+ ocf_heartbeat_Pure-FTPd.7 \ + ocf_heartbeat_Raid1.7 \ + ocf_heartbeat_Route.7 \ +diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2021-08-25 09:51:53.038906137 +0200 ++++ b/heartbeat/Makefile.am 2021-08-25 09:48:44.588408501 +0200 +@@ -29,6 +29,8 @@ + + ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat + ++ospdir = $(OCF_RA_DIR_PREFIX)/openstack ++ + dtddir = $(datadir)/$(PACKAGE_NAME) + dtd_DATA = ra-api-1.dtd metadata.rng + +@@ -50,6 +52,9 @@ + send_ua_SOURCES = send_ua.c IPv6addr_utils.c + send_ua_LDADD = $(LIBNETLIBS) + ++osp_SCRIPTS = nova-compute-wait \ ++ NovaEvacuate ++ + ocf_SCRIPTS = AoEtarget \ + AudibleAlarm \ + ClusterMon \ +diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait +--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/nova-compute-wait 2021-08-25 09:50:14.626646141 +0200 +@@ -0,0 +1,345 @@ ++#!/bin/sh ++# ++# ++# nova-compute-wait agent manages compute daemons. ++# ++# Copyright (c) 2015 ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++####################################################################### ++# Initialization: ++ ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++OpenStack Nova Compute Server. ++ ++OpenStack Nova Compute Server ++ ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++ ++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN ++ ++DNS domain ++ ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++ ++Deprecated option not in use ++ ++Deprecated ++ ++ ++ ++ ++ ++How long to wait for nova to finish evacuating instances elsewhere ++before starting nova-compute. Only used when the agent detects ++evacuations might be in progress. ++ ++You may need to increase the start timeout when increasing this value. 
++ ++Delay to allow evacuations time to complete ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++nova_usage() { ++ cat </run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf ++[Service] ++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST} ++EOF ++} ++ ++nova_validate() { ++ rc=$OCF_SUCCESS ++ ++ check_binary crudini ++ check_binary nova-compute ++ check_binary fence_compute ++ ++ if [ ! -f /etc/nova/nova.conf ]; then ++ ocf_exit_reason "/etc/nova/nova.conf not found" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ # Is the state directory writable? ++ state_dir=$(dirname $statefile) ++ touch "$state_dir/$$" ++ if [ $? != 0 ]; then ++ ocf_exit_reason "Invalid state directory: $state_dir" ++ return $OCF_ERR_ARGS ++ fi ++ rm -f "$state_dir/$$" ++ ++ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null) ++ if [ $? = 1 ]; then ++ short_host=$(uname -n | awk -F. '{print $1}') ++ if [ "x${OCF_RESKEY_domain}" != x ]; then ++ NOVA_HOST=${short_host}.${OCF_RESKEY_domain} ++ else ++ NOVA_HOST=$(uname -n) ++ fi ++ fi ++ ++ if [ $rc != $OCF_SUCCESS ]; then ++ exit $rc ++ fi ++ return $rc ++} ++ ++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" ++ ++: ${OCF_RESKEY_evacuation_delay=120} ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS ++ ;; ++usage|help) nova_usage ++ exit $OCF_SUCCESS ++ ;; ++esac ++ ++case $__OCF_ACTION in ++start) nova_validate; nova_start;; ++stop) nova_stop;; ++monitor) nova_validate; nova_monitor;; ++notify) nova_notify;; ++validate-all) exit $OCF_SUCCESS;; ++*) nova_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? ++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc ++ +diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/NovaEvacuate 2021-08-25 09:50:23.780670326 +0200 +@@ -0,0 +1,400 @@ ++#!/bin/bash ++# ++# Copyright 2015 Red Hat, Inc. ++# ++# Description: Manages evacuation of nodes running nova-compute ++# ++# Authors: Andrew Beekhof ++# ++# Support: openstack@lists.openstack.org ++# License: Apache Software License (ASL) 2.0 ++# ++ ++ ++####################################################################### ++# Initialization: ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged. ++ ++Evacuator for OpenStack Nova Compute Server ++ ++ ++ ++ ++ ++Authorization URL for connecting to keystone in admin context ++ ++Authorization URL ++ ++ ++ ++ ++ ++Username for connecting to keystone in admin context ++ ++Username ++ ++ ++ ++ ++ ++Password for connecting to keystone in admin context ++ ++Password ++ ++ ++ ++ ++ ++Tenant name for connecting to keystone in admin context. ++Note that with Keystone V3 tenant names are only unique within a domain. 
++ ++Tenant name ++ ++ ++ ++ ++ ++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN ++ ++DNS domain ++ ++ ++ ++ ++ ++Nova API location (internal, public or admin URL) ++ ++Nova API location (internal, public or admin URL) ++ ++ ++ ++ ++ ++Region name for connecting to nova. ++ ++Region name ++ ++ ++ ++ ++ ++Explicitly allow client to perform "insecure" TLS (https) requests. ++The server's certificate will not be verified against any certificate authorities. ++This option should be used with caution. ++ ++Allow insecure TLS requests ++ ++ ++ ++ ++ ++Indicate that nova storage for instances is not shared across compute ++nodes. This must match the reality of how nova storage is configured! ++Otherwise VMs could end up in error state upon evacuation. When ++storage is non-shared, instances on dead hypervisors will be rebuilt ++from their original image or volume, so anything on ephemeral storage ++will be lost. ++ ++Disable shared storage recovery for instances ++ ++ ++ ++ ++ ++Enable extra logging from the evacuation process ++ ++Enable debug logging ++ ++ ++ ++ ++ ++Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean ++up eventual locks/leases. ++ ++Nova evacuate delay ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++evacuate_usage() { ++ cat < >(grep -v "attribute does not exist" 1>&2) | ++ sed 's/ value=""/ value="no"/' | ++ tr '="' ' ' | ++ awk '{print $4" "$6}' ++ ) ++ return $OCF_SUCCESS ++} ++ ++evacuate_validate() { ++ rc=$OCF_SUCCESS ++ fence_options="" ++ ++ if ! have_binary fence_evacuate; then ++ check_binary fence_compute ++ fi ++ ++ # Is the state directory writable? ++ state_dir=$(dirname $statefile) ++ touch "$state_dir/$$" ++ if [ $? 
!= 0 ]; then ++ ocf_exit_reason "Invalid state directory: $state_dir" ++ return $OCF_ERR_ARGS ++ fi ++ rm -f "$state_dir/$$" ++ ++ if [ -z "${OCF_RESKEY_auth_url}" ]; then ++ ocf_exit_reason "auth_url not configured" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}" ++ ++ if [ -z "${OCF_RESKEY_username}" ]; then ++ ocf_exit_reason "username not configured" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ fence_options="${fence_options} -l ${OCF_RESKEY_username}" ++ ++ if [ -z "${OCF_RESKEY_password}" ]; then ++ ocf_exit_reason "password not configured" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ fence_options="${fence_options} -p ${OCF_RESKEY_password}" ++ ++ if [ -z "${OCF_RESKEY_tenant_name}" ]; then ++ ocf_exit_reason "tenant_name not configured" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}" ++ ++ if [ -n "${OCF_RESKEY_domain}" ]; then ++ fence_options="${fence_options} -d ${OCF_RESKEY_domain}" ++ fi ++ ++ if [ -n "${OCF_RESKEY_region_name}" ]; then ++ fence_options="${fence_options} \ ++ --region-name ${OCF_RESKEY_region_name}" ++ fi ++ ++ if [ -n "${OCF_RESKEY_insecure}" ]; then ++ if ocf_is_true "${OCF_RESKEY_insecure}"; then ++ fence_options="${fence_options} --insecure" ++ fi ++ fi ++ ++ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then ++ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then ++ fence_options="${fence_options} --no-shared-storage" ++ fi ++ fi ++ ++ if [ -n "${OCF_RESKEY_verbose}" ]; then ++ if ocf_is_true "${OCF_RESKEY_verbose}"; then ++ fence_options="${fence_options} --verbose" ++ fi ++ fi ++ ++ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then ++ case ${OCF_RESKEY_endpoint_type} in ++ adminURL|publicURL|internalURL) ++ ;; ++ *) ++ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \ ++ "not valid. Use adminURL or publicURL or internalURL" ++ exit $OCF_ERR_CONFIGURED ++ ;; ++ esac ++ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}" ++ fi ++ ++ if [ $rc != $OCF_SUCCESS ]; then ++ exit $rc ++ fi ++ return $rc ++} ++ ++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" ++ ++case $__OCF_ACTION in ++ start) ++ evacuate_validate ++ evacuate_start ++ ;; ++ stop) ++ evacuate_stop ++ ;; ++ monitor) ++ evacuate_validate ++ evacuate_monitor ++ ;; ++ meta-data) ++ meta_data ++ exit $OCF_SUCCESS ++ ;; ++ usage|help) ++ evacuate_usage ++ exit $OCF_SUCCESS ++ ;; ++ validate-all) ++ exit $OCF_SUCCESS ++ ;; ++ *) ++ evacuate_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? ++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec new file mode 100644 index 0000000..a793b66 --- /dev/null +++ b/SPECS/resource-agents.spec @@ -0,0 +1,1212 @@ +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. +# + +# Below is the script used to generate a new source file +# from the resource-agent upstream git repo. 
+# +# TAG=$(git log --pretty="format:%h" -n 1) +# distdir="ClusterLabs-resource-agents-${TAG}" +# TARFILE="${distdir}.tar.gz" +# rm -rf $TARFILE $distdir +# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE +# + +%global upstream_prefix ClusterLabs-resource-agents +%global upstream_version fd0720f7 + +# Whether this platform defaults to using systemd as an init system +# (needs to be evaluated prior to BuildRequires being enumerated and +# installed as it's intended to conditionally select some of these, and +# for that there are only a few indicators with varying reliability: +# - presence of systemd-defined macros (when building in a full-fledged +# environment, which is not the case with ordinary mock-based builds) +# - systemd-aware rpm as manifested with the presence of particular +# macro (rpm itself will trivially always be present when building) +# - existence of /usr/lib/os-release file, which is something heavily +# propagated by systemd project +# - when not good enough, there's always a possibility to check +# particular distro-specific macros (incl. version comparison) +%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ + } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ + } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) + +# determine the ras-set to process based on configure invocation +%bcond_with rgmanager +%bcond_without linuxha + +Name: resource-agents +Summary: Open Source HA Reusable Cluster Resource Scripts +Version: 4.10.0 +Release: 23%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +License: GPLv2+ and LGPLv2+ +URL: https://github.com/ClusterLabs/resource-agents +Source0: %{upstream_prefix}-%{upstream_version}.tar.gz +Patch0: nova-compute-wait-NovaEvacuate.patch +Patch1: bz1952005-pgsqlms-new-ra.patch +Patch2: bz2021125-gcp-ilb-1-fix-log_enable.patch +Patch3: bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch +Patch4: bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch +Patch5: bz2029704-1-db2-crm_attribute-use-forever.patch +Patch6: bz2029704-2-db2-fixes.patch +Patch7: bz2029753-podman-remove-anonymous-volumes.patch +Patch8: bz2055016-1-IPsrcaddr-dhcp-warning.patch +Patch9: bz2055016-2-IPsrcaddr-error-message-route-not-found.patch +Patch10: bz2055016-3-IPsrcaddr-fix-indentation.patch +Patch11: bz2055016-4-IPsrcaddr-fixes.patch +Patch12: bz2069270-corosync-qnetd-new-ra.patch +Patch13: bz2065125-LVM-activate-fix-fence-issue.patch +Patch14: bz2065138-IPaddr2-enable-more-control-for-IPv6-addresses.patch +Patch15: bz2072371-Filesystem-1-fix-uuid-label-device-whitespace.patch +Patch16: bz2072371-Filesystem-2-improve-uuid-label-device-logic.patch +Patch17: bz2083081-bz2083086-bz2083090-bz2083092-update-openstack-agents.patch +Patch18: bz2081585-NovaEvacuate-add-user_domain-project_domain.patch +Patch19: bz2094828-lvmlockd-fail-when-use_lvmlockd-not-set.patch +Patch20: bz2093213-aws-vpc-move-ip-add-interface-label-support.patch +Patch21: bz2063877-all-agents-use-promotable-terms.patch +Patch22: bz2083090-openstack-info-fix-bashism.patch +Patch23: bz2083081-bz2083086-bz2083092-openstack-agents-fixes.patch +Patch24: bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-warn-when-openstackcli-slow.patch +Patch25: bz2083081-bz2083086-bz2083090-bz2083092-openstack-agents-set-domain-parameters-default.patch +Patch26: bz2103374-ocf-tester-1-update.patch +Patch27: bz2103374-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch +Patch28: 
bz2110452-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch + +# bundled ha-cloud-support libs +Patch500: ha-cloud-support-aws.patch +Patch501: ha-cloud-support-aliyun.patch +Patch502: ha-cloud-support-gcloud.patch + +Obsoletes: heartbeat-resources <= %{version} +Provides: heartbeat-resources = %{version} + +# Build dependencies +BuildRequires: make +BuildRequires: automake autoconf pkgconfig gcc +BuildRequires: libxslt glib2-devel +BuildRequires: systemd +BuildRequires: which + +%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version} +BuildRequires: python3-devel +%else +BuildRequires: python-devel +%endif + +# for pgsqlms +BuildRequires: perl-devel perl-English perl-FindBin + +%ifarch x86_64 +BuildRequires: ha-cloud-support +%endif + +%if 0%{?fedora} || 0%{?centos} || 0%{?rhel} +BuildRequires: docbook-style-xsl docbook-dtds +%if 0%{?rhel} == 0 +BuildRequires: libnet-devel +%endif +%endif + +%if 0%{?suse_version} +BuildRequires: libnet-devel +BuildRequires: libglue-devel +BuildRequires: libxslt docbook_4 docbook-xsl-stylesheets +%endif + +## Runtime deps +# system tools shared by several agents +Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk +Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat +Requires: /usr/sbin/fuser /bin/mount +Requires: which + +# Filesystem / fs.sh / netfs.sh +Requires: /sbin/fsck +Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4 +Requires: /usr/sbin/fsck.xfs +Requires: /sbin/mount.nfs /sbin/mount.nfs4 +%if (0%{?fedora} && 0%{?fedora} < 33) || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version} +%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8) +Requires: /usr/sbin/mount.cifs +%else +Recommends: /usr/sbin/mount.cifs +%endif +%endif + +# IPaddr2 +Requires: /sbin/ip + +# LVM / lvm.sh +Requires: /usr/sbin/lvm + +# nfsserver / netfs.sh +Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd + +# ocf.py +Requires: python3 + +# rgmanager +%if %{with rgmanager} +# ip.sh +Requires: /usr/sbin/ethtool +Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6 + +# nfsexport.sh +Requires: /sbin/findfs +Requires: /sbin/quotaon /sbin/quotacheck +%endif + +%description +A set of scripts to interface with several services to operate in a +High Availability environment for both Pacemaker and rgmanager +service managers. + +%ifarch x86_64 +%package cloud +License: GPLv2+ and LGPLv2+ +Summary: Cloud resource agents +Requires: %{name} = %{version}-%{release} +Requires: ha-cloud-support +Requires: socat +Provides: resource-agents-aliyun +Obsoletes: resource-agents-aliyun <= %{version} +Provides: resource-agents-gcp +Obsoletes: resource-agents-gcp <= %{version} + +%description cloud +Cloud resource agents allows Cloud instances to be managed +in a cluster environment. +%endif + +%package paf +License: PostgreSQL +Summary: PostgreSQL Automatic Failover (PAF) resource agent +Requires: %{name} = %{version}-%{release} +Requires: perl-interpreter perl-English perl-FindBin + +%description paf +PostgreSQL Automatic Failover (PAF) resource agents allows PostgreSQL +databases to be managed in a cluster environment. + +%prep +%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos} == 0 && 0%{?rhel} == 0 +%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. 
Please install the correct build packages or define the required macros manually.} +exit 1 +%endif +%setup -q -n %{upstream_prefix}-%{upstream_version} +%patch0 -p1 -F1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 +%patch6 -p1 +%patch7 -p1 +%patch8 -p1 +%patch9 -p1 +%patch10 -p1 +%patch11 -p1 +%patch12 -p1 +%patch13 -p1 +%patch14 -p1 +%patch15 -p1 +%patch16 -p1 +%patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 + +# bundled ha-cloud-support libs +%patch500 -p1 +%patch501 -p1 +%patch502 -p1 + +chmod 755 heartbeat/nova-compute-wait +chmod 755 heartbeat/NovaEvacuate +chmod 755 heartbeat/pgsqlms + +%build +if [ ! -f configure ]; then + ./autogen.sh +fi + +%if 0%{?fedora} >= 11 || 0%{?centos} > 5 || 0%{?rhel} > 5 +CFLAGS="$(echo '%{optflags}')" +%global conf_opt_fatal "--enable-fatal-warnings=no" +%else +CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}" +%global conf_opt_fatal "--enable-fatal-warnings=yes" +%endif + +%if %{with rgmanager} +%global rasset rgmanager +%endif +%if %{with linuxha} +%global rasset linux-ha +%endif +%if %{with rgmanager} && %{with linuxha} +%global rasset all +%endif + +export CFLAGS + +%configure \ +%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version} + PYTHON="%{__python3}" \ +%endif +%ifarch x86_64 + PYTHONPATH="%{_usr}/lib/fence-agents/support/google" \ +%endif + %{conf_opt_fatal} \ +%if %{defined _unitdir} + SYSTEMD_UNIT_DIR=%{_unitdir} \ +%endif +%if %{defined _tmpfilesdir} + SYSTEMD_TMPFILES_DIR=%{_tmpfilesdir} \ + --with-rsctmpdir=/run/resource-agents \ +%endif + --with-pkg-name=%{name} \ + --with-ras-set=%{rasset} + +%if %{defined jobs} +JFLAGS="$(echo '-j%{jobs}')" +%else +JFLAGS="$(echo '%{_smp_mflags}')" +%endif + +make $JFLAGS + +%install +rm -rf %{buildroot} +make install DESTDIR=%{buildroot} + +## tree fixup +# remove docs (there is only one and they should come from doc sections in files) +rm -rf %{buildroot}/usr/share/doc/resource-agents + +%files +%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog +%if %{with linuxha} +%doc heartbeat/README.galera +%doc doc/README.webapps +%doc %{_datadir}/%{name}/ra-api-1.dtd +%doc %{_datadir}/%{name}/metadata.rng +%endif + +%if %{with rgmanager} +%{_datadir}/cluster +%{_sbindir}/rhev-check.sh +%endif + +%if %{with linuxha} +%dir %{_usr}/lib/ocf +%dir %{_usr}/lib/ocf/resource.d +%dir %{_usr}/lib/ocf/lib + +%{_usr}/lib/ocf/lib/heartbeat + +%{_usr}/lib/ocf/resource.d/heartbeat +%{_usr}/lib/ocf/resource.d/openstack + +%{_datadir}/pkgconfig/%{name}.pc + +%if %{defined _unitdir} +%{_unitdir}/resource-agents-deps.target +%endif +%if %{defined _tmpfilesdir} +%{_tmpfilesdir}/%{name}.conf +%endif + +%dir %{_datadir}/%{name} +%dir %{_datadir}/%{name}/ocft +%{_datadir}/%{name}/ocft/configs +%{_datadir}/%{name}/ocft/caselib +%{_datadir}/%{name}/ocft/README +%{_datadir}/%{name}/ocft/README.zh_CN +%{_datadir}/%{name}/ocft/helpers.sh +%exclude %{_datadir}/%{name}/ocft/runocft +%exclude %{_datadir}/%{name}/ocft/runocft.prereq + +%{_sbindir}/ocf-tester +%{_sbindir}/ocft + +%{_includedir}/heartbeat + +%if %{defined _tmpfilesdir} +%dir %attr (1755, root, root) /run/resource-agents +%else +%dir %attr (1755, root, root) %{_var}/run/resource-agents +%endif + +%{_mandir}/man7/*.7* +%{_mandir}/man8/ocf-tester.8* + +### +# Supported, but in another sub package +### +%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* +%exclude /usr/lib/ocf/resource.d/heartbeat/aws* +%exclude 
/usr/lib/ocf/resource.d/heartbeat/azure-* +%exclude %{_mandir}/man7/*aliyun-vpc-move-ip* +%exclude /usr/lib/ocf/resource.d/heartbeat/gcp* +%exclude %{_mandir}/man7/*gcp* +%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms +%exclude %{_mandir}/man7/*pgsqlms* +%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm + +### +# Moved to separate packages +### +%exclude /usr/lib/ocf/resource.d/heartbeat/SAP* +%exclude /usr/lib/ocf/lib/heartbeat/sap* +%exclude %{_mandir}/man7/*SAP* + +### +# Unsupported +### +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/IPaddr +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1 +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6 +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker* +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dovecot +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dummypy +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88 +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mdraid +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/nvmet-* +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt +%exclude 
%{_usr}/lib/ocf/resource.d/heartbeat/rsyslog +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/smb-share +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vmware +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd +%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver +%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_docker*.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_dovecot.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_dummypy.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mdraid.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_nvmet-*.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz +%exclude 
%{_mandir}/man7/ocf_heartbeat_sfex.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_smb-share.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz + +### +# Other excluded files. +### +# ldirectord is not supported +%exclude /etc/ha.d/resource.d/ldirectord +%exclude %{_sysconfdir}/init.d/ldirectord +%exclude %{_sysconfdir}/rc.d/init.d/ldirectord +%exclude %{_unitdir}/ldirectord.service +%exclude /etc/logrotate.d/ldirectord +%exclude /usr/sbin/ldirectord +%exclude %{_mandir}/man8/ldirectord.8.gz + +# For compatability with pre-existing agents +%dir %{_sysconfdir}/ha.d +%{_sysconfdir}/ha.d/shellfuncs + +%{_libexecdir}/heartbeat +%endif + +%ifarch x86_64 +%files cloud +/usr/lib/ocf/resource.d/heartbeat/aliyun-* +%{_mandir}/man7/*aliyun-* +/usr/lib/ocf/resource.d/heartbeat/aws* +%{_mandir}/man7/*aws* +/usr/lib/ocf/resource.d/heartbeat/azure-* +%{_mandir}/man7/*azure-* +/usr/lib/ocf/resource.d/heartbeat/gcp-* +%{_mandir}/man7/*gcp-* +%exclude /usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-ip +%exclude %{_mandir}/man7/*gcp-vpc-move-ip* +%endif + +%files paf +%doc paf_README.md +%license paf_LICENSE +%defattr(-,root,root) +%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms +%{_mandir}/man7/*pgsqlms* +%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm + +%changelog +* Tue Jul 26 2022 Oyvind Albrigtsen - 4.10.0-23 +- ethmonitor/pgsql: remove attrd_updater "-q" parameter to solve issue + with Pacemaker 2.1.3+ not ignoring it + + Resolves: rhbz#2110452 + +* Thu Jul 14 2022 Oyvind Albrigtsen - 4.10.0-22 +- ocf-tester: add testing tool + + Resolves: rhbz#2103374 + +* Thu Jul 14 2022 Oyvind Albrigtsen - 4.10.0-20 +- openstack-cinder-volume/openstack-floating-ip/openstack-info/ + openstack-virtual-ip: new resource agents + + Resolves: rhbz#2083081, rhbz#2083086, rhbz#2083090, rhbz#2083092 + +* Mon Jun 20 2022 Oyvind Albrigtsen - 4.10.0-18 +- all agents: use promotable terms + + Resolves: rhbz#2063877 + +* Thu Jun 9 2022 Oyvind Albrigtsen - 4.10.0-17 +- lvmlockd: fail when use_lvmlockd has not been set +- aws-vpc-move-ip: add interface label support + + Resolves: rhbz#2094828 + Resolves: rhbz#2093213 + +* Tue May 17 2022 Oyvind Albrigtsen - 4.10.0-15 +- NovaEvacuate: add user_domain and project_domain parameters + + Resolves: rhbz#2081585 + +* Thu Apr 21 2022 Oyvind Albrigtsen - 4.10.0-12 +- Filesystem: fix UUID/label device support when there's whitespace + between parameter and UUID/label + + Resolves: rhbz#2072371 + +* Tue Apr 5 2022 Oyvind Albrigtsen - 4.10.0-11 +- LVM-activate: use correct return code to fence failed node +- IPaddr2: enable more control for IPv6 addresses + + Resolves: rhbz#2065125 + Resolves: rhbz#2065138 + +* Tue Mar 29 2022 Oyvind Albrigtsen - 4.10.0-10 +- corosync-qnetd: new resource agent +- spec: remove Samba/CIFS dependency + + Resolves: rhbz#2069270 + Resolves: rhbz#1826455 + +* Thu Mar 3 2022 Oyvind Albrigtsen - 4.10.0-8 +- IPsrcaddr: add warning about possible issues when used with DHCP, + and add error message when matching route not found + + Resolves: rhbz#2055016 + +* Wed Feb 23 2022 Oyvind Albrigtsen - 4.10.0-7 +- db2: use -l forever to fix crm_attribute issue + + Resolves: rhbz#2029704 + +* Wed Jan 5 2022 Oyvind Albrigtsen - 4.10.0-5 +- podman: remove anonymous volumes + + Resolves: rhbz#2029753 + +* Tue Dec 7 2021 
Oyvind Albrigtsen - 4.10.0-4 +- Route: return OCF_NOT_RUNNING for probe action when interface + or route doesnt exist + + Resolves: rhbz#2029796 + +* Tue Nov 9 2021 Oyvind Albrigtsen - 4.10.0-3 +- gcp-ilb: new resource agent + + Resolves: rhbz#2021125 + +* Wed Nov 3 2021 Oyvind Albrigtsen - 4.10.0-1 +- Rebase to resource-agents 4.10.0 upstream release. +- nfsserver: add nfs_server_scope parameter + + Resolves: rhbz#1995918 + Resolves: rhbz#2015171 + +* Thu Oct 7 2021 Oyvind Albrigtsen - 4.9.0-2 +- Rebase to resource-agents 4.9.0 upstream release. +- Add "which" dependency +- All agents: set correct agent and OCF version in metadata + + Resolves: rhbz#1995918 + Resolves: rhbz#2011142 + Resolves: rhbz#2003118 + +* Thu Aug 26 2021 Oyvind Albrigtsen - 4.8.0-13 +- nfsnotify: fix default value for "notify_args" + + Resolves: rhbz#1998039 + +* Wed Aug 25 2021 Oyvind Albrigtsen - 4.8.0-12 +- Create cloud subpackage +- Add nova-compute-wait/NovaEvacuate +- nfsserver: fix nfs-convert issue + + Resolves: rhbz#1997548, rhbz#1997576, rhbz#1991855 + +* Mon Aug 16 2021 Oyvind Albrigtsen - 4.8.0-11 +- Filesystem: force_unmount: remove "Default value" text from metadata + + Resolves: rhbz#1993900 + +* Tue Aug 10 2021 Oyvind Albrigtsen - 4.8.0-10 +- pgsqlms: new resource agent + + Resolves: rhbz#1952005 + +* Tue Aug 10 2021 Mohan Boddu - 4.8.0-8.1 +- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags + Related: rhbz#1991688 + +* Tue Jun 29 2021 Oyvind Albrigtsen - 4.8.0-8 +- Exclude SAP agents that are in separate -sap subpackage + + Resolves: rhbz#1977208 + +* Mon May 17 2021 Oyvind Albrigtsen - 4.8.0-3 +- Remove redhat-lsb-core dependency (lsb_release) + + Resolves: rhbz#1961539 + +* Wed Apr 21 2021 Oyvind Albrigtsen - 4.8.0-2 +- Solve build issues + + Resolves: rhbz#1951253 + +* Fri Apr 16 2021 Mohan Boddu - 4.8.0-1.1 +- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937 + +* Tue Mar 16 2021 Oyvind Albrigtsen - 4.7.0-7 +- Filesystem: change force_unmount default to safe for RHEL9+ (1843578) + +* Wed Mar 3 2021 Oyvind Albrigtsen - 4.7.0-5 +- Exclude unsupported agents + +* Wed Feb 24 2021 Oyvind Albrigtsen - 4.7.0-4 +- remove ldirectord subpackage + + Resolves: rhbz#1932218 + +* Tue Feb 16 2021 Oyvind Albrigtsen - 4.7.0-3 +- add BuildRequires for google lib +- use HA cloud support supplied awscli + +* Wed Jan 27 2021 Fedora Release Engineering - 4.7.0-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild + +* Wed Dec 9 2020 Oyvind Albrigtsen - 4.7.0-1 +- Rebase to resource-agents 4.7.0 upstream release. + +* Mon Aug 24 2020 Oyvind Albrigtsen - 4.6.1-4 +- spec: improvements from upstream project + +* Mon Aug 24 2020 Oyvind Albrigtsen - 4.6.1-3 +- ldirectord: add dependency for perl-IO-Socket-INET6 + + Resolves: rhbz#1868063 + +* Wed Jul 29 2020 Fedora Release Engineering - 4.6.1-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild + +* Fri Jul 24 2020 Oyvind Albrigtsen - 4.6.1-2 +- Make Samba/CIFS dependency weak for Fedora 32 and remove the + dependency from 33+ + +* Thu Jun 18 2020 Oyvind Albrigtsen - 4.6.1-1 +- Rebase to resource-agents 4.6.1 upstream release. + +* Thu Jun 18 2020 Oyvind Albrigtsen - 4.6.0-1 +- Rebase to resource-agents 4.6.0 upstream release. + +* Mon Mar 9 2020 Oyvind Albrigtsen - 4.5.0-1 +- Rebase to resource-agents 4.5.0 upstream release. 
+ +* Thu Jan 30 2020 Fedora Release Engineering - 4.4.0-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild + +* Wed Oct 23 2019 Oyvind Albrigtsen - 4.4.0-1 +- Rebase to resource-agents 4.4.0 upstream release. + +* Fri Jul 26 2019 Fedora Release Engineering - 4.3.0-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild + +* Fri Jun 21 2019 Oyvind Albrigtsen - 4.3.0-1 +- Rebase to resource-agents 4.3.0 upstream release. + +* Fri May 24 2019 Oyvind Albrigtsen - 4.2.0-4 +- Fix build issues + +* Fri Mar 15 2019 Oyvind Albrigtsen - 4.2.0-3 +- systemd-tmpfiles: change path to /run/resource-agents + + Resolves: rhbz#1688865 + +* Sat Feb 02 2019 Fedora Release Engineering - 4.2.0-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild + +* Wed Oct 24 2018 Oyvind Albrigtsen - 4.2.0-1 +- Rebase to resource-agents 4.2.0 upstream release. +- spec: fix missing systemd config files + +* Sat Jul 14 2018 Fedora Release Engineering - 4.1.1-1.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild + +* Sun Mar 18 2018 Iryna Shcherbina - 4.1.1-1.1 +- Update Python 2 dependency declarations to new packaging standards + (See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3) + +* Tue Mar 13 2018 Oyvind Albrigtsen - 4.1.1-1 +- Rebase to resource-agents 4.1.1 upstream release. + +* Mon Feb 19 2018 Oyvind Albrigtsen - 4.1.0-2 +- Add gcc to BuildRequires + +* Fri Feb 09 2018 Igor Gnatenko - 4.1.0-1.1 +- Escape macros in %%changelog + +* Wed Jan 10 2018 Oyvind Albrigtsen - 4.1.0-1 +- Rebase to resource-agents 4.1.0 upstream release. + +* Thu Aug 03 2017 Fedora Release Engineering - 4.0.1-1.3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Thu Jul 27 2017 Fedora Release Engineering - 4.0.1-1.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Sat Feb 11 2017 Fedora Release Engineering - 4.0.1-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Thu Feb 2 2017 Oyvind Albrigtsen - 4.0.1-1 +- Rebase to resource-agents 4.0.1 upstream release. + +* Wed Feb 1 2017 Oyvind Albrigtsen - 4.0.0-2 +- galera: remove "long SST monitoring" support due to corner-case issues + +* Tue Jan 31 2017 Oyvind Albrigtsen - 4.0.0-1 +- Rebase to resource-agents 4.0.0 upstream release. + +* Thu Dec 15 2016 Oyvind Albrigtsen - 3.9.7-6 +- Add netstat dependency + +* Tue Feb 9 2016 Oyvind Albrigtsen - 3.9.7-4 +- Rebase to resource-agents 3.9.7 upstream release. + +* Thu Feb 04 2016 Fedora Release Engineering - 3.9.6-2.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Thu Jun 18 2015 Fedora Release Engineering - 3.9.6-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Mon Apr 20 2015 David Vossel - 3.9.6-2 +- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent + +* Fri Feb 13 2015 David Vossel - 3.9.6-1 +- Rebase to resource-agents 3.9.6 upstream release. + +* Sun Aug 17 2014 Fedora Release Engineering - 3.9.5-12.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Sun Jun 08 2014 Fedora Release Engineering - 3.9.5-12.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Wed Apr 30 2014 David Vossel - 3.9.5-12 +- Sync with latest upstream. + +* Thu Jan 2 2014 David Vossel - 3.9.5-11 +- Sync with latest upstream. + +* Sun Oct 20 2013 David Vossel - 3.9.5-10 +- Fix build system for rawhide. 
+ +* Wed Oct 16 2013 David Vossel - 3.9.5-9 +- Remove rgmanager agents from build. + +* Sun Aug 04 2013 Fedora Release Engineering - 3.9.5-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild + +* Wed Jul 17 2013 Petr Pisar - 3.9.5-7 +- Perl 5.18 rebuild + +* Tue Jun 18 2013 David Vossel - 3.9.5-6 +- Restores rsctmp directory to upstream default. + +* Tue Jun 18 2013 David Vossel - 3.9.5-5 +- Merges redhat provider into heartbeat provider. Remove + rgmanager's redhat provider. + + Resolves: rhbz#917681 + Resolves: rhbz#928890 + Resolves: rhbz#952716 + Resolves: rhbz#960555 + +* Tue Mar 12 2013 David Vossel - 3.9.5-3 +- Fixes build system error with conditional logic involving + IPv6addr and updates spec file to build against rhel 7 as + well as fedora 19. + +* Mon Mar 11 2013 David Vossel - 3.9.5-2 +- Resolves rhbz#915050 + +* Mon Mar 11 2013 David Vossel - 3.9.5-1 +- New upstream release. + +* Fri Jan 25 2013 Kevin Fenzi - 3.9.2-5 +- Fix cifs mount requires + +* Mon Nov 12 2012 Chris Feist - 3.9.2-4 +- Removed version number after dist + +* Mon Oct 29 2012 Chris Feist - 3.9.2-3.8 +- Remove cluster-glue-libs-devel +- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to + disappearance of cluster-glue in F18) + +* Sat Jul 21 2012 Fedora Release Engineering - 3.9.2-3.5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Thu Jul 05 2012 Chris Feist - 3.9.2-3.4 +- Fix location of lvm (change from /sbin to /usr/sbin) + +* Wed Apr 04 2012 Jon Ciesla - 3.9.2-3.3 +- Rebuilt to fix rawhide dependency issues (caused by move of fsck from + /sbin to /usr/sbin). + +* Fri Mar 30 2012 Jon Ciesla - 3.9.2-3.1 +- libnet rebuild. + +* Sat Jan 14 2012 Fedora Release Engineering - 3.9.2-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Fri Jul 8 2011 Fabio M. Di Nitto - 3.9.2-2 +- add post call to resource-agents to integrate with cluster 3.1.4 + +* Thu Jun 30 2011 Fabio M. Di Nitto - 3.9.2-1 +- new upstream release +- fix 2 regressions from 3.9.1 + +* Mon Jun 20 2011 Fabio M. Di Nitto - 3.9.1-1 +- new upstream release +- import spec file from upstream + +* Tue Mar 1 2011 Fabio M. Di Nitto - 3.1.1-1 +- new upstream release 3.1.1 and 1.0.4 + +* Wed Feb 09 2011 Fedora Release Engineering - 3.1.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Thu Dec 2 2010 Fabio M. Di Nitto - 3.1.0-1 +- new upstream release +- spec file update: + Update upstream URL + Update source URL + use standard configure macro + use standard make invokation + +* Thu Oct 7 2010 Fabio M. Di Nitto - 3.0.17-1 +- new upstream release + Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013 + Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733 + Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718 + Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943 + Resolves: rhbz#639018 + +* Thu Oct 7 2010 Andrew Beekhof - 3.0.16-2 +- new upstream release of the Pacemaker agents: 71b1377f907c + +* Thu Sep 2 2010 Fabio M. Di Nitto - 3.0.16-1 +- new upstream release + Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680 + Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844 + Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691 + Resolves: rhbz#622576 + +* Thu Jul 29 2010 Fabio M. 
Di Nitto - 3.0.14-1 +- new upstream release + Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003 + Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547 + Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368 + Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989 + Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181 + Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110 + Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356 + Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202 + Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566 + Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814 + +* Mon Jun 7 2010 Fabio M. Di Nitto - 3.0.13-1 +- new upstream release + Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626 + Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002 + Resolves: rhbz#599643 + +* Tue May 18 2010 Andrew Beekhof - 3.0.12-2 +- libnet is not available on RHEL +- Do not package ldirectord on RHEL + Resolves: rhbz#577264 + +* Mon May 10 2010 Fabio M. Di Nitto - 3.0.12-1 +- new upstream release + Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753 + Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890 + Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010 + Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823 + +* Mon May 10 2010 Andrew Beekhof - 3.0.12-1 +- New pacemaker agents upstream release: a7c0f35916bf + + High: pgsql: properly implement pghost parameter + + High: RA: mysql: fix syntax error + + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371) + + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378) + + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data + + Medium: IPaddr: return the correct code if interface delete failed + + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis) + + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815) + + Medium: pgsql: implement "config" parameter + + Medium: RA: iSCSITarget: follow changed IET access policy + +* Wed Apr 21 2010 Fabio M. Di Nitto - 3.0.11-1 +- new upstream release + Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017 + Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017 + Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533 +- Switch to file based Requires. + Also address several other problems related to missing runtime + components in different agents. + With the current Requires: set, we guarantee all basic functionalities + out of the box for lvm/fs/clusterfs/netfs/networking. 
+ Resolves: rhbz#570008 + +* Sat Apr 17 2010 Andrew Beekhof - 3.0.10-2 +- New pacemaker agents upstream release + + High: RA: vmware: fix set_environment() invocation (LF 2342) + + High: RA: vmware: update to version 0.2 + + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388) + + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg) + + Medium: IPsrcaddr: modify the interface route (lf#2367) + + Medium: ldirectord: Allow multiple email addresses (LF 2168) + + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328) + + Medium: meta-data: improve timeouts in most resource agents + + Medium: nfsserver: use default values (lf#2321) + + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal + + Medium: ocf-shellfuncs: don't output to stderr if using syslog + + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid + + Medium: RA: iSCSILogicalUnit: fix monitor for STGT + + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284) + + Medium: RA: ManageRAID: require bash + + Medium: RA: ManageRAID: require bash + + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988) + + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION + + Medium: RA: VirtualDomain: improve error messages + + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name + + Medium: Route: add route table parameter (lf#2335) + + Medium: sfex: don't use pid file (lf#2363,bnc#585416) + + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416) + +* Fri Apr 9 2010 Fabio M. Di Nitto - 3.0.10-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027 + Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335 + Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249 + Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626 + Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626 + Resolves: rhbz#579059 + +* Wed Mar 24 2010 Andrew Beekhof - 3.0.9-2 +- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page +- Resolves: rhbz#574732 - Add libnet-devel as a dependancy to ensure IPaddrv6 is built + +* Mon Mar 1 2010 Fabio M. Di Nitto - 3.0.9-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902 + Resolves: rhbz#512171, rhbz#519491 + +* Mon Feb 22 2010 Fabio M. Di Nitto - 3.0.8-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901 + Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157 + Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201 + Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363 + Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630 + Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047 + Resolves: rhbz#554968, rhbz#555047 +- spec file update: + * update spec file copyright date + * use bz2 tarball + +* Fri Jan 15 2010 Fabio M. Di Nitto - 3.0.7-2 +- Add python as BuildRequires + +* Mon Jan 11 2010 Fabio M. 
Di Nitto - 3.0.7-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#526286, rhbz#533461 + +* Mon Jan 11 2010 Andrew Beekhof - 3.0.6-2 +- Update Pacameker agents to upstream version: c76b4a6eb576 + + High: RA: VirtualDomain: fix forceful stop (LF 2283) + + High: apache: monitor operation of depth 10 for web applications (LF 2234) + + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281) + + Medium: RA: Route: improve validate (LF 2232) + + Medium: mark obsolete RAs as deprecated (LF 2244) + + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work + +* Mon Dec 7 2009 Fabio M. Di Nitto - 3.0.6-1 +- New rgmanager resource agents upstream release +- spec file update: + * use global instead of define + * use new Source0 url + * use %%name macro more aggressively + +* Mon Dec 7 2009 Andrew Beekhof - 3.0.5-2 +- Update Pacameker agents to upstream version: bc00c0b065d9 + + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239) + + High: doc: add man pages for all RAs (LF2237) + + High: syslog-ng: new RA + + High: vmware: make meta-data work and several cleanups (LF 2212) + + Medium: .ocf-shellfuncs: add ocf_is_probe function + + Medium: Dev: make RAs executable (LF2239) + + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034) + + Medium: add mercurial repository version information to .ocf-shellfuncs + + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469) + + Medium: iSCSITarget, iSCSILogicalUnit: support LIO + + Medium: nfsserver: use check_binary properly in validate (LF 2211) + + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219) + + Medium: oracle/oralsnr: export variables properly + + Medium: pgsql: remove the previous backup_label if it exists + + Medium: postfix: fix double stop (thanks to Dinh N. Quoc) + + RA: LVM: Make monitor operation quiet in logs (bnc#546353) + + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968) + + ldirectord: OCF agent: overhaul + +* Fri Nov 20 2009 Fabio M. Di Nitto - 3.0.5-1 +- New rgmanager resource agents upstream release +- Allow pacemaker to use rgmanager resource agents + +* Wed Oct 28 2009 Andrew Beekhof - 3.0.4-2 +- Update Pacameker agents to upstream version: e2338892f59f + + High: send_arp - turn on unsolicited mode for compatibilty with the libnet version's exit codes + + High: Trap sigterm for compatibility with the libnet version of send_arp + + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down + + Medium: IPv6addr: recognize network masks properly + + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define" + +* Wed Oct 21 2009 Fabio M. Di Nitto - 3.0.4-1 +- New rgmanager resource agents upstream release + +* Mon Oct 12 2009 Andrew Beekhof - 3.0.3-3 +- Update Pacameker agents to upstream version: 099c0e5d80db + + Add the ha_parameter function back into .ocf-shellfuncs. 
+ + Bug bnc#534803 - Provide a default for MAILCMD + + Fix use of undefined macro @HA_NOARCHDATAHBDIR@ + + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff) + + Import shellfuncs from heartbeat as badly written RAs use it + + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate + + Medium: RA: Filesystem: implement monitor operation + + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable + + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum) + + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID + + Medium: RA: iSCSITarget: be more persistent deleting targets on stop + + Medium: RA: portblock: add per-IP filtering capability + + Medium: mysql-proxy: log_level and keepalive parameters + + Medium: oracle: drop spurious output from sqlplus + + RA: Filesystem: allow configuring smbfs mounts as clones + +* Wed Sep 23 2009 Fabio M. Di Nitto - 3.0.3-1 +- New rgmanager resource agents upstream release + +* Thu Aug 20 2009 Fabio M. Di Nitto - 3.0.1-1 +- New rgmanager resource agents upstream release + +* Tue Aug 18 2009 Andrew Beekhof - 3.0.0-16 +- Create an ldirectord package +- Update Pacameker agents to upstream version: 2198dc90bec4 + + Build: Import ldirectord. + + Ensure HA_VARRUNDIR has a value to substitute + + High: Add findif tool (mandatory for IPaddr/IPaddr2) + + High: IPv6addr: new nic and cidr_netmask parameters + + High: postfix: new resource agent + + Include license information + + Low (LF 2159): Squid: make the regexp match more precisely output of netstat + + Low: configure: Fix package name. + + Low: ldirectord: add dependency on $remote_fs. + + Low: ldirectord: add mandatory required header to init script. + + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp + + Medium: VirtualDomain: destroy domain shortly before timeout expiry + + Medium: shellfuncs: Make the mktemp wrappers work. + + Remove references to Echo function + + Remove references to heartbeat shellfuncs. + + Remove useless path lookups + + findif: actually include the right header. Simplify configure. + + ldirectord: Remove superfluous configure artifact. + + ocf-tester: Fix package reference and path to DTD. + +* Tue Aug 11 2009 Ville Skyttä - 3.0.0-15 +- Use bzipped upstream hg tarball. + +* Wed Jul 29 2009 Fabio M. Di Nitto - 3.0.0-14 +- Merge Pacemaker cluster resource agents: + * Add Source1. + * Drop noarch. We have real binaries now. + * Update BuildRequires. + * Update all relevant prep/build/install/files/description sections. + +* Sun Jul 26 2009 Fedora Release Engineering - 3.0.0-13 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild + +* Wed Jul 8 2009 Fabio M. Di Nitto - 3.0.0-12 +- spec file updates: + * Update copyright header + * final release.. undefine alphatag + +* Thu Jul 2 2009 Fabio M. Di Nitto - 3.0.0-11.rc4 +- New upstream release. + +* Sat Jun 20 2009 Fabio M. Di Nitto - 3.0.0-10.rc3 +- New upstream release. + +* Wed Jun 10 2009 Fabio M. Di Nitto - 3.0.0-9.rc2 +- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970 + +* Tue Mar 24 2009 Fabio M. Di Nitto - 3.0.0-8.rc1 +- New upstream release. +- Update BuildRoot usage to preferred versions/names + +* Mon Mar 9 2009 Fabio M. Di Nitto - 3.0.0-7.beta1 +- New upstream release. + +* Fri Mar 6 2009 Fabio M. Di Nitto - 3.0.0-6.alpha7 +- New upstream release. + +* Tue Mar 3 2009 Fabio M. Di Nitto - 3.0.0-5.alpha6 +- New upstream release. 
+ +* Tue Feb 24 2009 Fabio M. Di Nitto - 3.0.0-4.alpha5 +- Drop Conflicts with rgmanager. + +* Mon Feb 23 2009 Fabio M. Di Nitto - 3.0.0-3.alpha5 +- New upstream release. + +* Thu Feb 19 2009 Fabio M. Di Nitto - 3.0.0-2.alpha4 +- Add comments on how to build this package. + +* Thu Feb 5 2009 Fabio M. Di Nitto - 3.0.0-1.alpha4 +- New upstream release. +- Fix datadir/cluster directory ownership. + +* Tue Jan 27 2009 Fabio M. Di Nitto - 3.0.0-1.alpha3 + - Initial packaging
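
For reference, the dist-git layout imported above (SOURCES/ holding the upstream tarball and the bz* patches, SPECS/resource-agents.spec) can be exercised locally with rpmbuild. The commands below are a minimal sketch, not part of the import itself: they assume the tarball named in .resource-agents.metadata has already been fetched into SOURCES/, that the listed BuildRequires (including ha-cloud-support on x86_64) are installed, and that redefining _topdir to the checkout directory is acceptable for a throwaway build.

    # run from the top of the checkout, where SOURCES/ and SPECS/ live
    # prep only: unpack Source0 and apply Patch0..Patch28 plus the ha-cloud-support patches (500-502)
    rpmbuild --define "_topdir $(pwd)" -bp SPECS/resource-agents.spec

    # full build of the resource-agents, resource-agents-cloud and resource-agents-paf packages
    rpmbuild --define "_topdir $(pwd)" -ba SPECS/resource-agents.spec

With -bp the prepared source tree lands under BUILD/ (as ClusterLabs-resource-agents-fd0720f7 with this spec), which is a convenient way to inspect how the patch stack applies before attempting a full -ba build.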