commit 61e037f831b35514ff2f2b4385330bc8db4c47bf Author: CentOS Sources Date: Tue May 16 09:16:13 2023 +0000 import pcs-0.10.15-4.el8_8.1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ab78704 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +SOURCES/HAM-logo.png +SOURCES/backports-3.23.0.gem +SOURCES/dacite-1.6.0.tar.gz +SOURCES/daemons-1.4.1.gem +SOURCES/dataclasses-0.8.tar.gz +SOURCES/ethon-0.16.0.gem +SOURCES/eventmachine-1.2.7.gem +SOURCES/ffi-1.15.5.gem +SOURCES/json-2.6.3.gem +SOURCES/mustermann-2.0.2.gem +SOURCES/open4-1.3.4-1.gem +SOURCES/pcs-0.10.15.tar.gz +SOURCES/pcs-web-ui-0.1.13.tar.gz +SOURCES/pcs-web-ui-node-modules-0.1.13.tar.xz +SOURCES/pyagentx-0.4.pcs.2.tar.gz +SOURCES/python-dateutil-2.8.2.tar.gz +SOURCES/rack-2.2.6.4.gem +SOURCES/rack-protection-2.2.4.gem +SOURCES/rack-test-2.0.2.gem +SOURCES/rexml-3.2.5.gem +SOURCES/ruby2_keywords-0.0.5.gem +SOURCES/sinatra-2.2.4.gem +SOURCES/thin-1.8.1.gem +SOURCES/tilt-2.0.11.gem +SOURCES/tornado-6.1.0.tar.gz diff --git a/.pcs.metadata b/.pcs.metadata new file mode 100644 index 0000000..5227366 --- /dev/null +++ b/.pcs.metadata @@ -0,0 +1,25 @@ +679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png +0e11246385a9e0a4bc122b74fb74fe536a234f81 SOURCES/backports-3.23.0.gem +31546c37fbdc6270d5097687619e9c0db6f1c05c SOURCES/dacite-1.6.0.tar.gz +4795a8962cc1608bfec0d91fa4d438c7cfe90c62 SOURCES/daemons-1.4.1.gem +8b7598273d2ae6dad2b88466aefac55071a41926 SOURCES/dataclasses-0.8.tar.gz +5b56a68268708c474bef04550639ded3add5e946 SOURCES/ethon-0.16.0.gem +7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem +97632b7975067266c0b39596de0a4c86d9330658 SOURCES/ffi-1.15.5.gem +6d78f730b7f3b25fb3f93684fe1364acf58bce6b SOURCES/json-2.6.3.gem +f5f804366823c1126791dfefd98dd0539563785c SOURCES/mustermann-2.0.2.gem +41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem +00e234824e85afca99df9043dd6eb47490b220c4 SOURCES/pcs-0.10.15.tar.gz +f7455776936492ce7b241f9801d6bbc946b0461a SOURCES/pcs-web-ui-0.1.13.tar.gz +bd18d97d611233914828719c97b4d98d079913d2 SOURCES/pcs-web-ui-node-modules-0.1.13.tar.xz +3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz +c2ba10c775b7a52a4b57cac4d4110a0c0f812a82 SOURCES/python-dateutil-2.8.2.tar.gz +bbaa023e07bdc4143c5dd18d752c2543f254666f SOURCES/rack-2.2.6.4.gem +5347315a7283f0b04443e924ed4eaa17807432c8 SOURCES/rack-protection-2.2.4.gem +3c669527ecbcb9f915a83983ec89320c356e1fe3 SOURCES/rack-test-2.0.2.gem +e7f48fa5fb2d92e6cb21d6b1638fe41a5a7c4287 SOURCES/rexml-3.2.5.gem +d017b9e4d1978e0b3ccc3e2a31493809e4693cd3 SOURCES/ruby2_keywords-0.0.5.gem +fa6a6c98f885e93f54c23dd0454cae906e82c31b SOURCES/sinatra-2.2.4.gem +1ac6292a98e17247b7bb847a35ff868605256f7b SOURCES/thin-1.8.1.gem +360d77c80d2851a538fb13d43751093115c34712 SOURCES/tilt-2.0.11.gem +c23c617c7a0205e465bebad5b8cdf289ae8402a2 SOURCES/tornado-6.1.0.tar.gz diff --git a/SOURCES/bz2151166-01-fix-displaying-bool-and-integer-values.patch b/SOURCES/bz2151166-01-fix-displaying-bool-and-integer-values.patch new file mode 100644 index 0000000..cda72c4 --- /dev/null +++ b/SOURCES/bz2151166-01-fix-displaying-bool-and-integer-values.patch @@ -0,0 +1,128 @@ +From 0da95a7f05ae7600eebe30df78a3d4622cd6b4f8 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 7 Dec 2022 15:53:25 +0100 +Subject: [PATCH 2/5] fix displaying bool and integer values in `pcs resource + config` command + +--- + pcs/cli/resource/output.py | 18 +++++++++--------- + pcs_test/resources/cib-resources.xml | 2 +- + 
pcs_test/tier1/legacy/test_resource.py | 3 ++- + pcs_test/tools/resources_dto.py | 4 ++-- + 4 files changed, 14 insertions(+), 13 deletions(-) + +diff --git a/pcs/cli/resource/output.py b/pcs/cli/resource/output.py +index 6d1fad16..0705d27b 100644 +--- a/pcs/cli/resource/output.py ++++ b/pcs/cli/resource/output.py +@@ -69,9 +69,9 @@ def _resource_operation_to_pairs( + pairs.append(("interval-origin", operation_dto.interval_origin)) + if operation_dto.timeout: + pairs.append(("timeout", operation_dto.timeout)) +- if operation_dto.enabled: ++ if operation_dto.enabled is not None: + pairs.append(("enabled", _bool_to_cli_value(operation_dto.enabled))) +- if operation_dto.record_pending: ++ if operation_dto.record_pending is not None: + pairs.append( + ("record-pending", _bool_to_cli_value(operation_dto.record_pending)) + ) +@@ -477,13 +477,13 @@ def _resource_bundle_container_options_to_pairs( + options: CibResourceBundleContainerRuntimeOptionsDto, + ) -> List[Tuple[str, str]]: + option_list = [("image", options.image)] +- if options.replicas: ++ if options.replicas is not None: + option_list.append(("replicas", str(options.replicas))) +- if options.replicas_per_host: ++ if options.replicas_per_host is not None: + option_list.append( + ("replicas-per-host", str(options.replicas_per_host)) + ) +- if options.promoted_max: ++ if options.promoted_max is not None: + option_list.append(("promoted-max", str(options.promoted_max))) + if options.run_command: + option_list.append(("run-command", options.run_command)) +@@ -508,7 +508,7 @@ def _resource_bundle_network_options_to_pairs( + network_options.append( + ("ip-range-start", bundle_network_dto.ip_range_start) + ) +- if bundle_network_dto.control_port: ++ if bundle_network_dto.control_port is not None: + network_options.append( + ("control-port", str(bundle_network_dto.control_port)) + ) +@@ -516,7 +516,7 @@ def _resource_bundle_network_options_to_pairs( + network_options.append( + ("host-interface", bundle_network_dto.host_interface) + ) +- if bundle_network_dto.host_netmask: ++ if bundle_network_dto.host_netmask is not None: + network_options.append( + ("host-netmask", str(bundle_network_dto.host_netmask)) + ) +@@ -531,9 +531,9 @@ def _resource_bundle_port_mapping_to_pairs( + bundle_net_port_mapping_dto: CibResourceBundlePortMappingDto, + ) -> List[Tuple[str, str]]: + mapping = [] +- if bundle_net_port_mapping_dto.port: ++ if bundle_net_port_mapping_dto.port is not None: + mapping.append(("port", str(bundle_net_port_mapping_dto.port))) +- if bundle_net_port_mapping_dto.internal_port: ++ if bundle_net_port_mapping_dto.internal_port is not None: + mapping.append( + ("internal-port", str(bundle_net_port_mapping_dto.internal_port)) + ) +diff --git a/pcs_test/resources/cib-resources.xml b/pcs_test/resources/cib-resources.xml +index 67cf5178..524b8fbb 100644 +--- a/pcs_test/resources/cib-resources.xml ++++ b/pcs_test/resources/cib-resources.xml +@@ -53,7 +53,7 @@ + + + +- ++ + + + +diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py +index 2ea5c423..65ad1090 100644 +--- a/pcs_test/tier1/legacy/test_resource.py ++++ b/pcs_test/tier1/legacy/test_resource.py +@@ -753,7 +753,7 @@ Error: moni=tor does not appear to be a valid operation action + + o, r = pcs( + self.temp_cib.name, +- "resource create --no-default-ops OPTest ocf:heartbeat:Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1".split(), ++ "resource create --no-default-ops OPTest ocf:heartbeat:Dummy op monitor 
interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1 enabled=0".split(), + ) + ac(o, "") + assert r == 0 +@@ -770,6 +770,7 @@ Error: moni=tor does not appear to be a valid operation action + OCF_CHECK_LEVEL=1 + monitor: OPTest-monitor-interval-25s + interval=25s ++ enabled=0 + OCF_CHECK_LEVEL=1 + """ + ), +diff --git a/pcs_test/tools/resources_dto.py b/pcs_test/tools/resources_dto.py +index 8f46f6dd..a980ec80 100644 +--- a/pcs_test/tools/resources_dto.py ++++ b/pcs_test/tools/resources_dto.py +@@ -233,8 +233,8 @@ PRIMITIVE_R7 = CibResourcePrimitiveDto( + start_delay=None, + interval_origin=None, + timeout="20s", +- enabled=None, +- record_pending=None, ++ enabled=False, ++ record_pending=False, + role=None, + on_fail=None, + meta_attributes=[], +-- +2.39.0 + diff --git a/SOURCES/bz2151511-01-add-warning-when-updating-a-misconfigured-resource.patch b/SOURCES/bz2151511-01-add-warning-when-updating-a-misconfigured-resource.patch new file mode 100644 index 0000000..52e75f8 --- /dev/null +++ b/SOURCES/bz2151511-01-add-warning-when-updating-a-misconfigured-resource.patch @@ -0,0 +1,732 @@ +From 58589e47f2913276ea1c2164a3ce8ee694fb2b78 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 7 Dec 2022 11:33:25 +0100 +Subject: [PATCH 1/5] add warning when updating a misconfigured resource + +--- + pcs/common/reports/codes.py | 3 + + pcs/common/reports/messages.py | 19 +++++ + pcs/lib/cib/resource/primitive.py | 84 ++++++++++++++----- + pcs/lib/pacemaker/live.py | 38 ++------- + .../tier0/common/reports/test_messages.py | 16 ++++ + .../cib/resource/test_primitive_validate.py | 56 +++++++------ + pcs_test/tier0/lib/pacemaker/test_live.py | 78 +++++------------ + pcs_test/tier1/legacy/test_stonith.py | 5 +- + 8 files changed, 161 insertions(+), 138 deletions(-) + +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index deecc626..48048af7 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -40,6 +40,9 @@ AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE") + AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE") + AGENT_NAME_GUESSED = M("AGENT_NAME_GUESSED") + AGENT_SELF_VALIDATION_INVALID_DATA = M("AGENT_SELF_VALIDATION_INVALID_DATA") ++AGENT_SELF_VALIDATION_SKIPPED_UPDATED_RESOURCE_MISCONFIGURED = M( ++ "AGENT_SELF_VALIDATION_SKIPPED_UPDATED_RESOURCE_MISCONFIGURED" ++) + AGENT_SELF_VALIDATION_RESULT = M("AGENT_SELF_VALIDATION_RESULT") + BAD_CLUSTER_STATE_FORMAT = M("BAD_CLUSTER_STATE_FORMAT") + BOOTH_ADDRESS_DUPLICATION = M("BOOTH_ADDRESS_DUPLICATION") +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index d27c1dee..24bb222f 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -7584,6 +7584,25 @@ class AgentSelfValidationInvalidData(ReportItemMessage): + return f"Invalid validation data from agent: {self.reason}" + + ++@dataclass(frozen=True) ++class AgentSelfValidationSkippedUpdatedResourceMisconfigured(ReportItemMessage): ++ """ ++ Agent self validation is skipped when updating a resource as it is ++ misconfigured in its current state. ++ """ ++ ++ result: str ++ _code = codes.AGENT_SELF_VALIDATION_SKIPPED_UPDATED_RESOURCE_MISCONFIGURED ++ ++ @property ++ def message(self) -> str: ++ return ( ++ "The resource was misconfigured before the update, therefore agent " ++ "self-validation will not be run for the updated configuration. 
" ++ "Validation output of the original configuration:\n{result}" ++ ).format(result="\n".join(indent(self.result.splitlines()))) ++ ++ + @dataclass(frozen=True) + class BoothAuthfileNotUsed(ReportItemMessage): + """ +diff --git a/pcs/lib/cib/resource/primitive.py b/pcs/lib/cib/resource/primitive.py +index 3ebd01c6..c5df8e58 100644 +--- a/pcs/lib/cib/resource/primitive.py ++++ b/pcs/lib/cib/resource/primitive.py +@@ -357,6 +357,31 @@ def _is_ocf_or_stonith_agent(resource_agent_name: ResourceAgentName) -> bool: + return resource_agent_name.standard in ("stonith", "ocf") + + ++def _get_report_from_agent_self_validation( ++ is_valid: Optional[bool], ++ reason: str, ++ report_severity: reports.ReportItemSeverity, ++) -> reports.ReportItemList: ++ report_items = [] ++ if is_valid is None: ++ report_items.append( ++ reports.ReportItem( ++ report_severity, ++ reports.messages.AgentSelfValidationInvalidData(reason), ++ ) ++ ) ++ elif not is_valid or reason: ++ if is_valid: ++ report_severity = reports.ReportItemSeverity.warning() ++ report_items.append( ++ reports.ReportItem( ++ report_severity, ++ reports.messages.AgentSelfValidationResult(reason), ++ ) ++ ) ++ return report_items ++ ++ + def validate_resource_instance_attributes_create( + cmd_runner: CommandRunner, + resource_agent: ResourceAgentFacade, +@@ -402,16 +427,16 @@ def validate_resource_instance_attributes_create( + for report_item in report_items + ) + ): +- ( +- dummy_is_valid, +- agent_validation_reports, +- ) = validate_resource_instance_attributes_via_pcmk( +- cmd_runner, +- agent_name, +- instance_attributes, +- reports.get_severity(reports.codes.FORCE, force), ++ report_items.extend( ++ _get_report_from_agent_self_validation( ++ *validate_resource_instance_attributes_via_pcmk( ++ cmd_runner, ++ agent_name, ++ instance_attributes, ++ ), ++ reports.get_severity(reports.codes.FORCE, force), ++ ) + ) +- report_items.extend(agent_validation_reports) + return report_items + + +@@ -505,25 +530,40 @@ def validate_resource_instance_attributes_update( + ) + ): + ( +- is_valid, +- dummy_reports, ++ original_is_valid, ++ original_reason, + ) = validate_resource_instance_attributes_via_pcmk( + cmd_runner, + agent_name, + current_instance_attrs, +- reports.ReportItemSeverity.error(), + ) +- if is_valid: +- ( +- dummy_is_valid, +- agent_validation_reports, +- ) = validate_resource_instance_attributes_via_pcmk( +- cmd_runner, +- resource_agent.metadata.name, +- final_attrs, +- reports.get_severity(reports.codes.FORCE, force), ++ if original_is_valid: ++ report_items.extend( ++ _get_report_from_agent_self_validation( ++ *validate_resource_instance_attributes_via_pcmk( ++ cmd_runner, ++ resource_agent.metadata.name, ++ final_attrs, ++ ), ++ reports.get_severity(reports.codes.FORCE, force), ++ ) ++ ) ++ elif original_is_valid is None: ++ report_items.append( ++ reports.ReportItem.warning( ++ reports.messages.AgentSelfValidationInvalidData( ++ original_reason ++ ) ++ ) ++ ) ++ else: ++ report_items.append( ++ reports.ReportItem.warning( ++ reports.messages.AgentSelfValidationSkippedUpdatedResourceMisconfigured( ++ original_reason ++ ) ++ ) + ) +- report_items.extend(agent_validation_reports) + return report_items + + +diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py +index fd26dabb..726f6b67 100644 +--- a/pcs/lib/pacemaker/live.py ++++ b/pcs/lib/pacemaker/live.py +@@ -902,8 +902,7 @@ def _validate_stonith_instance_attributes_via_pcmk( + cmd_runner: CommandRunner, + agent_name: ResourceAgentName, + instance_attributes: 
Mapping[str, str], +- not_valid_severity: reports.ReportItemSeverity, +-) -> Tuple[Optional[bool], reports.ReportItemList]: ++) -> Tuple[Optional[bool], str]: + cmd = [ + settings.stonith_admin, + "--validate", +@@ -917,7 +916,6 @@ def _validate_stonith_instance_attributes_via_pcmk( + cmd, + "./validate/command/output", + instance_attributes, +- not_valid_severity, + ) + + +@@ -925,8 +923,7 @@ def _validate_resource_instance_attributes_via_pcmk( + cmd_runner: CommandRunner, + agent_name: ResourceAgentName, + instance_attributes: Mapping[str, str], +- not_valid_severity: reports.ReportItemSeverity, +-) -> Tuple[Optional[bool], reports.ReportItemList]: ++) -> Tuple[Optional[bool], str]: + cmd = [ + settings.crm_resource_binary, + "--validate", +@@ -944,7 +941,6 @@ def _validate_resource_instance_attributes_via_pcmk( + cmd, + "./resource-agent-action/command/output", + instance_attributes, +- not_valid_severity, + ) + + +@@ -953,8 +949,7 @@ def _handle_instance_attributes_validation_via_pcmk( + cmd: StringSequence, + data_xpath: str, + instance_attributes: Mapping[str, str], +- not_valid_severity: reports.ReportItemSeverity, +-) -> Tuple[Optional[bool], reports.ReportItemList]: ++) -> Tuple[Optional[bool], str]: + full_cmd = list(cmd) + for key, value in sorted(instance_attributes.items()): + full_cmd.extend(["--option", f"{key}={value}"]) +@@ -963,12 +958,7 @@ def _handle_instance_attributes_validation_via_pcmk( + # dom = _get_api_result_dom(stdout) + dom = xml_fromstring(stdout) + except (etree.XMLSyntaxError, etree.DocumentInvalid) as e: +- return None, [ +- reports.ReportItem( +- not_valid_severity, +- reports.messages.AgentSelfValidationInvalidData(str(e)), +- ) +- ] ++ return None, str(e) + result = "\n".join( + "\n".join( + line.strip() for line in item.text.split("\n") if line.strip() +@@ -976,38 +966,22 @@ def _handle_instance_attributes_validation_via_pcmk( + for item in dom.iterfind(data_xpath) + if item.get("source") == "stderr" and item.text + ).strip() +- if return_value == 0: +- if result: +- return True, [ +- reports.ReportItem.warning( +- reports.messages.AgentSelfValidationResult(result) +- ) +- ] +- return True, [] +- return False, [ +- reports.ReportItem( +- not_valid_severity, +- reports.messages.AgentSelfValidationResult(result), +- ) +- ] ++ return return_value == 0, result + + + def validate_resource_instance_attributes_via_pcmk( + cmd_runner: CommandRunner, + resource_agent_name: ResourceAgentName, + instance_attributes: Mapping[str, str], +- not_valid_severity: reports.ReportItemSeverity, +-) -> Tuple[Optional[bool], reports.ReportItemList]: ++) -> Tuple[Optional[bool], str]: + if resource_agent_name.is_stonith: + return _validate_stonith_instance_attributes_via_pcmk( + cmd_runner, + resource_agent_name, + instance_attributes, +- not_valid_severity, + ) + return _validate_resource_instance_attributes_via_pcmk( + cmd_runner, + resource_agent_name, + instance_attributes, +- not_valid_severity, + ) +diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py +index 17627b80..5fcc62fc 100644 +--- a/pcs_test/tier0/common/reports/test_messages.py ++++ b/pcs_test/tier0/common/reports/test_messages.py +@@ -5562,6 +5562,22 @@ class AgentSelfValidationInvalidData(NameBuildTest): + ) + + ++class AgentSelfValidationSkippedUpdatedResourceMisconfigured(NameBuildTest): ++ def test_message(self): ++ lines = list(f"line #{i}" for i in range(3)) ++ self.assert_message_from_report( ++ ( ++ "The resource was misconfigured before the 
update, therefore " ++ "agent self-validation will not be run for the updated " ++ "configuration. Validation output of the original " ++ "configuration:\n {}" ++ ).format("\n ".join(lines)), ++ reports.AgentSelfValidationSkippedUpdatedResourceMisconfigured( ++ "\n".join(lines) ++ ), ++ ) ++ ++ + class BoothAuthfileNotUsed(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +diff --git a/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py b/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py +index 2cba7086..1bc3a5a6 100644 +--- a/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py ++++ b/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py +@@ -609,7 +609,6 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ) + + def test_force(self): +@@ -629,15 +628,14 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + attributes, +- reports.ReportItemSeverity.warning(), + ) + + def test_failure(self): + attributes = {"required": "value"} + facade = _fixture_ocf_agent() +- failure_reports = ["report1", "report2"] +- self.agent_self_validation_mock.return_value = False, failure_reports +- self.assertEqual( ++ failure_reason = "failure reason" ++ self.agent_self_validation_mock.return_value = False, failure_reason ++ assert_report_item_list_equal( + primitive.validate_resource_instance_attributes_create( + self.cmd_runner, + facade, +@@ -645,13 +643,18 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + etree.Element("resources"), + force=False, + ), +- failure_reports, ++ [ ++ fixture.error( ++ reports.codes.AGENT_SELF_VALIDATION_RESULT, ++ result=failure_reason, ++ force_code=reports.codes.FORCE, ++ ) ++ ], + ) + self.agent_self_validation_mock.assert_called_once_with( + self.cmd_runner, + facade.metadata.name, + attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ) + + def test_stonith_check(self): +@@ -671,7 +674,6 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ) + + def test_nonexisting_agent(self): +@@ -1295,13 +1297,11 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + old_attributes, +- reports.ReportItemSeverity.error(), + ), + mock.call( + self.cmd_runner, + facade.metadata.name, + new_attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ), + ], + ) +@@ -1328,13 +1328,11 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + old_attributes, +- reports.ReportItemSeverity.error(), + ), + mock.call( + self.cmd_runner, + facade.metadata.name, + new_attributes, +- reports.ReportItemSeverity.warning(), + ), + ], + ) +@@ -1342,13 +1340,13 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + def test_failure(self): + old_attributes = {"required": "old_value"} + new_attributes = {"required": "new_value"} +- failure_reports = ["report1", "report2"] ++ failure_reason = "failure reason" + facade = _fixture_ocf_agent() + self.agent_self_validation_mock.side_effect = ( +- (True, []), +- (False, failure_reports), ++ (True, ""), ++ (False, failure_reason), + ) +- self.assertEqual( ++ 
assert_report_item_list_equal( + primitive.validate_resource_instance_attributes_update( + self.cmd_runner, + facade, +@@ -1357,7 +1355,13 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._fixture_resources(old_attributes), + force=False, + ), +- failure_reports, ++ [ ++ fixture.error( ++ reports.codes.AGENT_SELF_VALIDATION_RESULT, ++ result=failure_reason, ++ force_code=reports.codes.FORCE, ++ ) ++ ], + ) + self.assertEqual( + self.agent_self_validation_mock.mock_calls, +@@ -1366,13 +1370,11 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + old_attributes, +- reports.ReportItemSeverity.error(), + ), + mock.call( + self.cmd_runner, + facade.metadata.name, + new_attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ), + ], + ) +@@ -1399,13 +1401,11 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + old_attributes, +- reports.ReportItemSeverity.error(), + ), + mock.call( + self.cmd_runner, + facade.metadata.name, + new_attributes, +- reports.ReportItemSeverity.error(reports.codes.FORCE), + ), + ], + ) +@@ -1471,10 +1471,10 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + def test_current_attributes_failure(self): + old_attributes = {"required": "old_value"} + new_attributes = {"required": "new_value"} +- failure_reports = ["report1", "report2"] ++ failure_reason = "failure reason" + facade = _fixture_ocf_agent() +- self.agent_self_validation_mock.return_value = False, failure_reports +- self.assertEqual( ++ self.agent_self_validation_mock.return_value = False, failure_reason ++ assert_report_item_list_equal( + primitive.validate_resource_instance_attributes_update( + self.cmd_runner, + facade, +@@ -1483,7 +1483,12 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._fixture_resources(old_attributes), + force=False, + ), +- [], ++ [ ++ fixture.warn( ++ reports.codes.AGENT_SELF_VALIDATION_SKIPPED_UPDATED_RESOURCE_MISCONFIGURED, ++ result=failure_reason, ++ ) ++ ], + ) + self.assertEqual( + self.agent_self_validation_mock.mock_calls, +@@ -1492,7 +1497,6 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self.cmd_runner, + facade.metadata.name, + old_attributes, +- reports.ReportItemSeverity.error(), + ), + ], + ) +diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py +index 5c8000cd..239a72b1 100644 +--- a/pcs_test/tier0/lib/pacemaker/test_live.py ++++ b/pcs_test/tier0/lib/pacemaker/test_live.py +@@ -1752,16 +1752,15 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + self.assertTrue(is_valid) +- self.assertEqual(report_list, []) ++ self.assertEqual(reason, "") + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] + ) +@@ -1771,23 +1770,17 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + 
self.assertIsNone(is_valid) +- assert_report_item_list_equal( +- report_list, +- [ +- fixture.info( +- report_codes.AGENT_SELF_VALIDATION_INVALID_DATA, +- reason="Start tag expected, '<' not found, line 1, column 1 (, line 1)", +- ) +- ], ++ self.assertEqual( ++ reason, ++ "Start tag expected, '<' not found, line 1, column 1 (, line 1)", + ) + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] +@@ -1806,19 +1799,15 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + self.assertTrue(is_valid) +- assert_report_item_list_equal( +- report_list, +- [], +- ) ++ self.assertEqual(reason, "") + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] + ) +@@ -1837,23 +1826,15 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + self.assertFalse(is_valid) +- assert_report_item_list_equal( +- report_list, +- [ +- fixture.info( +- report_codes.AGENT_SELF_VALIDATION_RESULT, result="" +- ) +- ], +- ) ++ self.assertEqual(reason, "") + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] + ) +@@ -1881,23 +1862,17 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + self.assertFalse(is_valid) +- assert_report_item_list_equal( +- report_list, +- [ +- fixture.info( +- report_codes.AGENT_SELF_VALIDATION_RESULT, +- result="first line\nImportant output\nand another line", +- ) +- ], ++ self.assertEqual( ++ reason, ++ "first line\nImportant output\nand another line", + ) + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] +@@ -1925,23 +1900,17 @@ class HandleInstanceAttributesValidateViaPcmkTest(TestCase): + base_cmd = ["some", "command"] + ( + is_valid, +- report_list, ++ reason, + ) = lib._handle_instance_attributes_validation_via_pcmk( + runner, + base_cmd, + "result/output", + {"attr1": "val1", "attr2": "val2"}, +- not_valid_severity=Severity.info(), + ) + self.assertTrue(is_valid) +- assert_report_item_list_equal( +- report_list, +- [ +- fixture.warn( +- report_codes.AGENT_SELF_VALIDATION_RESULT, +- result="first line\nImportant output\nand another line", +- ) +- ], ++ self.assertEqual( ++ reason, ++ "first line\nImportant output\nand another line", + ) + runner.run.assert_called_once_with( + base_cmd + ["--option", "attr1=val1", "--option", "attr2=val2"] +@@ -1953,7 +1922,6 @@ class ValidateResourceInstanceAttributesViaPcmkTest(TestCase): + def setUp(self): + self.runner = mock.Mock() + self.attrs = dict(attra="val1", attrb="val2") +- self.severity = Severity.info() + patcher = mock.patch( + "pcs.lib.pacemaker.live._handle_instance_attributes_validation_via_pcmk" + ) +@@ -1967,7 +1935,7 @@ class 
ValidateResourceInstanceAttributesViaPcmkTest(TestCase): + ) + self.assertEqual( + lib._validate_resource_instance_attributes_via_pcmk( +- self.runner, agent, self.attrs, self.severity ++ self.runner, agent, self.attrs + ), + self.ret_val, + ) +@@ -1987,7 +1955,6 @@ class ValidateResourceInstanceAttributesViaPcmkTest(TestCase): + ], + "./resource-agent-action/command/output", + self.attrs, +- self.severity, + ) + + def test_without_provider(self): +@@ -1996,7 +1963,7 @@ class ValidateResourceInstanceAttributesViaPcmkTest(TestCase): + ) + self.assertEqual( + lib._validate_resource_instance_attributes_via_pcmk( +- self.runner, agent, self.attrs, self.severity ++ self.runner, agent, self.attrs + ), + self.ret_val, + ) +@@ -2014,7 +1981,6 @@ class ValidateResourceInstanceAttributesViaPcmkTest(TestCase): + ], + "./resource-agent-action/command/output", + self.attrs, +- self.severity, + ) + + +@@ -2024,7 +1990,6 @@ class ValidateStonithInstanceAttributesViaPcmkTest(TestCase): + def setUp(self): + self.runner = mock.Mock() + self.attrs = dict(attra="val1", attrb="val2") +- self.severity = Severity.info() + patcher = mock.patch( + "pcs.lib.pacemaker.live._handle_instance_attributes_validation_via_pcmk" + ) +@@ -2038,7 +2003,7 @@ class ValidateStonithInstanceAttributesViaPcmkTest(TestCase): + ) + self.assertEqual( + lib._validate_stonith_instance_attributes_via_pcmk( +- self.runner, agent, self.attrs, self.severity ++ self.runner, agent, self.attrs + ), + self.ret_val, + ) +@@ -2054,5 +2019,4 @@ class ValidateStonithInstanceAttributesViaPcmkTest(TestCase): + ], + "./validate/command/output", + self.attrs, +- self.severity, + ) +diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py +index 9911d604..cf430d75 100644 +--- a/pcs_test/tier1/legacy/test_stonith.py ++++ b/pcs_test/tier1/legacy/test_stonith.py +@@ -1294,7 +1294,10 @@ class StonithTest(TestCase, AssertPcsMixin): + ), + ) + +- self.assert_pcs_success("stonith update test3 username=testA".split()) ++ self.assert_pcs_success( ++ "stonith update test3 username=testA".split(), ++ stdout_start="Warning: ", ++ ) + + self.assert_pcs_success( + "stonith config test2".split(), +-- +2.39.0 + diff --git a/SOURCES/bz2158804-01-fix-stonith-watchdog-timeout-validation.patch b/SOURCES/bz2158804-01-fix-stonith-watchdog-timeout-validation.patch new file mode 100644 index 0000000..897cedb --- /dev/null +++ b/SOURCES/bz2158804-01-fix-stonith-watchdog-timeout-validation.patch @@ -0,0 +1,485 @@ +From 5bed788246ac19c866a60ab3773d94fa4ca28c37 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 5 Jan 2023 16:21:44 +0100 +Subject: [PATCH 5/5] Fix stonith-watchdog-timeout validation + +--- + pcs/lib/cluster_property.py | 25 ++++- + pcs/lib/sbd.py | 15 ++- + .../lib/commands/test_cluster_property.py | 50 ++++++++-- + pcs_test/tier0/lib/test_cluster_property.py | 98 ++++++++++++++----- + pcs_test/tier1/test_cluster_property.py | 14 ++- + 5 files changed, 157 insertions(+), 45 deletions(-) + +diff --git a/pcs/lib/cluster_property.py b/pcs/lib/cluster_property.py +index 9ccacd74..b622bdaf 100644 +--- a/pcs/lib/cluster_property.py ++++ b/pcs/lib/cluster_property.py +@@ -8,6 +8,7 @@ from lxml.etree import _Element + + from pcs.common import reports + from pcs.common.services.interfaces import ServiceManagerInterface ++from pcs.common.tools import timeout_to_seconds + from pcs.common.types import StringSequence + from pcs.lib import ( + sbd, +@@ -38,8 +39,21 @@ def _validate_stonith_watchdog_timeout_property( + force: bool = False, 
+ ) -> reports.ReportItemList: + report_list: reports.ReportItemList = [] ++ original_value = value ++ # if value is not empty, try to convert time interval string ++ if value: ++ seconds = timeout_to_seconds(value) ++ if seconds is None: ++ # returns empty list because this should be reported by ++ # ValueTimeInterval validator ++ return report_list ++ value = str(seconds) + if sbd.is_sbd_enabled(service_manager): +- report_list.extend(sbd.validate_stonith_watchdog_timeout(value, force)) ++ report_list.extend( ++ sbd.validate_stonith_watchdog_timeout( ++ validate.ValuePair(original_value, value), force ++ ) ++ ) + else: + if value not in ["", "0"]: + report_list.append( +@@ -124,9 +138,6 @@ def validate_set_cluster_properties( + # unknow properties are reported by NamesIn validator + continue + property_metadata = possible_properties_dict[property_name] +- if property_metadata.name == "stonith-watchdog-timeout": +- # needs extra validation +- continue + if property_metadata.type == "boolean": + validators.append( + validate.ValuePcmkBoolean( +@@ -154,9 +165,13 @@ def validate_set_cluster_properties( + ) + ) + elif property_metadata.type == "time": ++ # make stonith-watchdog-timeout value not forcable + validators.append( + validate.ValueTimeInterval( +- property_metadata.name, severity=severity ++ property_metadata.name, ++ severity=severity ++ if property_metadata.name != "stonith-watchdog-timeout" ++ else reports.ReportItemSeverity.error(), + ) + ) + report_list.extend( +diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py +index 1e3cfb37..38cd8767 100644 +--- a/pcs/lib/sbd.py ++++ b/pcs/lib/sbd.py +@@ -1,6 +1,9 @@ + import re + from os import path +-from typing import Optional ++from typing import ( ++ Optional, ++ Union, ++) + + from pcs import settings + from pcs.common import reports +@@ -392,7 +395,10 @@ def _get_local_sbd_watchdog_timeout() -> int: + + + def validate_stonith_watchdog_timeout( +- stonith_watchdog_timeout: str, force: bool = False ++ stonith_watchdog_timeout: Union[ ++ validate.TypeOptionValue, validate.ValuePair ++ ], ++ force: bool = False, + ) -> reports.ReportItemList: + """ + Check sbd status and config when user is setting stonith-watchdog-timeout +@@ -401,6 +407,7 @@ def validate_stonith_watchdog_timeout( + + stonith_watchdog_timeout -- value to be validated + """ ++ stonith_watchdog_timeout = validate.ValuePair.get(stonith_watchdog_timeout) + severity = reports.get_severity(reports.codes.FORCE, force) + if _is_device_set_local(): + return ( +@@ -412,11 +419,11 @@ def validate_stonith_watchdog_timeout( + ), + ) + ] +- if stonith_watchdog_timeout not in ["", "0"] ++ if stonith_watchdog_timeout.normalized not in ["", "0"] + else [] + ) + +- if stonith_watchdog_timeout in ["", "0"]: ++ if stonith_watchdog_timeout.normalized in ["", "0"]: + return [ + reports.ReportItem( + severity, +diff --git a/pcs_test/tier0/lib/commands/test_cluster_property.py b/pcs_test/tier0/lib/commands/test_cluster_property.py +index 319d1df6..fd124843 100644 +--- a/pcs_test/tier0/lib/commands/test_cluster_property.py ++++ b/pcs_test/tier0/lib/commands/test_cluster_property.py +@@ -120,6 +120,34 @@ class StonithWatchdogTimeoutMixin(LoadMetadataMixin): + ) + self.env_assist.assert_reports([]) + ++ def _set_invalid_value(self, forced=False): ++ self.config.remove("services.is_enabled") ++ self.env_assist.assert_raise_library_error( ++ lambda: cluster_property.set_properties( ++ self.env_assist.get_env(), ++ {"stonith-watchdog-timeout": "15x"}, ++ [] if not forced else [reports.codes.FORCE], ++ ) 
++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.INVALID_OPTION_VALUE, ++ option_name="stonith-watchdog-timeout", ++ option_value="15x", ++ allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)", ++ cannot_be_empty=False, ++ forbidden_characters=None, ++ ), ++ ] ++ ) ++ ++ def test_set_invalid_value(self): ++ self._set_invalid_value(forced=False) ++ ++ def test_set_invalid_value_forced(self): ++ self._set_invalid_value(forced=True) ++ + + class TestSetStonithWatchdogTimeoutSBDIsDisabled( + StonithWatchdogTimeoutMixin, TestCase +@@ -132,6 +160,9 @@ class TestSetStonithWatchdogTimeoutSBDIsDisabled( + def test_set_zero(self): + self._set_success({"stonith-watchdog-timeout": "0"}) + ++ def test_set_zero_time_suffix(self): ++ self._set_success({"stonith-watchdog-timeout": "0s"}) ++ + def test_set_not_zero_or_empty(self): + self.env_assist.assert_raise_library_error( + lambda: cluster_property.set_properties( +@@ -231,12 +262,12 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledWatchdogOnly( + def test_set_zero_forced(self): + self.config.env.push_cib( + crm_config=fixture_crm_config_properties( +- [("cib-bootstrap-options", {"stonith-watchdog-timeout": "0"})] ++ [("cib-bootstrap-options", {"stonith-watchdog-timeout": "0s"})] + ) + ) + cluster_property.set_properties( + self.env_assist.get_env(), +- {"stonith-watchdog-timeout": "0"}, ++ {"stonith-watchdog-timeout": "0s"}, + [reports.codes.FORCE], + ) + self.env_assist.assert_reports( +@@ -271,7 +302,7 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledWatchdogOnly( + self.env_assist.assert_raise_library_error( + lambda: cluster_property.set_properties( + self.env_assist.get_env(), +- {"stonith-watchdog-timeout": "9"}, ++ {"stonith-watchdog-timeout": "9s"}, + [], + ) + ) +@@ -281,7 +312,7 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledWatchdogOnly( + reports.codes.STONITH_WATCHDOG_TIMEOUT_TOO_SMALL, + force_code=reports.codes.FORCE, + cluster_sbd_watchdog_timeout=10, +- entered_watchdog_timeout="9", ++ entered_watchdog_timeout="9s", + ) + ] + ) +@@ -289,12 +320,12 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledWatchdogOnly( + def test_too_small_forced(self): + self.config.env.push_cib( + crm_config=fixture_crm_config_properties( +- [("cib-bootstrap-options", {"stonith-watchdog-timeout": "9"})] ++ [("cib-bootstrap-options", {"stonith-watchdog-timeout": "9s"})] + ) + ) + cluster_property.set_properties( + self.env_assist.get_env(), +- {"stonith-watchdog-timeout": "9"}, ++ {"stonith-watchdog-timeout": "9s"}, + [reports.codes.FORCE], + ) + self.env_assist.assert_reports( +@@ -302,13 +333,13 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledWatchdogOnly( + fixture.warn( + reports.codes.STONITH_WATCHDOG_TIMEOUT_TOO_SMALL, + cluster_sbd_watchdog_timeout=10, +- entered_watchdog_timeout="9", ++ entered_watchdog_timeout="9s", + ) + ] + ) + + def test_more_than_timeout(self): +- self._set_success({"stonith-watchdog-timeout": "11"}) ++ self._set_success({"stonith-watchdog-timeout": "11s"}) + + + @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: ["dev1", "dev2"]) +@@ -323,6 +354,9 @@ class TestSetStonithWatchdogTimeoutSBDIsEnabledSharedDevices( + def test_set_to_zero(self): + self._set_success({"stonith-watchdog-timeout": "0"}) + ++ def test_set_to_zero_time_suffix(self): ++ self._set_success({"stonith-watchdog-timeout": "0min"}) ++ + def test_set_not_zero_or_empty(self): + self.env_assist.assert_raise_library_error( + lambda: cluster_property.set_properties( +diff --git 
a/pcs_test/tier0/lib/test_cluster_property.py b/pcs_test/tier0/lib/test_cluster_property.py +index 2feb728d..8d6f90b1 100644 +--- a/pcs_test/tier0/lib/test_cluster_property.py ++++ b/pcs_test/tier0/lib/test_cluster_property.py +@@ -83,6 +83,7 @@ FIXTURE_VALID_OPTIONS_DICT = { + "integer_param": "10", + "percentage_param": "20%", + "select_param": "s3", ++ "stonith-watchdog-timeout": "0", + "time_param": "5min", + } + +@@ -96,6 +97,8 @@ FIXTURE_INVALID_OPTIONS_DICT = { + "have-watchdog": "100", + } + ++STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES = ["", "0", "0s"] ++ + + def _fixture_parameter(name, param_type, default, enum_values): + return ResourceAgentParameter( +@@ -239,6 +242,7 @@ class TestValidateSetClusterProperties(TestCase): + sbd_enabled=False, + sbd_devices=False, + force=False, ++ valid_value=True, + ): + self.mock_is_sbd_enabled.return_value = sbd_enabled + self.mock_sbd_devices.return_value = ["devices"] if sbd_devices else [] +@@ -254,9 +258,13 @@ class TestValidateSetClusterProperties(TestCase): + ), + expected_report_list, + ) +- if "stonith-watchdog-timeout" in new_properties and ( +- new_properties["stonith-watchdog-timeout"] +- or "stonith-watchdog-timeout" in configured_properties ++ if ( ++ "stonith-watchdog-timeout" in new_properties ++ and ( ++ new_properties["stonith-watchdog-timeout"] ++ or "stonith-watchdog-timeout" in configured_properties ++ ) ++ and valid_value + ): + self.mock_is_sbd_enabled.assert_called_once_with( + self.mock_service_manager +@@ -266,7 +274,10 @@ class TestValidateSetClusterProperties(TestCase): + if sbd_devices: + self.mock_sbd_timeout.assert_not_called() + else: +- if new_properties["stonith-watchdog-timeout"] in ["", "0"]: ++ if ( ++ new_properties["stonith-watchdog-timeout"] ++ in STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES ++ ): + self.mock_sbd_timeout.assert_not_called() + else: + self.mock_sbd_timeout.assert_called_once_with() +@@ -280,6 +291,8 @@ class TestValidateSetClusterProperties(TestCase): + self.mock_sbd_timeout.assert_not_called() + + self.mock_is_sbd_enabled.reset_mock() ++ self.mock_sbd_devices.reset_mock() ++ self.mock_sbd_timeout.reset_mock() + + def test_no_properties_to_set_or_unset(self): + self.assert_validate_set( +@@ -328,7 +341,7 @@ class TestValidateSetClusterProperties(TestCase): + ) + + def test_unset_stonith_watchdog_timeout_sbd_disabled(self): +- for value in ["0", ""]: ++ for value in STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES: + with self.subTest(value=value): + self.assert_validate_set( + ["stonith-watchdog-timeout"], +@@ -349,22 +362,27 @@ class TestValidateSetClusterProperties(TestCase): + ) + + def test_set_ok_stonith_watchdog_timeout_sbd_enabled_without_devices(self): +- self.assert_validate_set( +- [], {"stonith-watchdog-timeout": "15"}, [], sbd_enabled=True +- ) ++ for value in ["15", "15s"]: ++ with self.subTest(value=value): ++ self.assert_validate_set( ++ [], ++ {"stonith-watchdog-timeout": value}, ++ [], ++ sbd_enabled=True, ++ ) + + def test_set_small_stonith_watchdog_timeout_sbd_enabled_without_devices( + self, + ): + self.assert_validate_set( + [], +- {"stonith-watchdog-timeout": "9"}, ++ {"stonith-watchdog-timeout": "9s"}, + [ + fixture.error( + reports.codes.STONITH_WATCHDOG_TIMEOUT_TOO_SMALL, + force_code=reports.codes.FORCE, + cluster_sbd_watchdog_timeout=10, +- entered_watchdog_timeout="9", ++ entered_watchdog_timeout="9s", + ) + ], + sbd_enabled=True, +@@ -387,28 +405,54 @@ class TestValidateSetClusterProperties(TestCase): + force=True, + ) + +- def 
test_set_not_a_number_stonith_watchdog_timeout_sbd_enabled_without_devices( ++ def _set_invalid_value_stonith_watchdog_timeout( ++ self, sbd_enabled=False, sbd_devices=False ++ ): ++ for value in ["invalid", "10x"]: ++ with self.subTest(value=value): ++ self.assert_validate_set( ++ [], ++ {"stonith-watchdog-timeout": value}, ++ [ ++ fixture.error( ++ reports.codes.INVALID_OPTION_VALUE, ++ option_name="stonith-watchdog-timeout", ++ option_value=value, ++ allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)", ++ cannot_be_empty=False, ++ forbidden_characters=None, ++ ) ++ ], ++ sbd_enabled=sbd_enabled, ++ sbd_devices=sbd_devices, ++ valid_value=False, ++ ) ++ ++ def test_set_invalid_value_stonith_watchdog_timeout_sbd_enabled_without_devices( + self, + ): ++ self._set_invalid_value_stonith_watchdog_timeout( ++ sbd_enabled=True, sbd_devices=False ++ ) + +- self.assert_validate_set( +- [], +- {"stonith-watchdog-timeout": "invalid"}, +- [ +- fixture.error( +- reports.codes.STONITH_WATCHDOG_TIMEOUT_TOO_SMALL, +- force_code=reports.codes.FORCE, +- cluster_sbd_watchdog_timeout=10, +- entered_watchdog_timeout="invalid", +- ) +- ], +- sbd_enabled=True, ++ def test_set_invalid_value_stonith_watchdog_timeout_sbd_enabled_with_devices( ++ self, ++ ): ++ self._set_invalid_value_stonith_watchdog_timeout( ++ sbd_enabled=True, sbd_devices=True ++ ) ++ ++ def test_set_invalid_value_stonith_watchdog_timeout_sbd_disabled( ++ self, ++ ): ++ self._set_invalid_value_stonith_watchdog_timeout( ++ sbd_enabled=False, sbd_devices=False + ) + + def test_unset_stonith_watchdog_timeout_sbd_enabled_without_devices( + self, + ): +- for value in ["0", ""]: ++ for value in STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES: + with self.subTest(value=value): + self.assert_validate_set( + ["stonith-watchdog-timeout"], +@@ -426,7 +470,7 @@ class TestValidateSetClusterProperties(TestCase): + def test_unset_stonith_watchdog_timeout_sbd_enabled_without_devices_forced( + self, + ): +- for value in ["0", ""]: ++ for value in STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES: + with self.subTest(value=value): + self.assert_validate_set( + ["stonith-watchdog-timeout"], +@@ -459,7 +503,7 @@ class TestValidateSetClusterProperties(TestCase): + def test_set_stonith_watchdog_timeout_sbd_enabled_with_devices_forced(self): + self.assert_validate_set( + [], +- {"stonith-watchdog-timeout": 15}, ++ {"stonith-watchdog-timeout": "15s"}, + [ + fixture.warn( + reports.codes.STONITH_WATCHDOG_TIMEOUT_CANNOT_BE_SET, +@@ -472,7 +516,7 @@ class TestValidateSetClusterProperties(TestCase): + ) + + def test_unset_stonith_watchdog_timeout_sbd_enabled_with_devices(self): +- for value in ["0", ""]: ++ for value in STONITH_WATCHDOG_TIMEOUT_UNSET_VALUES: + with self.subTest(value=value): + self.assert_validate_set( + ["stonith-watchdog-timeout"], +diff --git a/pcs_test/tier1/test_cluster_property.py b/pcs_test/tier1/test_cluster_property.py +index ff1f9cfb..51e25efc 100644 +--- a/pcs_test/tier1/test_cluster_property.py ++++ b/pcs_test/tier1/test_cluster_property.py +@@ -169,7 +169,7 @@ class TestPropertySet(PropertyMixin, TestCase): + + def test_set_stonith_watchdog_timeout(self): + self.assert_pcs_fail( +- "property set stonith-watchdog-timeout=5".split(), ++ "property set stonith-watchdog-timeout=5s".split(), + stdout_full=( + "Error: stonith-watchdog-timeout can only be unset or set to 0 " + "while SBD is disabled\n" +@@ -179,6 +179,18 @@ class TestPropertySet(PropertyMixin, TestCase): + ) + self.assert_resources_xml_in_cib(UNCHANGED_CRM_CONFIG) + ++ def 
test_set_stonith_watchdog_timeout_invalid_value(self): ++ self.assert_pcs_fail( ++ "property set stonith-watchdog-timeout=5x".split(), ++ stdout_full=( ++ "Error: '5x' is not a valid stonith-watchdog-timeout value, use" ++ " time interval (e.g. 1, 2s, 3m, 4h, ...)\n" ++ "Error: Errors have occurred, therefore pcs is unable to " ++ "continue\n" ++ ), ++ ) ++ self.assert_resources_xml_in_cib(UNCHANGED_CRM_CONFIG) ++ + + class TestPropertyUnset(PropertyMixin, TestCase): + def test_success(self): +-- +2.39.0 + diff --git a/SOURCES/bz2159455-01-add-agent-validation-option.patch b/SOURCES/bz2159455-01-add-agent-validation-option.patch new file mode 100644 index 0000000..f922c58 --- /dev/null +++ b/SOURCES/bz2159455-01-add-agent-validation-option.patch @@ -0,0 +1,1438 @@ +From 537d18f785edfffb7505510ef309cbd4c31bb914 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Thu, 12 Jan 2023 14:14:26 +0100 +Subject: [PATCH 1/2] add '--agent-validation' option for enabling agent + self-validation feature + +--- + pcs/cli/common/parse_args.py | 3 + + pcs/lib/cib/resource/primitive.py | 12 +- + pcs/lib/cib/resource/remote_node.py | 1 + + pcs/lib/commands/booth.py | 1 + + pcs/lib/commands/resource.py | 16 ++ + pcs/lib/commands/stonith.py | 8 + + pcs/pcs.8.in | 16 +- + pcs/resource.py | 13 +- + pcs/stonith.py | 3 + + pcs/usage.py | 31 ++-- + .../cib/resource/test_primitive_validate.py | 49 ++++++ + .../commands/resource/test_resource_create.py | 155 ++++++++++++------ + pcs_test/tier0/lib/commands/test_booth.py | 49 ------ + pcs_test/tier0/lib/commands/test_stonith.py | 24 +-- + pcs_test/tier1/cib_resource/test_create.py | 2 + + .../tier1/cib_resource/test_stonith_create.py | 2 - + pcs_test/tier1/legacy/test_resource.py | 8 +- + pcs_test/tier1/legacy/test_stonith.py | 25 ++- + 18 files changed, 272 insertions(+), 146 deletions(-) + +diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py +index 0a43deec..a16d882e 100644 +--- a/pcs/cli/common/parse_args.py ++++ b/pcs/cli/common/parse_args.py +@@ -94,6 +94,8 @@ PCS_LONG_OPTIONS = [ + f"{_OUTPUT_FORMAT_OPTION_STR}=", + # auth token + "token=", ++ # enable agent self validation ++ "agent-validation", + ] + + +@@ -485,6 +487,7 @@ class InputModifiers: + { + # boolean values + "--all": "--all" in options, ++ "--agent-validation": "--agent-validation" in options, + "--autodelete": "--autodelete" in options, + "--brief": "--brief" in options, + "--config": "--config" in options, +diff --git a/pcs/lib/cib/resource/primitive.py b/pcs/lib/cib/resource/primitive.py +index c5df8e58..5a6e1e0d 100644 +--- a/pcs/lib/cib/resource/primitive.py ++++ b/pcs/lib/cib/resource/primitive.py +@@ -137,6 +137,7 @@ def create( + resource_type: str = "resource", + # TODO remove this arg + do_not_report_instance_attribute_server_exists: bool = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments + # pylint: disable=too-many-locals +@@ -159,6 +160,8 @@ def create( + resource_type -- describes the resource for reports + do_not_report_instance_attribute_server_exists -- dirty fix due to + suboptimal architecture, TODO: fix the architecture and remove the param ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + if raw_operation_list is None: + raw_operation_list = [] +@@ -200,6 +203,7 @@ def create( + instance_attributes, + resources_section, + force=allow_invalid_instance_attributes, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + # TODO 
remove this "if", see pcs.lib.cib.remote_node.create for details + if do_not_report_instance_attribute_server_exists: +@@ -388,6 +392,7 @@ def validate_resource_instance_attributes_create( + instance_attributes: Mapping[str, str], + resources_section: _Element, + force: bool = False, ++ enable_agent_self_validation: bool = False, + ) -> reports.ReportItemList: + report_items: reports.ReportItemList = [] + agent_name = resource_agent.metadata.name +@@ -419,7 +424,8 @@ def validate_resource_instance_attributes_create( + ) + + if ( +- _is_ocf_or_stonith_agent(agent_name) ++ enable_agent_self_validation ++ and _is_ocf_or_stonith_agent(agent_name) + and resource_agent.metadata.agent_exists + and resource_agent.metadata.provides_self_validation + and not any( +@@ -447,6 +453,7 @@ def validate_resource_instance_attributes_update( + resource_id: str, + resources_section: _Element, + force: bool = False, ++ enable_agent_self_validation: bool = False, + ) -> reports.ReportItemList: + # pylint: disable=too-many-locals + # TODO This function currently accepts the updated resource as a string and +@@ -521,7 +528,8 @@ def validate_resource_instance_attributes_update( + ) + + if ( +- _is_ocf_or_stonith_agent(agent_name) ++ enable_agent_self_validation ++ and _is_ocf_or_stonith_agent(agent_name) + and resource_agent.metadata.agent_exists + and resource_agent.metadata.provides_self_validation + and not any( +diff --git a/pcs/lib/cib/resource/remote_node.py b/pcs/lib/cib/resource/remote_node.py +index c76e37a6..f65c5446 100644 +--- a/pcs/lib/cib/resource/remote_node.py ++++ b/pcs/lib/cib/resource/remote_node.py +@@ -253,4 +253,5 @@ def create( + # 3) call the validation from here and handle the results or config + # the validator before / when running it + do_not_report_instance_attribute_server_exists=True, ++ enable_agent_self_validation=False, + ) +diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py +index ee91ea14..7b0ed4de 100644 +--- a/pcs/lib/commands/booth.py ++++ b/pcs/lib/commands/booth.py +@@ -480,6 +480,7 @@ def create_in_cluster( + env.cmd_runner(), + resources_section, + id_provider, ++ enable_agent_self_validation=False, + ) + agent_factory = ResourceAgentFacadeFactory( + env.cmd_runner(), report_processor +diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py +index 28407d48..12b1f0e9 100644 +--- a/pcs/lib/commands/resource.py ++++ b/pcs/lib/commands/resource.py +@@ -348,6 +348,7 @@ def create( + ensure_disabled: bool = False, + wait: WaitType = False, + allow_not_suitable_command: bool = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +@@ -381,6 +382,8 @@ def create( + pcs.lib.commands.remote_node); + in the case of remote/guest node forcible error is produced when this + flag is set to False and warning is produced otherwise ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -427,6 +430,7 @@ def create( + allow_invalid_operation, + allow_invalid_instance_attributes, + use_default_operations, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + if env.report_processor.has_errors: + raise LibraryError() +@@ -451,6 +455,7 @@ def create_as_clone( + ensure_disabled: bool = False, + wait: WaitType = False, + allow_not_suitable_command: bool = False, ++ enable_agent_self_validation: bool = False, + ): + # 
pylint: disable=too-many-arguments, too-many-locals + """ +@@ -478,6 +483,8 @@ def create_as_clone( + ensure_disabled -- is flag that keeps resource in target-role "Stopped" + wait -- is flag for controlling waiting for pacemaker idle mechanism + allow_not_suitable_command -- turn forceable errors into warnings ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -531,6 +538,7 @@ def create_as_clone( + allow_invalid_operation, + allow_invalid_instance_attributes, + use_default_operations, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + + clone_element = resource.clone.append_new( +@@ -561,6 +569,7 @@ def create_in_group( + put_after_adjacent: bool = False, + wait: WaitType = False, + allow_not_suitable_command: bool = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +@@ -589,6 +598,8 @@ def create_in_group( + adjacent resource + wait -- is flag for controlling waiting for pacemaker idle mechanism + allow_not_suitable_command -- turn forceable errors into warnings ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -664,6 +675,7 @@ def create_in_group( + allow_invalid_operation, + allow_invalid_instance_attributes, + use_default_operations, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + if ensure_disabled: + resource.common.disable(primitive_element, id_provider) +@@ -701,6 +713,7 @@ def create_into_bundle( + wait: WaitType = False, + allow_not_suitable_command: bool = False, + allow_not_accessible_resource: bool = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +@@ -728,6 +741,8 @@ def create_into_bundle( + wait -- is flag for controlling waiting for pacemaker idle mechanism + allow_not_suitable_command -- turn forceable errors into warnings + allow_not_accessible_resource -- turn forceable errors into warnings ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -775,6 +790,7 @@ def create_into_bundle( + allow_invalid_operation, + allow_invalid_instance_attributes, + use_default_operations, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + if ensure_disabled: + resource.common.disable(primitive_element, id_provider) +diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py +index 15b10dec..fc0264e7 100644 +--- a/pcs/lib/commands/stonith.py ++++ b/pcs/lib/commands/stonith.py +@@ -117,6 +117,7 @@ def create( + use_default_operations: bool = True, + ensure_disabled: bool = False, + wait: WaitType = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +@@ -139,6 +140,8 @@ def create( + operations (specified in a stonith agent) + ensure_disabled -- flag that keeps resource in target-role "Stopped" + wait -- flag for controlling waiting for pacemaker idle mechanism ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = 
env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -174,6 +177,7 @@ def create( + allow_invalid_instance_attributes=allow_invalid_instance_attributes, + use_default_operations=use_default_operations, + resource_type="stonith", ++ enable_agent_self_validation=enable_agent_self_validation, + ) + if ensure_disabled: + resource.common.disable(stonith_element, id_provider) +@@ -195,6 +199,7 @@ def create_in_group( + adjacent_resource_id: Optional[str] = None, + put_after_adjacent: bool = False, + wait: WaitType = False, ++ enable_agent_self_validation: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +@@ -221,6 +226,8 @@ def create_in_group( + put_after_adjacent -- is flag to put a newly create resource befor/after + adjacent stonith + wait -- flag for controlling waiting for pacemaker idle mechanism ++ enable_agent_self_validation -- if True, use agent self-validation feature ++ to validate instance attributes + """ + runner = env.cmd_runner() + agent_factory = ResourceAgentFacadeFactory(runner, env.report_processor) +@@ -286,6 +293,7 @@ def create_in_group( + allow_invalid_operation, + allow_invalid_instance_attributes, + use_default_operations, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + if ensure_disabled: + resource.common.disable(stonith_element, id_provider) +diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in +index cd00f8ac..6f7fe9cc 100644 +--- a/pcs/pcs.8.in ++++ b/pcs/pcs.8.in +@@ -95,8 +95,8 @@ Show list of all available resource agents (if filter is provided then only reso + describe [:[:]] [\fB\-\-full\fR] + Show options for the specified resource. If \fB\-\-full\fR is specified, all options including advanced and deprecated ones are shown. + .TP +-create [:[:]] [resource options] [\fBop\fR [ ]...] [\fBmeta\fR ...] [\fBclone\fR [] [] | promotable [] [] | \fB\-\-group\fR [\fB\-\-before\fR | \fB\-\-after\fR ] | \fBbundle\fR ] [\fB\-\-disabled\fR] [\fB\-\-no\-default\-ops] [\fB\-\-wait\fR[=n]] +-Create specified resource. If \fBclone\fR is used a clone resource is created. If \fBpromotable\fR is used a promotable clone resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fBbundle\fR is specified, resource will be created inside of the specified bundle. If \fB\-\-disabled\fR is specified the resource is not started automatically. If \fB\-\-no\-default\-ops\fR is specified, only monitor operations are created for the resource and all other operations use default settings. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start and then return 0 if the resource is started, or 1 if the resource has not yet started. If 'n' is not specified it defaults to 60 minutes. ++create [:[:]] [resource options] [\fBop\fR [ ]...] [\fBmeta\fR ...] [\fBclone\fR [] [] | promotable [] [] | \fB\-\-group\fR [\fB\-\-before\fR | \fB\-\-after\fR ] | \fBbundle\fR ] [\fB\-\-disabled\fR] [\fB\-\-agent\-validation\fR] [\fB\-\-no\-default\-ops\fR] [\fB\-\-wait\fR[=n]] ++Create specified resource. If \fBclone\fR is used a clone resource is created. If \fBpromotable\fR is used a promotable clone resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. 
You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fBbundle\fR is specified, resource will be created inside of the specified bundle. If \fB\-\-disabled\fR is specified the resource is not started automatically. If \fB\-\-agent\-validation\fR is specified, resource agent validate\-all action will be used to validate resource options. If \fB\-\-no\-default\-ops\fR is specified, only monitor operations are created for the resource and all other operations use default settings. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start and then return 0 if the resource is started, or 1 if the resource has not yet started. If 'n' is not specified it defaults to 60 minutes. + + Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored everything 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s + .TP +@@ -183,11 +183,13 @@ List available OCF resource agent providers. + agents [standard[:provider]] + List available agents optionally filtered by standard and provider. + .TP +-update [resource options] [op [ ]...] [meta ...] [\fB\-\-wait\fR[=n]] ++update [resource options] [op [ ]...] [meta ...] [\fB\-\-agent\-validation\fR] [\fB\-\-wait\fR[=n]] + Add, remove or change options of specified resource, clone or multi\-state resource. Unspecified options will be kept unchanged. If you wish to remove an option, set it to empty value, i.e. 'option_name='. + + If an operation (op) is specified it will update the first found operation with the same action on the specified resource. If no operation with that action exists then a new operation will be created. (WARNING: all existing options on the updated operation will be reset if not specified.) If you want to create multiple monitor operations you should use the 'op add' & 'op remove' commands. + ++If \fB\-\-agent\-validation\fR is specified, resource agent validate\-all action will be used to validate resource options. ++ + If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise. If 'n' is not specified it defaults to 60 minutes. + .TP + op add [operation properties] +@@ -670,8 +672,8 @@ Show list of all available stonith agents (if filter is provided then only stoni + describe [\fB\-\-full\fR] + Show options for specified stonith agent. If \fB\-\-full\fR is specified, all options including advanced and deprecated ones are shown. + .TP +-create [stonith device options] [op [ ]...] [meta ...] [\fB\-\-group\fR [\fB\-\-before\fR | \fB\-\-after\fR ]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]] +-Create stonith device with specified type and options. If \fB\-\-group\fR is specified the stonith device is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added stonith device relatively to some stonith device already existing in the group. If\fB\-\-disabled\fR is specified the stonith device is not used. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to start and then return 0 if the stonith device is started, or 1 if the stonith device has not yet started. If 'n' is not specified it defaults to 60 minutes. ++create [stonith device options] [op [ ]...] [meta ...] 
[\fB\-\-group\fR [\fB\-\-before\fR | \fB\-\-after\fR ]] [\fB\-\-disabled\fR] [\fB\-\-agent\-validation\fR] [\fB\-\-wait\fR[=n]] ++Create stonith device with specified type and options. If \fB\-\-group\fR is specified the stonith device is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added stonith device relatively to some stonith device already existing in the group. If\fB\-\-disabled\fR is specified the stonith device is not used. If \fB\-\-agent\-validation\fR is specified, stonith agent validate\-all action will be used to validate stonith device options. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to start and then return 0 if the stonith device is started, or 1 if the stonith device has not yet started. If 'n' is not specified it defaults to 60 minutes. + + Example: Create a device for nodes node1 and node2 + .br +@@ -681,8 +683,10 @@ Example: Use port p1 for node n1 and ports p2 and p3 for node n2 + .br + pcs stonith create MyFence fence_virt 'pcmk_host_map=n1:p1;n2:p2,p3' + .TP +-update [stonith device options] ++update [stonith device options] [\fB\-\-agent\-validation\fR] + Add, remove or change options of specified stonith id. Unspecified options will be kept unchanged. If you wish to remove an option, set it to empty value, i.e. 'option_name='. ++ ++If \fB\-\-agent\-validation\fR is specified, stonith agent validate\-all action will be used to validate stonith device options. + .TP + update\-scsi\-devices (set [...]) | (add [...] delete|remove [...] ) + Update scsi fencing devices without affecting other resources. You must specify either list of set devices or at least one device for add or delete/remove devices. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi, fence_mpath. 
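The man page hunks above describe the new --agent-validation switch only in terms of the agent's validate-all action. As a rough illustration of what that action does (this is not how pcs itself calls agents — pcs routes the call through its command runner, seen later in the test fixtures as runner.pcmk.resource_agent_self_validation — and the helper name and path layout below are assumptions for the sketch), driving an OCF agent's validate-all action with a set of instance attributes amounts to:

import os
import subprocess

def ocf_validate_all(provider: str, agent: str, attrs: dict) -> bool:
    # OCF agents read their parameters from OCF_RESKEY_<name> environment
    # variables; the "validate-all" action asks the agent to check them
    # without starting anything. Exit code 0 means the attributes passed
    # the agent's own checks.
    env = dict(os.environ, OCF_ROOT="/usr/lib/ocf")
    for name, value in attrs.items():
        env["OCF_RESKEY_" + name] = str(value)
    agent_path = os.path.join("/usr/lib/ocf/resource.d", provider, agent)
    result = subprocess.run(
        [agent_path, "validate-all"],
        env=env, capture_output=True, text=True, check=False,
    )
    return result.returncode == 0

# e.g. ocf_validate_all("heartbeat", "IPaddr2",
#                       {"ip": "192.168.0.99", "cidr_netmask": "32"})

A non-zero result is what the library code below surfaces as the forceable AGENT_SELF_VALIDATION_RESULT report exercised by the tier0 tests further down in this patch.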
+diff --git a/pcs/resource.py b/pcs/resource.py +index bf34e8d7..7500b37e 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -580,6 +580,7 @@ def _format_desc(indentation, desc): + def resource_create(lib, argv, modifiers): + """ + Options: ++ * --agent-validation - use agent self validation of instance attributes + * --before - specified resource inside a group before which new resource + will be placed inside the group + * --after - specified resource inside a group after which new resource +@@ -593,6 +594,7 @@ def resource_create(lib, argv, modifiers): + * -f - CIB file + """ + modifiers.ensure_only_supported( ++ "--agent-validation", + "--before", + "--after", + "--group", +@@ -655,6 +657,7 @@ def resource_create(lib, argv, modifiers): + use_default_operations=not modifiers.get("--no-default-ops"), + wait=modifiers.get("--wait"), + allow_not_suitable_command=modifiers.get("--force"), ++ enable_agent_self_validation=modifiers.get("--agent-validation"), + ) + + clone_id = parts.get("clone_id", None) +@@ -894,12 +897,15 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + """ + Options: + * -f - CIB file ++ * --agent-validation - use agent self validation of instance attributes + * --wait + * --force - allow invalid options, do not fail if not possible to get + agent metadata, allow not suitable command + """ + del lib +- modifiers.ensure_only_supported("-f", "--wait", "--force") ++ modifiers.ensure_only_supported( ++ "-f", "--wait", "--force", "--agent-validation" ++ ) + if len(args) < 2: + raise CmdLineInputError() + res_id = args.pop(0) +@@ -970,7 +976,10 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + dict(params), + res_id, + get_resources(lib_pacemaker.get_cib(cib_xml)), +- force=modifiers.get("--force"), ++ force=bool(modifiers.get("--force")), ++ enable_agent_self_validation=bool( ++ modifiers.get("--agent-validation") ++ ), + ) + if report_list: + process_library_reports(report_list) +diff --git a/pcs/stonith.py b/pcs/stonith.py +index 58cd14fc..17ba6aca 100644 +--- a/pcs/stonith.py ++++ b/pcs/stonith.py +@@ -127,6 +127,7 @@ def stonith_create(lib, argv, modifiers): + instance attributes + * --disabled - created resource will be disabled + * --no-default-ops - do not add default operations ++ * --agent-validation - use agent self validation of instance attributes + * --wait + * -f - CIB file + """ +@@ -137,6 +138,7 @@ def stonith_create(lib, argv, modifiers): + "--force", + "--disabled", + "--no-default-ops", ++ "--agent-validation", + "--wait", + "-f", + ) +@@ -170,6 +172,7 @@ def stonith_create(lib, argv, modifiers): + ensure_disabled=modifiers.get("--disabled"), + use_default_operations=not modifiers.get("--no-default-ops"), + wait=modifiers.get("--wait"), ++ enable_agent_self_validation=modifiers.get("--agent-validation"), + ) + + if not modifiers.get("--group"): +diff --git a/pcs/usage.py b/pcs/usage.py +index 0a6ffcb6..bb5f864d 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -357,7 +357,8 @@ Commands: + [clone [] [] | + promotable [] [] | + --group [--before | --after ] | +- bundle ] [--disabled] [--no-default-ops] [--wait[=n]] ++ bundle ] [--disabled] [--agent-validation] ++ [--no-default-ops] [--wait[=n]] + Create specified resource. If clone is used a clone resource is + created. If promotable is used a promotable clone resource is created. + If --group is specified the resource is added to the group named. 
You +@@ -365,12 +366,13 @@ Commands: + resource relatively to some resource already existing in the group. If + bundle is used, the resource will be created inside of the specified + bundle. If --disabled is specified the resource is not started +- automatically. If --no-default-ops is specified, only monitor +- operations are created for the resource and all other operations use +- default settings. If --wait is specified, pcs will wait up to 'n' +- seconds for the resource to start and then return 0 if the resource is +- started, or 1 if the resource has not yet started. If 'n' is not +- specified it defaults to 60 minutes. ++ automatically. If --agent-validation is specified, resource agent ++ validate-all action will be used to validate resource options. If ++ --no-default-ops is specified, only monitor operations are created for ++ the resource and all other operations use default settings. If --wait ++ is specified, pcs will wait up to 'n' seconds for the resource to start ++ and then return 0 if the resource is started, or 1 if the resource has ++ not yet started. If 'n' is not specified it defaults to 60 minutes. + Example: Create a new resource called 'VirtualIP' with IP address + 192.168.0.99, netmask of 32, monitored everything 30 seconds, + on eth2: +@@ -545,7 +547,8 @@ Commands: + List available agents optionally filtered by standard and provider. + + update [resource options] [op [ +- ]...] [meta ...] [--wait[=n]] ++ ]...] [meta ...] ++ [--agent-validation] [--wait[=n]] + Add, remove or change options of specified resource, clone or + multi-state resource. Unspecified options will be kept unchanged. If + you wish to remove an option, set it to empty value, i.e. +@@ -558,6 +561,9 @@ Commands: + if not specified.) If you want to create multiple monitor operations + you should use the 'op add' & 'op remove' commands. + ++ If --agent-validation is specified, resource agent validate-all action ++ will be used to validate resource options. ++ + If --wait is specified, pcs will wait up to 'n' seconds for the changes + to take effect and then return 0 if the changes have been processed or + 1 otherwise. If 'n' is not specified it defaults to 60 minutes. +@@ -1420,13 +1426,15 @@ Commands: + [op [ + ]...] [meta ...] + [--group [--before | --after ]] +- [--disabled] [--wait[=n]] ++ [--disabled] [--agent-validation] [--wait[=n]] + Create stonith device with specified type and options. + If --group is specified the stonith device is added to the group named. + You can use --before or --after to specify the position of the added + stonith device relatively to some stonith device already existing in the + group. + If --disabled is specified the stonith device is not used. ++ If --agent-validation is specified, stonith agent validate-all action ++ will be used to validate stonith device options. + If --wait is specified, pcs will wait up to 'n' seconds for the stonith + device to start and then return 0 if the stonith device is started, or 1 + if the stonith device has not yet started. If 'n' is not specified it +@@ -1436,11 +1444,14 @@ Commands: + Example: Use port p1 for node n1 and ports p2 and p3 for node n2 + pcs stonith create MyFence fence_virt 'pcmk_host_map=n1:p1;n2:p2,p3' + +- update [stonith device options] ++ update [stonith device options] [--agent-validation] + Add, remove or change options of specified stonith id. Unspecified + options will be kept unchanged. If you wish to remove an option, set it + to empty value, i.e. 'option_name='. 
+ ++ If --agent-validation is specified, stonith agent validate-all action ++ will be used to validate stonith device options. ++ + update-scsi-devices (set [...]) + | (add [...] delete|remove + [device-path>...]) +diff --git a/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py b/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py +index 1bc3a5a6..0456abcf 100644 +--- a/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py ++++ b/pcs_test/tier0/lib/cib/resource/test_primitive_validate.py +@@ -592,6 +592,22 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + self.agent_self_validation_mock.return_value = True, [] + self.cmd_runner = mock.Mock() + ++ def test_disabled(self): ++ attributes = {"required": "value"} ++ facade = _fixture_ocf_agent() ++ self.assertEqual( ++ primitive.validate_resource_instance_attributes_create( ++ self.cmd_runner, ++ facade, ++ attributes, ++ etree.Element("resources"), ++ force=False, ++ enable_agent_self_validation=False, ++ ), ++ [], ++ ) ++ self.agent_self_validation_mock.assert_not_called() ++ + def test_success(self): + attributes = {"required": "value"} + facade = _fixture_ocf_agent() +@@ -602,6 +618,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -621,6 +638,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=True, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -642,6 +660,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [ + fixture.error( +@@ -667,6 +686,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -686,6 +706,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -701,6 +722,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -716,6 +738,7 @@ class ValidateResourceInstanceAttributesCreateSelfValidation(TestCase): + attributes, + etree.Element("resources"), + force=False, ++ enable_agent_self_validation=True, + ), + [ + fixture.error( +@@ -1275,6 +1298,24 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + etree.SubElement(nvset_el, "nvpair", dict(name=name, value=value)) + return resources_el + ++ def test_disabled(self): ++ old_attributes = {"required": "old_value"} ++ new_attributes = {"required": "new_value"} ++ facade = _fixture_ocf_agent() ++ self.assertEqual( ++ primitive.validate_resource_instance_attributes_update( ++ self.cmd_runner, ++ facade, ++ new_attributes, ++ self._NAME, ++ self._fixture_resources(old_attributes), ++ force=False, ++ enable_agent_self_validation=False, ++ ), ++ [], ++ ) ++ self.agent_self_validation_mock.assert_not_called() ++ + def test_success(self): + old_attributes = {"required": "old_value"} + new_attributes = {"required": "new_value"} +@@ -1287,6 +1328,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + 
self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -1318,6 +1360,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=True, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -1354,6 +1397,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [ + fixture.error( +@@ -1391,6 +1435,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -1422,6 +1467,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -1439,6 +1485,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [], + ) +@@ -1456,6 +1503,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [ + fixture.error( +@@ -1482,6 +1530,7 @@ class ValidateResourceInstanceAttributesUpdateSelfValidation(TestCase): + self._NAME, + self._fixture_resources(old_attributes), + force=False, ++ enable_agent_self_validation=True, + ), + [ + fixture.warn( +diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_create.py b/pcs_test/tier0/lib/commands/resource/test_resource_create.py +index 3384a674..225bb57b 100644 +--- a/pcs_test/tier0/lib/commands/resource/test_resource_create.py ++++ b/pcs_test/tier0/lib/commands/resource/test_resource_create.py +@@ -29,7 +29,9 @@ def create( + allow_invalid_operation=False, + agent_name="ocf:heartbeat:Dummy", + allow_invalid_instance_attributes=False, ++ enable_agent_self_validation=False, + ): ++ # pylint: disable=too-many-arguments + return resource.create( + env, + "A", +@@ -41,6 +43,7 @@ def create( + ensure_disabled=disabled, + allow_invalid_operation=allow_invalid_operation, + allow_invalid_instance_attributes=allow_invalid_instance_attributes, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + + +@@ -50,6 +53,7 @@ def create_group( + disabled=False, + meta_attributes=None, + operation_list=None, ++ enable_agent_self_validation=False, + ): + return resource.create_in_group( + env, +@@ -61,6 +65,7 @@ def create_group( + instance_attributes={}, + wait=wait, + ensure_disabled=disabled, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + + +@@ -72,6 +77,7 @@ def create_clone( + clone_options=None, + operation_list=None, + clone_id=None, ++ enable_agent_self_validation=False, + ): + return resource.create_as_clone( + env, +@@ -84,6 +90,7 @@ def create_clone( + clone_id=clone_id, + wait=wait, + ensure_disabled=disabled, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + + +@@ -94,6 +101,7 @@ def create_bundle( + meta_attributes=None, + allow_not_accessible_resource=False, + operation_list=None, ++ enable_agent_self_validation=False, + ): + return resource.create_into_bundle( + env, +@@ -106,6 +114,7 @@ def create_bundle( + wait=wait, + ensure_disabled=disabled, + 
allow_not_accessible_resource=allow_not_accessible_resource, ++ enable_agent_self_validation=enable_agent_self_validation, + ) + + +@@ -390,13 +399,6 @@ class CreateRolesNormalization(TestCase): + agent_filename=agent_file_name, + ) + self.config.runner.cib.load(filename=cib_file) +- self.config.runner.pcmk.resource_agent_self_validation( +- {}, +- output="", +- standard="ocf", +- provider="pacemaker", +- agent_type="Stateful", +- ) + + def create(self, operation_list=None): + resource.create( +@@ -564,7 +566,6 @@ class Create(TestCase): + def test_simplest_resource(self): + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=fixture_cib_resources_xml_primitive_simplest + ) +@@ -586,7 +587,9 @@ class Create(TestCase): + returncode=1, + ) + self.env_assist.assert_raise_library_error( +- lambda: create(self.env_assist.get_env()), ++ lambda: create( ++ self.env_assist.get_env(), enable_agent_self_validation=True ++ ), + ) + self.env_assist.assert_reports( + [ +@@ -617,7 +620,9 @@ class Create(TestCase): + resources=fixture_cib_resources_xml_primitive_simplest + ) + create( +- self.env_assist.get_env(), allow_invalid_instance_attributes=True ++ self.env_assist.get_env(), ++ allow_invalid_instance_attributes=True, ++ enable_agent_self_validation=True, + ) + self.env_assist.assert_reports( + [ +@@ -637,7 +642,10 @@ class Create(TestCase): + returncode=0, + ) + self.env_assist.assert_raise_library_error( +- lambda: create(self.env_assist.get_env()), ++ lambda: create( ++ self.env_assist.get_env(), ++ enable_agent_self_validation=True, ++ ), + ) + self.env_assist.assert_reports( + [ +@@ -682,7 +690,6 @@ class Create(TestCase): + ) + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=fixture_cib_resources_xml_primitive_simplest + ) +@@ -845,7 +852,6 @@ class Create(TestCase): + def test_resource_with_operation(self): + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=""" + +@@ -891,14 +897,12 @@ class Create(TestCase): + ), + ) + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib(resources=self.fixture_sanitized_operation) + create(self.env_assist.get_env()) + + def test_sanitize_operation_id_from_user(self): + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib(resources=self.fixture_sanitized_operation) + create( + self.env_assist.get_env(), +@@ -1080,9 +1084,6 @@ class Create(TestCase): + + """, + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict(state=1), output="" +- ) + self.config.env.push_cib( + resources=""" + +@@ -1168,7 +1169,6 @@ class Create(TestCase): + ) + self.config.runner.cib.upgrade() + self.config.runner.cib.load(filename="cib-empty-3.4.xml") +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=""" + +@@ -1224,7 +1224,6 @@ class CreateWait(TestCase): + self.env_assist, self.config = get_env_tools(test_case=self) + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- 
self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=fixture_cib_resources_xml_primitive_simplest, + wait=TIMEOUT, +@@ -1354,15 +1353,9 @@ class CreateWait(TestCase): + + class CreateInGroup(TestCase): + def setUp(self): +- self.agent_self_validation_call_name = ( +- "runner.pcmk.resource_agent_self_validation" +- ) + self.env_assist, self.config = get_env_tools(test_case=self) + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation( +- {}, output="", name=self.agent_self_validation_call_name +- ) + + def test_simplest_resource(self): + ( +@@ -1405,7 +1398,6 @@ class CreateInGroup(TestCase): + create_group(self.env_assist.get_env(), wait=False) + + def test_cib_upgrade_on_onfail_demote(self): +- self.config.remove(self.agent_self_validation_call_name) + self.config.runner.cib.load( + filename="cib-empty-3.3.xml", + instead="runner.cib.load", +@@ -1413,7 +1405,6 @@ class CreateInGroup(TestCase): + ) + self.config.runner.cib.upgrade() + self.config.runner.cib.load(filename="cib-empty-3.4.xml") +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=""" + +@@ -1465,6 +1456,34 @@ class CreateInGroup(TestCase): + [fixture.info(reports.codes.CIB_UPGRADE_SUCCESSFUL)] + ) + ++ def test_resource_self_validation_failure(self): ++ self.config.runner.pcmk.resource_agent_self_validation( ++ {}, ++ output=""" ++ not ignored ++ this is ignored ++ ++ first issue ++ another one ++ ++ """, ++ returncode=1, ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: create_group( ++ self.env_assist.get_env(), enable_agent_self_validation=True ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.AGENT_SELF_VALIDATION_RESULT, ++ result="not ignored\nfirst issue\nanother one", ++ force_code=reports.codes.FORCE, ++ ) ++ ] ++ ) ++ + def test_fail_wait(self): + self.config.env.push_cib( + resources=fixture_cib_resources_xml_group_simplest, +@@ -1608,15 +1627,9 @@ class CreateInGroup(TestCase): + + class CreateAsClone(TestCase): + def setUp(self): +- self.agent_self_validation_call_name = ( +- "runner.pcmk.resource_agent_self_validation" +- ) + self.env_assist, self.config = get_env_tools(test_case=self) + self.config.runner.pcmk.load_agent() + self.config.runner.cib.load() +- self.config.runner.pcmk.resource_agent_self_validation( +- {}, output="", name=self.agent_self_validation_call_name +- ) + + def test_simplest_resource(self): + ( +@@ -1626,6 +1639,34 @@ class CreateAsClone(TestCase): + ) + create_clone(self.env_assist.get_env(), wait=False) + ++ def test_resource_self_validation_failure(self): ++ self.config.runner.pcmk.resource_agent_self_validation( ++ {}, ++ output=""" ++ not ignored ++ this is ignored ++ ++ first issue ++ another one ++ ++ """, ++ returncode=1, ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: create_clone( ++ self.env_assist.get_env(), enable_agent_self_validation=True ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.AGENT_SELF_VALIDATION_RESULT, ++ result="not ignored\nfirst issue\nanother one", ++ force_code=reports.codes.FORCE, ++ ) ++ ] ++ ) ++ + def test_custom_clone_id(self): + ( + self.config.env.push_cib( +@@ -1637,7 +1678,6 @@ class CreateAsClone(TestCase): + ) + + def test_custom_clone_id_error_invalid_id(self): +- self.config.remove(self.agent_self_validation_call_name) + 
self.env_assist.assert_raise_library_error( + lambda: create_clone( + self.env_assist.get_env(), wait=False, clone_id="1invalid" +@@ -1649,7 +1689,6 @@ class CreateAsClone(TestCase): + + def test_custom_clone_id_error_id_already_exist(self): + self.config.remove(name="runner.cib.load") +- self.config.remove(self.agent_self_validation_call_name) + self.config.runner.cib.load( + resources=""" + +@@ -1672,7 +1711,6 @@ class CreateAsClone(TestCase): + self.env_assist.assert_reports([fixture.report_id_already_exist("C")]) + + def test_cib_upgrade_on_onfail_demote(self): +- self.config.remove(self.agent_self_validation_call_name) + self.config.runner.cib.load( + filename="cib-empty-3.3.xml", + instead="runner.cib.load", +@@ -1680,7 +1718,6 @@ class CreateAsClone(TestCase): + ) + self.config.runner.cib.upgrade() + self.config.runner.cib.load(filename="cib-empty-3.4.xml") +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=""" + +@@ -2237,7 +2274,6 @@ class CreateInToBundle(TestCase): + self.config.runner.cib.load( + filename="cib-empty-3.4.xml", resources=self.fixture_resources_pre + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resource_post_simple_without_network.format( + network=""" +@@ -2267,13 +2303,11 @@ class CreateInToBundle(TestCase): + + def test_simplest_resource(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib(resources=self.fixture_resources_post_simple) + create_bundle(self.env_assist.get_env(), wait=False) + + def test_bundle_doesnt_exist(self): + self.config.runner.cib.load(resources=self.fixture_empty_resources) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.env_assist.assert_raise_library_error( + lambda: create_bundle(self.env_assist.get_env(), wait=False), + [ +@@ -2296,7 +2330,6 @@ class CreateInToBundle(TestCase): + + """ + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + + self.env_assist.assert_raise_library_error( + lambda: create_bundle(self.env_assist.get_env(), wait=False), +@@ -2322,7 +2355,6 @@ class CreateInToBundle(TestCase): + + """ + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.env_assist.assert_raise_library_error( + lambda: create_bundle(self.env_assist.get_env(), wait=False), + [ +@@ -2337,7 +2369,6 @@ class CreateInToBundle(TestCase): + + def test_wait_fail(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resources_post_simple, + wait=TIMEOUT, +@@ -2362,7 +2393,6 @@ class CreateInToBundle(TestCase): + ) + def test_wait_ok_run_ok(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resources_post_simple, wait=TIMEOUT + ) +@@ -2383,7 +2413,6 @@ class CreateInToBundle(TestCase): + ) + def test_wait_ok_run_fail(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resources_post_simple, wait=TIMEOUT + ) +@@ -2408,7 +2437,6 @@ class CreateInToBundle(TestCase): + ) 
+ def test_disabled_wait_ok_not_running(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resources_post_disabled, wait=TIMEOUT + ) +@@ -2427,7 +2455,6 @@ class CreateInToBundle(TestCase): + ) + def test_disabled_wait_ok_running(self): + self.config.runner.cib.load(resources=self.fixture_resources_pre) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=self.fixture_resources_post_disabled, wait=TIMEOUT + ) +@@ -2455,7 +2482,6 @@ class CreateInToBundle(TestCase): + + """ + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.env_assist.assert_raise_library_error( + lambda: create_bundle(self.env_assist.get_env(), wait=False) + ) +@@ -2479,7 +2505,6 @@ class CreateInToBundle(TestCase): + + """ + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=( + self.fixture_resource_post_simple_without_network.format( +@@ -2512,7 +2537,6 @@ class CreateInToBundle(TestCase): + + """ + ) +- self.config.runner.pcmk.resource_agent_self_validation({}, output="") + self.config.env.push_cib( + resources=( + self.fixture_resource_post_simple_without_network.format( +@@ -2529,3 +2553,32 @@ class CreateInToBundle(TestCase): + self._test_with_network_defined( + '' + ) ++ ++ def test_resource_self_validation_failure(self): ++ self.config.runner.cib.load() ++ self.config.runner.pcmk.resource_agent_self_validation( ++ {}, ++ output=""" ++ not ignored ++ this is ignored ++ ++ first issue ++ another one ++ ++ """, ++ returncode=1, ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: create_bundle( ++ self.env_assist.get_env(), enable_agent_self_validation=True ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.AGENT_SELF_VALIDATION_RESULT, ++ result="not ignored\nfirst issue\nanother one", ++ force_code=reports.codes.FORCE, ++ ) ++ ] ++ ) +diff --git a/pcs_test/tier0/lib/commands/test_booth.py b/pcs_test/tier0/lib/commands/test_booth.py +index 220d058f..e0b6924e 100644 +--- a/pcs_test/tier0/lib/commands/test_booth.py ++++ b/pcs_test/tier0/lib/commands/test_booth.py +@@ -1754,30 +1754,10 @@ class CreateInCluster(TestCase, FixtureMixin): + agent_name="ocf:heartbeat:IPaddr2", + name="runner.pcmk.load_agent.ipaddr2", + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict(ip=self.site_ip), +- standard="ocf", +- provider="heartbeat", +- agent_type="IPaddr2", +- output="", +- ) + self.config.runner.pcmk.load_agent( + agent_name="ocf:pacemaker:booth-site", + name="runner.pcmk.load_agent.booth-site", + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict( +- config=os.path.join( +- settings.booth_config_dir, +- f"{instance_name}.conf", +- ) +- ), +- standard="ocf", +- provider="pacemaker", +- agent_type="booth-site", +- output="", +- name="runner.pcmk.agent_self_validation.booth-site", +- ) + self.config.env.push_cib( + resources=self.fixture_cib_booth_group(instance_name) + ) +@@ -1809,33 +1789,11 @@ class CreateInCluster(TestCase, FixtureMixin): + name="runner.pcmk.load_agent.ipaddr2", + env=env, + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict(ip=self.site_ip), +- standard="ocf", +- provider="heartbeat", +- agent_type="IPaddr2", +- output="", +- env=env, +- ) + self.config.runner.pcmk.load_agent( + 
agent_name="ocf:pacemaker:booth-site", + name="runner.pcmk.load_agent.booth-site", + env=env, + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict( +- config=os.path.join( +- settings.booth_config_dir, +- f"{constants.DEFAULT_INSTANCE_NAME}.conf", +- ) +- ), +- standard="ocf", +- provider="pacemaker", +- agent_type="booth-site", +- output="", +- env=env, +- name="runner.pcmk.agent_self_validation.booth-site", +- ) + self.config.env.push_cib(resources=self.fixture_cib_booth_group()) + commands.create_in_cluster(self.env_assist.get_env(), self.site_ip) + +@@ -1943,13 +1901,6 @@ class CreateInCluster(TestCase, FixtureMixin): + agent_name="ocf:heartbeat:IPaddr2", + name="runner.pcmk.load_agent.ipaddr2", + ) +- self.config.runner.pcmk.resource_agent_self_validation( +- dict(ip=self.site_ip), +- standard="ocf", +- provider="heartbeat", +- agent_type="IPaddr2", +- output="", +- ) + self.config.runner.pcmk.load_agent( + agent_name="ocf:pacemaker:booth-site", + agent_is_missing=True, +diff --git a/pcs_test/tier0/lib/commands/test_stonith.py b/pcs_test/tier0/lib/commands/test_stonith.py +index 65a0608f..eedd1c04 100644 +--- a/pcs_test/tier0/lib/commands/test_stonith.py ++++ b/pcs_test/tier0/lib/commands/test_stonith.py +@@ -108,9 +108,6 @@ class CreateMixin: + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- instance_attributes, agent_name, output="" +- ) + self.config.env.push_cib( + resources=self._expected_cib(expected_cib_simple) + ) +@@ -158,6 +155,7 @@ class CreateMixin: + operations=[], + meta_attributes={}, + instance_attributes=instance_attributes, ++ enable_agent_self_validation=True, + ), + ) + self.env_assist.assert_reports( +@@ -208,6 +206,7 @@ class CreateMixin: + meta_attributes={}, + instance_attributes=instance_attributes, + allow_invalid_instance_attributes=True, ++ enable_agent_self_validation=True, + ) + self.env_assist.assert_reports( + [ +@@ -245,6 +244,7 @@ class CreateMixin: + operations=[], + meta_attributes={}, + instance_attributes=instance_attributes, ++ enable_agent_self_validation=True, + ), + ) + self.env_assist.assert_reports( +@@ -266,9 +266,6 @@ class CreateMixin: + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- {}, agent_name, output="" +- ) + self.config.env.push_cib( + resources=self._expected_cib(expected_cib_unfencing) + ) +@@ -306,9 +303,6 @@ class CreateMixin: + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- instance_attributes, agent_name, output="" +- ) + self.config.env.push_cib(resources=self._expected_cib(expected_cib)) + + self._create( +@@ -334,9 +328,6 @@ class CreateMixin: + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- {}, agent_name, output="" +- ) + self.config.env.push_cib( + resources=self._expected_cib(expected_cib_operations) + ) +@@ -395,9 +386,6 @@ class CreateMixin: + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- instance_attributes, agent_name, output="" +- ) + self.config.env.push_cib( + resources=self._expected_cib(expected_cib_simple_forced) + ) +@@ -611,9 +599,6 @@ class CreateMixin: + ) + 
self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load() +- self.config.runner.pcmk.stonith_agent_self_validation( +- instance_attributes, agent_name, output="" +- ) + self.config.env.push_cib( + resources=self._expected_cib(expected_cib_simple), wait=timeout + ) +@@ -727,9 +712,6 @@ class CreateInGroup(CreateMixin, TestCase): + ) + self.config.runner.pcmk.load_fake_agent_metadata() + self.config.runner.cib.load(resources=original_cib) +- self.config.runner.pcmk.stonith_agent_self_validation( +- instance_attributes, agent_name, output="" +- ) + self.config.env.push_cib(resources=expected_cib) + + stonith.create_in_group( +diff --git a/pcs_test/tier1/cib_resource/test_create.py b/pcs_test/tier1/cib_resource/test_create.py +index 16c20116..29db0ffd 100644 +--- a/pcs_test/tier1/cib_resource/test_create.py ++++ b/pcs_test/tier1/cib_resource/test_create.py +@@ -751,7 +751,9 @@ class Promotable(TestCase, AssertPcsMixin): + ensure_disabled=False, + use_default_operations=True, + wait=False, ++ enable_agent_self_validation=False, + ): ++ # pylint: disable=too-many-arguments + options = locals() + del options["self"] + return options +diff --git a/pcs_test/tier1/cib_resource/test_stonith_create.py b/pcs_test/tier1/cib_resource/test_stonith_create.py +index 6d6841fe..d8801871 100644 +--- a/pcs_test/tier1/cib_resource/test_stonith_create.py ++++ b/pcs_test/tier1/cib_resource/test_stonith_create.py +@@ -45,7 +45,6 @@ class PlainStonith(ResourceTest): + + + """, +- output_start="Warning: Validation result from agent:", + ) + + def test_error_when_not_valid_name(self): +@@ -249,7 +248,6 @@ class WithMeta(ResourceTest): + + + """, +- output_start="Warning: Validation result from agent:", + ) + + +diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py +index 65ad1090..f4424a58 100644 +--- a/pcs_test/tier1/legacy/test_resource.py ++++ b/pcs_test/tier1/legacy/test_resource.py +@@ -5809,7 +5809,13 @@ class UpdateInstanceAttrs( + def test_agent_self_validation_failure(self): + self.fixture_resource() + self.assert_pcs_fail( +- ["resource", "update", "R", "fake=is_invalid=True"], ++ [ ++ "resource", ++ "update", ++ "R", ++ "fake=is_invalid=True", ++ "--agent-validation", ++ ], + stdout_start="Error: Validation result from agent (use --force to override):", + ) + +diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py +index cf430d75..c528c921 100644 +--- a/pcs_test/tier1/legacy/test_stonith.py ++++ b/pcs_test/tier1/legacy/test_stonith.py +@@ -1286,6 +1286,27 @@ class StonithTest(TestCase, AssertPcsMixin): + "Deleting Resource - apc-fencing\n", + ) + ++ self.assert_pcs_fail( ++ ( ++ "stonith create apc-fencing fence_apc ip=morph-apc username=apc " ++ "--agent-validation" ++ ).split(), ++ stdout_start="Error: Validation result from agent", ++ ) ++ ++ self.assert_pcs_success( ++ ( ++ "stonith create apc-fencing fence_apc ip=morph-apc username=apc " ++ "--agent-validation --force" ++ ).split(), ++ stdout_start="Warning: Validation result from agent", ++ ) ++ ++ self.assert_pcs_success( ++ "stonith remove apc-fencing".split(), ++ stdout_full="Deleting Resource - apc-fencing\n", ++ ) ++ + self.assert_pcs_fail( + "stonith update test3 bad_ipaddr=test username=login".split(), + stdout_regexp=( +@@ -1295,8 +1316,8 @@ class StonithTest(TestCase, AssertPcsMixin): + ) + + self.assert_pcs_success( +- "stonith update test3 username=testA".split(), +- stdout_start="Warning: ", ++ "stonith update test3 username=testA 
--agent-validation".split(), ++ stdout_start="Warning: The resource was misconfigured before the update,", + ) + + self.assert_pcs_success( +-- +2.39.0 + diff --git a/SOURCES/bz2166243-01-fix-stonith-watchdog-timeout-offline-update.patch b/SOURCES/bz2166243-01-fix-stonith-watchdog-timeout-offline-update.patch new file mode 100644 index 0000000..b388333 --- /dev/null +++ b/SOURCES/bz2166243-01-fix-stonith-watchdog-timeout-offline-update.patch @@ -0,0 +1,311 @@ +From 3cd35ed8e5b190c2e8203acd68a0100b84ed3bb4 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 31 Jan 2023 17:44:16 +0100 +Subject: [PATCH] fix update of stonith-watchdog-timeout when cluster is not + running + +--- + pcs/lib/communication/sbd.py | 4 +- + .../lib/commands/sbd/test_disable_sbd.py | 10 ++-- + .../tier0/lib/commands/sbd/test_enable_sbd.py | 49 ++++++++++--------- + pcsd/pcs.rb | 17 +++++-- + 4 files changed, 48 insertions(+), 32 deletions(-) + +diff --git a/pcs/lib/communication/sbd.py b/pcs/lib/communication/sbd.py +index 4762245c..633312a4 100644 +--- a/pcs/lib/communication/sbd.py ++++ b/pcs/lib/communication/sbd.py +@@ -98,8 +98,8 @@ class StonithWatchdogTimeoutAction( + ) + if report_item is None: + self._on_success() +- return [] +- self._report(report_item) ++ else: ++ self._report(report_item) + return self._get_next_list() + + +diff --git a/pcs_test/tier0/lib/commands/sbd/test_disable_sbd.py b/pcs_test/tier0/lib/commands/sbd/test_disable_sbd.py +index 13135fb2..f8f165bf 100644 +--- a/pcs_test/tier0/lib/commands/sbd/test_disable_sbd.py ++++ b/pcs_test/tier0/lib/commands/sbd/test_disable_sbd.py +@@ -19,7 +19,7 @@ class DisableSbd(TestCase): + self.config.corosync_conf.load(filename=self.corosync_conf_name) + self.config.http.host.check_auth(node_labels=self.node_list) + self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero( +- node_labels=self.node_list[:1] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.disable_sbd(node_labels=self.node_list) + disable_sbd(self.env_assist.get_env()) +@@ -56,7 +56,7 @@ class DisableSbd(TestCase): + self.config.corosync_conf.load(filename=self.corosync_conf_name) + self.config.http.host.check_auth(node_labels=self.node_list) + self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero( +- node_labels=self.node_list[:1] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.disable_sbd(node_labels=self.node_list) + +@@ -158,7 +158,9 @@ class DisableSbd(TestCase): + ] + ) + self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero( +- node_labels=online_nodes_list[:1] ++ communication_list=[ ++ [dict(label=node)] for node in self.node_list[1:] ++ ], + ) + self.config.http.sbd.disable_sbd(node_labels=online_nodes_list) + disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True) +@@ -291,7 +293,7 @@ class DisableSbd(TestCase): + self.config.corosync_conf.load(filename=self.corosync_conf_name) + self.config.http.host.check_auth(node_labels=self.node_list) + self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero( +- node_labels=self.node_list[:1] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.disable_sbd( + communication_list=[ +diff --git a/pcs_test/tier0/lib/commands/sbd/test_enable_sbd.py b/pcs_test/tier0/lib/commands/sbd/test_enable_sbd.py +index 57e680e0..f192f429 100644 +--- a/pcs_test/tier0/lib/commands/sbd/test_enable_sbd.py ++++ b/pcs_test/tier0/lib/commands/sbd/test_enable_sbd.py +@@ -130,7 +130,7 @@ class 
OddNumOfNodesSuccess(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -164,7 +164,7 @@ class OddNumOfNodesSuccess(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -218,7 +218,7 @@ class OddNumOfNodesDefaultsSuccess(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -248,7 +248,7 @@ class OddNumOfNodesDefaultsSuccess(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -351,7 +351,7 @@ class WatchdogValidations(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -407,7 +407,7 @@ class EvenNumOfNodes(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -443,7 +443,7 @@ class EvenNumOfNodes(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -480,7 +480,7 @@ class EvenNumOfNodes(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -513,7 +513,7 @@ class EvenNumOfNodes(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + self.config.http.sbd.enable_sbd(node_labels=self.node_list) + enable_sbd( +@@ -604,7 +604,9 @@ class OfflineNodes(TestCase): + node_labels=self.online_node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.online_node_list[0]] ++ communication_list=[ ++ [dict(label=node)] for node in self.online_node_list ++ ], + ) + self.config.http.sbd.enable_sbd(node_labels=self.online_node_list) + enable_sbd( +@@ -644,7 +646,9 @@ class OfflineNodes(TestCase): + node_labels=self.online_node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.online_node_list[0]] ++ 
communication_list=[ ++ [dict(label=node)] for node in self.online_node_list ++ ], + ) + self.config.http.sbd.enable_sbd(node_labels=self.online_node_list) + enable_sbd( +@@ -1226,7 +1230,7 @@ class FailureHandling(TestCase): + node_labels=self.node_list, + ) + self.config.http.pcmk.remove_stonith_watchdog_timeout( +- node_labels=[self.node_list[0]] ++ communication_list=[[dict(label=node)] for node in self.node_list], + ) + + def _remove_calls(self, count): +@@ -1302,7 +1306,8 @@ class FailureHandling(TestCase): + ) + + def test_removing_stonith_wd_timeout_failure(self): +- self._remove_calls(2) ++ self._remove_calls(len(self.node_list) + 1) ++ + self.config.http.pcmk.remove_stonith_watchdog_timeout( + communication_list=[ + self.communication_list_failure[:1], +@@ -1331,7 +1336,7 @@ class FailureHandling(TestCase): + ) + + def test_removing_stonith_wd_timeout_not_connected(self): +- self._remove_calls(2) ++ self._remove_calls(len(self.node_list) + 1) + self.config.http.pcmk.remove_stonith_watchdog_timeout( + communication_list=[ + self.communication_list_not_connected[:1], +@@ -1360,7 +1365,7 @@ class FailureHandling(TestCase): + ) + + def test_removing_stonith_wd_timeout_complete_failure(self): +- self._remove_calls(2) ++ self._remove_calls(len(self.node_list) + 1) + self.config.http.pcmk.remove_stonith_watchdog_timeout( + communication_list=[ + self.communication_list_not_connected[:1], +@@ -1406,7 +1411,7 @@ class FailureHandling(TestCase): + ) + + def test_set_sbd_config_failure(self): +- self._remove_calls(4) ++ self._remove_calls(len(self.node_list) + 1 + 2) + self.config.http.sbd.set_sbd_config( + communication_list=[ + dict( +@@ -1453,7 +1458,7 @@ class FailureHandling(TestCase): + ) + + def test_set_corosync_conf_failed(self): +- self._remove_calls(5) ++ self._remove_calls(len(self.node_list) + 1 + 3) + self.config.env.push_corosync_conf( + corosync_conf_text=_get_corosync_conf_text_with_atb( + self.corosync_conf_name +@@ -1477,7 +1482,7 @@ class FailureHandling(TestCase): + ) + + def test_check_sbd_invalid_data_format(self): +- self._remove_calls(7) ++ self._remove_calls(len(self.node_list) + 1 + 5) + self.config.http.sbd.check_sbd( + communication_list=[ + dict( +@@ -1516,7 +1521,7 @@ class FailureHandling(TestCase): + ) + + def test_check_sbd_failure(self): +- self._remove_calls(7) ++ self._remove_calls(len(self.node_list) + 1 + 5) + self.config.http.sbd.check_sbd( + communication_list=[ + dict( +@@ -1558,7 +1563,7 @@ class FailureHandling(TestCase): + ) + + def test_check_sbd_not_connected(self): +- self._remove_calls(7) ++ self._remove_calls(len(self.node_list) + 1 + 5) + self.config.http.sbd.check_sbd( + communication_list=[ + dict( +@@ -1601,7 +1606,7 @@ class FailureHandling(TestCase): + ) + + def test_get_online_targets_failed(self): +- self._remove_calls(9) ++ self._remove_calls(len(self.node_list) + 1 + 7) + self.config.http.host.check_auth( + communication_list=self.communication_list_failure + ) +@@ -1626,7 +1631,7 @@ class FailureHandling(TestCase): + ) + + def test_get_online_targets_not_connected(self): +- self._remove_calls(9) ++ self._remove_calls(len(self.node_list) + 1 + 7) + self.config.http.host.check_auth( + communication_list=self.communication_list_not_connected + ) +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 452de97f..e3397c25 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1838,13 +1838,22 @@ end + def set_cluster_prop_force(auth_user, prop, val) + cmd = ['property', 'set', "#{prop}=#{val}"] + flags = ['--force'] ++ sig_file = "#{CIB_PATH}.sig" ++ 
retcode = 0 ++ + if pacemaker_running? +- user = auth_user ++ _, _, retcode = run_cmd(auth_user, PCS, *flags, "--", *cmd) + else +- user = PCSAuth.getSuperuserAuth() +- flags += ['-f', CIB_PATH] ++ if File.exist?(CIB_PATH) ++ flags += ['-f', CIB_PATH] ++ _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), PCS, *flags, "--", *cmd) ++ begin ++ File.delete(sig_file) ++ rescue => e ++ $logger.debug("Cannot delete file '#{sig_file}': #{e.message}") ++ end ++ end + end +- _, _, retcode = run_cmd(user, PCS, *flags, "--", *cmd) + return (retcode == 0) + end + +-- +2.39.0 + diff --git a/SOURCES/bz2180700-01-fix-pcs-config-checkpoint-diff.patch b/SOURCES/bz2180700-01-fix-pcs-config-checkpoint-diff.patch new file mode 100644 index 0000000..f0d6b4d --- /dev/null +++ b/SOURCES/bz2180700-01-fix-pcs-config-checkpoint-diff.patch @@ -0,0 +1,84 @@ +From ce48dbe8b410b2dc4f3159e22c243c1d8824cba0 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 16 Mar 2023 11:32:40 +0100 +Subject: [PATCH 1/2] fix `pcs config checkpoint diff` command + +--- + pcs/cli/common/lib_wrapper.py | 15 +-------------- + pcs/config.py | 3 +++ + 2 files changed, 4 insertions(+), 14 deletions(-) + +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 0643a808..b17f43b1 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -1,9 +1,5 @@ + import logging + from collections import namedtuple +-from typing import ( +- Any, +- Dict, +-) + + from pcs import settings + from pcs.cli.common import middleware +@@ -36,9 +32,6 @@ from pcs.lib.commands.constraint import order as constraint_order + from pcs.lib.commands.constraint import ticket as constraint_ticket + from pcs.lib.env import LibraryEnvironment + +-# Note: not properly typed +-_CACHE: Dict[Any, Any] = {} +- + + def wrapper(dictionary): + return namedtuple("wrapper", dictionary.keys())(**dictionary) +@@ -106,12 +99,6 @@ def bind_all(env, run_with_middleware, dictionary): + ) + + +-def get_module(env, middleware_factory, name): +- if name not in _CACHE: +- _CACHE[name] = load_module(env, middleware_factory, name) +- return _CACHE[name] +- +- + def load_module(env, middleware_factory, name): + # pylint: disable=too-many-return-statements, too-many-branches + if name == "acl": +@@ -541,4 +528,4 @@ class Library: + self.middleware_factory = middleware_factory + + def __getattr__(self, name): +- return get_module(self.env, self.middleware_factory, name) ++ return load_module(self.env, self.middleware_factory, name) +diff --git a/pcs/config.py b/pcs/config.py +index 6c90c13f..25007d26 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -711,6 +711,7 @@ def _checkpoint_to_lines(lib, checkpoint_number): + orig_usefile = utils.usefile + orig_filename = utils.filename + orig_middleware = lib.middleware_factory ++ orig_env = lib.env + # configure old code to read the CIB from a file + utils.usefile = True + utils.filename = os.path.join( +@@ -720,6 +721,7 @@ def _checkpoint_to_lines(lib, checkpoint_number): + lib.middleware_factory = orig_middleware._replace( + cib=middleware.cib(utils.filename, utils.touch_cib_file) + ) ++ lib.env = utils.get_cli_env() + # export the CIB to text + result = False, [] + if os.path.isfile(utils.filename): +@@ -728,6 +730,7 @@ def _checkpoint_to_lines(lib, checkpoint_number): + utils.usefile = orig_usefile + utils.filename = orig_filename + lib.middleware_factory = orig_middleware ++ lib.env = orig_env + return result + + +-- +2.39.2 + diff --git 
a/SOURCES/bz2180706-01-fix-pcs-stonith-update-scsi-devices.patch b/SOURCES/bz2180706-01-fix-pcs-stonith-update-scsi-devices.patch new file mode 100644 index 0000000..780b377 --- /dev/null +++ b/SOURCES/bz2180706-01-fix-pcs-stonith-update-scsi-devices.patch @@ -0,0 +1,975 @@ +From 0b9175e04ee0527bbf603ad8dd8240c50c623bd6 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Mon, 20 Mar 2023 10:35:34 +0100 +Subject: [PATCH 2/2] fix `pcs stonith update-scsi-devices` command + +--- + pcs/lib/cib/stonith.py | 168 +++++- + .../test_stonith_update_scsi_devices.py | 571 ++++++++++++++---- + 2 files changed, 601 insertions(+), 138 deletions(-) + +diff --git a/pcs/lib/cib/stonith.py b/pcs/lib/cib/stonith.py +index 85b46fd7..f49bbc37 100644 +--- a/pcs/lib/cib/stonith.py ++++ b/pcs/lib/cib/stonith.py +@@ -158,12 +158,64 @@ def get_node_key_map_for_mpath( + return node_key_map + + +-DIGEST_ATTRS = ["op-digest", "op-secure-digest", "op-restart-digest"] +-DIGEST_ATTR_TO_TYPE_MAP = { ++DIGEST_ATTR_TO_DIGEST_TYPE_MAP = { + "op-digest": "all", + "op-secure-digest": "nonprivate", + "op-restart-digest": "nonreloadable", + } ++TRANSIENT_DIGEST_ATTR_TO_DIGEST_TYPE_MAP = { ++ "#digests-all": "all", ++ "#digests-secure": "nonprivate", ++} ++DIGEST_ATTRS = frozenset(DIGEST_ATTR_TO_DIGEST_TYPE_MAP.keys()) ++TRANSIENT_DIGEST_ATTRS = frozenset( ++ TRANSIENT_DIGEST_ATTR_TO_DIGEST_TYPE_MAP.keys() ++) ++ ++ ++def _get_digest( ++ attr: str, ++ attr_to_type_map: Dict[str, str], ++ calculated_digests: Dict[str, Optional[str]], ++) -> str: ++ """ ++ Return digest of right type for the specified attribute. If missing, raise ++ an error. ++ ++ attr -- name of digest attribute ++ atttr_to_type_map -- map for attribute name to digest type conversion ++ calculated_digests -- digests calculated by pacemaker ++ """ ++ if attr not in attr_to_type_map: ++ raise AssertionError( ++ f"Key '{attr}' is missing in the attribute name to digest type map" ++ ) ++ digest = calculated_digests.get(attr_to_type_map[attr]) ++ if digest is None: ++ # this should not happen and when it does it is pacemaker fault ++ raise LibraryError( ++ ReportItem.error( ++ reports.messages.StonithRestartlessUpdateUnableToPerform( ++ f"necessary digest for '{attr}' attribute is missing" ++ ) ++ ) ++ ) ++ return digest ++ ++ ++def _get_transient_instance_attributes(cib: _Element) -> List[_Element]: ++ """ ++ Return list of instance_attributes elements which could contain digest ++ attributes. ++ ++ cib -- CIB root element ++ """ ++ return cast( ++ List[_Element], ++ cib.xpath( ++ "./status/node_state/transient_attributes/instance_attributes" ++ ), ++ ) + + + def _get_lrm_rsc_op_elements( +@@ -267,21 +319,89 @@ def _update_digest_attrs_in_lrm_rsc_op( + ) + ) + for attr in common_digests_attrs: +- new_digest = calculated_digests[DIGEST_ATTR_TO_TYPE_MAP[attr]] +- if new_digest is None: +- # this should not happen and when it does it is pacemaker fault ++ # update digest in cib ++ lrm_rsc_op.attrib[attr] = _get_digest( ++ attr, DIGEST_ATTR_TO_DIGEST_TYPE_MAP, calculated_digests ++ ) ++ ++ ++def _get_transient_digest_value( ++ old_value: str, stonith_id: str, stonith_type: str, digest: str ++) -> str: ++ """ ++ Return transient digest value with replaced digest. ++ ++ Value has comma separated format: ++ ::,... ++ ++ and we need to replace only digest for our currently updated stonith device. 
++ ++ old_value -- value to be replaced ++ stonith_id -- id of stonith resource ++ stonith_type -- stonith resource type ++ digest -- digest for new value ++ """ ++ new_comma_values_list = [] ++ for comma_value in old_value.split(","): ++ if comma_value: ++ try: ++ _id, _type, _ = comma_value.split(":") ++ except ValueError as e: ++ raise LibraryError( ++ ReportItem.error( ++ reports.messages.StonithRestartlessUpdateUnableToPerform( ++ f"invalid digest attribute value: '{old_value}'" ++ ) ++ ) ++ ) from e ++ if _id == stonith_id and _type == stonith_type: ++ comma_value = ":".join([stonith_id, stonith_type, digest]) ++ new_comma_values_list.append(comma_value) ++ return ",".join(new_comma_values_list) ++ ++ ++def _update_digest_attrs_in_transient_instance_attributes( ++ nvset_el: _Element, ++ stonith_id: str, ++ stonith_type: str, ++ calculated_digests: Dict[str, Optional[str]], ++) -> None: ++ """ ++ Update digests attributes in transient instance attributes element. ++ ++ nvset_el -- instance_attributes element containing nvpairs with digests ++ attributes ++ stonith_id -- id of stonith resource being updated ++ stonith_type -- type of stonith resource being updated ++ calculated_digests -- digests calculated by pacemaker ++ """ ++ for attr in TRANSIENT_DIGEST_ATTRS: ++ nvpair_list = cast( ++ List[_Element], ++ nvset_el.xpath("./nvpair[@name=$name]", name=attr), ++ ) ++ if not nvpair_list: ++ continue ++ if len(nvpair_list) > 1: + raise LibraryError( + ReportItem.error( + reports.messages.StonithRestartlessUpdateUnableToPerform( +- ( +- f"necessary digest for '{attr}' attribute is " +- "missing" +- ) ++ f"multiple digests attributes: '{attr}'" + ) + ) + ) +- # update digest in cib +- lrm_rsc_op.attrib[attr] = new_digest ++ old_value = nvpair_list[0].attrib["value"] ++ if old_value: ++ nvpair_list[0].attrib["value"] = _get_transient_digest_value( ++ str(old_value), ++ stonith_id, ++ stonith_type, ++ _get_digest( ++ attr, ++ TRANSIENT_DIGEST_ATTR_TO_DIGEST_TYPE_MAP, ++ calculated_digests, ++ ), ++ ) + + + def update_scsi_devices_without_restart( +@@ -300,6 +420,8 @@ def update_scsi_devices_without_restart( + id_provider -- elements' ids generator + device_list -- list of updated scsi devices + """ ++ # pylint: disable=too-many-locals ++ cib = get_root(resource_el) + resource_id = resource_el.get("id", "") + roles_with_nodes = get_resource_state(cluster_state, resource_id) + if "Started" not in roles_with_nodes: +@@ -330,17 +452,14 @@ def update_scsi_devices_without_restart( + ) + + lrm_rsc_op_start_list = _get_lrm_rsc_op_elements( +- get_root(resource_el), resource_id, node_name, "start" ++ cib, resource_id, node_name, "start" ++ ) ++ new_instance_attrs_digests = get_resource_digests( ++ runner, resource_id, node_name, new_instance_attrs + ) + if len(lrm_rsc_op_start_list) == 1: + _update_digest_attrs_in_lrm_rsc_op( +- lrm_rsc_op_start_list[0], +- get_resource_digests( +- runner, +- resource_id, +- node_name, +- new_instance_attrs, +- ), ++ lrm_rsc_op_start_list[0], new_instance_attrs_digests + ) + else: + raise LibraryError( +@@ -353,7 +472,7 @@ def update_scsi_devices_without_restart( + + monitor_attrs_list = _get_monitor_attrs(resource_el) + lrm_rsc_op_monitor_list = _get_lrm_rsc_op_elements( +- get_root(resource_el), resource_id, node_name, "monitor" ++ cib, resource_id, node_name, "monitor" + ) + if len(lrm_rsc_op_monitor_list) != len(monitor_attrs_list): + raise LibraryError( +@@ -369,7 +488,7 @@ def update_scsi_devices_without_restart( + + for monitor_attrs in monitor_attrs_list: + 
lrm_rsc_op_list = _get_lrm_rsc_op_elements( +- get_root(resource_el), ++ cib, + resource_id, + node_name, + "monitor", +@@ -398,3 +517,10 @@ def update_scsi_devices_without_restart( + ) + ) + ) ++ for nvset_el in _get_transient_instance_attributes(cib): ++ _update_digest_attrs_in_transient_instance_attributes( ++ nvset_el, ++ resource_id, ++ resource_el.get("type", ""), ++ new_instance_attrs_digests, ++ ) +diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +index 6cb1f80c..db8953c8 100644 +--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py ++++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +@@ -35,6 +35,7 @@ DEFAULT_DIGEST = _DIGEST + "0" + ALL_DIGEST = _DIGEST + "1" + NONPRIVATE_DIGEST = _DIGEST + "2" + NONRELOADABLE_DIGEST = _DIGEST + "3" ++DIGEST_ATTR_VALUE_GOOD_FORMAT = f"stonith_id:stonith_type:{DEFAULT_DIGEST}," + DEV_1 = "/dev/sda" + DEV_2 = "/dev/sdb" + DEV_3 = "/dev/sdc" +@@ -148,33 +149,58 @@ def _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops): + return _fixture_lrm_rsc_ops("start", resource_id, lrm_start_ops) + + +-def _fixture_status_lrm_ops_base( +- resource_id, +- resource_type, +- lrm_ops, +-): ++def _fixture_status_lrm_ops(resource_id, resource_type, lrm_ops): + return f""" +- +- +- +- +- +- {lrm_ops} +- +- +- +- +- ++ ++ ++ ++ {lrm_ops} ++ ++ ++ ++ """ ++ ++ ++def _fixture_digest_nvpair(node_id, digest_name, digest_value): ++ return ( ++ f'' ++ ) ++ ++ ++def _fixture_transient_attributes(node_id, digests_nvpairs): ++ return f""" ++ ++ ++ ++ ++ {digests_nvpairs} ++ ++ ++ """ ++ ++ ++def _fixture_node_state(node_id, lrm_ops=None, transient_attrs=None): ++ if transient_attrs is None: ++ transient_attrs = "" ++ if lrm_ops is None: ++ lrm_ops = "" ++ return f""" ++ ++ {lrm_ops} ++ {transient_attrs} ++ + """ + + +-def _fixture_status_lrm_ops( ++def _fixture_status( + resource_id, + resource_type, + lrm_start_ops=DEFAULT_LRM_START_OPS, + lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, ++ digests_attrs_list=None, + ): +- return _fixture_status_lrm_ops_base( ++ lrm_ops = _fixture_status_lrm_ops( + resource_id, + resource_type, + "\n".join( +@@ -182,18 +208,52 @@ def _fixture_status_lrm_ops( + + _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops) + ), + ) ++ node_states_list = [] ++ if not digests_attrs_list: ++ node_states_list.append( ++ _fixture_node_state("1", lrm_ops, transient_attrs=None) ++ ) ++ else: ++ for node_id, digests_attrs in enumerate(digests_attrs_list, start=1): ++ transient_attrs = _fixture_transient_attributes( ++ node_id, ++ "\n".join( ++ _fixture_digest_nvpair(node_id, name, value) ++ for name, value in digests_attrs ++ ), ++ ) ++ node_state = _fixture_node_state( ++ node_id, ++ lrm_ops=lrm_ops if node_id == 1 else None, ++ transient_attrs=transient_attrs, ++ ) ++ node_states_list.append(node_state) ++ node_states = "\n".join(node_states_list) ++ return f""" ++ ++ {node_states} ++ ++ """ ++ + ++def fixture_digests_xml(resource_id, node_name, devices="", nonprivate=True): ++ nonprivate_xml = ( ++ f""" ++ ++ ++ ++ """ ++ if nonprivate ++ else "" ++ ) + +-def fixture_digests_xml(resource_id, node_name, devices=""): + return f""" + + + + + +- +- +- ++ {nonprivate_xml} + + + +@@ -331,6 +391,8 @@ class UpdateScsiDevicesMixin: + nodes_running_on=1, + start_digests=True, + monitor_digests=True, ++ digests_attrs_list=None, ++ crm_digests_xml=None, + ): + # pylint: disable=too-many-arguments + # pylint: disable=too-many-locals +@@ 
-343,11 +405,12 @@ class UpdateScsiDevicesMixin: + resource_ops=resource_ops, + host_map=host_map, + ), +- status=_fixture_status_lrm_ops( ++ status=_fixture_status( + self.stonith_id, + self.stonith_type, + lrm_start_ops=lrm_start_ops, + lrm_monitor_ops=lrm_monitor_ops, ++ digests_attrs_list=digests_attrs_list, + ), + ) + self.config.runner.pcmk.is_resource_digests_supported() +@@ -360,14 +423,17 @@ class UpdateScsiDevicesMixin: + nodes=FIXTURE_CRM_MON_NODES, + ) + devices_opt = "devices={}".format(devices_value) ++ ++ if crm_digests_xml is None: ++ crm_digests_xml = fixture_digests_xml( ++ self.stonith_id, SCSI_NODE, devices=devices_value ++ ) + if start_digests: + self.config.runner.pcmk.resource_digests( + self.stonith_id, + SCSI_NODE, + name="start.op.digests", +- stdout=fixture_digests_xml( +- self.stonith_id, SCSI_NODE, devices=devices_value +- ), ++ stdout=crm_digests_xml, + args=[devices_opt], + ) + if monitor_digests: +@@ -391,11 +457,7 @@ class UpdateScsiDevicesMixin: + self.stonith_id, + SCSI_NODE, + name=f"{name}-{num}.op.digests", +- stdout=fixture_digests_xml( +- self.stonith_id, +- SCSI_NODE, +- devices=devices_value, +- ), ++ stdout=crm_digests_xml, + args=args, + ) + +@@ -403,14 +465,16 @@ class UpdateScsiDevicesMixin: + self, + devices_before=DEVICES_1, + devices_updated=DEVICES_2, +- devices_add=(), +- devices_remove=(), ++ devices_add=None, ++ devices_remove=None, + unfence=None, + resource_ops=DEFAULT_OPS, + lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, + lrm_start_ops=DEFAULT_LRM_START_OPS, + lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED, + lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED, ++ digests_attrs_list=None, ++ digests_attrs_list_updated=None, + ): + # pylint: disable=too-many-arguments + self.config_cib( +@@ -419,6 +483,7 @@ class UpdateScsiDevicesMixin: + resource_ops=resource_ops, + lrm_monitor_ops=lrm_monitor_ops, + lrm_start_ops=lrm_start_ops, ++ digests_attrs_list=digests_attrs_list, + ) + if unfence: + self.config.corosync_conf.load_content( +@@ -442,20 +507,34 @@ class UpdateScsiDevicesMixin: + devices=devices_updated, + resource_ops=resource_ops, + ), +- status=_fixture_status_lrm_ops( ++ status=_fixture_status( + self.stonith_id, + self.stonith_type, + lrm_start_ops=lrm_start_ops_updated, + lrm_monitor_ops=lrm_monitor_ops_updated, ++ digests_attrs_list=digests_attrs_list_updated, + ), + ) +- self.command( +- devices_updated=devices_updated, +- devices_add=devices_add, +- devices_remove=devices_remove, +- )() ++ kwargs = dict(devices_updated=devices_updated) ++ if devices_add is not None: ++ kwargs["devices_add"] = devices_add ++ if devices_remove is not None: ++ kwargs["devices_remove"] = devices_remove ++ self.command(**kwargs)() + self.env_assist.assert_reports([]) + ++ def digest_attr_value_single(self, digest, last_comma=True): ++ comma = "," if last_comma else "" ++ return f"{self.stonith_id}:{self.stonith_type}:{digest}{comma}" ++ ++ def digest_attr_value_multiple(self, digest, last_comma=True): ++ if self.stonith_type == STONITH_TYPE_SCSI: ++ value = f"{STONITH_ID_MPATH}:{STONITH_TYPE_MPATH}:{DEFAULT_DIGEST}," ++ else: ++ value = f"{STONITH_ID_SCSI}:{STONITH_TYPE_SCSI}:{DEFAULT_DIGEST}," ++ ++ return f"{value}{self.digest_attr_value_single(digest, last_comma=last_comma)}" ++ + + class UpdateScsiDevicesFailuresMixin(UpdateScsiDevicesMixin): + def test_pcmk_doesnt_support_digests(self): +@@ -564,9 +643,7 @@ class UpdateScsiDevicesFailuresMixin(UpdateScsiDevicesMixin): + ) + + def test_no_lrm_start_op(self): +- self.config_cib( +- 
lrm_start_ops=(), start_digests=False, monitor_digests=False +- ) ++ self.config_cib(lrm_start_ops=(), monitor_digests=False) + self.env_assist.assert_raise_library_error( + self.command(), + [ +@@ -619,6 +696,59 @@ class UpdateScsiDevicesFailuresMixin(UpdateScsiDevicesMixin): + expected_in_processor=False, + ) + ++ def test_crm_resource_digests_missing_for_transient_digests_attrs(self): ++ self.config_cib( ++ digests_attrs_list=[ ++ [ ++ ( ++ "digests-secure", ++ self.digest_attr_value_single(ALL_DIGEST), ++ ), ++ ], ++ ], ++ crm_digests_xml=fixture_digests_xml( ++ self.stonith_id, SCSI_NODE, devices="", nonprivate=False ++ ), ++ ) ++ self.env_assist.assert_raise_library_error( ++ self.command(), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "necessary digest for '#digests-secure' attribute is " ++ "missing" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_multiple_digests_attributes(self): ++ self.config_cib( ++ digests_attrs_list=[ ++ 2 ++ * [ ++ ( ++ "digests-all", ++ self.digest_attr_value_single(DEFAULT_DIGEST), ++ ), ++ ], ++ ], ++ ) ++ self.env_assist.assert_raise_library_error( ++ self.command(), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=("multiple digests attributes: '#digests-all'"), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ + def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self): + self.config_cib( + resource_ops=( +@@ -809,7 +939,7 @@ class UpdateScsiDevicesFailuresMixin(UpdateScsiDevicesMixin): + stonith_type=self.stonith_type, + devices=DEVICES_2, + ), +- status=_fixture_status_lrm_ops( ++ status=_fixture_status( + self.stonith_id, + self.stonith_type, + lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, +@@ -956,6 +1086,28 @@ class UpdateScsiDevicesFailuresMixin(UpdateScsiDevicesMixin): + ] + ) + ++ def test_transient_digests_attrs_bad_value_format(self): ++ bad_format = f"{DIGEST_ATTR_VALUE_GOOD_FORMAT}id:type," ++ self.config_cib( ++ digests_attrs_list=[ ++ [ ++ ("digests-all", DIGEST_ATTR_VALUE_GOOD_FORMAT), ++ ("digests-secure", bad_format), ++ ] ++ ] ++ ) ++ self.env_assist.assert_raise_library_error( ++ self.command(), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=f"invalid digest attribute value: '{bad_format}'", ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ + + class UpdateScsiDevicesSetBase(UpdateScsiDevicesMixin, CommandSetMixin): + def test_update_1_to_1_devices(self): +@@ -999,80 +1151,6 @@ class UpdateScsiDevicesSetBase(UpdateScsiDevicesMixin, CommandSetMixin): + unfence=[DEV_3, DEV_4], + ) + +- def test_default_monitor(self): +- self.assert_command_success(unfence=[DEV_2]) +- +- def test_no_monitor_ops(self): +- self.assert_command_success( +- unfence=[DEV_2], +- resource_ops=(), +- lrm_monitor_ops=(), +- lrm_monitor_ops_updated=(), +- ) +- +- def test_1_monitor_with_timeout(self): +- self.assert_command_success( +- unfence=[DEV_2], +- resource_ops=(("monitor", "30s", "10s", None),), +- lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), +- lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), +- ) +- +- def test_2_monitor_ops_with_timeouts(self): +- self.assert_command_success( +- unfence=[DEV_2], 
+- resource_ops=( +- ("monitor", "30s", "10s", None), +- ("monitor", "40s", "20s", None), +- ), +- lrm_monitor_ops=( +- ("30000", DEFAULT_DIGEST, None, None), +- ("40000", DEFAULT_DIGEST, None, None), +- ), +- lrm_monitor_ops_updated=( +- ("30000", ALL_DIGEST, None, None), +- ("40000", ALL_DIGEST, None, None), +- ), +- ) +- +- def test_2_monitor_ops_with_one_timeout(self): +- self.assert_command_success( +- unfence=[DEV_2], +- resource_ops=( +- ("monitor", "30s", "10s", None), +- ("monitor", "60s", None, None), +- ), +- lrm_monitor_ops=( +- ("30000", DEFAULT_DIGEST, None, None), +- ("60000", DEFAULT_DIGEST, None, None), +- ), +- lrm_monitor_ops_updated=( +- ("30000", ALL_DIGEST, None, None), +- ("60000", ALL_DIGEST, None, None), +- ), +- ) +- +- def test_various_start_ops_one_lrm_start_op(self): +- self.assert_command_success( +- unfence=[DEV_2], +- resource_ops=( +- ("monitor", "60s", None, None), +- ("start", "0s", "40s", None), +- ("start", "0s", "30s", "1"), +- ("start", "10s", "5s", None), +- ("start", "20s", None, None), +- ), +- ) +- +- def test_1_nonrecurring_start_op_with_timeout(self): +- self.assert_command_success( +- unfence=[DEV_2], +- resource_ops=( +- ("monitor", "60s", None, None), +- ("start", "0s", "40s", None), +- ), +- ) +- + + class UpdateScsiDevicesAddRemoveBase( + UpdateScsiDevicesMixin, CommandAddRemoveMixin +@@ -1242,6 +1320,221 @@ class MpathFailuresMixin: + self.assert_failure("node1:1;node2=", ["node2", "node3"]) + + ++class UpdateScsiDevicesDigestsBase(UpdateScsiDevicesMixin): ++ def test_default_monitor(self): ++ self.assert_command_success(unfence=[DEV_2]) ++ ++ def test_no_monitor_ops(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=(), ++ lrm_monitor_ops=(), ++ lrm_monitor_ops_updated=(), ++ ) ++ ++ def test_1_monitor_with_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=(("monitor", "30s", "10s", None),), ++ lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), ++ lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), ++ ) ++ ++ def test_2_monitor_ops_with_timeouts(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "40s", "20s", None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("40000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), ++ ("40000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_2_monitor_ops_with_one_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "60s", None, None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("60000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), ++ ("60000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_various_start_ops_one_lrm_start_op(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ("start", "0s", "30s", "1"), ++ ("start", "10s", "5s", None), ++ ("start", "20s", None, None), ++ ), ++ ) ++ ++ def test_1_nonrecurring_start_op_with_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ), ++ ) ++ ++ def _digests_attrs_before(self, last_comma=True): ++ return [ ++ ( ++ "digests-all", ++ 
self.digest_attr_value_single(DEFAULT_DIGEST, last_comma), ++ ), ++ ( ++ "digests-secure", ++ self.digest_attr_value_single(DEFAULT_DIGEST, last_comma), ++ ), ++ ] ++ ++ def _digests_attrs_after(self, last_comma=True): ++ return [ ++ ( ++ "digests-all", ++ self.digest_attr_value_single(ALL_DIGEST, last_comma), ++ ), ++ ( ++ "digests-secure", ++ self.digest_attr_value_single(NONPRIVATE_DIGEST, last_comma), ++ ), ++ ] ++ ++ def _digests_attrs_before_multi(self, last_comma=True): ++ return [ ++ ( ++ "digests-all", ++ self.digest_attr_value_multiple(DEFAULT_DIGEST, last_comma), ++ ), ++ ( ++ "digests-secure", ++ self.digest_attr_value_multiple(DEFAULT_DIGEST, last_comma), ++ ), ++ ] ++ ++ def _digests_attrs_after_multi(self, last_comma=True): ++ return [ ++ ( ++ "digests-all", ++ self.digest_attr_value_multiple(ALL_DIGEST, last_comma), ++ ), ++ ( ++ "digests-secure", ++ self.digest_attr_value_multiple(NONPRIVATE_DIGEST, last_comma), ++ ), ++ ] ++ ++ def test_transient_digests_attrs_all_nodes(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=len(self.existing_nodes) ++ * [self._digests_attrs_before()], ++ digests_attrs_list_updated=len(self.existing_nodes) ++ * [self._digests_attrs_after()], ++ ) ++ ++ def test_transient_digests_attrs_not_on_all_nodes(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=[self._digests_attrs_before()], ++ digests_attrs_list_updated=[self._digests_attrs_after()], ++ ) ++ ++ def test_transient_digests_attrs_all_nodes_multi_value(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=len(self.existing_nodes) ++ * [self._digests_attrs_before_multi()], ++ digests_attrs_list_updated=len(self.existing_nodes) ++ * [self._digests_attrs_after_multi()], ++ ) ++ ++ def test_transient_digests_attrs_not_on_all_nodes_multi_value(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=[self._digests_attrs_before()], ++ digests_attrs_list_updated=[self._digests_attrs_after()], ++ ) ++ ++ def test_transient_digests_attrs_not_all_digest_types(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=len(self.existing_nodes) ++ * [self._digests_attrs_before()[0:1]], ++ digests_attrs_list_updated=len(self.existing_nodes) ++ * [self._digests_attrs_after()[0:1]], ++ ) ++ ++ def test_transient_digests_attrs_without_digests_attrs(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=len(self.existing_nodes) * [[]], ++ digests_attrs_list_updated=len(self.existing_nodes) * [[]], ++ ) ++ ++ def test_transient_digests_attrs_without_last_comma(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=[self._digests_attrs_before(last_comma=False)], ++ digests_attrs_list_updated=[ ++ self._digests_attrs_after(last_comma=False) ++ ], ++ ) ++ ++ def test_transient_digests_attrs_without_last_comma_multi_value(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=[ ++ self._digests_attrs_before_multi(last_comma=False) ++ ], ++ digests_attrs_list_updated=[ ++ self._digests_attrs_after_multi(last_comma=False) ++ ], ++ ) ++ ++ def test_transient_digests_attrs_no_digest_for_our_stonith_id(self): ++ digests_attrs_list = len(self.existing_nodes) * [ ++ [ ++ ("digests-all", DIGEST_ATTR_VALUE_GOOD_FORMAT), ++ ("digests-secure", DIGEST_ATTR_VALUE_GOOD_FORMAT), ++ ] ++ ] ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=digests_attrs_list, ++ 
digests_attrs_list_updated=digests_attrs_list, ++ ) ++ ++ def test_transient_digests_attrs_digests_with_empty_value(self): ++ digests_attrs_list = len(self.existing_nodes) * [ ++ [("digests-all", ""), ("digests-secure", "")] ++ ] ++ self.assert_command_success( ++ unfence=[DEV_2], ++ digests_attrs_list=digests_attrs_list, ++ digests_attrs_list_updated=digests_attrs_list, ++ ) ++ ++ + @mock.patch.object( + settings, + "pacemaker_api_result_schema", +@@ -1334,3 +1627,47 @@ class TestUpdateScsiDevicesAddRemoveFailuresScsi( + UpdateScsiDevicesAddRemoveFailuresBaseMixin, ScsiMixin, TestCase + ): + pass ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesDigestsSetScsi( ++ UpdateScsiDevicesDigestsBase, ScsiMixin, CommandSetMixin, TestCase ++): ++ pass ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesDigestsAddRemoveScsi( ++ UpdateScsiDevicesDigestsBase, ScsiMixin, CommandAddRemoveMixin, TestCase ++): ++ pass ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesDigestsSetMpath( ++ UpdateScsiDevicesDigestsBase, MpathMixin, CommandSetMixin, TestCase ++): ++ pass ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesDigestsAddRemoveMpath( ++ UpdateScsiDevicesDigestsBase, MpathMixin, CommandAddRemoveMixin, TestCase ++): ++ pass +-- +2.39.2 + diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch new file mode 100644 index 0000000..0ec8df4 --- /dev/null +++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch @@ -0,0 +1,53 @@ +From 4470259655fa10cb5908fee00653483e7056f1a7 Mon Sep 17 00:00:00 2001 +From: Ivan Devat +Date: Tue, 20 Nov 2018 15:03:56 +0100 +Subject: [PATCH] do not support cluster setup with udp(u) transport + +--- + pcs/pcs.8.in | 2 ++ + pcs/usage.py | 1 + + pcsd/public/css/style.css | 3 +++ + 3 files changed, 6 insertions(+) + +diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in +index d1a6dcf2..cd00f8ac 100644 +--- a/pcs/pcs.8.in ++++ b/pcs/pcs.8.in +@@ -436,6 +436,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable + + Transports udp and udpu: + .br ++WARNING: These transports are not supported in RHEL 8. ++.br + These transports are limited to one address per node. They do not support traffic encryption nor compression. + .br + Transport options are: ip_version, netmtu +diff --git a/pcs/usage.py b/pcs/usage.py +index c3174d82..0a6ffcb6 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1004,6 +1004,7 @@ Commands: + hash=sha256. To disable encryption, set cipher=none and hash=none. + + Transports udp and udpu: ++ WARNING: These transports are not supported in RHEL 8. + These transports are limited to one address per node. They do not + support traffic encryption nor compression. 
+ Transport options are: +diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css +index 2f26e831..a7702ac4 100644 +--- a/pcsd/public/css/style.css ++++ b/pcsd/public/css/style.css +@@ -949,6 +949,9 @@ table.args-table td.reg { + width: 6ch; + text-align: right; + } ++#csetup-transport .transport-types { ++ display: none; ++} + #csetup-transport-options.udp .knet-only, + #csetup-transport-options.knet .without-knet + { +-- +2.38.1 + diff --git a/SOURCES/pcsd-rubygem-json-error-message-change.patch b/SOURCES/pcsd-rubygem-json-error-message-change.patch new file mode 100644 index 0000000..60e31f8 --- /dev/null +++ b/SOURCES/pcsd-rubygem-json-error-message-change.patch @@ -0,0 +1,40 @@ +From 91d13a82a0803f2a4653a2ec9379a27f4555dcb5 Mon Sep 17 00:00:00 2001 +From: Mamoru TASAKA +Date: Thu, 8 Dec 2022 22:47:59 +0900 +Subject: [PATCH 3/5] pcsd ruby: adjust to json 2.6.3 error message change + +json 2.6.3 now removes line number information from parser +error message. +Adjust regex pattern on pcs test code for ruby to support +this error format. + +Fixes #606 . +--- + pcsd/test/test_config.rb | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pcsd/test/test_config.rb b/pcsd/test/test_config.rb +index 7aaf4349..a580b24f 100644 +--- a/pcsd/test/test_config.rb ++++ b/pcsd/test/test_config.rb +@@ -126,7 +126,7 @@ class TestConfig < Test::Unit::TestCase + assert_equal('error', $logger.log[0][0]) + assert_match( + # the number is based on JSON gem version +- /Unable to parse pcs_settings file: \d+: unexpected token/, ++ /Unable to parse pcs_settings file: (\d+: )?unexpected token/, + $logger.log[0][1] + ) + assert_equal(fixture_empty_config, cfg.text) +@@ -723,7 +723,7 @@ class TestCfgKnownHosts < Test::Unit::TestCase + assert_equal('error', $logger.log[0][0]) + assert_match( + # the number is based on JSON gem version +- /Unable to parse known-hosts file: \d+: unexpected token/, ++ /Unable to parse known-hosts file: (\d+: )?unexpected token/, + $logger.log[0][1] + ) + assert_empty_data(cfg) +-- +2.39.0 + diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec new file mode 100644 index 0000000..fad4568 --- /dev/null +++ b/SPECS/pcs.spec @@ -0,0 +1,1159 @@ +Name: pcs +Version: 0.10.15 +Release: 4%{?dist}.1 +# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ +# https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses +# GPL-2.0-only: pcs +# Apache-2.0: dataclasses, tornado +# Apache-2.0 or BSD-3-Clause: dateutil +# MIT: backports, dacite, daemons, ember, ethon, handlebars, jquery, jquery-ui, +# mustermann, rack, rack-protection, rack-test, sinatra, tilt +# GPL-2.0-only or Ruby: eventmachine, json +# (GPL-2.0-only or Ruby) and BSD-2-Clause: thin +# BSD-2-Clause or Ruby: open4, ruby2_keywords +# BSD-3-Clause and MIT: ffi +License: GPL-2.0-only AND Apache-2.0 AND MIT AND BSD-3-Clause AND (GPL-2.0-only OR Ruby) AND (BSD-2-Clause OR Ruby) AND BSD-2-Clause AND (Apache-2.0 OR BSD-3-Clause) +URL: https://github.com/ClusterLabs/pcs +Group: System Environment/Base +Summary: Pacemaker Configuration System +#building only for architectures with pacemaker and corosync available +ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 + +%global version_or_commit %{version} +# %%global version_or_commit %%{version}.27-cb2fb + +%global pcs_source_name %{name}-%{version_or_commit} + +# ui_commit can be determined by hash, tag or branch +%global ui_commit 0.1.13 +%global ui_modules_version 0.1.13 +%global ui_src_name pcs-web-ui-%{ui_commit} + +%global 
pcs_snmp_pkg_name pcs-snmp + +%global pyagentx_version 0.4.pcs.2 +%global dataclasses_version 0.8 +%global dacite_version 1.6.0 +%global dateutil_version 2.8.2 +%global version_rubygem_backports 3.23.0 +%global version_rubygem_daemons 1.4.1 +%global version_rubygem_ethon 0.16.0 +%global version_rubygem_eventmachine 1.2.7 +%global version_rubygem_ffi 1.15.5 +%global version_rubygem_json 2.6.3 +%global version_rubygem_mustermann 2.0.2 +%global version_rubygem_open4 1.3.4 +%global version_rubygem_rack 2.2.6.4 +%global version_rubygem_rack_protection 2.2.4 +%global version_rubygem_rack_test 2.0.2 +%global version_rubygem_rexml 3.2.5 +%global version_rubygem_ruby2_keywords 0.0.5 +%global version_rubygem_sinatra 2.2.4 +%global version_rubygem_thin 1.8.1 +%global version_rubygem_tilt 2.0.11 + +# javascript bundled libraries for old web-ui +%global ember_version 1.4.0 +%global handlebars_version 1.2.1 +%global jquery_ui_version 1.12.1 +%global jquery_version 3.6.0 + +# DO NOT UPDATE +# Tornado 6.2 requires Python 3.7+ +%global tornado_version 6.1.0 + +%global pcs_bundled_dir pcs_bundled +%global pcsd_public_dir pcsd/public +%global rubygem_bundle_dir pcsd/vendor/bundle +%global rubygem_cache_dir %{rubygem_bundle_dir}/cache + +# mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test from /usr/bin/env ruby to #!/usr/bin/ruby +#*** ERROR: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test.ru has shebang which doesn't start with '/' (../../bin/rackup) +#mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/rackup_stub.rb from /usr/bin/env ruby to #!/usr/bin/ruby +#*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/sample_rackup.ru is executable but has empty or no shebang, removing executable bit +#*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/lighttpd.conf is executable but has empty or no shebang, removing executable bit +#*** ERROR: ambiguous python shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/ffi-1.9.25/ext/ffi_c/libffi/generate-darwin-source-and-headers.py: #!/usr/bin/env python. Change it to python3 (or python2) explicitly. +%undefine __brp_mangle_shebangs + +# https://fedoraproject.org/wiki/Changes/Avoid_usr_bin_python_in_RPM_Build#Python_bytecompilation +# Enforce python3 because bytecompilation of tornado produced warnings: +# DEPRECATION WARNING: python2 invoked with /usr/bin/python. +# Use /usr/bin/python3 or /usr/bin/python2 +# /usr/bin/python will be removed or switched to Python 3 in the future. 
+%global __python %{__python3} + +Source0: %{url}/archive/%{version_or_commit}/%{pcs_source_name}.tar.gz +Source1: HAM-logo.png + +Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz +Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz +Source43: https://github.com/ericvsmith/dataclasses/archive/%{dataclasses_version}/dataclasses-%{dataclasses_version}.tar.gz +Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz +Source45: https://pypi.python.org/packages/source/p/python-dateutil/python-dateutil-%{dateutil_version}.tar.gz + +Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem +Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem +Source83: https://rubygems.org/downloads/ffi-%{version_rubygem_ffi}.gem +Source84: https://rubygems.org/downloads/json-%{version_rubygem_json}.gem +Source85: https://rubygems.org/downloads/rexml-%{version_rubygem_rexml}.gem +Source86: https://rubygems.org/downloads/mustermann-%{version_rubygem_mustermann}.gem +# We needed to re-upload open4 rubygem because of issues with sources in gating. +# Unfortunately, there was no newer version available, therefore we had to +# change its 'version' ourselves. +Source87: https://rubygems.org/downloads/open4-%{version_rubygem_open4}.gem#/open4-%{version_rubygem_open4}-1.gem +Source88: https://rubygems.org/downloads/rack-%{version_rubygem_rack}.gem +Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_protection}.gem +Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem +Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem +Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem +Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem +Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem +Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem +Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem + +Source100: https://github.com/ClusterLabs/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz +Source101: https://github.com/ClusterLabs/pcs-web-ui/releases/download/%{ui_modules_version}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz + +# Patches from upstream. +# They should come before downstream patches to avoid unnecessary conflicts. +# Z-streams are exception here: they can come from upstream but should be +# applied at the end to keep z-stream changes as straightforward as possible. + +# pcs patches: <= 200 +# Patch1: bzNUMBER-01-name.patch +Patch1: do-not-support-cluster-setup-with-udp-u-transport.patch +Patch2: bz2151511-01-add-warning-when-updating-a-misconfigured-resource.patch +Patch3: bz2151166-01-fix-displaying-bool-and-integer-values.patch +Patch4: pcsd-rubygem-json-error-message-change.patch +Patch5: bz2159455-01-add-agent-validation-option.patch +Patch6: bz2158804-01-fix-stonith-watchdog-timeout-validation.patch +Patch7: bz2166243-01-fix-stonith-watchdog-timeout-offline-update.patch +Patch8: bz2180700-01-fix-pcs-config-checkpoint-diff.patch +Patch9: bz2180706-01-fix-pcs-stonith-update-scsi-devices.patch + +# Downstream patches do not come from upstream. They adapt pcs for specific +# RHEL needs. 
+# Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch + +# ui patches: >200 + +# git for patches +BuildRequires: git-core +#printf from coreutils is used in makefile +BuildRequires: coreutils +# python for pcs +BuildRequires: platform-python +BuildRequires: python3-devel +BuildRequires: platform-python-setuptools +BuildRequires: python3-pycurl +BuildRequires: python3-pip +BuildRequires: python3-pyparsing +BuildRequires: python3-cryptography +BuildRequires: python3-lxml +# for building bundled python packages +BuildRequires: python3-wheel +# for bundled python dateutil +BuildRequires: python3-setuptools_scm +# gcc for compiling custom rubygems +BuildRequires: gcc +BuildRequires: gcc-c++ +# ruby and gems for pcsd +BuildRequires: ruby >= 2.2.0 +BuildRequires: ruby-devel +BuildRequires: rubygems +BuildRequires: rubygem-bundler +# ruby libraries for tests +BuildRequires: rubygem-test-unit +# for touching patch files (sanitization function) +BuildRequires: diffstat +# for post, preun and postun macros +BuildRequires: systemd +# pam is used for authentication inside daemon (python ctypes) +# needed for tier0 tests during build +BuildRequires: pam + +# pcsd fonts and font management tools for creating symlinks to fonts +BuildRequires: fontconfig +BuildRequires: liberation-sans-fonts +BuildRequires: make +BuildRequires: overpass-fonts +# Red Hat logo for creating symlink of favicon +BuildRequires: redhat-logos + +# for building web ui +BuildRequires: npm + +# cluster stack packages for pkg-config +BuildRequires: booth +BuildRequires: corosync-qdevice-devel +BuildRequires: corosynclib-devel >= 3.0 +BuildRequires: fence-agents-common +BuildRequires: pacemaker-libs-devel >= 2.0.0 +BuildRequires: resource-agents +BuildRequires: sbd + +# python and libraries for pcs, setuptools for pcs entrypoint +Requires: platform-python +Requires: python3-lxml +Requires: platform-python-setuptools +Requires: python3-clufter => 0.70.0 +Requires: python3-pycurl +Requires: python3-pyparsing +Requires: python3-cryptography +# ruby and gems for pcsd +Requires: ruby >= 2.2.0 +Requires: rubygems +# for killall +Requires: psmisc +# cluster stack and related packages +Requires: pcmk-cluster-manager >= 2.0.0 +Suggests: pacemaker +Requires: (corosync >= 2.99 if pacemaker) +# pcs enables corosync encryption by default so we require libknet1-plugins-all +Requires: (libknet1-plugins-all if corosync) +Requires: pacemaker-cli >= 2.0.0 +# for post, preun and postun macros +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +# pam is used for authentication inside daemon (python ctypes) +# more details: https://bugzilla.redhat.com/show_bug.cgi?id=1717113 +Requires: pam +# pcsd fonts +Requires: liberation-sans-fonts +Requires: overpass-fonts +# favicon Red Hat logo +Requires: redhat-logos +# needs logrotate for /etc/logrotate.d/pcsd +Requires: logrotate + +Provides: bundled(tornado) = %{tornado_version} +Provides: bundled(dataclasses) = %{dataclasses_version} +Provides: bundled(dacite) = %{dacite_version} +Provides: bundled(dateutil) = %{dateutil_version} +Provides: bundled(backports) = %{version_rubygem_backports} +Provides: bundled(daemons) = %{version_rubygem_daemons} +Provides: bundled(ethon) = %{version_rubygem_ethon} +Provides: bundled(eventmachine) = %{version_rubygem_eventmachine} +Provides: bundled(ffi) = %{version_rubygem_ffi} +Provides: bundled(json) = %{version_rubygem_json} +Provides: bundled(mustermann) = %{version_rubygem_mustermann} +Provides: bundled(open4) = 
%{version_rubygem_open4} +Provides: bundled(rack) = %{version_rubygem_rack} +Provides: bundled(rack_protection) = %{version_rubygem_rack_protection} +Provides: bundled(rack_test) = %{version_rubygem_rack_test} +Provides: bundled(rexml) = %{version_rubygem_rexml} +Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords} +Provides: bundled(sinatra) = %{version_rubygem_sinatra} +Provides: bundled(thin) = %{version_rubygem_thin} +Provides: bundled(tilt) = %{version_rubygem_tilt} + +# javascript bundled libraries for old web-ui +Provides: bundled(ember) = %{ember_version} +Provides: bundled(handlebars) = %{handlebars_version} +Provides: bundled(jquery) = %{jquery_version} +Provides: bundled(jquery-ui) = %{jquery_ui_version} + +%description +pcs is a corosync and pacemaker configuration tool. It permits users to +easily view, modify and create pacemaker based clusters. + +# pcs-snmp package definition +%package -n %{pcs_snmp_pkg_name} +Group: System Environment/Base +Summary: Pacemaker cluster SNMP agent +# https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses +# GPL-2.0-only: pcs +# BSD-2-Clause: pyagentx +License: GPL-2.0-only and BSD-2-Clause +URL: https://github.com/ClusterLabs/pcs + +# tar for unpacking pyagetx source tar ball +BuildRequires: tar + +Requires: pcs = %{version}-%{release} +Requires: pacemaker +Requires: net-snmp + +Provides: bundled(pyagentx) = %{pyagentx_version} + +%description -n %{pcs_snmp_pkg_name} +SNMP agent that provides information about pacemaker cluster to the master agent (snmpd) + +%prep +# -- following is inspired by python-simplejon.el5 -- +# Update timestamps on the files touched by a patch, to avoid non-equal +# .pyc/.pyo files across the multilib peers within a build + +update_times(){ + # update_times ... + # set the access and modification times of each file_to_touch to the times + # of reference_file + + # put all args to file_list + file_list=("$@") + # first argument is reference_file: so take it and remove from file_list + reference_file=${file_list[0]} + unset file_list[0] + + for fname in ${file_list[@]}; do + # some files could be deleted by a patch therefore we test file for + # existance before touch to avoid exit with error: No such file or + # directory + # diffstat cannot create list of files without deleted files + test -e $fname && touch -r $reference_file $fname + done +} + +update_times_patch(){ + # update_times_patch + # set the access and modification times of each file in patch to the times + # of patch_file_name + + patch_file_name=$1 + + # diffstat + # -l lists only the filenames. No histogram is generated. + # -p override the logic that strips common pathnames, + # simulating the patch "-p" option. 
(Strip the smallest prefix containing + # num leading slashes from each file name found in the patch file) + update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}` +} + +# documentation for setup/autosetup/autopatch: +# * http://ftp.rpm.org/max-rpm/s1-rpm-inside-macros.html +# * https://rpm-software-management.github.io/rpm/manual/autosetup.html +# patch web-ui sources +%autosetup -D -T -b 100 -a 101 -S git -n %{ui_src_name} -N +%autopatch -p1 -m 201 +# update_times_patch %%{PATCH201} + +# patch pcs sources +%autosetup -S git -n %{pcs_source_name} -N +%autopatch -p1 -M 200 + +update_times_patch %{PATCH1} +update_times_patch %{PATCH2} +update_times_patch %{PATCH3} +update_times_patch %{PATCH4} +update_times_patch %{PATCH5} +update_times_patch %{PATCH6} +update_times_patch %{PATCH7} +update_times_patch %{PATCH8} +update_times_patch %{PATCH9} + +# update_times_patch %{PATCH101} + +cp -f %SOURCE1 %{pcsd_public_dir}/images + +# prepare dirs/files necessary for building all bundles +# ----------------------------------------------------- +# 1) rubygems sources + +mkdir -p %{rubygem_cache_dir} +cp -f %SOURCE81 %{rubygem_cache_dir} +cp -f %SOURCE82 %{rubygem_cache_dir} +cp -f %SOURCE83 %{rubygem_cache_dir} +cp -f %SOURCE84 %{rubygem_cache_dir} +cp -f %SOURCE85 %{rubygem_cache_dir} +cp -f %SOURCE86 %{rubygem_cache_dir} +# For reason why we are renaming open4 rubygem, see comment of source +# definition above. +cp -f %SOURCE87 %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem +cp -f %SOURCE88 %{rubygem_cache_dir} +cp -f %SOURCE89 %{rubygem_cache_dir} +cp -f %SOURCE90 %{rubygem_cache_dir} +cp -f %SOURCE91 %{rubygem_cache_dir} +cp -f %SOURCE92 %{rubygem_cache_dir} +cp -f %SOURCE93 %{rubygem_cache_dir} +cp -f %SOURCE94 %{rubygem_cache_dir} +cp -f %SOURCE95 %{rubygem_cache_dir} +cp -f %SOURCE96 %{rubygem_cache_dir} + + +# 2) prepare python bundles +mkdir -p %{pcs_bundled_dir}/src +cp -f %SOURCE41 rpm/ +cp -f %SOURCE42 rpm/ +cp -f %SOURCE43 rpm/ +cp -f %SOURCE44 rpm/ +cp -f %SOURCE45 rpm/ + +%build +%define debug_package %{nil} + +./autogen.sh +%{configure} --enable-local-build --enable-use-local-cache-only --enable-individual-bundling --enable-booth-enable-authfile-set --enable-booth-enable-authfile-unset PYTHON=%{__python3} ruby_CFLAGS="%{optflags}" ruby_LIBS="%{build_ldflags}" +make all + +# build pcs-web-ui +make -C %{_builddir}/%{ui_src_name} build BUILD_USE_EXISTING_NODE_MODULES=true + +%install +rm -rf $RPM_BUILD_ROOT +pwd + +%make_install + +# something like make install for pcs-web-ui +cp -r %{_builddir}/%{ui_src_name}/build ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/ui + +# prepare license files +# some rubygems do not have a license file (thin) +mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt +mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE +mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE +mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE +mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS +mv %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/LICENSE json_LICENSE +mv 
%{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE +mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE +mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE +mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License +mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt +mv %{rubygem_bundle_dir}/gems/ruby2_keywords-%{version_rubygem_ruby2_keywords}/LICENSE ruby2_keywords_LICENSE +mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE +mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING + +# symlink favicon into pcsd directories +ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/images/favicon.png + + +cp %{pcs_bundled_dir}/src/pyagentx-*/LICENSE.txt pyagentx_LICENSE.txt +cp %{pcs_bundled_dir}/src/pyagentx-*/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt +cp %{pcs_bundled_dir}/src/pyagentx-*/README.md pyagentx_README.md + +cp %{pcs_bundled_dir}/src/tornado-*/LICENSE tornado_LICENSE +cp %{pcs_bundled_dir}/src/tornado-*/README.rst tornado_README.rst + +cp %{pcs_bundled_dir}/src/dataclasses-*/LICENSE.txt dataclasses_LICENSE.txt +cp %{pcs_bundled_dir}/src/dataclasses-*/README.rst dataclasses_README.rst + +cp %{pcs_bundled_dir}/src/dacite-*/LICENSE dacite_LICENSE +cp %{pcs_bundled_dir}/src/dacite-*/README.md dacite_README.md + +cp %{pcs_bundled_dir}/src/python-dateutil-*/LICENSE dateutil_LICENSE +cp %{pcs_bundled_dir}/src/python-dateutil-*/README.rst dateutil_README.rst + +# We are not building debug package for pcs but we need to add MiniDebuginfo +# to the bundled shared libraries from rubygem extensions in order to satisfy +# rpmdiff's binary stripping checker. +# Therefore we call find-debuginfo.sh script manually in order to strip +# binaries and add MiniDebugInfo with .gnu_debugdata section +/usr/lib/rpm/find-debuginfo.sh -j2 -m -i -S debugsourcefiles.list +# find-debuginfo.sh generated some files into /usr/lib/debug and +# /usr/src/debug/ that we don't want in the package +rm -rf $RPM_BUILD_ROOT%{_libdir}/debug +rm -rf $RPM_BUILD_ROOT/usr/lib/debug +rm -rf $RPM_BUILD_ROOT%{_prefix}/src/debug + +# We can remove files required for gem compilation +rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext +rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext +rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext +rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext + +%check +run_all_tests(){ + #run pcs tests + + # disabled tests: + # + # pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe + # For an unknown reason this test is failing in mock environment and + # passing outside the mock environment. + # TODO: Investigate the issue + + %{__python3} pcs_test/suite --tier0 -v --vanilla --all-but \ + pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \ + pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \ + + test_result_python=$? 
+ + #run pcsd tests and remove them + GEM_HOME=$RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir} ruby \ + -I$RPM_BUILD_ROOT%{_libdir}/pcsd \ + -Ipcsd/test \ + pcsd/test/test_all_suite.rb + test_result_ruby=$? + + if [ $test_result_python -ne 0 ]; then + return $test_result_python + fi + return $test_result_ruby +} + +remove_all_tests() { + # remove javascript testing files + rm -r -v $RPM_BUILD_ROOT%{_libdir}/%{pcsd_public_dir}/js/dev +} + +run_all_tests +remove_all_tests + +%posttrans +# Make sure the new version of the daemon is running. +# Also, make sure to start pcsd-ruby if it hasn't been started or even +# installed before. This is done by restarting pcsd.service. +%{_bindir}/systemctl daemon-reload +%{_bindir}/systemctl try-restart pcsd.service + + +%post +%systemd_post pcsd.service +%systemd_post pcsd-ruby.service + +%post -n %{pcs_snmp_pkg_name} +%systemd_post pcs_snmp_agent.service + +%preun +%systemd_preun pcsd.service +%systemd_preun pcsd-ruby.service + +%preun -n %{pcs_snmp_pkg_name} +%systemd_preun pcs_snmp_agent.service + +%postun +%systemd_postun_with_restart pcsd.service +%systemd_postun_with_restart pcsd-ruby.service + +%postun -n %{pcs_snmp_pkg_name} +%systemd_postun_with_restart pcs_snmp_agent.service + +%files +%doc CHANGELOG.md +%doc README.md +%doc tornado_README.rst +%doc dacite_README.md +%doc dateutil_README.rst +%doc dataclasses_README.rst +%license tornado_LICENSE +%license dacite_LICENSE +%license dateutil_LICENSE +%license dataclasses_LICENSE.txt +%license COPYING +# rugygem licenses +%license backports_LICENSE.txt +%license daemons_LICENSE +%license ethon_LICENSE +%license eventmachine_LICENSE +%license eventmachine_GNU +%license ffi_COPYING +%license ffi_LICENSE +%license ffi_LICENSE.SPECS +%license json_LICENSE +%license mustermann_LICENSE +%license open4_LICENSE +%license rack_MIT-LICENSE +%license rack-protection_License +%license rack-test_MIT-LICENSE.txt +%license ruby2_keywords_LICENSE +%license sinatra_LICENSE +%license tilt_COPYING +%{python3_sitelib}/* +%{_sbindir}/pcs +%{_sbindir}/pcsd +%{_libdir}/pcs/* +%{_libdir}/pcsd/* +%{_unitdir}/pcsd.service +%{_unitdir}/pcsd-ruby.service +%{_datadir}/bash-completion/completions/pcs +%{_sharedstatedir}/pcsd +%config(noreplace) %{_sysconfdir}/pam.d/pcsd +%dir %{_var}/log/pcsd +%config(noreplace) %{_sysconfdir}/logrotate.d/pcsd +%config(noreplace) %{_sysconfdir}/sysconfig/pcsd +%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/cfgsync_ctl +%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/known-hosts +%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.cookiesecret +%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.crt +%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.key +%ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_settings.conf +%ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_users.conf +%{_mandir}/man8/pcs.* +%{_mandir}/man8/pcsd.* +%exclude %{_libdir}/pcs/pcs_snmp_agent +%exclude %{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx* + + +%files -n %{pcs_snmp_pkg_name} +%{_libdir}/pcs/pcs_snmp_agent +%{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx* +%{_unitdir}/pcs_snmp_agent.service +%{_datadir}/snmp/mibs/PCMK-PCS*-MIB.txt +%{_mandir}/man8/pcs_snmp_agent.* +%config(noreplace) %{_sysconfdir}/sysconfig/pcs_snmp_agent +%doc CHANGELOG.md +%doc pyagentx_CONTRIBUTORS.txt +%doc pyagentx_README.md +%license COPYING +%license 
pyagentx_LICENSE.txt
+
+%changelog
+* Thu Mar 30 2023 Michal Pospisil - 0.10.15-4.el8_8.1
+- Fix displaying differences between configuration checkpoints in `pcs config checkpoint diff` command
+- Fix `pcs stonith update-scsi-devices` command which was broken since Pacemaker-2.1.5-rc1
+- Updated bundled rubygem rack
+- Resolves: rhbz#2180700 rhbz#2180706 rhbz#2180713 rhbz#2180974
+
+* Thu Feb 9 2023 Michal Pospisil - 0.10.15-4
+- Fixed enabling/disabling sbd when cluster is not running
+- Added BuildRequires: pam - needed for tier0 tests during build
+- Resolves: rhbz#2166243
+
+* Mon Jan 16 2023 Michal Pospisil - 0.10.15-3
+- Allow time values in stonith-watchdog-time property
+- Resource/stonith agent self-validation of instance attributes is now disabled by default, as many agents do not work with it properly
+- Updated bundled rubygems: rack, rack-protection, sinatra
+- Added license for ruby2_keywords
+- Resolves: rhbz#2158804 rhbz#2159455
+
+* Fri Dec 16 2022 Michal Pospisil - 0.10.15-2
+- Added warning when omitting validation of misconfigured resource
+- Fixed displaying of bool and integer values in `pcs resource config` command
+- Updated bundled rubygems: ethon, json, rack-protection, sinatra
+- Resolves: rhbz#2151166 rhbz#2151511
+
+* Wed Nov 23 2022 Michal Pospisil - 0.10.15-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated Python bundled dependency dateutil
+- Resolves: rhbz#2112002 rhbz#2112263 rhbz#2112291 rhbz#2132582
+
+* Tue Oct 25 2022 Miroslav Lisik - 0.10.14-6
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated rubygem bundled packages: mustermann, rack, rack-protection, rack-test, sinatra, tilt
+- Resolves: rhbz#1816852 rhbz#1918527 rhbz#2112267 rhbz#2112291
+
+* Wed Aug 17 2022 Miroslav Lisik - 0.10.14-4
+- Fixed enabling sbd from webui
+- Resolves: rhbz#2117650
+
+* Mon Aug 08 2022 Miroslav Lisik - 0.10.14-3
+- Fixed `pcs quorum device remove`
+- Resolves: rhbz#2115326
+
+* Thu Jul 28 2022 Miroslav Lisik - 0.10.14-2
+- Fixed booth ticket mode value to be case insensitive
+- Fixed booth sync check whether /etc/booth exists
+- Resolves: rhbz#1786964 rhbz#1791670
+
+* Fri Jun 24 2022 Miroslav Lisik - 0.10.14-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated bundled rubygems: rack
+- Resolves: rhbz#2059500 rhbz#2096787 rhbz#2097383 rhbz#2097391 rhbz#2097392 rhbz#2097393
+
+* Tue May 24 2022 Miroslav Lisik - 0.10.13-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Updated bundled rubygems: backports, daemons, ethon, ffi, json, ruby2_keywords, thin
+- Resolves: rhbz#1730232 rhbz#1786964 rhbz#1791661 rhbz#1791670 rhbz#1874624 rhbz#1909904 rhbz#1950551 rhbz#1954099 rhbz#2019894 rhbz#2023845 rhbz#2059500 rhbz#2064805 rhbz#2068456
+
+* Thu May 05 2022 Miroslav Lisik - 0.10.12-7
+- Updated bundled rubygems: sinatra, rack-protection
+- Resolves: rhbz#2081332
+
+* Fri Feb 11 2022 Miroslav Lisik - 0.10.12-6
+- Fixed processing agents not conforming to OCF schema
+- Resolves: rhbz#2050274
+
+* Tue Feb 01 2022 Miroslav Lisik - 0.10.12-5
+- Fixed snmp client
+- Resolves: rhbz#2047983
+
+* Tue Jan 25 2022 Miroslav Lisik - 0.10.12-4
+- Fixed cluster destroy in web ui
+- Fixed covscan issue in web ui
+- Resolves: rhbz#1970508
+
+* Fri Jan 14 2022 Miroslav Lisik - 0.10.12-3
+- Fixed 'pcs resource move --autodelete' command
+- Fixed removing of unavailable fence-scsi storage device
+- Fixed ocf validation of ocf linbit drbd agent
+- Fixed creating empty cib
+- Updated pcs-web-ui
+- Resolves: rhbz#1990784 rhbz#2022463 rhbz#2032997 rhbz#2036633
+
+* Wed Dec 15 2021 Miroslav Lisik - 0.10.12-2
+- Fixed rsc update cmd when unable to get agent metadata
+- Fixed enabling corosync-qdevice
+- Resolves: rhbz#1384485 rhbz#2028902
+
+* Thu Dec 02 2021 Miroslav Lisik - 0.10.12-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1552470 rhbz#1997011 rhbz#2017311 rhbz#2017312 rhbz#2024543 rhbz#2012128
+
+* Mon Nov 22 2021 Miroslav Lisik - 0.10.11-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Removed 'export PYTHONCOERCECLOCALE=0'
+- Resolves: rhbz#1384485 rhbz#1936833 rhbz#1968088 rhbz#1990784 rhbz#2012128
+
+* Mon Nov 01 2021 Miroslav Lisik - 0.10.11-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Enabled wui patching
+- Resolves: rhbz#1533090 rhbz#1970508 rhbz#1997011 rhbz#2003066 rhbz#2003068 rhbz#2012128
+
+* Fri Aug 27 2021 Miroslav Lisik - 0.10.10-2
+- Fixed creating resources with depth operation attribute
+- Resolves: rhbz#1998454
+
+* Thu Aug 19 2021 Ondrej Mular - 0.10.10-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1885293 rhbz#1847102 rhbz#1935594
+
+* Tue Aug 10 2021 Miroslav Lisik - 0.10.9-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1432097 rhbz#1847102 rhbz#1935594 rhbz#1984901
+
+* Tue Jul 20 2021 Miroslav Lisik - 0.10.8-4
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1759995 rhbz#1872378 rhbz#1935594
+
+* Thu Jul 08 2021 Miroslav Lisik - 0.10.8-3
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Gating changes
+- Resolves: rhbz#1678273 rhbz#1690419 rhbz#1750240 rhbz#1759995 rhbz#1872378 rhbz#1909901 rhbz#1935594
+
+* Thu Jun 10 2021 Miroslav Lisik - 0.10.8-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1285269 rhbz#1290830 rhbz#1720221 rhbz#1841019 rhbz#1854238 rhbz#1882291 rhbz#1885302 rhbz#1886342 rhbz#1896458 rhbz#1922996 rhbz#1927384 rhbz#1927394 rhbz#1930886 rhbz#1935594
+
+* Mon Feb 01 2021 Miroslav Lisik - 0.10.8-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Updated python bundled dependencies: dacite, dataclasses
+- Resolves: rhbz#1457314 rhbz#1619818 rhbz#1667066 rhbz#1762816 rhbz#1794062 rhbz#1845470 rhbz#1856397 rhbz#1877762 rhbz#1917286
+
+* Thu Dec 17 2020 Miroslav Lisik - 0.10.7-3
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Add BuildRequires: make
+- Resolves: rhbz#1667061 rhbz#1667066 rhbz#1774143 rhbz#1885658
+
+* Fri Nov 13 2020 Miroslav Lisik - 0.10.7-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Changed BuildRequires from git to git-core
+- Resolves: rhbz#1869399 rhbz#1885658 rhbz#1896379
+
+* Wed Oct 14 2020 Miroslav Lisik - 0.10.7-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added python bundled dependency dateutil
+- Fixed virtual bundle provides for ember, handlebars, jquery and jquery-ui
+- Resolves: rhbz#1222691 rhbz#1741056 rhbz#1851335 rhbz#1862966 rhbz#1869399 rhbz#1873691 rhbz#1875301 rhbz#1883445 rhbz#1885658 rhbz#1885841
+
+* Tue Aug 11 2020 Miroslav Lisik - 0.10.6-4
+- Fixed invalid CIB error caused by resource and operation defaults with mixed and-or rules
+- Updated pcs-web-ui
+- Resolves: rhbz#1867516
+
+* Thu Jul 16 2020 Miroslav Lisik - 0.10.6-3
+- Added CIB upgrade if user specifies on-fail=demote
+- Fixed rpmdiff issue with binary stripping checker
+- Fixed removing non-empty tag by removing tagged resource group or clone
+- Resolves: rhbz#1843079 rhbz#1857295
+
+* Thu Jun 25 2020 Miroslav Lisik - 0.10.6-2
+- Added resource and operation defaults that apply to specific resource/operation types
+- Added Requires/BuildRequires: python3-pyparsing
+- Added Requires: logrotate
+- Fixed resource and stonith documentation
+- Fixed rubygem licenses
+- Fixed update_times()
+- Updated rubygem rack to version 2.2.3
+- Removed BuildRequires execstack (it is not needed)
+- Resolves: rhbz#1805082 rhbz#1817547
+
+* Thu Jun 11 2020 Miroslav Lisik - 0.10.6-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added python bundled dependencies: dacite, dataclasses
+- Added new bundled rubygem ruby2_keywords
+- Updated rubygem bundled packages: backports, ethon, ffi, json, mustermann, rack, rack_protection, rack_test, sinatra, tilt
+- Updated pcs-web-ui
+- Updated test run, only tier0 tests are running during build
+- Removed BuildRequires needed for tier1 tests which were removed for build (pacemaker-cli, fence_agents-*, fence_virt, booth-site)
+- Resolves: rhbz#1387358 rhbz#1684676 rhbz#1722970 rhbz#1778672 rhbz#1782553 rhbz#1790460 rhbz#1805082 rhbz#1810017 rhbz#1817547 rhbz#1830552 rhbz#1832973 rhbz#1833114 rhbz#1833506 rhbz#1838853 rhbz#1839637
+
+* Fri Mar 20 2020 Miroslav Lisik - 0.10.4-6
+- Fixed communication between python and ruby daemons
+- Resolves: rhbz#1783106
+
+* Thu Feb 13 2020 Miroslav Lisik - 0.10.4-5
+- Fixed link to sbd man page from `sbd enable` doc
+- Fixed safe-disabling clones, groups, bundles
+- Fixed sinatra wrapper performance issue
+- Fixed detecting fence history support
+- Fixed cookie options
+- Updated hint for 'resource create ... master'
+- Updated gating tests execution, smoke tests run from upstream sources
+- Resolves: rhbz#1750427 rhbz#1781303 rhbz#1783106 rhbz#1793574
+
+* Mon Jan 20 2020 Tomas Jelinek - 0.10.4-4
+- Fix testsuite for pacemaker-2.0.3-4
+- Resolves: rhbz#1792946
+
+* Mon Dec 02 2019 Ivan Devat - 0.10.4-3
+- Added basic resource views in new webUI
+- Resolves: rhbz#1744060
+
+* Fri Nov 29 2019 Miroslav Lisik - 0.10.4-2
+- Added disaster recovery support
+- Fixed error message when cluster is not set up
+- Removed '-g' option from rubygem's cflags because it does not generate .gnu_debugdata and option '-K' for strip command was removed
+- Resolves: rhbz#1676431 rhbz#1743731
+
+* Thu Nov 28 2019 Miroslav Lisik - 0.10.4-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Add '-g' to rubygem's cflags
+- Resolves: rhbz#1743704 rhbz#1741586 rhbz#1750427
+
+* Mon Nov 18 2019 Miroslav Lisik - 0.10.3-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Do not strip .gnu_debugdata section from binaries
+- Resolves: rhbz#1631514 rhbz#1631519 rhbz#1734361 rhbz#1743704
+
+* Mon Oct 21 2019 Miroslav Lisik - 0.10.3-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1442116 rhbz#1631514 rhbz#1631519 rhbz#1673835 rhbz#1698763 rhbz#1728890 rhbz#1734361 rhbz#1743704 rhbz#1743735 rhbz#1744056
+
+* Tue Aug 13 2019 Tomas Jelinek - 0.10.2-4
+- Generate 256 bytes long corosync authkey so clusters can start when FIPS is enabled
+- Resolves: rhbz#1740218
+
+* Mon Jul 08 2019 Ivan Devat - 0.10.2-3
+- Options starting with - and -- are no longer ignored for non-root users
+- Resolves: rhbz#1725183
+
+* Thu Jun 27 2019 Ivan Devat - 0.10.2-2
+- Fixed crashes in the `pcs host auth` command
+- Command `pcs resource bundle reset` no longer accepts the container type
+- Fixed id conflict with current bundle configuration in `pcs resource bundle reset`
+- Resolves: rhbz#1657166 rhbz#1676957
+
+* Thu Jun 13 2019 Ivan Devat - 0.10.2-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added pam as required package
+- An alternative webUI rebased to latest upstream sources
+- Resolves: rhbz#1673907 rhbz#1679196 rhbz#1714663 rhbz#1717113
+
+* Tue May 21 2019 Ivan Devat - 0.10.1-9
+- Added git as required package in tests/tests.yml
+- Resolves: rhbz#1673907
+
+* Mon May 20 2019 Ivan Devat - 0.10.1-8
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added an alternative webUI
+- Resolves: rhbz#1673907 rhbz#1679197 rhbz#1667058
+
+* Mon May 06 2019 Ondrej Mular - 0.10.1-7
+- Enable upstream tests in gating
+- Update tilt ruby gem
+- Resolves: rhbz#1682129
+
+* Thu May 02 2019 Ondrej Mular - 0.10.1-6
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated Red Hat logo
+- Improved configuration files permissions in rpm
+- Removed multi_json rubygem
+- Excluded files required for building gems from rpm
+- Resolves: rhbz#1625386 rhbz#1653316 rhbz#1655055 rhbz#1657166 rhbz#1659144 rhbz#1660702 rhbz#1664828 rhbz#1665404 rhbz#1667040 rhbz#1667053 rhbz#1667058 rhbz#1667090 rhbz#1668223 rhbz#1673822 rhbz#1673825 rhbz#1674005 rhbz#1676945 rhbz#1676957 rhbz#1682129 rhbz#1687562 rhbz#1687965 rhbz#1698373 rhbz#1700543
+
+* Mon Mar 25 2019 Ondrej Mular - 0.10.1-5
+- Enable gating
+- Resolves: rhbz#1682129
+
+* Wed Jan 30 2019 Ivan Devat - 0.10.1-4
+- Fixed crash when using unsupported options in commands `pcs status` and `pcs config`
+- Resolves: rhbz#1668422
+
+* Mon Jan 14 2019 Ivan Devat - 0.10.1-3
+- Fixed configuration names of link options that pcs puts to corosync.conf during cluster setup
+- Fixed webUI issues in cluster setup
+- Command `pcs resource show` was brought back and marked as deprecated
+- Added dependency on libknet1-plugins-all
+- Resolves: rhbz#1661059 rhbz#1659051 rhbz#1664057
+
+* Thu Dec 13 2018 Ondrej Mular - 0.10.1-2
+- Fix documentation
+- Resolves: rhbz#1656953
+
+* Fri Nov 23 2018 Ivan Devát - 0.10.1-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Udp/udpu transport is marked as unsupported
+- Require pcmk-cluster-manager instead of pacemaker
+- Require platform-python-setuptools instead of python3-setuptools
+- Resolves: rhbz#1650109 rhbz#1183103 rhbz#1648942 rhbz#1650510 rhbz#1388398 rhbz#1651197 rhbz#1553736
+
+* Fri Oct 26 2018 Ondrej Mular - 0.10.0.alpha.7-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1553736 rhbz#1615891 rhbz#1436217 rhbz#1596050 rhbz#1554310 rhbz#1553718 rhbz#1638852 rhbz#1620190 rhbz#1158816 rhbz#1640477
+
+* Wed Oct 17 2018 Ondrej Mular - 0.10.0.alpha.6-3
+- Add dependency on rubygems package
+- Resolves: rhbz#1553736
+
+* Thu Oct 04 2018 Ondrej Mular - 0.10.0.alpha.6-2
+- Enable tests
+- Cleanup of unnecessary bundle ruby-gem files
+- Switch Require: python3 dependency to platform-python
+- Added required linker flags
+- Resolves: rhbz#1633613 rhbz#1630616
+
+* Wed Sep 19 2018 Ivan Devát - 0.10.0.alpha.6-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1553736
+
+* Thu Sep 13 2018 Ivan Devát - 0.10.0.alpha.5-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1553736 rhbz#1542288 rhbz#1619620
+
+* Thu Sep 13 2018 Ivan Devát - 0.10.0.alpha.4-2
+- Fixed symlinks correction for rubygem ffi
+- Resolves: rhbz#1553736
+
+* Wed Sep 12 2018 Ivan Devát - 0.10.0.alpha.4-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1549535 rhbz#1578955 rhbz#1158816 rhbz#1183103 rhbz#1536121 rhbz#1573344 rhbz#1619620 rhbz#1533866 rhbz#1578898 rhbz#1595829 rhbz#1605185 rhbz#1615420 rhbz#1566430 rhbz#1578891 rhbz#1591308 rhbz#1554953
+
+* Mon Aug 06 2018 Ivan Devát - 0.10.0.alpha.3-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Removed ruby dependencies (the dependencies are bundled instead)
+- Resolves: rhbz#1611990
+
+* Thu Aug 02 2018 Ivan Devát - 0.10.0.alpha.2-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Wed Jul 18 2018 Ivan Devát - 0.10.0.alpha.1-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Mon Apr 09 2018 Ondrej Mular - 0.9.164-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Fixed: CVE-2018-1086, CVE-2018-1079
+
+* Mon Feb 26 2018 Ivan Devát - 0.9.163-2
+- Fixed crash when adding a node to a cluster
+
+* Tue Feb 20 2018 Ivan Devát - 0.9.163-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Adapted for Rack 2 and Sinatra 2
+
+* Fri Feb 09 2018 Igor Gnatenko - 0.9.160-5
+- Escape macros in %%changelog
+
+* Thu Feb 08 2018 Fedora Release Engineering - 0.9.160-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
+
+* Sat Jan 20 2018 Björn Esser - 0.9.160-3
+- Rebuilt for switch to libxcrypt
+
+* Fri Jan 05 2018 Mamoru TASAKA - 0.9.160-2
+- F-28: rebuild for ruby25
+- Workaround for gem install option
+
+* Wed Oct 18 2017 Ondrej Mular - 0.9.160-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- All pcs tests are temporarily disabled because of issues in pacemaker.
+
+* Thu Sep 14 2017 Ondrej Mular - 0.9.159-4
+- Bundle rubygem-rack-protection which is being updated to 2.0.0 in Fedora.
+- Removed setuptools patch.
+- Disabled debuginfo subpackage.
+
+* Thu Aug 03 2017 Fedora Release Engineering - 0.9.159-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering - 0.9.159-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Wed Jul 12 2017 Ondrej Mular - 0.9.159-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Tue May 23 2017 Tomas Jelinek - 0.9.156-3
+- Fixed python locales issue preventing build-time tests from passing
+- Bundle rubygem-tilt which is being retired from Fedora
+
+* Thu Mar 23 2017 Tomas Jelinek - 0.9.156-2
+- Fixed cross-site scripting (XSS) vulnerability in web UI (CVE-2017-2661)
+- Re-added support for clufter as it is now available for Python 3
+
+* Wed Feb 22 2017 Tomas Jelinek - 0.9.156-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Sat Feb 11 2017 Fedora Release Engineering - 0.9.155-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Thu Jan 12 2017 Vít Ondruch - 0.9.155-2
+- Rebuilt for https://fedoraproject.org/wiki/Changes/Ruby_2.4
+
+* Wed Jan 04 2017 Adam Williamson - 0.9.155-1
+- Latest release 0.9.155
+- Fix tests with Python 3.6 and lxml 3.7
+- Package the license as license, not doc
+- Use -f param for rm when wiping test directories as they are nested now
+
+* Mon Dec 19 2016 Miro Hrončok
+- Rebuild for Python 3.6
+
+* Tue Oct 18 2016 Tomas Jelinek - 0.9.154-2
+- Fixed upgrading from pcs-0.9.150
+
+* Thu Sep 22 2016 Tomas Jelinek - 0.9.154-1
+- Re-synced to upstream sources
+- Spec file cleanup and fixes
+
+* Tue Jul 19 2016 Fedora Release Engineering - 0.9.150-2
+- https://fedoraproject.org/wiki/Changes/Automatic_Provides_for_Python_RPM_Packages
+
+* Mon Apr 11 2016 Tomas Jelinek - 0.9.150-1
+- Re-synced to upstream sources
+- Make pcs depend on python3
+- Spec file cleanup
+
+* Tue Feb 23 2016 Tomas Jelinek - 0.9.149-2
+- Fixed rubygems issues which prevented pcsd from starting
+- Added missing python-lxml dependency
+
+* Thu Feb 18 2016 Tomas Jelinek - 0.9.149-1
+- Re-synced to upstream sources
+- Security fix for CVE-2016-0720, CVE-2016-0721
+- Fixed rubygems issues which prevented pcsd from starting
+- Rubygems built with RELRO
+- Spec file cleanup
+- Fixed multilib .pyc/.pyo issue
+
+* Thu Feb 04 2016 Fedora Release Engineering - 0.9.144-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Tue Jan 12 2016 Vít Ondruch - 0.9.144-2
+- Rebuilt for https://fedoraproject.org/wiki/Changes/Ruby_2.3
+
+* Fri Sep 18 2015 Tomas Jelinek - 0.9.144-1
+- Re-synced to upstream sources
+
+* Tue Jun 23 2015 Tomas Jelinek - 0.9.141-2
+- Added requirement for psmisc for killall
+
+* Tue Jun 23 2015 Tomas Jelinek - 0.9.141-1
+- Re-synced to upstream sources
+
+* Thu Jun 18 2015 Fedora Release Engineering - 0.9.140-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Fri Jun 05 2015 Tomas Jelinek - 0.9.140-1
+- Re-synced to upstream sources
+
+* Fri May 22 2015 Tomas Jelinek - 0.9.139-4
+- Fix for CVE-2015-1848, CVE-2015-3983 (sessions not signed)
+
+* Thu Mar 26 2015 Tomas Jelinek - 0.9.139-3
+- Add BuildRequires: systemd (rhbz#1206253)
+
+* Fri Feb 27 2015 Tomas Jelinek - 0.9.139-2
+- Reflect clufter inclusion (rhbz#1180723)
+
+* Thu Feb 19 2015 Tomas Jelinek - 0.9.139-1
+- Re-synced to upstream sources
+
+* Sat Jan 17 2015 Mamoru TASAKA - 0.9.115-5
+- Rebuild for https://fedoraproject.org/wiki/Changes/Ruby_2.2
+
+* Sun Aug 17 2014 Fedora Release Engineering - 0.9.115-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Fri Jun 06 2014 Fedora Release Engineering - 0.9.115-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Tue May 06 2014 Tomas Jelinek - 0.9.115-2
+- Rebuild to fix ruby dependencies
+
+* Mon Apr 21 2014 Chris Feist - 0.9.115-1
+- Re-synced to upstream sources
+
+* Fri Dec 13 2013 Chris Feist - 0.9.102-1
+- Re-synced to upstream sources
+
+* Wed Jun 19 2013 Chris Feist - 0.9.48-1
+- Rebuild with upstream sources
+
+* Thu Jun 13 2013 Chris Feist - 0.9.44-5
+- Added fixes for building rpam with ruby-2.0.0
+
+* Mon Jun 03 2013 Chris Feist - 0.9.44-4
+- Rebuild with upstream sources
+
+* Tue May 07 2013 Chris Feist - 0.9.41-2
+- Resynced to upstream sources
+
+* Fri Apr 19 2013 Chris Feist - 0.9.39-1
+- Fixed gem building
+- Re-synced to upstream sources
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-4
+- Don't try to build gems at all
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-3
+- Removed all gems from build, will need to find pam package in the future
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-2
+- Removed duplicate libraries already present in fedora
+
+* Mon Mar 18 2013 Chris Feist - 0.9.36-1
+- Resynced to latest upstream
+
+* Mon Mar 11 2013 Chris Feist - 0.9.33-1
+- Resynced to latest upstream
+- pcsd has been moved to /usr/lib to fix /usr/local packaging issues
+
+* Thu Feb 21 2013 Chris Feist - 0.9.32-1
+- Resynced to latest version of pcs/pcsd
+
+* Mon Nov 05 2012 Chris Feist - 0.9.27-3
+- Build on all archs
+
+* Thu Oct 25 2012 Chris Feist - 0.9.27-2
+- Resync to latest version of pcs
+- Added pcsd daemon
+
+* Mon Oct 08 2012 Chris Feist - 0.9.26-1
+- Resync to latest version of pcs
+
+* Thu Sep 20 2012 Chris Feist - 0.9.24-1
+- Resync to latest version of pcs
+
+* Thu Sep 20 2012 Chris Feist - 0.9.23-1
+- Resync to latest version of pcs
+
+* Wed Sep 12 2012 Chris Feist - 0.9.22-1
+- Resync to latest version of pcs
+
+* Thu Sep 06 2012 Chris Feist - 0.9.19-1
+- Resync to latest version of pcs
+
+* Tue Aug 07 2012 Chris Feist - 0.9.12-1
+- Resync to latest version of pcs
+
+* Fri Jul 20 2012 Fedora Release Engineering - 0.9.3.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu May 24 2012 Chris Feist - 0.9.4-1
+- Resync to latest version of pcs
+- Move cluster creation options to cluster sub command.
+
+* Mon May 07 2012 Chris Feist - 0.9.3.1-1
+- Resync to latest version of pcs which includes fixes to work with F17.
+
+* Mon Mar 19 2012 Chris Feist - 0.9.2.4-1
+- Resynced to latest version of pcs
+
+* Mon Jan 23 2012 Chris Feist - 0.9.1-1
+- Updated BuildRequires and %%doc section for fedora
+
+* Fri Jan 20 2012 Chris Feist - 0.9.0-2
+- Updated spec file for fedora-specific changes
+
+* Mon Jan 16 2012 Chris Feist - 0.9.0-1
+- Initial Build