diff --git a/cts/cli/regression.acls.exp b/cts/cli/regression.acls.exp
index be91b93455a..17ef44847af 100644
--- a/cts/cli/regression.acls.exp
+++ b/cts/cli/regression.acls.exp
@@ -1040,9 +1040,9 @@ crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: Creation of scaffolding with id="dummy-meta_attributes" is implicitly allowed
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
@@ -1128,9 +1128,9 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
@@ -1214,9 +1214,9 @@ Stopped
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
@@ -1298,9 +1298,9 @@ Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
diff --git a/cts/cli/regression.crm_attribute.exp b/cts/cli/regression.crm_attribute.exp
index b2005095ba8..5d581153046 100644
--- a/cts/cli/regression.crm_attribute.exp
+++ b/cts/cli/regression.crm_attribute.exp
@@ -1333,15 +1333,15 @@ Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
@@ -1601,9 +1601,9 @@ scope=status name=fail-count-foo value=3
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp
index 9859fe316dd..63280a1896f 100644
--- a/cts/cli/regression.crm_resource.exp
+++ b/cts/cli/regression.crm_resource.exp
@@ -855,9 +855,9 @@ crm_resource: --class, --agent, and --provider can only be used with --validate
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
@@ -886,9 +886,9 @@ Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attribute
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
@@ -917,9 +917,9 @@ false
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
@@ -946,9 +946,6 @@ Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create another resource meta attribute (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
@@ -957,38 +954,50 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= End test: Create another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute (XML)
=#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running (XML)
=#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
Attribute 'nonexistent' not found for 'dummy'
@@ -996,9 +1005,9 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#=
@@ -1025,12 +1034,12 @@ Attribute 'nonexistent' not found for 'dummy'
=#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
Attribute 'nonexistent' not found for 'dummy'
@@ -1060,9 +1069,9 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get an existent attribute from a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
ocf
=#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#=
@@ -1089,11 +1098,14 @@ ocf
=#=#=#= End test: Get an existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get an existent attribute from a resource element
=#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#=
@@ -1120,11 +1132,14 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#=
@@ -1151,11 +1166,14 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#=
@@ -1182,11 +1200,14 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#=
@@ -1213,9 +1234,9 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#=
@@ -1242,9 +1263,9 @@ Set attribute: name=description value=test_description
=#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element
=#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#=
@@ -1271,9 +1292,9 @@ Set attribute: name=description value=test_description
=#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element
=#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#=
@@ -1300,9 +1321,9 @@ Deleted attribute: description
=#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element
=#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#=
@@ -1329,9 +1350,9 @@ Deleted attribute: description
=#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element
=#=#=#= Begin test: Create a resource attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
@@ -1361,9 +1382,9 @@ Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attrib
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
@@ -1394,14 +1415,17 @@ Full List of Resources:
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List the configured resources (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= Current cib after: List the configured resources (XML) =#=#=#=
@@ -1431,24 +1455,24 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
=#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources (XML)
=#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - Implicitly list the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
@@ -1460,9 +1484,6 @@ Resource XML:
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
@@ -1474,14 +1495,20 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
]]>
-  <status code="0" message="OK"/>
+  <status code="0" message="OK">
+    <errors>
+      <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
+      <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
+      <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+    </errors>
+  </status>
=#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource (XML)
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
@@ -1512,9 +1539,9 @@ To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Node 'i.do.not.exist' not found
Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
@@ -1730,6 +1757,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
+warning: More than one node entry has name 'node1'
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
@@ -2187,6 +2215,9 @@ Revised Cluster Status:
=#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1 (XML)
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
@@ -3084,6 +3115,9 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
=#=#=#= Begin test: Remove expired constraints =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
@@ -3124,6 +3158,9 @@ Removing constraint: cli-ban-dummy-on-node1
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: sleep - Remove expired constraints
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index c98b485ea26..3b70f24163f 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -66,15 +66,20 @@ Call failed: Update does not conform to the configured schema
=#=#=#= End test: Set invalid rsc_order first-action value (schema validation disabled) - OK (0) =#=#=#=
* Passed: cibadmin - Set invalid rsc_order first-action value (schema validation disabled)
=#=#=#= Begin test: Run crm_simulate with invalid rsc_order first-action (schema validation disabled) =#=#=#=
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-invert_action warning: Unknown action 'break' specified in order constraint
-invert_action warning: Unknown action 'break' specified in order constraint
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+warning: Unknown action 'break' specified in order constraint
+warning: Unknown action 'break' specified in order constraint
+warning: Cannot invert constraint 'ord_1-2' (please specify inverse manually)
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* dummy1 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/797.summary b/cts/scheduler/summary/797.summary
index d31572ba3db..3618f487d6b 100644
--- a/cts/scheduler/summary/797.summary
+++ b/cts/scheduler/summary/797.summary
@@ -14,6 +14,7 @@ Current cluster status:
* child_DoFencing:1 (stonith:ssh): Started c001n02
* child_DoFencing:2 (stonith:ssh): Started c001n03
* child_DoFencing:3 (stonith:ssh): Stopped
+warning: Node c001n08 is unclean but cannot be fenced
Transition Summary:
* Stop DcIPaddr ( c001n03 ) due to no quorum
diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary
index 3890a02730a..83b96772759 100644
--- a/cts/scheduler/summary/bug-1822.summary
+++ b/cts/scheduler/summary/bug-1822.summary
@@ -1,3 +1,4 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ process1a process2b ]
@@ -10,6 +11,9 @@ Current cluster status:
* Resource Group: ms-sf_group:1:
* promotable_Stateful:1 (ocf:heartbeat:Dummy-statful): Promoted process1a
* promotable_procdctl:1 (ocf:heartbeat:procdctl): Promoted process1a
+error: Resetting 'on-fail' for promotable_Stateful:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for promotable_Stateful:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for promotable_procdctl:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop promotable_Stateful:1 ( Promoted process1a ) due to node availability
diff --git a/cts/scheduler/summary/bug-cl-5212.summary b/cts/scheduler/summary/bug-cl-5212.summary
index 7cbe97558b6..496c0649892 100644
--- a/cts/scheduler/summary/bug-cl-5212.summary
+++ b/cts/scheduler/summary/bug-cl-5212.summary
@@ -19,6 +19,8 @@ Current cluster status:
* prmPingd (ocf:pacemaker:ping): Started srv02 (UNCLEAN)
* prmPingd (ocf:pacemaker:ping): Started srv01 (UNCLEAN)
* Started: [ srv03 ]
+warning: Node srv01 is unclean but cannot be fenced
+warning: Node srv02 is unclean but cannot be fenced
Transition Summary:
* Stop prmStonith1-1 ( srv02 ) blocked
diff --git a/cts/scheduler/summary/bug-lf-1852.summary b/cts/scheduler/summary/bug-lf-1852.summary
index 26c73e166a5..bc8239c7637 100644
--- a/cts/scheduler/summary/bug-lf-1852.summary
+++ b/cts/scheduler/summary/bug-lf-1852.summary
@@ -1,3 +1,8 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
@@ -25,6 +30,11 @@ Executing Cluster Transition:
* Resource action: drbd0:0 notify on mysql-02
* Resource action: drbd0:1 notify on mysql-01
* Pseudo action: ms-drbd0_confirmed-post_notify_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/bug-lf-2171.summary b/cts/scheduler/summary/bug-lf-2171.summary
index 5117608a20c..b1bd1b99c2d 100644
--- a/cts/scheduler/summary/bug-lf-2171.summary
+++ b/cts/scheduler/summary/bug-lf-2171.summary
@@ -1,3 +1,5 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
+warning: Support for the 'collocated' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
diff --git a/cts/scheduler/summary/bug-lf-2606.summary b/cts/scheduler/summary/bug-lf-2606.summary
index e0b7ebf0e68..9831385949b 100644
--- a/cts/scheduler/summary/bug-lf-2606.summary
+++ b/cts/scheduler/summary/bug-lf-2606.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Clone Set: ms3 [rsc3] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node2 'rsc1 failed there'
diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary
index c3f8f5b3af0..37f327fed94 100644
--- a/cts/scheduler/summary/bug-pm-11.summary
+++ b/cts/scheduler/summary/bug-pm-11.summary
@@ -1,3 +1,4 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary
index 8defffe8d68..9f82560b3fb 100644
--- a/cts/scheduler/summary/bug-pm-12.summary
+++ b/cts/scheduler/summary/bug-pm-12.summary
@@ -1,3 +1,4 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
diff --git a/cts/scheduler/summary/bug-rh-1097457.summary b/cts/scheduler/summary/bug-rh-1097457.summary
index f68a509609b..0b0f14e1222 100644
--- a/cts/scheduler/summary/bug-rh-1097457.summary
+++ b/cts/scheduler/summary/bug-rh-1097457.summary
@@ -32,6 +32,10 @@ Current cluster status:
* FAKE4-IP (ocf:heartbeat:IPaddr2): Started lamaVM2
* Clone Set: FAKE6-clone [FAKE6]:
* Started: [ lamaVM1 lamaVM2 lamaVM3 ]
+warning: Invalid ordering constraint between FSlun4 and VM3
+warning: Invalid ordering constraint between FSlun3 and VM2
+warning: Invalid ordering constraint between FSlun2 and VM1
+warning: Invalid ordering constraint between FSlun1 and VM1
Transition Summary:
* Fence (reboot) lamaVM2 (resource: VM2) 'guest is unclean'
diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary
index 945f3c81da5..fd60a855d42 100644
--- a/cts/scheduler/summary/cancel-behind-moving-remote.summary
+++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary
@@ -1,3 +1,99 @@
+warning: compute-0 requires fencing but fencing is disabled
+warning: compute-1 requires fencing but fencing is disabled
+warning: galera-bundle requires fencing but fencing is disabled
+warning: galera-bundle-master requires fencing but fencing is disabled
+warning: galera:0 requires fencing but fencing is disabled
+warning: galera:1 requires fencing but fencing is disabled
+warning: galera:2 requires fencing but fencing is disabled
+warning: galera-bundle-podman-0 requires fencing but fencing is disabled
+warning: galera-bundle-0 requires fencing but fencing is disabled
+warning: galera-bundle-podman-1 requires fencing but fencing is disabled
+warning: galera-bundle-1 requires fencing but fencing is disabled
+warning: galera-bundle-podman-2 requires fencing but fencing is disabled
+warning: galera-bundle-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle requires fencing but fencing is disabled
+warning: rabbitmq-bundle-clone requires fencing but fencing is disabled
+warning: rabbitmq:0 requires fencing but fencing is disabled
+warning: rabbitmq:1 requires fencing but fencing is disabled
+warning: rabbitmq:2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-2 requires fencing but fencing is disabled
+warning: redis-bundle requires fencing but fencing is disabled
+warning: redis-bundle-master requires fencing but fencing is disabled
+warning: redis:0 requires fencing but fencing is disabled
+warning: redis:1 requires fencing but fencing is disabled
+warning: redis:2 requires fencing but fencing is disabled
+warning: redis-bundle-podman-0 requires fencing but fencing is disabled
+warning: redis-bundle-0 requires fencing but fencing is disabled
+warning: redis-bundle-podman-1 requires fencing but fencing is disabled
+warning: redis-bundle-1 requires fencing but fencing is disabled
+warning: redis-bundle-podman-2 requires fencing but fencing is disabled
+warning: redis-bundle-2 requires fencing but fencing is disabled
+warning: ip-192.168.24.150 requires fencing but fencing is disabled
+warning: ip-10.0.0.150 requires fencing but fencing is disabled
+warning: ip-172.17.1.151 requires fencing but fencing is disabled
+warning: ip-172.17.1.150 requires fencing but fencing is disabled
+warning: ip-172.17.3.150 requires fencing but fencing is disabled
+warning: ip-172.17.4.150 requires fencing but fencing is disabled
+warning: haproxy-bundle requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-0 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-1 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-master requires fencing but fencing is disabled
+warning: ovndb_servers:0 requires fencing but fencing is disabled
+warning: ovndb_servers:1 requires fencing but fencing is disabled
+warning: ovndb_servers:2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-2 requires fencing but fencing is disabled
+warning: ip-172.17.1.87 requires fencing but fencing is disabled
+warning: stonith-fence_compute-fence-nova requires fencing but fencing is disabled
+warning: compute-unfence-trigger-clone requires fencing but fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:0 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:1 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:2 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:3 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:4 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:5 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:6 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:7 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:8 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:9 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:10 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:11 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:12 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:13 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:14 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:15 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:16 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:17 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:18 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:19 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:20 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:21 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:22 to "quorum" because fencing is disabled
+warning: nova-evacuate requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400aa1373 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400dc23e0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540040bb56 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400addd38 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540078fb07 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400ea59b0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400066e50 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e1534e requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540060dbba requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e018b6 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400c87cdb requires fencing but fencing is disabled
+warning: openstack-cinder-volume requires fencing but fencing is disabled
+warning: openstack-cinder-volume-podman-0 requires fencing but fencing is disabled
Using the original execution date of: 2021-02-15 01:40:51Z
Current cluster status:
* Node List:
@@ -130,6 +226,102 @@ Executing Cluster Transition:
* Pseudo action: ovn-dbs-bundle_promoted_0
* Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-2
* Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0
+warning: compute-0 requires fencing but fencing is disabled
+warning: compute-1 requires fencing but fencing is disabled
+warning: galera-bundle requires fencing but fencing is disabled
+warning: galera-bundle-master requires fencing but fencing is disabled
+warning: galera:0 requires fencing but fencing is disabled
+warning: galera:1 requires fencing but fencing is disabled
+warning: galera:2 requires fencing but fencing is disabled
+warning: galera-bundle-podman-0 requires fencing but fencing is disabled
+warning: galera-bundle-0 requires fencing but fencing is disabled
+warning: galera-bundle-podman-1 requires fencing but fencing is disabled
+warning: galera-bundle-1 requires fencing but fencing is disabled
+warning: galera-bundle-podman-2 requires fencing but fencing is disabled
+warning: galera-bundle-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle requires fencing but fencing is disabled
+warning: rabbitmq-bundle-clone requires fencing but fencing is disabled
+warning: rabbitmq:0 requires fencing but fencing is disabled
+warning: rabbitmq:1 requires fencing but fencing is disabled
+warning: rabbitmq:2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-2 requires fencing but fencing is disabled
+warning: redis-bundle requires fencing but fencing is disabled
+warning: redis-bundle-master requires fencing but fencing is disabled
+warning: redis:0 requires fencing but fencing is disabled
+warning: redis:1 requires fencing but fencing is disabled
+warning: redis:2 requires fencing but fencing is disabled
+warning: redis-bundle-podman-0 requires fencing but fencing is disabled
+warning: redis-bundle-0 requires fencing but fencing is disabled
+warning: redis-bundle-podman-1 requires fencing but fencing is disabled
+warning: redis-bundle-1 requires fencing but fencing is disabled
+warning: redis-bundle-podman-2 requires fencing but fencing is disabled
+warning: redis-bundle-2 requires fencing but fencing is disabled
+warning: ip-192.168.24.150 requires fencing but fencing is disabled
+warning: ip-10.0.0.150 requires fencing but fencing is disabled
+warning: ip-172.17.1.151 requires fencing but fencing is disabled
+warning: ip-172.17.1.150 requires fencing but fencing is disabled
+warning: ip-172.17.3.150 requires fencing but fencing is disabled
+warning: ip-172.17.4.150 requires fencing but fencing is disabled
+warning: haproxy-bundle requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-0 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-1 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-master requires fencing but fencing is disabled
+warning: ovndb_servers:0 requires fencing but fencing is disabled
+warning: ovndb_servers:1 requires fencing but fencing is disabled
+warning: ovndb_servers:2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-2 requires fencing but fencing is disabled
+warning: ip-172.17.1.87 requires fencing but fencing is disabled
+warning: stonith-fence_compute-fence-nova requires fencing but fencing is disabled
+warning: compute-unfence-trigger-clone requires fencing but fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:0 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:1 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:2 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:3 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:4 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:5 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:6 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:7 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:8 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:9 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:10 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:11 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:12 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:13 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:14 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:15 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:16 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:17 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:18 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:19 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:20 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:21 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:22 to "quorum" because fencing is disabled
+warning: nova-evacuate requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400aa1373 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400dc23e0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540040bb56 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400addd38 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540078fb07 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400ea59b0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400066e50 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e1534e requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540060dbba requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e018b6 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400c87cdb requires fencing but fencing is disabled
+warning: openstack-cinder-volume requires fencing but fencing is disabled
+warning: openstack-cinder-volume-podman-0 requires fencing but fencing is disabled
Using the original execution date of: 2021-02-15 01:40:51Z
Revised Cluster Status:
diff --git a/cts/scheduler/summary/clone-anon-failcount.summary b/cts/scheduler/summary/clone-anon-failcount.summary
index 8d4f369e3e1..2b39b0b6874 100644
--- a/cts/scheduler/summary/clone-anon-failcount.summary
+++ b/cts/scheduler/summary/clone-anon-failcount.summary
@@ -36,6 +36,11 @@ Current cluster status:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv01 srv02 srv03 srv04 ]
+error: Resetting 'on-fail' for UmDummy01 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:2 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:3 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Move UmVIPcheck ( srv01 -> srv04 )
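Note: the 'on-fail' reset errors above fire when a stop operation is itself configured with on-fail="stop", which is not a permitted escalation for stop failures. A minimal sketch of the offending shape (op id and agent details illustrative, not copied from this test's CIB):

  <primitive id="clnG3dummy02" class="ocf" provider="pacemaker" type="Dummy">
    <operations>
      <!-- on-fail="stop" is invalid for a stop action, so the scheduler
           resets it to the default (fence when fencing is enabled,
           otherwise block) -->
      <op id="clnG3dummy02-stop" name="stop" interval="0s" on-fail="stop"/>
    </operations>
  </primitive>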
diff --git a/cts/scheduler/summary/clone-anon-probe-1.summary b/cts/scheduler/summary/clone-anon-probe-1.summary
index 51cf914a004..5539042553c 100644
--- a/cts/scheduler/summary/clone-anon-probe-1.summary
+++ b/cts/scheduler/summary/clone-anon-probe-1.summary
@@ -1,3 +1,6 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
@@ -17,6 +20,9 @@ Executing Cluster Transition:
* Resource action: drbd0:0 start on mysql-01
* Resource action: drbd0:1 start on mysql-02
* Pseudo action: ms-drbd0_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
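Note: the '#default' deprecation warnings above are emitted once per place a meta-attribute is explicitly set to the special value '#default'. A sketch of the construct, assuming a target-role nvpair on the ms-drbd0 clone (ids and agent illustrative):

  <clone id="ms-drbd0">
    <meta_attributes id="ms-drbd0-meta">
      <!-- Deprecated: omit the nvpair entirely to get the default value -->
      <nvpair id="ms-drbd0-target-role" name="target-role" value="#default"/>
    </meta_attributes>
    <primitive id="drbd0" class="ocf" provider="pacemaker" type="Stateful"/>
  </clone>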
diff --git a/cts/scheduler/summary/clone-anon-probe-2.summary b/cts/scheduler/summary/clone-anon-probe-2.summary
index 79a2fb8785e..aa37f7a828f 100644
--- a/cts/scheduler/summary/clone-anon-probe-2.summary
+++ b/cts/scheduler/summary/clone-anon-probe-2.summary
@@ -1,3 +1,6 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
@@ -14,6 +17,9 @@ Executing Cluster Transition:
* Pseudo action: ms-drbd0_start_0
* Resource action: drbd0:1 start on mysql-01
* Pseudo action: ms-drbd0_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/clone-require-all-1.summary b/cts/scheduler/summary/clone-require-all-1.summary
index 7037eb8caa1..cf4274b2fb0 100644
--- a/cts/scheduler/summary/clone-require-all-1.summary
+++ b/cts/scheduler/summary/clone-require-all-1.summary
@@ -9,6 +9,7 @@ Current cluster status:
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start B:0 ( rhel7-auto3 )
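Note: the require-all warning added to each clone-require-all-* summary refers to an ordering constraint of this shape; the suggested replacement moves the "a subset of instances is enough" semantics onto the clone itself (sketch; constraint id illustrative):

  <!-- Deprecated: require-all="false" on the ordering constraint -->
  <rsc_order id="order-A-then-B" first="A-clone" then="B-clone"
             require-all="false"/>

  <!-- Suggested replacement: clone-min on the first clone, plus a plain
       ordering constraint -->
  <clone id="A-clone">
    <meta_attributes id="A-clone-meta">
      <nvpair id="A-clone-min" name="clone-min" value="1"/>
    </meta_attributes>
    <primitive id="A" class="ocf" provider="pacemaker" type="Dummy"/>
  </clone>
  <rsc_order id="order-A-then-B" first="A-clone" then="B-clone"/>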
diff --git a/cts/scheduler/summary/clone-require-all-2.summary b/cts/scheduler/summary/clone-require-all-2.summary
index 72d6f243f65..676810d22db 100644
--- a/cts/scheduler/summary/clone-require-all-2.summary
+++ b/cts/scheduler/summary/clone-require-all-2.summary
@@ -11,6 +11,7 @@ Current cluster status:
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto3 )
diff --git a/cts/scheduler/summary/clone-require-all-3.summary b/cts/scheduler/summary/clone-require-all-3.summary
index b828bffce28..485595407a3 100644
--- a/cts/scheduler/summary/clone-require-all-3.summary
+++ b/cts/scheduler/summary/clone-require-all-3.summary
@@ -12,6 +12,7 @@ Current cluster status:
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto3 )
diff --git a/cts/scheduler/summary/clone-require-all-4.summary b/cts/scheduler/summary/clone-require-all-4.summary
index ebd7b6bb467..2632aebbec4 100644
--- a/cts/scheduler/summary/clone-require-all-4.summary
+++ b/cts/scheduler/summary/clone-require-all-4.summary
@@ -11,6 +11,7 @@ Current cluster status:
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto2 )
diff --git a/cts/scheduler/summary/clone-require-all-5.summary b/cts/scheduler/summary/clone-require-all-5.summary
index b47049e8831..cae968b1ebb 100644
--- a/cts/scheduler/summary/clone-require-all-5.summary
+++ b/cts/scheduler/summary/clone-require-all-5.summary
@@ -9,6 +9,7 @@ Current cluster status:
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:2 ( rhel7-auto3 )
diff --git a/cts/scheduler/summary/clone-require-all-6.summary b/cts/scheduler/summary/clone-require-all-6.summary
index 5bae20c7285..ef1a99b2d30 100644
--- a/cts/scheduler/summary/clone-require-all-6.summary
+++ b/cts/scheduler/summary/clone-require-all-6.summary
@@ -10,6 +10,7 @@ Current cluster status:
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Stop A:0 ( rhel7-auto1 ) due to node availability
diff --git a/cts/scheduler/summary/clone-require-all-7.summary b/cts/scheduler/summary/clone-require-all-7.summary
index f0f2820c26d..ac4af30a846 100644
--- a/cts/scheduler/summary/clone-require-all-7.summary
+++ b/cts/scheduler/summary/clone-require-all-7.summary
@@ -8,6 +8,7 @@ Current cluster status:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto2 )
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
index 646bfa3ef5c..50da4cc2166 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
@@ -11,6 +11,7 @@ Current cluster status:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto3 )
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
index e40230cb527..bbd012cec27 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
@@ -11,6 +11,7 @@ Current cluster status:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto4 )
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
index a22bf455b6a..85a03a0b378 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
@@ -14,6 +14,7 @@ Current cluster status:
* Clone Set: C-clone [C]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Stopped: [ rhel7-auto3 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move A:0 ( rhel7-auto4 -> rhel7-auto3 )
diff --git a/cts/scheduler/summary/coloc-clone-stays-active.summary b/cts/scheduler/summary/coloc-clone-stays-active.summary
index cb212e1cde9..9e35a5d13a7 100644
--- a/cts/scheduler/summary/coloc-clone-stays-active.summary
+++ b/cts/scheduler/summary/coloc-clone-stays-active.summary
@@ -1,3 +1,4 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
9 of 87 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
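Note: the 'ordered' deprecation warning above (and in group-dependents and promoted-group below) comes from a group meta-attribute like the following; newer configurations express the same intent with a resource set (sketch; resource names illustrative):

  <!-- Deprecated: 'ordered' as a group meta-attribute -->
  <group id="grp">
    <meta_attributes id="grp-meta">
      <nvpair id="grp-ordered" name="ordered" value="false"/>
    </meta_attributes>
    <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
    <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
  </group>

  <!-- Suggested direction: express any required ordering explicitly with
       a resource set instead of the group meta-attribute -->
  <rsc_order id="grp-order">
    <resource_set id="grp-set" sequential="true">
      <resource_ref id="rsc1"/>
      <resource_ref id="rsc2"/>
    </resource_set>
  </rsc_order>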
diff --git a/cts/scheduler/summary/colocate-primitive-with-clone.summary b/cts/scheduler/summary/colocate-primitive-with-clone.summary
index e884428ee40..881ac31fb20 100644
--- a/cts/scheduler/summary/colocate-primitive-with-clone.summary
+++ b/cts/scheduler/summary/colocate-primitive-with-clone.summary
@@ -52,6 +52,9 @@ Current cluster status:
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
+error: Resetting 'on-fail' for clnG3dummy02:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:2 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Start UmVIPcheck ( srv04 )
diff --git a/cts/scheduler/summary/colocation-influence.summary b/cts/scheduler/summary/colocation-influence.summary
index e240003d929..2cd66b670df 100644
--- a/cts/scheduler/summary/colocation-influence.summary
+++ b/cts/scheduler/summary/colocation-influence.summary
@@ -52,6 +52,7 @@ Current cluster status:
* Promoted: [ rhel7-4 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
* Stopped: [ rhel7-5 ]
+error: Constraint 'colocation-rsc1a-rsc1b-INFINITY' has invalid value for influence (using default)
Transition Summary:
* Move rsc1a ( rhel7-2 -> rhel7-3 )
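Note: the influence error above means the constraint named in the message carries a non-boolean influence value, so the scheduler falls back to the default. Sketch of a corrected constraint (resource names taken from the constraint id; score assumed):

  <!-- 'influence' must be true or false -->
  <rsc_colocation id="colocation-rsc1a-rsc1b-INFINITY" rsc="rsc1a"
                  with-rsc="rsc1b" score="INFINITY" influence="true"/>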
diff --git a/cts/scheduler/summary/container-is-remote-node.summary b/cts/scheduler/summary/container-is-remote-node.summary
index c022e896f44..a33c9ed7db1 100644
--- a/cts/scheduler/summary/container-is-remote-node.summary
+++ b/cts/scheduler/summary/container-is-remote-node.summary
@@ -24,6 +24,9 @@ Current cluster status:
* FSdata1 (ocf:heartbeat:Filesystem): Started RNVM1
* RES1-IP (ocf:heartbeat:IPaddr2): Started RNVM1
* res-rsyslog (ocf:heartbeat:rsyslog.test): Started RNVM1
+warning: Invalid ordering constraint between gfs2-lv_1_1:0 and VM1
+warning: Invalid ordering constraint between clvmd:0 and VM1
+warning: Invalid ordering constraint between dlm:0 and VM1
Transition Summary:
diff --git a/cts/scheduler/summary/expire-non-blocked-failure.summary b/cts/scheduler/summary/expire-non-blocked-failure.summary
index 0ca6c540468..92ba7c8a82f 100644
--- a/cts/scheduler/summary/expire-non-blocked-failure.summary
+++ b/cts/scheduler/summary/expire-non-blocked-failure.summary
@@ -1,3 +1,4 @@
+warning: Ignoring failure timeout (1m) for rsc1 because it conflicts with on-fail=block
0 of 3 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
@@ -13,6 +14,7 @@ Transition Summary:
Executing Cluster Transition:
* Cluster action: clear_failcount for rsc2 on node1
+warning: Ignoring failure timeout (1m) for rsc1 because it conflicts with on-fail=block
Revised Cluster Status:
* Node List:
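Note: the conflict being warned about pairs a failure-timeout with on-fail="block": expiring the failure would silently unblock a resource that block intentionally froze, so the timeout is ignored. Sketch of the conflicting pieces (op id and agent illustrative; resource name and timeout from the message):

  <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
    <meta_attributes id="rsc1-meta">
      <!-- Ignored while the monitor below uses on-fail="block" -->
      <nvpair id="rsc1-failure-timeout" name="failure-timeout" value="1m"/>
    </meta_attributes>
    <operations>
      <op id="rsc1-monitor" name="monitor" interval="10s" on-fail="block"/>
    </operations>
  </primitive>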
diff --git a/cts/scheduler/summary/failcount-block.summary b/cts/scheduler/summary/failcount-block.summary
index 646f76b400b..179497942da 100644
--- a/cts/scheduler/summary/failcount-block.summary
+++ b/cts/scheduler/summary/failcount-block.summary
@@ -1,3 +1,6 @@
+error: Ignoring invalid node_state entry without id
+warning: Ignoring failure timeout (10s) for rsc_pcmk-2 because it conflicts with on-fail=block
+warning: Ignoring failure timeout (10s) for rsc_pcmk-4 because it conflicts with on-fail=block
0 of 5 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
@@ -25,6 +28,8 @@ Executing Cluster Transition:
* Cluster action: clear_failcount for rsc_pcmk-5 on pcmk-1
* Resource action: rsc_pcmk-3 monitor=5000 on pcmk-1
* Resource action: rsc_pcmk-4 monitor=5000 on pcmk-1
+error: Ignoring invalid node_state entry without id
+warning: Ignoring failure timeout (10s) for rsc_pcmk-2 because it conflicts with on-fail=block
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/force-anon-clone-max.summary b/cts/scheduler/summary/force-anon-clone-max.summary
index d2320e9c571..2886410ab6e 100644
--- a/cts/scheduler/summary/force-anon-clone-max.summary
+++ b/cts/scheduler/summary/force-anon-clone-max.summary
@@ -1,3 +1,10 @@
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:2 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:1 can be used only as anonymous clones
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
@@ -59,6 +66,14 @@ Executing Cluster Transition:
* Resource action: dummy2:1 monitor=5000 on node2
* Resource action: lsb3:1 monitor=5000 on node2
* Pseudo action: clone3_running_0
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:2 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:2 can be used only as anonymous clones
Revised Cluster Status:
* Node List:
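Note: globally-unique clones need an agent that can distinguish its instances via parameters; lsb init scripts take none, so the flag is ignored and the clone is treated as anonymous. Sketch (script name illustrative):

  <clone id="clone1">
    <meta_attributes id="clone1-meta">
      <!-- Ignored for lsb resources -->
      <nvpair id="clone1-unique" name="globally-unique" value="true"/>
    </meta_attributes>
    <primitive id="lsb1" class="lsb" type="example-init-script"/>
  </clone>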
diff --git a/cts/scheduler/summary/group-dependents.summary b/cts/scheduler/summary/group-dependents.summary
index 33652555477..a8ce9c2915f 100644
--- a/cts/scheduler/summary/group-dependents.summary
+++ b/cts/scheduler/summary/group-dependents.summary
@@ -1,3 +1,4 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ asttest1 asttest2 ]
diff --git a/cts/scheduler/summary/guest-host-not-fenceable.summary b/cts/scheduler/summary/guest-host-not-fenceable.summary
index 9e3b5db405c..8fe32428bcd 100644
--- a/cts/scheduler/summary/guest-host-not-fenceable.summary
+++ b/cts/scheduler/summary/guest-host-not-fenceable.summary
@@ -18,6 +18,8 @@ Current cluster status:
* stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN)
+warning: Node node2 is unclean but cannot be fenced
+warning: Node node3 is unclean but cannot be fenced
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( node1 ) due to no quorum
diff --git a/cts/scheduler/summary/intervals.summary b/cts/scheduler/summary/intervals.summary
index f6dc2e4b7fb..b4ebad3f69c 100644
--- a/cts/scheduler/summary/intervals.summary
+++ b/cts/scheduler/summary/intervals.summary
@@ -13,6 +13,8 @@ Current cluster status:
* rsc4 (ocf:pacemaker:Dummy): FAILED rhel7-5 (blocked)
* rsc5 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc6 (ocf:pacemaker:Dummy): Started rhel7-2
+error: Operation rsc3-monitor-interval-P40S is duplicate of rsc3-monitor-interval-40s (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-interval-P40S is duplicate of rsc3-monitor-interval-40s (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc2 ( rhel7-3 )
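Note: the duplicate-operation errors above are a units trap: "P40S" is an ISO 8601-style duration that parses to the same 40-second interval as "40s", so the second monitor collides with the first. Sketch using the op ids from the message:

  <operations>
    <op id="rsc3-monitor-interval-40s" name="monitor" interval="40s"/>
    <!-- Same name and same effective interval as above, so it is ignored -->
    <op id="rsc3-monitor-interval-P40S" name="monitor" interval="P40S"/>
  </operations>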
diff --git a/cts/scheduler/summary/leftover-pending-monitor.summary b/cts/scheduler/summary/leftover-pending-monitor.summary
index 04b03f29d85..d5e7e39f107 100644
--- a/cts/scheduler/summary/leftover-pending-monitor.summary
+++ b/cts/scheduler/summary/leftover-pending-monitor.summary
@@ -9,6 +9,7 @@ Current cluster status:
* Clone Set: promotable-1 [stateful-1] (promotable):
* Promoted: [ node-3 ]
* Stopped: [ node-1 node-2 ]
+warning: Support for the Master role is deprecated and will be removed in a future release. Use Promoted instead.
Transition Summary:
* Start stateful-1:1 ( node-1 ) due to unrunnable stateful-1:0 monitor (blocked)
diff --git a/cts/scheduler/summary/novell-239079.summary b/cts/scheduler/summary/novell-239079.summary
index 0afbba57970..401ccd11d76 100644
--- a/cts/scheduler/summary/novell-239079.summary
+++ b/cts/scheduler/summary/novell-239079.summary
@@ -1,3 +1,7 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
@@ -22,6 +26,11 @@ Executing Cluster Transition:
* Resource action: drbd0:0 notify on xen-1
* Resource action: drbd0:1 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/novell-239082.summary b/cts/scheduler/summary/novell-239082.summary
index 051c0220e01..5d27e93076b 100644
--- a/cts/scheduler/summary/novell-239082.summary
+++ b/cts/scheduler/summary/novell-239082.summary
@@ -1,3 +1,8 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
@@ -7,6 +12,8 @@ Current cluster status:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-1 ]
* Unpromoted: [ xen-2 ]
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
Transition Summary:
* Move fs_1 ( xen-1 -> xen-2 )
@@ -47,6 +54,11 @@ Executing Cluster Transition:
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0
* Resource action: fs_1 start on xen-2
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/novell-239087.summary b/cts/scheduler/summary/novell-239087.summary
index 0c158d3873d..df2db7abfb9 100644
--- a/cts/scheduler/summary/novell-239087.summary
+++ b/cts/scheduler/summary/novell-239087.summary
@@ -1,3 +1,7 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
@@ -7,6 +11,12 @@ Current cluster status:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-1 ]
* Unpromoted: [ xen-2 ]
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Transition Summary:
diff --git a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
index 58c572d199e..13eeacbffeb 100644
--- a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
+++ b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
@@ -132,6 +132,7 @@ Current cluster status:
* mrg-07 (ocf:pacemaker:remote): Started rdo7-node1
* mrg-08 (ocf:pacemaker:remote): Started rdo7-node2
* mrg-09 (ocf:pacemaker:remote): Started rdo7-node3
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start keystone:0 ( rdo7-node2 )
diff --git a/cts/scheduler/summary/order-serialize-set.summary b/cts/scheduler/summary/order-serialize-set.summary
index b0b759b51ce..54fd7b18d44 100644
--- a/cts/scheduler/summary/order-serialize-set.summary
+++ b/cts/scheduler/summary/order-serialize-set.summary
@@ -14,6 +14,8 @@ Current cluster status:
* edge (ocf:heartbeat:Xen): Started xen-a
* base (ocf:heartbeat:Xen): Started xen-a
* Email_Alerting (ocf:heartbeat:MailTo): Started xen-b
+warning: Ignoring symmetrical for 'serialize-xen' because not valid with kind of 'Serialize'
+warning: Ignoring symmetrical for 'xen-set' because not valid with kind of 'Serialize'
Transition Summary:
* Restart xen-a-fencing ( xen-b ) due to resource definition change
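Note: symmetrical only makes sense for orderings that can be reversed; with kind="Serialize" it is ignored, whether set on a plain constraint or on a set (hence the two warnings, for 'serialize-xen' and 'xen-set'). Sketch using the constraint id from the warning (resource names assumed from the status above):

  <rsc_order id="serialize-xen" kind="Serialize" symmetrical="true"
             first="edge" then="base"/>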
diff --git a/cts/scheduler/summary/order-wrong-kind.summary b/cts/scheduler/summary/order-wrong-kind.summary
index 903a25c7239..48c34546218 100644
--- a/cts/scheduler/summary/order-wrong-kind.summary
+++ b/cts/scheduler/summary/order-wrong-kind.summary
@@ -1,4 +1,7 @@
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Current cluster status:
* Node List:
* Online: [ node1 ]
@@ -8,6 +11,10 @@ Current cluster status:
* rsc2 (ocf:heartbeat:apache): Started node1
* rsc3 (ocf:heartbeat:apache): Stopped
* rsc4 (ocf:heartbeat:apache): Started node1
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
Transition Summary:
* Start rsc1 ( node1 )
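Note: two separate problems are reported in this file. validate-with="none" disables schema validation entirely; the supported path is to move the CIB to a current schema (for example with cibadmin --upgrade) rather than leaving validation off. Separately, constraint order1 uses an unknown kind and is reset to Mandatory. Sketch of the bad constraint (resource names from the status above):

  <!-- 'foo' is not a valid kind; valid values are Mandatory, Optional,
       and Serialize -->
  <rsc_order id="order1" first="rsc1" then="rsc2" kind="foo"/>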
diff --git a/cts/scheduler/summary/ordered-set-natural.summary b/cts/scheduler/summary/ordered-set-natural.summary
index b944e0d6f40..bf96e250f78 100644
--- a/cts/scheduler/summary/ordered-set-natural.summary
+++ b/cts/scheduler/summary/ordered-set-natural.summary
@@ -21,6 +21,7 @@ Current cluster status:
* dummy3-5 (ocf:heartbeat:Dummy): Stopped
* dummy2-4 (ocf:heartbeat:Dummy): Stopped
* dummy2-5 (ocf:heartbeat:Dummy): Stopped
+warning: Support for 'ordering' other than 'group' in resource_set (such as pcs_rsc_set_dummy3-1_dummy3-2_dummy3-3_dummy3-4_dummy3-5-1) is deprecated and will be removed in a future release
Transition Summary:
* Start dummy1-1 ( node1 ) due to no quorum (blocked)
diff --git a/cts/scheduler/summary/priority-fencing-delay.summary b/cts/scheduler/summary/priority-fencing-delay.summary
index ce5aff2562f..0c6bc702f20 100644
--- a/cts/scheduler/summary/priority-fencing-delay.summary
+++ b/cts/scheduler/summary/priority-fencing-delay.summary
@@ -24,6 +24,12 @@ Current cluster status:
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-01_kiff-02
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
diff --git a/cts/scheduler/summary/promoted-9.summary b/cts/scheduler/summary/promoted-9.summary
index 69dab46a2ce..7be9cf7c72c 100644
--- a/cts/scheduler/summary/promoted-9.summary
+++ b/cts/scheduler/summary/promoted-9.summary
@@ -29,6 +29,8 @@ Current cluster status:
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
+warning: Node sgi2 is unclean but cannot be fenced
+warning: Node test02 is unclean but cannot be fenced
Transition Summary:
* Start DcIPaddr ( va1 ) due to no quorum (blocked)
diff --git a/cts/scheduler/summary/promoted-asymmetrical-order.summary b/cts/scheduler/summary/promoted-asymmetrical-order.summary
index 591ff18a04f..1702272f720 100644
--- a/cts/scheduler/summary/promoted-asymmetrical-order.summary
+++ b/cts/scheduler/summary/promoted-asymmetrical-order.summary
@@ -11,6 +11,22 @@ Current cluster status:
* Clone Set: ms2 [rsc2] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
diff --git a/cts/scheduler/summary/promoted-failed-demote-2.summary b/cts/scheduler/summary/promoted-failed-demote-2.summary
index 3f317fabeaa..02f3ee7e670 100644
--- a/cts/scheduler/summary/promoted-failed-demote-2.summary
+++ b/cts/scheduler/summary/promoted-failed-demote-2.summary
@@ -10,6 +10,9 @@ Current cluster status:
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
+error: Resetting 'on-fail' for stateful-1:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-1:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-2:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
diff --git a/cts/scheduler/summary/promoted-failed-demote.summary b/cts/scheduler/summary/promoted-failed-demote.summary
index 70b3e1b2cff..e9f1a1baa92 100644
--- a/cts/scheduler/summary/promoted-failed-demote.summary
+++ b/cts/scheduler/summary/promoted-failed-demote.summary
@@ -10,6 +10,9 @@ Current cluster status:
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
+error: Resetting 'on-fail' for stateful-1:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-1:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-2:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
diff --git a/cts/scheduler/summary/promoted-group.summary b/cts/scheduler/summary/promoted-group.summary
index 44b380c25b7..03a7f79afaf 100644
--- a/cts/scheduler/summary/promoted-group.summary
+++ b/cts/scheduler/summary/promoted-group.summary
@@ -1,3 +1,5 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
+error: Resetting 'on-fail' for monitor of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Current cluster status:
* Node List:
* Online: [ rh44-1 rh44-2 ]
@@ -10,6 +12,8 @@ Current cluster status:
* promotable_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2
* Resource Group: grp_ms_sf:1:
* promotable_Stateful:1 (ocf:heartbeat:Stateful): Unpromoted rh44-1
+error: Resetting 'on-fail' for stop of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
+error: Resetting 'on-fail' for start of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Transition Summary:
* Promote promotable_Stateful:1 ( Unpromoted -> Promoted rh44-1 )
@@ -22,6 +26,7 @@ Executing Cluster Transition:
* Pseudo action: grp_ms_sf:1_promoted_0
* Resource action: promotable_Stateful:1 monitor=6000 on rh44-1
* Pseudo action: ms-sf_promoted_0
+error: Resetting 'on-fail' for monitor of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/promoted-notify.summary b/cts/scheduler/summary/promoted-notify.summary
index f0fb04027d8..098e945dce2 100644
--- a/cts/scheduler/summary/promoted-notify.summary
+++ b/cts/scheduler/summary/promoted-notify.summary
@@ -6,6 +6,18 @@ Current cluster status:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: fake-master [fake] (promotable):
* Unpromoted: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote fake:0 ( Unpromoted -> Promoted rhel7-auto1 )
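Note: the duplicate errors above show that role-specific monitors do not get separate interval namespaces: two monitors with the same name and interval collide even when one is role="Promoted" and the other role="Unpromoted". The usual fix is a distinct interval per role. Sketch using the op ids from the message:

  <operations>
    <op id="fake-monitor-interval-10-role-Promoted" name="monitor"
        interval="10" role="Promoted"/>
    <!-- Duplicate of the op above; give this role its own interval,
         e.g. interval="11" -->
    <op id="fake-monitor-interval-10-role-Unpromoted" name="monitor"
        interval="10" role="Unpromoted"/>
  </operations>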
diff --git a/cts/scheduler/summary/promoted-ordering.summary b/cts/scheduler/summary/promoted-ordering.summary
index 0ef1bd89e86..84158af223a 100644
--- a/cts/scheduler/summary/promoted-ordering.summary
+++ b/cts/scheduler/summary/promoted-ordering.summary
@@ -1,3 +1,5 @@
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:1 can be used only as anonymous clones
Current cluster status:
* Node List:
* Online: [ webcluster01 ]
@@ -21,6 +23,14 @@ Current cluster status:
* Clone Set: ms_drbd_mysql [drbd_mysql] (promotable):
* Stopped: [ webcluster01 webcluster02 ]
* fs_mysql (ocf:heartbeat:Filesystem): Stopped
+warning: No resource, template, or tag named 'drbd_mysql'
+error: Ignoring constraint 'colo_drbd_mysql_ip0' because 'drbd_mysql' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_mysql'
+error: Ignoring constraint 'colo_drbd_mysql_ip1' because 'drbd_mysql' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_www'
+error: Ignoring constraint 'colo_drbd_www_ip0' because 'drbd_www' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_www'
+error: Ignoring constraint 'colo_drbd_www_ip1' because 'drbd_www' is not a valid resource or tag
Transition Summary:
* Start extip_1 ( webcluster01 )
@@ -68,6 +78,8 @@ Executing Cluster Transition:
* Pseudo action: ms_drbd_www_confirmed-post_notify_running_0
* Resource action: drbd_mysql:0 notify on webcluster01
* Pseudo action: ms_drbd_mysql_confirmed-post_notify_running_0
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:1 can be used only as anonymous clones
Revised Cluster Status:
* Node List:
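Note: the ignored-constraint errors above pair with the "No resource, template, or tag" warnings: each colocation names 'drbd_mysql' or 'drbd_www', which do not resolve to configured resources (the configured resources are the clone wrappers). Under that reading, a sketch of the fix is to reference the clone id instead (partner resource and score assumed):

  <!-- Reference the clone id, not the instance inside it -->
  <rsc_colocation id="colo_drbd_mysql_ip0" rsc="ms_drbd_mysql"
                  with-rsc="extip_1" score="INFINITY"/>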
diff --git a/cts/scheduler/summary/promoted-with-blocked.summary b/cts/scheduler/summary/promoted-with-blocked.summary
index 82177a9a6a2..c38b1ce49fd 100644
--- a/cts/scheduler/summary/promoted-with-blocked.summary
+++ b/cts/scheduler/summary/promoted-with-blocked.summary
@@ -10,6 +10,7 @@ Current cluster status:
* Clone Set: rsc2-clone [rsc2] (promotable):
* Stopped: [ node1 node2 node3 node4 node5 ]
* rsc3 (ocf:pacemaker:Dummy): Stopped (disabled)
+warning: Support for the Master role is deprecated and will be removed in a future release. Use Promoted instead.
Transition Summary:
* Start rsc1 ( node2 ) due to unrunnable rsc3 start (blocked)
diff --git a/cts/scheduler/summary/quorum-4.summary b/cts/scheduler/summary/quorum-4.summary
index 3d0c88e81f6..0132adc92bb 100644
--- a/cts/scheduler/summary/quorum-4.summary
+++ b/cts/scheduler/summary/quorum-4.summary
@@ -6,6 +6,8 @@ Current cluster status:
* Full List of Resources:
* child_DoFencing (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing ( hadev2 )
diff --git a/cts/scheduler/summary/quorum-5.summary b/cts/scheduler/summary/quorum-5.summary
index 1e7abf38ee9..407dad631d6 100644
--- a/cts/scheduler/summary/quorum-5.summary
+++ b/cts/scheduler/summary/quorum-5.summary
@@ -8,6 +8,8 @@ Current cluster status:
* Resource Group: group1:
* child_DoFencing_1 (stonith:ssh): Stopped
* child_DoFencing_2 (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing_1 ( hadev2 )
diff --git a/cts/scheduler/summary/quorum-6.summary b/cts/scheduler/summary/quorum-6.summary
index 321410d5b5e..04f41803b4e 100644
--- a/cts/scheduler/summary/quorum-6.summary
+++ b/cts/scheduler/summary/quorum-6.summary
@@ -14,6 +14,8 @@ Current cluster status:
* child_DoFencing:5 (stonith:ssh): Stopped
* child_DoFencing:6 (stonith:ssh): Stopped
* child_DoFencing:7 (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing:0 ( hadev2 )
diff --git a/cts/scheduler/summary/rec-node-10.summary b/cts/scheduler/summary/rec-node-10.summary
index a77b2a14eed..2df3f57eb8d 100644
--- a/cts/scheduler/summary/rec-node-10.summary
+++ b/cts/scheduler/summary/rec-node-10.summary
@@ -7,6 +7,7 @@ Current cluster status:
* stonith-1 (stonith:dummy): Stopped
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
+warning: Node node1 is unclean but cannot be fenced
Transition Summary:
* Start stonith-1 ( node2 ) due to no quorum (blocked)
diff --git a/cts/scheduler/summary/rec-node-5.summary b/cts/scheduler/summary/rec-node-5.summary
index a4128ca167d..9ed88580a6c 100644
--- a/cts/scheduler/summary/rec-node-5.summary
+++ b/cts/scheduler/summary/rec-node-5.summary
@@ -6,6 +6,8 @@ Current cluster status:
* Full List of Resources:
* rsc1 (ocf:heartbeat:apache): Stopped
* rsc2 (ocf:heartbeat:apache): Stopped
+warning: Node node1 is unclean but cannot be fenced
+warning: Resource functionality and data integrity cannot be guaranteed (configure, enable, and test fencing to correct this)
Transition Summary:
* Start rsc1 ( node2 )
diff --git a/cts/scheduler/summary/rec-node-8.summary b/cts/scheduler/summary/rec-node-8.summary
index 226e333dfc1..c20908be574 100644
--- a/cts/scheduler/summary/rec-node-8.summary
+++ b/cts/scheduler/summary/rec-node-8.summary
@@ -8,6 +8,7 @@ Current cluster status:
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc3 (ocf:heartbeat:apache): Stopped
+warning: Node node1 is unclean but cannot be fenced
Transition Summary:
* Start stonith-1 ( node2 ) due to quorum freeze (blocked)
diff --git a/cts/scheduler/summary/remote-orphaned2.summary b/cts/scheduler/summary/remote-orphaned2.summary
index 9b0091467b0..f9e0c032425 100644
--- a/cts/scheduler/summary/remote-orphaned2.summary
+++ b/cts/scheduler/summary/remote-orphaned2.summary
@@ -1,3 +1,6 @@
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* RemoteNode mrg-02: UNCLEAN (offline)
@@ -10,6 +13,12 @@ Current cluster status:
* libvirtd-compute (systemd:libvirtd): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* nova-compute (systemd:openstack-nova-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
+warning: Node mrg-02 is unclean but cannot be fenced
+warning: Node mrg-03 is unclean but cannot be fenced
+warning: Node mrg-04 is unclean but cannot be fenced
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Transition Summary:
diff --git a/cts/scheduler/summary/rsc-discovery-per-node.summary b/cts/scheduler/summary/rsc-discovery-per-node.summary
index 3c34ced4ff1..150799f577b 100644
--- a/cts/scheduler/summary/rsc-discovery-per-node.summary
+++ b/cts/scheduler/summary/rsc-discovery-per-node.summary
@@ -1,3 +1,6 @@
+warning: Ignoring resource-discovery-enabled attribute for 18node1 because disabling resource discovery is not allowed for cluster nodes
+warning: Ignoring resource-discovery-enabled attribute for 18node2 because disabling resource discovery is not allowed for cluster nodes
+warning: Support for the resource-discovery-enabled node attribute is deprecated and will be removed (and behave as 'true') in a future release.
Current cluster status:
* Node List:
* Online: [ 18builder 18node1 18node2 18node3 18node4 ]
@@ -110,6 +113,8 @@ Executing Cluster Transition:
* Resource action: FAKECLONE2:4 monitor=60000 on remote1
* Resource action: FAKECLONE2:5 monitor=60000 on 18builder
* Resource action: FAKE4 monitor=60000 on remote1
+warning: Ignoring resource-discovery-enabled attribute for 18node1 because disabling resource discovery is not allowed for cluster nodes
+warning: Ignoring resource-discovery-enabled attribute for 18node2 because disabling resource discovery is not allowed for cluster nodes
Revised Cluster Status:
* Node List:
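Note: disabling resource discovery is only honored for Pacemaker Remote nodes; on full cluster nodes the attribute is ignored, and its use is deprecated overall. Sketch of the ignored attribute on 18node1 (node and nvpair ids illustrative):

  <node id="1" uname="18node1">
    <instance_attributes id="nodes-1">
      <!-- Ignored for cluster nodes; deprecated everywhere -->
      <nvpair id="nodes-1-discovery" name="resource-discovery-enabled"
              value="false"/>
    </instance_attributes>
  </node>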
diff --git a/cts/scheduler/summary/stop-failure-no-fencing.summary b/cts/scheduler/summary/stop-failure-no-fencing.summary
index bb164fd5be4..9d7cd66ff56 100644
--- a/cts/scheduler/summary/stop-failure-no-fencing.summary
+++ b/cts/scheduler/summary/stop-failure-no-fencing.summary
@@ -1,3 +1,6 @@
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
0 of 9 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
@@ -10,6 +13,11 @@ Current cluster status:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Transition Summary:
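Note: the STONITH errors in this file (and in remote-orphaned2 above) are the scheduler refusing to start resources in a cluster that has neither a fencing device nor an explicit opt-out. The opt-out, appropriate only when no shared data is at risk, is the cluster property shown in this sketch (nvpair id illustrative):

  <crm_config>
    <cluster_property_set id="cib-bootstrap-options">
      <nvpair id="opt-stonith-enabled" name="stonith-enabled" value="false"/>
    </cluster_property_set>
  </crm_config>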
diff --git a/cts/scheduler/summary/stop-failure-no-quorum.summary b/cts/scheduler/summary/stop-failure-no-quorum.summary
index e76827ddfc2..a516415c28a 100644
--- a/cts/scheduler/summary/stop-failure-no-quorum.summary
+++ b/cts/scheduler/summary/stop-failure-no-quorum.summary
@@ -16,6 +16,8 @@ Current cluster status:
* Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
Transition Summary:
* Fence (reboot) pcmk-2 'clvm:0 failed there'
diff --git a/cts/scheduler/summary/stop-failure-with-fencing.summary b/cts/scheduler/summary/stop-failure-with-fencing.summary
index 437708ef2e2..9048b95ba6a 100644
--- a/cts/scheduler/summary/stop-failure-with-fencing.summary
+++ b/cts/scheduler/summary/stop-failure-with-fencing.summary
@@ -13,6 +13,8 @@ Current cluster status:
* Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
Transition Summary:
* Fence (reboot) pcmk-2 'clvm:0 failed there'
diff --git a/cts/scheduler/summary/target-1.summary b/cts/scheduler/summary/target-1.summary
index edc1daf32b5..0c9572b366c 100644
--- a/cts/scheduler/summary/target-1.summary
+++ b/cts/scheduler/summary/target-1.summary
@@ -1,3 +1,5 @@
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
@@ -29,6 +31,11 @@ Executing Cluster Transition:
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n02
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/target-2.summary b/cts/scheduler/summary/target-2.summary
index a6194ae01ef..c39a2aa6b29 100644
--- a/cts/scheduler/summary/target-2.summary
+++ b/cts/scheduler/summary/target-2.summary
@@ -1,3 +1,7 @@
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
@@ -31,6 +35,16 @@ Executing Cluster Transition:
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n02
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
Revised Cluster Status:
* Node List:
diff --git a/cts/scheduler/summary/template-coloc-3.summary b/cts/scheduler/summary/template-coloc-3.summary
index a7ff63e8dec..b26ffea9b19 100644
--- a/cts/scheduler/summary/template-coloc-3.summary
+++ b/cts/scheduler/summary/template-coloc-3.summary
@@ -9,6 +9,7 @@ Current cluster status:
* rsc4 (ocf:pacemaker:Dummy): Stopped
* rsc5 (ocf:pacemaker:Dummy): Stopped
* rsc6 (ocf:pacemaker:Dummy): Stopped
+error: Ignoring constraint 'template1-colo-template2' because two templates or tags cannot be colocated
Transition Summary:
* Start rsc1 ( node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-1.summary b/cts/scheduler/summary/ticket-promoted-1.summary
index 6bc13645dfb..5bd56c510a1 100644
--- a/cts/scheduler/summary/ticket-promoted-1.summary
+++ b/cts/scheduler/summary/ticket-promoted-1.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-10.summary b/cts/scheduler/summary/ticket-promoted-10.summary
index eab3d91008b..c9133fe985b 100644
--- a/cts/scheduler/summary/ticket-promoted-10.summary
+++ b/cts/scheduler/summary/ticket-promoted-10.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-promoted-11.summary b/cts/scheduler/summary/ticket-promoted-11.summary
index 381603997eb..9bd1f55eb90 100644
--- a/cts/scheduler/summary/ticket-promoted-11.summary
+++ b/cts/scheduler/summary/ticket-promoted-11.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-12.summary b/cts/scheduler/summary/ticket-promoted-12.summary
index b51c277faf7..68768df73bb 100644
--- a/cts/scheduler/summary/ticket-promoted-12.summary
+++ b/cts/scheduler/summary/ticket-promoted-12.summary
@@ -7,6 +7,10 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-13.summary b/cts/scheduler/summary/ticket-promoted-13.summary
index 6b5d14a64dd..821da141782 100644
--- a/cts/scheduler/summary/ticket-promoted-13.summary
+++ b/cts/scheduler/summary/ticket-promoted-13.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-14.summary b/cts/scheduler/summary/ticket-promoted-14.summary
index ee8912b2e97..31c16b5b4de 100644
--- a/cts/scheduler/summary/ticket-promoted-14.summary
+++ b/cts/scheduler/summary/ticket-promoted-14.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-promoted-15.summary b/cts/scheduler/summary/ticket-promoted-15.summary
index ee8912b2e97..31c16b5b4de 100644
--- a/cts/scheduler/summary/ticket-promoted-15.summary
+++ b/cts/scheduler/summary/ticket-promoted-15.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-promoted-16.summary b/cts/scheduler/summary/ticket-promoted-16.summary
index 851e54ebd50..a71fb4a7f8f 100644
--- a/cts/scheduler/summary/ticket-promoted-16.summary
+++ b/cts/scheduler/summary/ticket-promoted-16.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-17.summary b/cts/scheduler/summary/ticket-promoted-17.summary
index ee25f92c4e2..3ff57a331ec 100644
--- a/cts/scheduler/summary/ticket-promoted-17.summary
+++ b/cts/scheduler/summary/ticket-promoted-17.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-18.summary b/cts/scheduler/summary/ticket-promoted-18.summary
index ee25f92c4e2..3ff57a331ec 100644
--- a/cts/scheduler/summary/ticket-promoted-18.summary
+++ b/cts/scheduler/summary/ticket-promoted-18.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-19.summary b/cts/scheduler/summary/ticket-promoted-19.summary
index 851e54ebd50..a71fb4a7f8f 100644
--- a/cts/scheduler/summary/ticket-promoted-19.summary
+++ b/cts/scheduler/summary/ticket-promoted-19.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-2.summary b/cts/scheduler/summary/ticket-promoted-2.summary
index dc67f96156b..1c5370a6800 100644
--- a/cts/scheduler/summary/ticket-promoted-2.summary
+++ b/cts/scheduler/summary/ticket-promoted-2.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-promoted-20.summary b/cts/scheduler/summary/ticket-promoted-20.summary
index ee25f92c4e2..3ff57a331ec 100644
--- a/cts/scheduler/summary/ticket-promoted-20.summary
+++ b/cts/scheduler/summary/ticket-promoted-20.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-21.summary b/cts/scheduler/summary/ticket-promoted-21.summary
index f116a2eea0b..c4b3a55fb41 100644
--- a/cts/scheduler/summary/ticket-promoted-21.summary
+++ b/cts/scheduler/summary/ticket-promoted-21.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node1 'deadman ticket was lost'
diff --git a/cts/scheduler/summary/ticket-promoted-22.summary b/cts/scheduler/summary/ticket-promoted-22.summary
index 851e54ebd50..a71fb4a7f8f 100644
--- a/cts/scheduler/summary/ticket-promoted-22.summary
+++ b/cts/scheduler/summary/ticket-promoted-22.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-23.summary b/cts/scheduler/summary/ticket-promoted-23.summary
index ee25f92c4e2..3ff57a331ec 100644
--- a/cts/scheduler/summary/ticket-promoted-23.summary
+++ b/cts/scheduler/summary/ticket-promoted-23.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-24.summary b/cts/scheduler/summary/ticket-promoted-24.summary
index b51c277faf7..68768df73bb 100644
--- a/cts/scheduler/summary/ticket-promoted-24.summary
+++ b/cts/scheduler/summary/ticket-promoted-24.summary
@@ -7,6 +7,10 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-promoted-3.summary b/cts/scheduler/summary/ticket-promoted-3.summary
index ee8912b2e97..31c16b5b4de 100644
--- a/cts/scheduler/summary/ticket-promoted-3.summary
+++ b/cts/scheduler/summary/ticket-promoted-3.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-promoted-4.summary b/cts/scheduler/summary/ticket-promoted-4.summary
index eab3d91008b..c9133fe985b 100644
--- a/cts/scheduler/summary/ticket-promoted-4.summary
+++ b/cts/scheduler/summary/ticket-promoted-4.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-promoted-5.summary b/cts/scheduler/summary/ticket-promoted-5.summary
index 381603997eb..9bd1f55eb90 100644
--- a/cts/scheduler/summary/ticket-promoted-5.summary
+++ b/cts/scheduler/summary/ticket-promoted-5.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-6.summary b/cts/scheduler/summary/ticket-promoted-6.summary
index ee25f92c4e2..3ff57a331ec 100644
--- a/cts/scheduler/summary/ticket-promoted-6.summary
+++ b/cts/scheduler/summary/ticket-promoted-6.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-7.summary b/cts/scheduler/summary/ticket-promoted-7.summary
index eab3d91008b..c9133fe985b 100644
--- a/cts/scheduler/summary/ticket-promoted-7.summary
+++ b/cts/scheduler/summary/ticket-promoted-7.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-promoted-8.summary b/cts/scheduler/summary/ticket-promoted-8.summary
index 381603997eb..9bd1f55eb90 100644
--- a/cts/scheduler/summary/ticket-promoted-8.summary
+++ b/cts/scheduler/summary/ticket-promoted-8.summary
@@ -6,6 +6,14 @@ Current cluster status:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
diff --git a/cts/scheduler/summary/ticket-promoted-9.summary b/cts/scheduler/summary/ticket-promoted-9.summary
index f116a2eea0b..c4b3a55fb41 100644
--- a/cts/scheduler/summary/ticket-promoted-9.summary
+++ b/cts/scheduler/summary/ticket-promoted-9.summary
@@ -7,6 +7,14 @@ Current cluster status:
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node1 'deadman ticket was lost'
diff --git a/cts/scheduler/summary/ticket-rsc-sets-1.summary b/cts/scheduler/summary/ticket-rsc-sets-1.summary
index d119ce5176e..e7a300c5a2c 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-1.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-1.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc5:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-rsc-sets-10.summary b/cts/scheduler/summary/ticket-rsc-sets-10.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-10.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-10.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-11.summary b/cts/scheduler/summary/ticket-rsc-sets-11.summary
index 03153aa264b..2775ac69305 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-11.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-11.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-rsc-sets-12.summary b/cts/scheduler/summary/ticket-rsc-sets-12.summary
index 68e0827f78b..b387a94fcd4 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-12.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-12.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-13.summary b/cts/scheduler/summary/ticket-rsc-sets-13.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-13.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-13.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-14.summary b/cts/scheduler/summary/ticket-rsc-sets-14.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-14.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-14.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-2.summary b/cts/scheduler/summary/ticket-rsc-sets-2.summary
index fccf3cad1ba..5e6c47b66f7 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-2.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-2.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1 ( node2 )
diff --git a/cts/scheduler/summary/ticket-rsc-sets-3.summary b/cts/scheduler/summary/ticket-rsc-sets-3.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-3.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-3.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-4.summary b/cts/scheduler/summary/ticket-rsc-sets-4.summary
index d119ce5176e..e7a300c5a2c 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-4.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-4.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc5:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-rsc-sets-5.summary b/cts/scheduler/summary/ticket-rsc-sets-5.summary
index 217243a7b25..9d808a2ebd8 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-5.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-5.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1 ( node2 )
diff --git a/cts/scheduler/summary/ticket-rsc-sets-6.summary b/cts/scheduler/summary/ticket-rsc-sets-6.summary
index 7336f70db30..4d446693ea9 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-6.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-6.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc4:0 ( node2 )
diff --git a/cts/scheduler/summary/ticket-rsc-sets-7.summary b/cts/scheduler/summary/ticket-rsc-sets-7.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-7.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-7.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/ticket-rsc-sets-8.summary b/cts/scheduler/summary/ticket-rsc-sets-8.summary
index 03153aa264b..2775ac69305 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-8.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-8.summary
@@ -12,6 +12,14 @@ Current cluster status:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
diff --git a/cts/scheduler/summary/ticket-rsc-sets-9.summary b/cts/scheduler/summary/ticket-rsc-sets-9.summary
index 3bc9d648ac3..f8612ba8a2f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-9.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-9.summary
@@ -13,6 +13,14 @@ Current cluster status:
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
diff --git a/cts/scheduler/summary/unrunnable-1.summary b/cts/scheduler/summary/unrunnable-1.summary
index 75fda238563..9ba6f2ecf54 100644
--- a/cts/scheduler/summary/unrunnable-1.summary
+++ b/cts/scheduler/summary/unrunnable-1.summary
@@ -18,6 +18,7 @@ Current cluster status:
* child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN)
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
+warning: Node c001n02 is unclean but cannot be fenced
Transition Summary:
* Start DcIPaddr ( c001n03 ) due to no quorum (blocked)
diff --git a/cts/scheduler/summary/unrunnable-2.summary b/cts/scheduler/summary/unrunnable-2.summary
index 26c63510785..0c0ee882ad4 100644
--- a/cts/scheduler/summary/unrunnable-2.summary
+++ b/cts/scheduler/summary/unrunnable-2.summary
@@ -85,6 +85,7 @@ Current cluster status:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start openstack-cinder-volume ( overcloud-controller-2 ) due to unrunnable openstack-cinder-scheduler-clone running (blocked)
diff --git a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
index 78506c5354e..79c058252dd 100644
--- a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
+++ b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
@@ -24,6 +24,12 @@ Current cluster status:
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
+warning: Invalid ordering constraint between shared0:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-01_kiff-02
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c
index a74f5993a6c..db056f59b16 100644
--- a/daemons/execd/cts-exec-helper.c
+++ b/daemons/execd/cts-exec-helper.c
@@ -475,7 +475,14 @@ generate_params(void)
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = cib_xml_copy;
scheduler->priv->now = crm_time_new(NULL);
- cluster_status(scheduler);
+
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* pcmk_unpack_scheduler_input has already logged an error via pcmk__config_err */
+ pe_free_working_set(scheduler);
+ return rc;
+ }
// Find resource in CIB
rsc = pe_find_resource_with_flags(scheduler->priv->resources,
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index d04681d5167..bddd8e07d34 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -30,8 +30,7 @@ extern "C" {
const char *rsc_printable_id(const pcmk_resource_t *rsc);
-// NOTE: sbd (as of at least 1.5.2) uses this
-gboolean cluster_status(pcmk_scheduler_t *scheduler);
+int pcmk_unpack_scheduler_input(pcmk_scheduler_t *scheduler);
// NOTE: sbd (as of at least 1.5.2) uses this
pcmk_scheduler_t *pe_new_working_set(void);
diff --git a/include/crm/pengine/status_compat.h b/include/crm/pengine/status_compat.h
index 5488bb8e7d4..e360dcaf41e 100644
--- a/include/crm/pengine/status_compat.h
+++ b/include/crm/pengine/status_compat.h
@@ -27,6 +27,9 @@ extern "C" {
* release.
*/
+// NOTE: sbd (as of at least 1.5.2) uses this
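+//! \deprecated Use pcmk_unpack_scheduler_input() instead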
+gboolean cluster_status(pcmk_scheduler_t *scheduler);
+
// NOTE: sbd (as of at least 1.5.2) uses this
//! \deprecated Use pcmk_find_node() with scheduler object instead
pcmk_node_t *pe_find_node(const GList *node_list, const char *node_name);
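
Taken together, the status.h and status_compat.h hunks split the unpacking API: new callers include <crm/pengine/status.h> and check a standard Pacemaker return code, while legacy consumers such as sbd keep building against the boolean declaration in the compat header. A minimal migration sketch for an out-of-tree caller (the two API function names are from this patch; the wrapper around them is illustrative):

    #include <crm/pengine/status.h>     // pcmk_unpack_scheduler_input()

    static int
    unpack_input(pcmk_scheduler_t *scheduler)
    {
        // Before this patch: if (!cluster_status(scheduler)) { ... }
        int rc = pcmk_unpack_scheduler_input(scheduler);

        if (rc != pcmk_rc_ok) {
            /* EINVAL if scheduler or its input is NULL;
             * pcmk_rc_schema_validation if the CIB's feature set is newer
             * than this build supports (see the lib/pengine/status.c hunk)
             */
            return rc;
        }
        return pcmk_rc_ok;
    }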
diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h
index 77eb98ec8c1..cea22e57301 100644
--- a/include/pcmki/pcmki_scheduler.h
+++ b/include/pcmki/pcmki_scheduler.h
@@ -36,8 +36,8 @@ typedef struct {
void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler);
-void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
- pcmk_scheduler_t *scheduler);
+int pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
+ pcmk_scheduler_t *scheduler);
GList *pcmk__copy_node_list(const GList *list, bool reset);
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index 8960c298f57..ba662b61309 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -734,14 +734,16 @@ log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
* \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
*/
-static void
+static int
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
if (pcmk_is_set(scheduler->flags, pcmk__sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pcmk__set_scheduler_flags(scheduler, flags);
- return;
+ return pcmk_rc_ok;
}
pcmk__assert(cib != NULL);
@@ -756,7 +758,8 @@ unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
pcmk__set_scheduler_flags(scheduler, flags);
scheduler->input = cib;
- cluster_status(scheduler); // Sets pcmk__sched_have_status
+ // Sets pcmk__sched_have_status
+ return pcmk_unpack_scheduler_input(scheduler);
}
/*!
@@ -766,17 +769,24 @@ unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
* \param[in,out] cib CIB XML to use as scheduler input
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
*/
-void
+int
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pcmk_scheduler_t *scheduler)
{
- unpack_cib(cib, flags, scheduler);
+ int rc = unpack_cib(cib, flags, scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
pcmk__set_assignment_methods(scheduler);
pcmk__apply_node_health(scheduler);
pcmk__unpack_constraints(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_validate_only)) {
- return;
+ return pcmk_rc_ok;
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)
@@ -787,7 +797,7 @@ pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
apply_node_criteria(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
- return;
+ return pcmk_rc_ok;
}
pcmk__create_internal_constraints(scheduler);
@@ -808,6 +818,8 @@ pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(scheduler);
}
+
+ return pcmk_rc_ok;
}
/*!
@@ -835,6 +847,8 @@ int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
pcmk_scheduler_t **scheduler)
{
+ int rc = pcmk_rc_ok;
+
// Allows for cleaner syntax than dereferencing the scheduler argument
pcmk_scheduler_t *new_scheduler = NULL;
@@ -857,7 +871,7 @@ pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *da
}
} else {
- int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
+ rc = cib__signon_query(out, NULL, &(new_scheduler->input));
if (rc != pcmk_rc_ok) {
pe_free_working_set(new_scheduler);
@@ -873,7 +887,12 @@ pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *da
}
// Unpack everything
- cluster_status(new_scheduler);
+ rc = pcmk_unpack_scheduler_input(new_scheduler);
+ if (rc != pcmk_rc_ok) {
+ pe_free_working_set(new_scheduler);
+ return rc;
+ }
+
*scheduler = new_scheduler;
return pcmk_rc_ok;
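
With unpack_cib() and pcmk__schedule_actions() both returning standard Pacemaker codes, the callers in libpacemaker reduce to a propagate-on-failure chain, as the call sites below show. A condensed sketch of that convention (the identifiers are from this patch; the wrapper function is illustrative):

    static int
    run_scheduler(xmlNode *cib_xml, unsigned long long flags,
                  pcmk_scheduler_t *scheduler)
    {
        int rc = pcmk__schedule_actions(cib_xml, flags, scheduler);

        if (rc != pcmk_rc_ok) {
            // The only failure source is unpack_cib(), which has already
            // reported the problem via pcmk__config_err()
            return rc;
        }
        // On success, the scheduler data now holds the planned transition
        return pcmk_rc_ok;
    }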
diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c
index 7b3080d57e5..30d9c408e1d 100644
--- a/lib/pacemaker/pcmk_simulate.c
+++ b/lib/pacemaker/pcmk_simulate.c
@@ -362,6 +362,7 @@ profile_file(const char *xml_file, long long repeat,
}
for (int i = 0; i < repeat; ++i) {
+ int rc;
xmlNode *input = cib_object;
if (repeat > 1) {
@@ -369,8 +370,12 @@ profile_file(const char *xml_file, long long repeat,
}
scheduler->input = input;
set_effective_date(scheduler, false, use_date);
- pcmk__schedule_actions(input, scheduler_flags, scheduler);
+ rc = pcmk__schedule_actions(input, scheduler_flags, scheduler);
pe_reset_working_set(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ break;
+ }
}
end = clock();
@@ -809,7 +814,11 @@ pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
}
reset(scheduler, input, out, use_date, flags);
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ goto simulate_done;
+ }
if (!out->is_quiet(out)) {
const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending);
@@ -862,7 +871,12 @@ pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
cleanup_calculations(scheduler);
reset(scheduler, input, out, use_date, flags);
- cluster_status(scheduler);
+ /* pcmk_unpack_scheduler_input only returns an error if the scheduler
+ * or its input is NULL, or if the CIB feature set is unsupported.
+ * Neither condition could have changed since the first call, so
+ * there's no need to check the return value again.
+ */
+ pcmk_unpack_scheduler_input(scheduler);
}
if (input_file != NULL) {
@@ -911,6 +925,11 @@ pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
scheduler->priv->out = logger_out;
}
+ /* Likewise here: pcmk__schedule_actions only returns an error if
+ * pcmk_unpack_scheduler_input did, and nothing that could cause new
+ * errors has changed since the first call, so we don't need to check
+ * this return value either.
+ */
pcmk__schedule_actions(input, scheduler_flags, scheduler);
if (logger_out == NULL) {
@@ -972,9 +991,11 @@ pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization);
}
- cluster_status(scheduler);
- print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
- true);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc == pcmk_rc_ok) {
+ print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
+ true);
+ }
simulate_done:
cib__clean_up_connection(&cib);
diff --git a/lib/pacemaker/pcmk_status.c b/lib/pacemaker/pcmk_status.c
index 002817c06ae..70f43b03123 100644
--- a/lib/pacemaker/pcmk_status.c
+++ b/lib/pacemaker/pcmk_status.c
@@ -104,7 +104,14 @@ pcmk__output_cluster_status(pcmk_scheduler_t *scheduler, stonith_t *stonith,
pe_reset_working_set(scheduler);
scheduler->input = cib_copy;
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* The scheduler has taken ownership of the CIB copy, so cleanup is up
+ * to the caller; cleaning up here as well could double-free XML or
+ * CIB data.
+ */
+ return rc;
+ }
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
diff --git a/lib/pacemaker/pcmk_verify.c b/lib/pacemaker/pcmk_verify.c
index 893299c9ed4..36d307fe9b9 100644
--- a/lib/pacemaker/pcmk_verify.c
+++ b/lib/pacemaker/pcmk_verify.c
@@ -108,7 +108,10 @@ pcmk__verify(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
* ownership of the passed-in XML object, hence we pass in a copy
* to the scheduler.
*/
- pcmk__schedule_actions(cib_object_copy, flags, scheduler);
+ rc = pcmk__schedule_actions(cib_object_copy, flags, scheduler);
+ if (rc != pcmk_rc_ok) {
+ pcmk__config_has_error = true;
+ }
}
verify_done:
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 1fce0b32af4..d087cfa6fb0 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -88,8 +88,9 @@ check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
}
}
-/*
- * Unpack everything
+/*!
+ * \brief Unpack scheduler input
+ *
* At the end you'll have:
* - A list of nodes
* - A list of resources (each with any dependencies on other resources)
@@ -98,15 +99,17 @@ check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
* - A list of nodes that need to be stonith'd
* - A list of nodes that need to be shutdown
* - A list of the possible stop/start actions (without dependencies)
+ *
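+ * \param[in,out] scheduler Scheduler data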
+ * \return Standard Pacemaker return code
*/
-gboolean
-cluster_status(pcmk_scheduler_t * scheduler)
+int
+pcmk_unpack_scheduler_input(pcmk_scheduler_t *scheduler)
{
const char *new_version = NULL;
xmlNode *section = NULL;
if ((scheduler == NULL) || (scheduler->input == NULL)) {
- return FALSE;
+ return EINVAL;
}
new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);
@@ -114,7 +117,7 @@ cluster_status(pcmk_scheduler_t * scheduler)
if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
new_version, CRM_FEATURE_SET);
- return FALSE;
+ return pcmk_rc_schema_validation;
}
crm_trace("Beginning unpack");
@@ -202,7 +205,7 @@ cluster_status(pcmk_scheduler_t * scheduler)
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_status);
- return TRUE;
+ return pcmk_rc_ok;
}
/*!
@@ -528,6 +531,12 @@ pe_find_node_id(const GList *nodes, const char *id)
#include <crm/pengine/status_compat.h>
+gboolean
+cluster_status(pcmk_scheduler_t * scheduler)
+{
+ return pcmk_unpack_scheduler_input(scheduler) == pcmk_rc_ok;
+}
+
/*!
* \brief Find a node by name in a list of nodes
*
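
The compatibility shim above preserves the old contract exactly: every failure mode collapses to FALSE. Side by side, the two conventions map as follows (illustrative; both functions are from this patch):

    gboolean ok = cluster_status(scheduler);         // TRUE or FALSE only

    int rc = pcmk_unpack_scheduler_input(scheduler); // pcmk_rc_ok, EINVAL, or
                                                     // pcmk_rc_schema_validation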
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
index 676d22a88d4..fa8e7e13562 100644
--- a/lib/pengine/tests/native/native_find_rsc_test.c
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -46,7 +46,7 @@ setup(void **state) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = input;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
/* Get references to the cluster nodes so we don't have to find them repeatedly. */
cluster01 = pcmk_find_node(scheduler, "cluster01");
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
index 1a08480974b..d8e4eff11af 100644
--- a/lib/pengine/tests/native/pe_base_name_eq_test.c
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -45,7 +45,7 @@ setup(void **state) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = input;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
/* Get references to several resources we use frequently. */
for (GList *iter = scheduler->priv->resources;
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 149ff7737e0..7441d358856 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -237,6 +237,59 @@ static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
static void refresh_after_event(gboolean data_updated, gboolean enforce);
+struct output_config_ctx {
+ pcmk__output_t *out;
+ bool quiet;
+};
+
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output configuration context (struct output_config_ctx)
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ struct output_config_ctx *occ = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!occ->quiet) {
+ occ->out->err(occ->out, "error: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output configuration context (struct output_config_ctx)
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ struct output_config_ctx *occ = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!occ->quiet) {
+ occ->out->err(occ->out, "warning: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
static uint32_t
all_includes(mon_output_format_t fmt) {
if ((fmt == mon_output_plain) || (fmt == mon_output_console)) {
@@ -1408,6 +1461,7 @@ int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
+ struct output_config_ctx *ctx = NULL;
GOptionGroup *output_group = NULL;
args = pcmk__new_common_args(SUMMARY);
@@ -1577,6 +1631,13 @@ main(int argc, char **argv)
return clean_up(CRM_EX_OK);
}
+ ctx = pcmk__assert_alloc(1, sizeof(struct output_config_ctx));
+ ctx->out = out;
+ ctx->quiet = args->quiet;
+
+ pcmk__set_config_error_handler(output_config_error, ctx);
+ pcmk__set_config_warning_handler(output_config_warning, ctx);
+
if (output_format == mon_output_xml) {
show_opts |= pcmk_show_inactive_rscs | pcmk_show_timing;
}
@@ -1664,6 +1725,7 @@ main(int argc, char **argv)
crm_info("Exiting %s", crm_system_name);
+ free(ctx);
return clean_up(CRM_EX_OK);
}
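
The handler pair added to crm_mon.c is the template the other tools repeat below; only the ctx type differs (a small wrapper struct here, a bare pcmk__output_t elsewhere, since crm_mon must consult its own quiet flag rather than the output object's). A condensed sketch of the registration pattern (the names are from this patch; stack allocation of the context is illustrative):

    struct output_config_ctx ctx = { .out = out, .quiet = args->quiet };

    /* Route configuration errors and warnings through the tool's output
     * object instead of the default logger
     */
    pcmk__set_config_error_handler(output_config_error, &ctx);
    pcmk__set_config_warning_handler(output_config_warning, &ctx);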
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index b308d847ada..8c4a041a209 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -139,6 +139,54 @@ static pcmk__supported_format_t formats[] = {
{ NULL, NULL, NULL }
};
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
@@ -959,7 +1007,15 @@ clear_constraints(pcmk__output_t *out)
}
scheduler->input = cib_xml;
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by the handler registered via
+ * pcmk__set_config_error_handler, and cleaning up scheduler is
+ * handled by the bye() function.
+ */
+ g_list_free(before);
+ return rc;
+ }
after = build_constraint_list(scheduler->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
@@ -993,7 +1049,14 @@ initialize_scheduler_data(xmlNode **cib_xml_orig)
return rc;
}
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by the handler registered via
+ * pcmk__set_config_error_handler, and cleaning up scheduler is
+ * handled by the bye() function.
+ */
+ return rc;
+ }
+
return pcmk_rc_ok;
}
@@ -1476,6 +1539,9 @@ main(int argc, char **argv)
out->quiet = args->quiet;
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
crm_log_args(argc, argv);
/*
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 0c1384ddf48..fca17f851c0 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1471,8 +1471,12 @@ update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, xmlNode **cib_xml_orig,
goto done;
}
- pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
- scheduler);
+ rc = pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
+ scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by the handler registered via pcmk__set_config_error_handler */
+ goto done;
+ }
prev_quiet = out->is_quiet(out);
out->quiet = true;
@@ -1491,7 +1495,7 @@ update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, xmlNode **cib_xml_orig,
pcmk__xml_free(*cib_xml_orig);
*cib_xml_orig = xml;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
}
done:
@@ -2101,8 +2105,13 @@ wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib)
if (rc != pcmk_rc_ok) {
break;
}
- pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
- scheduler);
+
+ rc = pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
+ scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by the handler registered via pcmk__set_config_error_handler */
+ break;
+ }
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index df5359cb4f2..03969e709c2 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -342,6 +342,54 @@ static GOptionEntry source_entries[] = {
{ NULL }
};
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
static int
setup_input(pcmk__output_t *out, const char *input, const char *output,
GError **error)
@@ -501,6 +549,9 @@ main(int argc, char **argv)
out->quiet = args->quiet;
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
if (args->version) {
out->version(out, false);
goto done;
diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c
index b6644a1779e..d9319d56ee2 100644
--- a/tools/crm_ticket.c
+++ b/tools/crm_ticket.c
@@ -261,6 +261,54 @@ static GOptionEntry deprecated_entries[] = {
{ NULL }
};
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
static void
ticket_grant_warning(gchar *ticket_id)
{
@@ -382,6 +430,11 @@ main(int argc, char **argv)
pe__register_messages(out);
pcmk__register_lib_messages(out);
+ out->quiet = options.quiet;
+
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
if (args->version) {
out->version(out, false);
goto done;
@@ -441,7 +494,12 @@ main(int argc, char **argv)
scheduler->input = cib_xml_copy;
scheduler->priv->now = crm_time_new(NULL);
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by the handler registered via pcmk__set_config_error_handler */
+ exit_code = pcmk_rc2exitc(rc);
+ goto done;
+ }
/* For recording the tickets that are referenced in PCMK_XE_RSC_TICKET
* constraints but have never been granted yet.