Use helper functions stack_suspend and stack_resume
These wait for the stack state to get to complete;
this wasn't done consistently. In test_autoscaling we were
only waiting for the resource state to get to SUSPEND_COMPLETE,
and this led to a time-sensitive bug.
Change-Id: Id985d833dc0b4cab1e3cb9d8f67d5d8cc94b5863
Closes-bug: #1438717
diff --git a/functional/test_autoscaling.py b/functional/test_autoscaling.py
index 932245d..489ae63 100644
--- a/functional/test_autoscaling.py
+++ b/functional/test_autoscaling.py
@@ -318,21 +318,11 @@
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'JobServerGroup')
- self.client.actions.suspend(stack_id=stack_identifier)
- self._wait_for_resource_status(
- stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')
- for res in self.client.resources.list(nested_ident):
- self._wait_for_resource_status(nested_ident,
- res.resource_name,
- 'SUSPEND_COMPLETE')
+ self.stack_suspend(stack_identifier)
+ self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')
- self.client.actions.resume(stack_id=stack_identifier)
- self._wait_for_resource_status(
- stack_identifier, 'JobServerGroup', 'RESUME_COMPLETE')
- for res in self.client.resources.list(nested_ident):
- self._wait_for_resource_status(nested_ident,
- res.resource_name,
- 'RESUME_COMPLETE')
+ self.stack_resume(stack_identifier)
+ self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')
class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):