Merge "Fix the scope to share a server between tests"
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 5028cad..6fe3186 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -33,11 +33,8 @@
     def setUpClass(cls):
         super(ServersAdminTestJSON, cls).setUpClass()
         cls.client = cls.os_adm.servers_client
+        cls.non_admin_client = cls.servers_client
         cls.flavors_client = cls.os_adm.flavors_client
-        cls.identity_client = cls._get_identity_admin_client()
-        tenant = cls.identity_client.get_tenant_by_name(
-            cls.client.tenant_name)
-        cls.tenant_id = tenant['id']
 
         cls.s1_name = data_utils.rand_name('server')
         resp, server = cls.create_test_server(name=cls.s1_name,
@@ -116,6 +113,34 @@
         for key in basic_attrs:
             self.assertIn(key, str(diagnostic.keys()))
 
+    @attr(type='gate')
+    def test_rebuild_server_in_error_state(self):
+        # The server in error state should be rebuilt using the provided
+        # image and changed to ACTIVE state
+
+        # Resetting the VM state requires admin privileges
+        resp, server = self.client.reset_state(self.s1_id, state='error')
+        self.assertEqual(202, resp.status)
+        resp, rebuilt_server = self.non_admin_client.rebuild(
+            self.s1_id, self.image_ref_alt)
+        self.addCleanup(self.non_admin_client.wait_for_server_status,
+                        self.s1_id, 'ACTIVE')
+        self.addCleanup(self.non_admin_client.rebuild, self.s1_id,
+                        self.image_ref)
+
+        # Verify the properties in the initial response are correct
+        self.assertEqual(self.s1_id, rebuilt_server['id'])
+        rebuilt_image_id = rebuilt_server['image']['id']
+        self.assertEqual(self.image_ref_alt, rebuilt_image_id)
+        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
+        self.non_admin_client.wait_for_server_status(rebuilt_server['id'],
+                                                     'ACTIVE',
+                                                     raise_on_error=False)
+        # Verify the server properties after rebuilding
+        resp, server = self.non_admin_client.get_server(rebuilt_server['id'])
+        rebuilt_image_id = server['image']['id']
+        self.assertEqual(self.image_ref_alt, rebuilt_image_id)
+
 
 class ServersAdminTestXML(ServersAdminTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 2e45b28..f244155 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -123,6 +123,7 @@
                                                    metadata=meta,
                                                    personality=personality,
                                                    adminPass=password)
+        self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -133,15 +134,44 @@
         # Verify the server properties after the rebuild completes
         self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
         resp, server = self.client.get_server(rebuilt_server['id'])
-        rebuilt_image_id = rebuilt_server['image']['id']
+        rebuilt_image_id = server['image']['id']
         self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
-        self.assertEqual(new_name, rebuilt_server['name'])
+        self.assertEqual(new_name, server['name'])
 
         if self.run_ssh:
             # Verify that the user can authenticate with the provided password
             linux_client = RemoteClient(server, self.ssh_user, password)
             self.assertTrue(linux_client.can_authenticate())
 
+    @attr(type='gate')
+    def test_rebuild_server_in_stop_state(self):
+        # The server in stopped state should be rebuilt using the provided
+        # image and remain in SHUTOFF state
+        resp, server = self.client.get_server(self.server_id)
+        old_image = server['image']['id']
+        new_image = (self.image_ref_alt
+                     if old_image == self.image_ref else self.image_ref)
+        resp, server = self.client.stop(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+        self.addCleanup(self.client.start, self.server_id)
+        resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
+        self.addCleanup(self.client.wait_for_server_status, self.server_id,
+                        'SHUTOFF')
+        self.addCleanup(self.client.rebuild, self.server_id, old_image)
+
+        # Verify the properties in the initial response are correct
+        self.assertEqual(self.server_id, rebuilt_server['id'])
+        rebuilt_image_id = rebuilt_server['image']['id']
+        self.assertEqual(new_image, rebuilt_image_id)
+        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
+
+        # Verify the server properties after the rebuild completes
+        self.client.wait_for_server_status(rebuilt_server['id'], 'SHUTOFF')
+        resp, server = self.client.get_server(rebuilt_server['id'])
+        rebuilt_image_id = server['image']['id']
+        self.assertEqual(new_image, rebuilt_image_id)
+
     def _detect_server_image_flavor(self, server_id):
         # Detects the current server image flavor ref.
         resp, server = self.client.get_server(self.server_id)
diff --git a/tempest/api/compute/v3/admin/test_services.py b/tempest/api/compute/v3/admin/test_services.py
index 67f9947..64135ed 100644
--- a/tempest/api/compute/v3/admin/test_services.py
+++ b/tempest/api/compute/v3/admin/test_services.py
@@ -113,23 +113,6 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(0, len(services))
 
-    @attr(type='gate')
-    def test_service_enable_disable(self):
-        resp, services = self.client.list_services()
-        host_name = services[0]['host']
-        binary_name = services[0]['binary']
-
-        resp, service = self.client.disable_service(host_name, binary_name)
-        self.assertEqual(200, resp.status)
-        params = {'host': host_name, 'binary': binary_name}
-        resp, services = self.client.list_services(params)
-        self.assertEqual('disabled', services[0]['status'])
-
-        resp, service = self.client.enable_service(host_name, binary_name)
-        self.assertEqual(200, resp.status)
-        resp, services = self.client.list_services(params)
-        self.assertEqual('enabled', services[0]['status'])
-
 
 class ServicesAdminV3TestXML(ServicesAdminV3TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index f214641..f54e9b3 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -43,9 +43,8 @@
         cls.volume_list = []
         cls.volume_id_list = []
         for i in range(3):
-            v_name = data_utils.rand_name('volume-%s')
+            v_name = data_utils.rand_name('volume-%s' % cls._interface)
             metadata = {'Type': 'work'}
-            v_name += cls._interface
             try:
                 resp, volume = cls.client.create_volume(size=1,
                                                         display_name=v_name,
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 9322f1b..9aca2ff 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -506,18 +506,26 @@
         if resp.status in (500, 501):
             message = resp_body
             if parse_resp:
-                resp_body = self._parse_resp(resp_body)
-                # I'm seeing both computeFault and cloudServersFault come back.
-                # Will file a bug to fix, but leave as is for now.
-                if 'cloudServersFault' in resp_body:
-                    message = resp_body['cloudServersFault']['message']
-                elif 'computeFault' in resp_body:
-                    message = resp_body['computeFault']['message']
-                elif 'error' in resp_body:  # Keystone errors
-                    message = resp_body['error']['message']
-                    raise exceptions.IdentityError(message)
-                elif 'message' in resp_body:
-                    message = resp_body['message']
+                try:
+                    resp_body = self._parse_resp(resp_body)
+                except ValueError:
+                    # If the response body is not valid JSON (e.g. a
+                    # plain string), keep resp_body as is and raise an
+                    # InvalidHTTPResponseBody exception.
+                    raise exceptions.InvalidHTTPResponseBody(message)
+                else:
+                    # I'm seeing both computeFault and cloudServersFault
+                    # come back. Will file a bug to fix, but leave as is
+                    # for now.
+                    if 'cloudServersFault' in resp_body:
+                        message = resp_body['cloudServersFault']['message']
+                    elif 'computeFault' in resp_body:
+                        message = resp_body['computeFault']['message']
+                    elif 'error' in resp_body:  # Keystone errors
+                        message = resp_body['error']['message']
+                        raise exceptions.IdentityError(message)
+                    elif 'message' in resp_body:
+                        message = resp_body['message']
 
             raise exceptions.ServerFault(message)
 
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index bea2cdc..497a297 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -25,7 +25,7 @@
 
 # NOTE(afazekas): This function needs to know a token and a subject.
 def wait_for_server_status(client, server_id, status, ready_wait=True,
-                           extra_timeout=0):
+                           extra_timeout=0, raise_on_error=True):
     """Waits for a server to reach a given status."""
 
     def _get_task_state(body):
@@ -69,7 +69,7 @@
                      '/'.join((old_status, str(old_task_state))),
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
-        if server_status == 'ERROR':
+        if (server_status == 'ERROR') and raise_on_error:
             raise exceptions.BuildErrorException(server_id=server_id)
 
         timed_out = int(time.time()) - start_time >= timeout
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 02fc231..0bab9c7 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -184,3 +184,7 @@
 class ResponseWithEntity(RFCViolation):
     message = ("RFC Violation! Response with 205 HTTP Status Code "
                "MUST NOT have an entity")
+
+
+class InvalidHTTPResponseBody(RestClientException):
+    message = "HTTP response body is invalid json or xml"
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
index 745eb05..4651284 100644
--- a/tempest/scenario/orchestration/test_autoscaling.yaml
+++ b/tempest/scenario/orchestration/test_autoscaling.yaml
@@ -23,11 +23,11 @@
     Default: '420'
   ScaleUpThreshold:
     Description: Memory percentage threshold to scale up on
-    Type: Number
+    Type: String
     Default: '70'
   ScaleDownThreshold:
     Description: Memory percentage threshold to scale down on
-    Type: Number
+    Type: String
     Default: '60'
   ConsumeMemoryLimit:
     Description: Memory percentage threshold to consume
@@ -182,4 +182,4 @@
             # wait ConsumeStartSeconds then ramp up memory consumption
             # until it is over ConsumeMemoryLimit%
             # then exits ConsumeStopSeconds seconds after stack launch
-            /root/consume_memory > /root/consume_memory.log &
\ No newline at end of file
+            /root/consume_memory > /root/consume_memory.log &
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 3c6a40f..eb1a0c3 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -154,10 +154,12 @@
         body = json.loads(body)
         return resp, body
 
-    def wait_for_server_status(self, server_id, status, extra_timeout=0):
+    def wait_for_server_status(self, server_id, status, extra_timeout=0,
+                               raise_on_error=True):
         """Waits for a server to reach a given status."""
         return waiters.wait_for_server_status(self, server_id, status,
-                                              extra_timeout=extra_timeout)
+                                              extra_timeout=extra_timeout,
+                                              raise_on_error=raise_on_error)
 
     def wait_for_server_termination(self, server_id, ignore_error=False):
         """Waits for server to reach termination."""
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 7d40d0e..68f6cf0 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -363,10 +363,12 @@
         server = self._parse_server(etree.fromstring(body))
         return resp, server
 
-    def wait_for_server_status(self, server_id, status, extra_timeout=0):
+    def wait_for_server_status(self, server_id, status, extra_timeout=0,
+                               raise_on_error=True):
         """Waits for a server to reach a given status."""
         return waiters.wait_for_server_status(self, server_id, status,
-                                              extra_timeout=extra_timeout)
+                                              extra_timeout=extra_timeout,
+                                              raise_on_error=raise_on_error)
 
     def wait_for_server_termination(self, server_id, ignore_error=False):
         """Waits for server to reach termination."""