Merge "Fix compute tests init"
diff --git a/tempest/testboto.py b/tempest/testboto.py
index 14844b3..5ce15b7 100644
--- a/tempest/testboto.py
+++ b/tempest/testboto.py
@@ -20,13 +20,13 @@
import re
import boto
-from boto.exception import BotoServerError
-from boto.exception import EC2ResponseError
-from boto.s3.bucket import Bucket
+from boto import ec2
+from boto import exception
+from boto import s3
import testresources
import testtools
-from tempest.exceptions import TearDownException
+from tempest import exceptions
import tempest.tests.boto
from tempest.tests.boto.utils.wait import re_search_wait
from tempest.tests.boto.utils.wait import state_wait
@@ -40,7 +40,7 @@
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
- if not isinstance(exc, BotoServerError):
+ if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
if re.match(self.STATUS_RE, str(exc.status)) is None:
@@ -164,7 +164,7 @@
key_name)"""
try:
callableObj(*args, **kwargs)
- except BotoServerError as exc:
+ except exception.BotoServerError as exc:
error_msg = excMatcher.match(exc)
if error_msg is not None:
raise self.failureException, error_msg
@@ -190,7 +190,7 @@
finally:
del cls._resource_trash_bin[key]
if fail_count:
- raise TearDownException(num=fail_count)
+ raise exceptions.TearDownException(num=fail_count)
ec2_error_code = BotoExceptionMatcher()
# InsufficientInstanceCapacity can be both server and client error
@@ -210,15 +210,50 @@
gone_set = set(('_GONE',))
+ @classmethod
+ def get_lfunction_gone(cls, obj):
+ """ If the object is instance of a well know type returns back with
+ with the correspoding function otherwise it assumes the obj itself
+ is the function"""
+ ec = cls.ec2_error_code
+ if isinstance(obj, ec2.instance.Instance):
+ closure_matcher = ec.client.InvalidInstanceID.NotFound
+ status_attr = "state"
+ elif isinstance(obj, ec2.image.Image):
+ closure_matcher = ec.client.InvalidAMIID.NotFound
+ status_attr = "state"
+ elif isinstance(obj, ec2.snapshot.Snapshot):
+ closure_matcher = ec.client.InvalidSnapshot.NotFound
+ status_attr = "status"
+ elif isinstance(obj, ec2.volume.Volume):
+ closure_matcher = ec.client.InvalidVolume.NotFound
+ status_attr = "status"
+ else:
+ return obj
+
+ def _status():
+ try:
+ obj.update(validate=True)
+ except ValueError:
+ return "_GONE"
+ except exception.EC2ResponseError as exc:
+ if closure_matcher.match(exc):
+ return "_GONE"
+ else:
+ raise
+ return getattr(obj, status_attr)
+
+ return _status
+
def state_wait_gone(self, lfunction, final_set, valid_set):
if not isinstance(final_set, set):
final_set = set((final_set,))
final_set |= self.gone_set
+ lfunction = self.get_lfunction_gone(lfunction)
state = state_wait(lfunction, final_set, valid_set)
self.assertIn(state, valid_set | self.gone_set)
return state
- #TODO(afazekas): object based versions for resurces which supports update
def waitImageState(self, lfunction, wait_for):
return self.state_wait_gone(lfunction, wait_for,
self.valid_image_state)
@@ -227,14 +262,14 @@
return self.state_wait_gone(lfunction, wait_for,
self.valid_instance_state)
- def waitVolumeStatus(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_volume_status)
-
def waitSnapshotStatus(self, lfunction, wait_for):
return self.state_wait_gone(lfunction, wait_for,
self.valid_snapshot_status)
+ def waitVolumeStatus(self, lfunction, wait_for):
+ return self.state_wait_gone(lfunction, wait_for,
+ self.valid_volume_status)
+
def assertImageStateWait(self, lfunction, wait_for):
state = self.waitImageState(lfunction, wait_for)
self.assertIn(state, wait_for)
@@ -310,7 +345,7 @@
with closing(boto.connect_s3(**connection_data)) as conn:
if isinstance(bucket, basestring):
bucket = conn.lookup(bucket)
- assert isinstance(bucket, Bucket)
+ assert isinstance(bucket, s3.bucket.Bucket)
for obj in bucket.list():
try:
bucket.delete_key(obj.key)
@@ -323,7 +358,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
@classmethod
def destroy_reservation(cls, reservation):
@@ -335,7 +370,7 @@
instance.update(validate=True)
except ValueError:
return "_GONE"
- except EC2ResponseError as exc:
+ except exception.EC2ResponseError as exc:
if cls.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
return "_GONE"
@@ -354,7 +389,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
#NOTE(afazekas): The incorrect ErrorCodes make it very difficult
# to write a better teardown
@@ -397,7 +432,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
@classmethod
def destroy_snapshot_wait(cls, snapshot):
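
The core of this change is get_lfunction_gone: given a boto resource, it
builds a zero-argument poller that state_wait() can call repeatedly,
mapping "object no longer exists" errors onto the _GONE sentinel. A
minimal sketch of that pattern follows; make_status_poller and matcher
are illustrative names, not tempest API:

    from boto import exception

    # Illustrative closure factory; the real helper selects `matcher` and
    # `status_attr` from the boto type (Instance, Image, Snapshot, Volume).
    def make_status_poller(obj, matcher, status_attr):
        def _status():
            try:
                obj.update(validate=True)      # refresh from the EC2 API
            except ValueError:
                return "_GONE"                 # boto: object vanished
            except exception.EC2ResponseError as exc:
                if matcher.match(exc):         # e.g. *.NotFound, as in the diff
                    return "_GONE"
                raise
            return getattr(obj, status_attr)   # "state" or "status"
        return _status
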
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 403ec4c..b0a7fcd 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -98,20 +98,16 @@
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
- def _state():
- instance.update(validate=True)
- return instance.state
-
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
- self.assertInstanceStateWait(_state, "running")
+ self.assertInstanceStateWait(instance, "running")
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
- self.assertInstanceStateWait(_state, "stopped")
+ self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
@@ -174,18 +170,9 @@
volume = self.ec2_client.create_volume(1, self.zone)
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
-
- def _instance_state():
- instance.update(validate=True)
- return instance.state
-
- def _volume_state():
- volume.update(validate=True)
- return volume.status
-
LOG.info("state: %s", instance.state)
if instance.state != "running":
- self.assertInstanceStateWait(_instance_state, "running")
+ self.assertInstanceStateWait(instance, "running")
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
@@ -194,7 +181,7 @@
rcuk_da = self.addResourceCleanUp(address.disassociate)
#TODO(afazekas): ping test. dependency/permission?
- self.assertVolumeStatusWait(_volume_state, "available")
+ self.assertVolumeStatusWait(volume, "available")
#NOTE(afazekas): it may be reported available before it actually is
ssh = RemoteClient(address.public_ip,
@@ -213,6 +200,10 @@
# "attaching" invalid EC2 state ! #1074901
volume.attach(instance.id, "/dev/vdh")
+ def _volume_state():
+ volume.update(validate=True)
+ return volume.status
+
#self.assertVolumeStatusWait(_volume_state, "in-use") # #1074901
re_search_wait(_volume_state, "in-use")
@@ -251,7 +242,7 @@
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
- self.assertInstanceStateWait(_instance_state, "stopped")
+ self.assertInstanceStateWait(instance, "stopped")
#TODO(afazekas): move steps from teardown to the test case
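
The net effect on the tests: instead of hand-rolling a _state closure per
resource, they pass the boto object straight to the wait helpers, and
state_wait_gone wraps it via get_lfunction_gone. A self-contained toy of
that mechanism; FakeInstance and the stripped-down helpers are
illustrative, and the real state_wait adds timeouts and logging:

    class FakeInstance(object):
        state = "pending"
        _polls = 0

        def update(self, validate=False):
            self._polls += 1
            if self._polls >= 2:
                self.state = "running"

    def get_lfunction_gone(obj):
        if hasattr(obj, "update") and hasattr(obj, "state"):
            def _status():
                obj.update(validate=True)
                return obj.state
            return _status
        return obj  # plain callables pass through unchanged

    def state_wait(lfunction, final_set, valid_set):
        # toy loop; the tempest version also enforces valid_set and timeouts
        while True:
            state = lfunction()
            if state in final_set:
                return state

    instance = FakeInstance()
    print(state_wait(get_lfunction_gone(instance), set(["running"]),
                     set(["pending"])))  # -> running
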
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index aa2325f..dc8ff31 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -49,12 +49,7 @@
retrieved = self.client.get_all_volumes((volume.id,))
self.assertEqual(1, len(retrieved))
self.assertTrue(compare_volumes(volume, retrieved[0]))
-
- def _status():
- volume.update(validate=True)
- return volume.status
-
- self.assertVolumeStatusWait(_status, "available")
+ self.assertVolumeStatusWait(volume, "available")
self.client.delete_volume(volume.id)
self.cancelResourceCleanUp(cuk)
@@ -63,28 +58,13 @@
# EC2 Create volume from snapshot
volume = self.client.create_volume(1, self.zone)
self.addResourceCleanUp(self.client.delete_volume, volume.id)
-
- def _status():
- volume.update(validate=True)
- return volume.status
-
- self.assertVolumeStatusWait(_status, "available")
+ self.assertVolumeStatusWait(volume, "available")
snap = self.client.create_snapshot(volume.id)
self.addResourceCleanUp(self.destroy_snapshot_wait, snap)
-
- def _snap_status():
- snap.update(validate=True)
- return snap.status
-
- self.assertSnapshotStatusWait(_snap_status, "completed")
+ self.assertSnapshotStatusWait(snap, "completed")
svol = self.client.create_volume(1, self.zone, snapshot=snap)
cuk = self.addResourceCleanUp(svol.delete)
-
- def _snap_vol_status():
- svol.update(validate=True)
- return svol.status
-
- self.assertVolumeStatusWait(_snap_vol_status, "available")
+ self.assertVolumeStatusWait(svol, "available")
svol.delete()
self.cancelResourceCleanUp(cuk)
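
Condensed, the volume-from-snapshot scenario now reads as below. Here
`conn` and `zone` are assumed to be an established boto EC2 connection
and an availability zone name; the comments stand in for the
assert*StatusWait calls:

    volume = conn.create_volume(1, zone)               # 1 GiB volume
    # wait: assertVolumeStatusWait(volume, "available")
    snap = conn.create_snapshot(volume.id)
    # wait: assertSnapshotStatusWait(snap, "completed")
    svol = conn.create_volume(1, zone, snapshot=snap)
    # wait: assertVolumeStatusWait(svol, "available"), then svol.delete()
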
diff --git a/tempest/tests/boto/test_s3_ec2_images.py b/tempest/tests/boto/test_s3_ec2_images.py
index 8913395..0f7628b 100644
--- a/tempest/tests/boto/test_s3_ec2_images.py
+++ b/tempest/tests/boto/test_s3_ec2_images.py
@@ -74,8 +74,7 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
- state = retrieved_image.state
- if state != "available":
+ if retrieved_image.state != "available":
def _state():
retr = self.images_client.get_image(image["image_id"])
return retr.state
@@ -103,10 +102,7 @@
self.assertTrue(retrieved_image.id == image["image_id"])
self.assertIn(retrieved_image.state, self.valid_image_state)
if retrieved_image.state != "available":
- def _state():
- retr = self.images_client.get_image(image["image_id"])
- return retr.state
- self.assertImageStateWait(_state, "available")
+ self.assertImageStateWait(retrieved_image, "available")
self.images_client.deregister_image(image["image_id"])
#TODO(afazekas): verify deregister in a better way
retrieved_image = self.images_client.get_image(image["image_id"])
@@ -129,10 +125,7 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertIn(retrieved_image.state, self.valid_image_state)
if retrieved_image.state != "available":
- def _state():
- retr = self.images_client.get_image(image["image_id"])
- return retr.state
- self.assertImageStateWait(_state, "available")
+ self.assertImageStateWait(retrieved_image, "available")
self.assertIn(retrieved_image.state, self.valid_image_state)
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
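
Note that the first hunk in this file keeps its _state closure: a freshly
registered image is only reachable by id through the images client, so
there is no boto object with update() to hand to the wait helper. A toy
illustration of that id-based polling; FakeImagesClient and the names
below are made up:

    class FakeImage(object):
        state = "available"

    class FakeImagesClient(object):
        def get_image(self, image_id):
            return FakeImage()

    images_client = FakeImagesClient()
    image_id = "ami-12345678"

    def _state():
        retr = images_client.get_image(image_id)  # re-fetch by id each poll
        return retr.state

    assert _state() == "available"
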
diff --git a/tempest/tests/compute/limits/test_absolute_limits.py b/tempest/tests/compute/limits/test_absolute_limits.py
index d520b92..4b17af9 100644
--- a/tempest/tests/compute/limits/test_absolute_limits.py
+++ b/tempest/tests/compute/limits/test_absolute_limits.py
@@ -26,22 +26,18 @@
def setUpClass(cls):
cls.client = cls.limits_client
- @testtools.skip("Skipped until the Bug #1025294 is resolved")
def test_absLimits_get(self):
# To check if all limits are present in the response
resp, absolute_limits = self.client.get_absolute_limits()
expected_elements = ['maxImageMeta', 'maxPersonality',
'maxPersonalitySize',
- 'maxPersonalityFilePathSize',
'maxServerMeta', 'maxTotalCores',
'maxTotalFloatingIps', 'maxSecurityGroups',
'maxSecurityGroupRules', 'maxTotalInstances',
'maxTotalKeypairs', 'maxTotalRAMSize',
- 'maxTotalVolumeGigabytes', 'maxTotalVolumes',
'totalCoresUsed', 'totalFloatingIpsUsed',
'totalSecurityGroupsUsed', 'totalInstancesUsed',
- 'totalKeyPairsUsed', 'totalRAMUsed',
- 'totalVolumeGigabytesUsed', 'totalVolumesUsed']
+ 'totalRAMUsed']
# check whether all expected elements exist
missing_elements =\
[ele for ele in expected_elements if ele not in absolute_limits]
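
On the nova side, the un-skipped limits test now expects only the keys the
API still reports; the volume-related and personality-file-path entries
were dropped from the list, presumably because Nova no longer returns
them. The final membership check boils down to this, with data made up
for illustration:

    expected_elements = ['maxImageMeta', 'maxTotalCores', 'totalRAMUsed']
    absolute_limits = {'maxImageMeta': 128, 'maxTotalCores': 20,
                       'totalRAMUsed': 2048}
    missing_elements = \
        [ele for ele in expected_elements if ele not in absolute_limits]
    assert missing_elements == [], "Failed to find %s" % missing_elements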