Merge "T4xx fixes"
diff --git a/run_tests.sh b/run_tests.sh
index 3a2bd94..968f9a3 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -8,6 +8,7 @@
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+ echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
echo " -w, --whitebox Only run whitebox tests"
echo " -c, --nova-coverage Enable Nova coverage collection"
@@ -30,6 +31,7 @@
wrapper=""
nova_coverage=0
config_file=""
+update=0
if ! options=$(getopt -o VNnfswcphdsC: -l virtual-env,no-virtual-env,no-site-packages,force,smoke,whitebox,nova-coverage,pep8,help,debug,stdout,config: -- "$@")
then
@@ -47,6 +49,7 @@
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-n|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
+ -u|--update) update=1;;
-d|--debug) set -o xtrace;;
-c|--nova-coverage) let nova_coverage=1;;
-C|--config) config_file=$2; shift;;
@@ -121,6 +124,10 @@
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
+ if [ $update -eq 1 ]; then
+ echo "Updating virtualenv..."
+ python tools/install_venv.py $installvenvopts
+ fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
diff --git a/tempest/testboto.py b/tempest/testboto.py
index f5f5d8b..30c7e93 100644
--- a/tempest/testboto.py
+++ b/tempest/testboto.py
@@ -20,13 +20,13 @@
import re
import boto
-from boto.exception import BotoServerError
-from boto.exception import EC2ResponseError
-from boto.s3.bucket import Bucket
+from boto import ec2
+from boto import exception
+from boto import s3
import testresources
import testtools
-from tempest.exceptions import TearDownException
+from tempest import exceptions
import tempest.tests.boto
from tempest.tests.boto.utils.wait import re_search_wait
from tempest.tests.boto.utils.wait import state_wait
@@ -40,7 +40,7 @@
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
- if not isinstance(exc, BotoServerError):
+ if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
if re.match(self.STATUS_RE, str(exc.status)) is None:
@@ -166,7 +166,7 @@
"""
try:
callableObj(*args, **kwargs)
- except BotoServerError as exc:
+ except exception.BotoServerError as exc:
error_msg = excMatcher.match(exc)
if error_msg is not None:
raise self.failureException, error_msg
@@ -193,7 +193,7 @@
finally:
del cls._resource_trash_bin[key]
if fail_count:
- raise TearDownException(num=fail_count)
+ raise exceptions.TearDownException(num=fail_count)
ec2_error_code = BotoExceptionMatcher()
# InsufficientInstanceCapacity can be both server and client error
@@ -213,15 +213,50 @@
gone_set = set(('_GONE',))
+ @classmethod
+ def get_lfunction_gone(cls, obj):
+ """ If the object is an instance of a well-known type, returns
+ the corresponding function; otherwise it assumes the obj itself
+ is the function"""
+ ec = cls.ec2_error_code
+ if isinstance(obj, ec2.instance.Instance):
+ colusure_matcher = ec.client.InvalidInstanceID.NotFound
+ status_attr = "state"
+ elif isinstance(obj, ec2.image.Image):
+ colusure_matcher = ec.client.InvalidAMIID.NotFound
+ status_attr = "state"
+ elif isinstance(obj, ec2.snapshot.Snapshot):
+ colusure_matcher = ec.client.InvalidSnapshot.NotFound
+ status_attr = "status"
+ elif isinstance(obj, ec2.volume.Volume):
+ colusure_matcher = ec.client.InvalidVolume.NotFound
+ status_attr = "status"
+ else:
+ return obj
+
+ def _status():
+ try:
+ obj.update(validate=True)
+ except ValueError:
+ return "_GONE"
+ except exception.EC2ResponseError as exc:
+ if colusure_matcher.match(exc):
+ return "_GONE"
+ else:
+ raise
+ return getattr(obj, status_attr)
+
+ return _status
+
def state_wait_gone(self, lfunction, final_set, valid_set):
if not isinstance(final_set, set):
final_set = set((final_set,))
final_set |= self.gone_set
+ lfunction = self.get_lfunction_gone(lfunction)
state = state_wait(lfunction, final_set, valid_set)
self.assertIn(state, valid_set | self.gone_set)
return state
- #TODO(afazekas): object based versions for resurces which supports update
def waitImageState(self, lfunction, wait_for):
return self.state_wait_gone(lfunction, wait_for,
self.valid_image_state)
@@ -230,14 +265,14 @@
return self.state_wait_gone(lfunction, wait_for,
self.valid_instance_state)
- def waitVolumeStatus(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_volume_status)
-
def waitSnapshotStatus(self, lfunction, wait_for):
return self.state_wait_gone(lfunction, wait_for,
self.valid_snapshot_status)
+ def waitVolumeStatus(self, lfunction, wait_for):
+ return self.state_wait_gone(lfunction, wait_for,
+ self.valid_volume_status)
+
def assertImageStateWait(self, lfunction, wait_for):
state = self.waitImageState(lfunction, wait_for)
self.assertIn(state, wait_for)
@@ -313,7 +348,7 @@
with closing(boto.connect_s3(**connection_data)) as conn:
if isinstance(bucket, basestring):
bucket = conn.lookup(bucket)
- assert isinstance(bucket, Bucket)
+ assert isinstance(bucket, s3.bucket.Bucket)
for obj in bucket.list():
try:
bucket.delete_key(obj.key)
@@ -326,7 +361,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
@classmethod
def destroy_reservation(cls, reservation):
@@ -338,7 +373,7 @@
instance.update(validate=True)
except ValueError:
return "_GONE"
- except EC2ResponseError as exc:
+ except exception.EC2ResponseError as exc:
if cls.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
return "_GONE"
@@ -357,7 +392,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
#NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
# to write better teardown
@@ -400,7 +435,7 @@
LOG.exception(exc)
exc_num += 1
if exc_num:
- raise TearDownException(num=exc_num)
+ raise exceptions.TearDownException(num=exc_num)
@classmethod
def destroy_snapshot_wait(cls, snapshot):
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 403ec4c..b0a7fcd 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -98,20 +98,16 @@
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
- def _state():
- instance.update(validate=True)
- return instance.state
-
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
- self.assertInstanceStateWait(_state, "running")
+ self.assertInstanceStateWait(instance, "running")
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
- self.assertInstanceStateWait(_state, "stopped")
+ self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
@@ -174,18 +170,9 @@
volume = self.ec2_client.create_volume(1, self.zone)
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
-
- def _instance_state():
- instance.update(validate=True)
- return instance.state
-
- def _volume_state():
- volume.update(validate=True)
- return volume.status
-
LOG.info("state: %s", instance.state)
if instance.state != "running":
- self.assertInstanceStateWait(_instance_state, "running")
+ self.assertInstanceStateWait(instance, "running")
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
@@ -194,7 +181,7 @@
rcuk_da = self.addResourceCleanUp(address.disassociate)
#TODO(afazekas): ping test. dependecy/permission ?
- self.assertVolumeStatusWait(_volume_state, "available")
+ self.assertVolumeStatusWait(volume, "available")
#NOTE(afazekas): it may be reports availble before it is available
ssh = RemoteClient(address.public_ip,
@@ -213,6 +200,10 @@
# "attaching" invalid EC2 state ! #1074901
volume.attach(instance.id, "/dev/vdh")
+ def _volume_state():
+ volume.update(validate=True)
+ return volume.status
+
#self.assertVolumeStatusWait(_volume_state, "in-use") # #1074901
re_search_wait(_volume_state, "in-use")
@@ -251,7 +242,7 @@
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
- self.assertInstanceStateWait(_instance_state, "stopped")
+ self.assertInstanceStateWait(instance, "stopped")
#TODO(afazekas): move steps from teardown to the test case
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index aa2325f..dc8ff31 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -49,12 +49,7 @@
retrieved = self.client.get_all_volumes((volume.id,))
self.assertEqual(1, len(retrieved))
self.assertTrue(compare_volumes(volume, retrieved[0]))
-
- def _status():
- volume.update(validate=True)
- return volume.status
-
- self.assertVolumeStatusWait(_status, "available")
+ self.assertVolumeStatusWait(volume, "available")
self.client.delete_volume(volume.id)
self.cancelResourceCleanUp(cuk)
@@ -63,28 +58,13 @@
# EC2 Create volume from snapshot
volume = self.client.create_volume(1, self.zone)
self.addResourceCleanUp(self.client.delete_volume, volume.id)
-
- def _status():
- volume.update(validate=True)
- return volume.status
-
- self.assertVolumeStatusWait(_status, "available")
+ self.assertVolumeStatusWait(volume, "available")
snap = self.client.create_snapshot(volume.id)
self.addResourceCleanUp(self.destroy_snapshot_wait, snap)
-
- def _snap_status():
- snap.update(validate=True)
- return snap.status
-
- self.assertSnapshotStatusWait(_snap_status, "completed")
+ self.assertSnapshotStatusWait(snap, "completed")
svol = self.client.create_volume(1, self.zone, snapshot=snap)
cuk = self.addResourceCleanUp(svol.delete)
-
- def _snap_vol_status():
- svol.update(validate=True)
- return svol.status
-
- self.assertVolumeStatusWait(_snap_vol_status, "available")
+ self.assertVolumeStatusWait(svol, "available")
svol.delete()
self.cancelResourceCleanUp(cuk)
diff --git a/tempest/tests/boto/test_s3_ec2_images.py b/tempest/tests/boto/test_s3_ec2_images.py
index 8913395..0f7628b 100644
--- a/tempest/tests/boto/test_s3_ec2_images.py
+++ b/tempest/tests/boto/test_s3_ec2_images.py
@@ -74,8 +74,7 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
- state = retrieved_image.state
- if state != "available":
+ if retrieved_image.state != "available":
def _state():
retr = self.images_client.get_image(image["image_id"])
return retr.state
@@ -103,10 +102,7 @@
self.assertTrue(retrieved_image.id == image["image_id"])
self.assertIn(retrieved_image.state, self.valid_image_state)
if retrieved_image.state != "available":
- def _state():
- retr = self.images_client.get_image(image["image_id"])
- return retr.state
- self.assertImageStateWait(_state, "available")
+ self.assertImageStateWait(retrieved_image, "available")
self.images_client.deregister_image(image["image_id"])
#TODO(afazekas): verify deregister in a better way
retrieved_image = self.images_client.get_image(image["image_id"])
@@ -129,10 +125,7 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertIn(retrieved_image.state, self.valid_image_state)
if retrieved_image.state != "available":
- def _state():
- retr = self.images_client.get_image(image["image_id"])
- return retr.state
- self.assertImageStateWait(_state, "available")
+ self.assertImageStateWait(retrieved_image, "available")
self.assertIn(retrieved_image.state, self.valid_image_state)
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
diff --git a/tempest/tests/compute/__init__.py b/tempest/tests/compute/__init__.py
index a3c6380..36893e3 100644
--- a/tempest/tests/compute/__init__.py
+++ b/tempest/tests/compute/__init__.py
@@ -21,7 +21,6 @@
from tempest import clients
from tempest import config
from tempest.exceptions import InvalidConfiguration
-from testresources import TestResourceManager
LOG = logging.getLogger(__name__)
@@ -79,8 +78,3 @@
"tenant or password: alt_tenant_name=%s alt_password=%s"
% (user2_tenant_name, user2_password))
raise InvalidConfiguration(msg)
-
-
-class ComputeResource(TestResourceManager):
- def make(self, dependency_resources=None):
- return generic_setup_package()
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 2aae89c..2c6b861 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -38,7 +38,7 @@
testresources.ResourcedTestCase):
"""Base test case class for all Compute API tests."""
- resources = [('compute_init', compute.ComputeResource())]
+ conclusion = compute.generic_setup_package()
@classmethod
def setUpClass(cls):
diff --git a/tempest/tests/compute/limits/test_absolute_limits.py b/tempest/tests/compute/limits/test_absolute_limits.py
index d520b92..4b17af9 100644
--- a/tempest/tests/compute/limits/test_absolute_limits.py
+++ b/tempest/tests/compute/limits/test_absolute_limits.py
@@ -26,22 +26,18 @@
def setUpClass(cls):
cls.client = cls.limits_client
- @testtools.skip("Skipped until the Bug #1025294 is resolved")
def test_absLimits_get(self):
# To check if all limits are present in the response
resp, absolute_limits = self.client.get_absolute_limits()
expected_elements = ['maxImageMeta', 'maxPersonality',
'maxPersonalitySize',
- 'maxPersonalityFilePathSize',
'maxServerMeta', 'maxTotalCores',
'maxTotalFloatingIps', 'maxSecurityGroups',
'maxSecurityGroupRules', 'maxTotalInstances',
'maxTotalKeypairs', 'maxTotalRAMSize',
- 'maxTotalVolumeGigabytes', 'maxTotalVolumes',
'totalCoresUsed', 'totalFloatingIpsUsed',
'totalSecurityGroupsUsed', 'totalInstancesUsed',
- 'totalKeyPairsUsed', 'totalRAMUsed',
- 'totalVolumeGigabytesUsed', 'totalVolumesUsed']
+ 'totalRAMUsed']
# check whether all expected elements exist
missing_elements =\
[ele for ele in expected_elements if ele not in absolute_limits]
diff --git a/tempest/tests/compute/servers/test_create_server.py b/tempest/tests/compute/servers/test_create_server.py
index 54f1131..838b382 100644
--- a/tempest/tests/compute/servers/test_create_server.py
+++ b/tempest/tests/compute/servers/test_create_server.py
@@ -17,6 +17,7 @@
import base64
+import netaddr
import testtools
@@ -36,10 +37,7 @@
def setUpClass(cls):
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '::babe:220.12.22.2'
- # See: http://tools.ietf.org/html/rfc5952 (section 4)
- # This is the canonicalized form of the above.
- cls.accessIPv6canon = '::babe:dc0c:1602'
+ cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = rand_name('server')
file_contents = 'This is a test file.'
personality = [{'path': '/test.txt',
@@ -73,8 +71,10 @@
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
- self.assertIn(self.server['accessIPv6'],
- [self.accessIPv6, self.accessIPv6canon])
+ # NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
+ # Here we compare directly with the canonicalized format.
+ self.assertEqual(self.server['accessIPv6'],
+ str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(str(self.flavor_ref), self.server['flavor']['id'])