Read template's expected values in orchestration tests: Part-1
In tests under tempest/api/orchestration/, resources get
created based on parameters defined in templates (yaml, json).
But during verification of the output, expected values are
hard-coded in the tests. Expected values should be read from the
template instead.
This patch adds a helper to load the values from the template
(yaml, json) and applies the changes in
tempest/api/orchestration/stacks/test_volumes.py
This patch also renames the existing load_template() function,
since it reads the template, while the newly added function loads
the template. It renames load_template() to read_template() in
tempest/api/orchestration/base.py and in the corresponding calling files.
Partial-Bug: #1327919
Change-Id: Ie0817e20cb7bacdf33b16fb222a7ee9c91109fc8
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 446f4ab..f83169f 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -11,6 +11,7 @@
# under the License.
import os.path
+import yaml
from tempest import clients
from tempest.common.utils import data_utils
@@ -125,7 +126,7 @@
pass
@classmethod
- def load_template(cls, name, ext='yaml'):
+ def read_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
@@ -134,6 +135,14 @@
return content
@classmethod
+ def load_template(cls, name, ext='yaml'):
+ loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+ fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+ with open(fullpath, "r") as f:
+ return yaml.safe_load(f)
+
+ @classmethod
def tearDownClass(cls):
cls._clear_stacks()
cls._clear_keypairs()
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 3911e72..bc46901 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -28,7 +28,7 @@
def test_environment_parameter(self):
"""Test passing a stack parameter via the environment."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('random_string')
+ template = self.read_template('random_string')
environment = {'parameters': {'random_length': 20}}
stack_identifier = self.create_stack(stack_name, template,
@@ -56,7 +56,7 @@
'''
environment = {'resource_registry':
{'My:Random::String': 'my_random.yaml'}}
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
environment=environment,
@@ -82,7 +82,7 @@
random_value:
value: {get_attr: [random, random_value]}
'''
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
files=files)
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index e92b945..27c6196 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -39,7 +39,7 @@
raise cls.skipException("Neutron support is required")
cls.network_client = os.network_client
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('neutron_basic')
+ template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 585c90b..a97c561 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('non_empty_stack')
+ template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
# create the stack
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index a81a540..336fc99 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('nova_keypair', ext=cls._tpl_type)
+ template = cls.read_template('nova_keypair', ext=cls._tpl_type)
# create the stack, avoid any duplicated key.
cls.stack_identifier = cls.create_stack(
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 6d53fb2..2ba2811 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -30,7 +30,7 @@
def setUpClass(cls):
super(SwiftResourcesTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('swift_basic')
+ template = cls.read_template('swift_basic')
os = clients.Manager()
if not CONF.service_available.swift:
raise cls.skipException("Swift support is required")
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index 5ac2a8d..d422752 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -31,43 +31,44 @@
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
- def _cinder_verify(self, volume_id):
+ def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
resp, volume = self.volumes_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
self.assertEqual('available', volume.get('status'))
- self.assertEqual(1, volume.get('size'))
- self.assertEqual('a descriptive description',
- volume.get('display_description'))
- self.assertEqual('volume_name',
- volume.get('display_name'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'size'], volume.get('size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], volume.get('display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], volume.get('display_name'))
- def _outputs_verify(self, stack_identifier):
+ def _outputs_verify(self, stack_identifier, template):
self.assertEqual('available',
self.get_stack_output(stack_identifier, 'status'))
- self.assertEqual('1',
- self.get_stack_output(stack_identifier, 'size'))
- self.assertEqual('a descriptive description',
- self.get_stack_output(stack_identifier,
- 'display_description'))
- self.assertEqual('volume_name',
- self.get_stack_output(stack_identifier,
- 'display_name'))
+ self.assertEqual(str(template['resources']['volume']['properties'][
+ 'size']), self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], self.get_stack_output(stack_identifier,
+ 'display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic')
+ template = self.read_template('cinder_basic')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
- self._cinder_verify(volume_id)
+ cinder_basic_template = self.load_template('cinder_basic')
+ self._cinder_verify(volume_id, cinder_basic_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, cinder_basic_template)
# Delete the stack and ensure the volume is gone
self.client.delete_stack(stack_identifier)
@@ -86,21 +87,22 @@
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic_delete_retain')
+ template = self.read_template('cinder_basic_delete_retain')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
self.addCleanup(self._cleanup_volume, volume_id)
- self._cinder_verify(volume_id)
+ retain_template = self.load_template('cinder_basic_delete_retain')
+ self._cinder_verify(volume_id, retain_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, retain_template)
# Delete the stack and ensure the volume is *not* gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
- self._cinder_verify(volume_id)
+ self._cinder_verify(volume_id, retain_template)
# Volume cleanup happens via addCleanup calling _cleanup_volume