Initial move to new repo with cleanup
diff --git a/heat_tempest_plugin/__init__.py b/heat_tempest_plugin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/__init__.py
diff --git a/heat_tempest_plugin/common/__init__.py b/heat_tempest_plugin/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/common/__init__.py
diff --git a/heat_tempest_plugin/common/exceptions.py b/heat_tempest_plugin/common/exceptions.py
new file mode 100644
index 0000000..b092fd0
--- /dev/null
+++ b/heat_tempest_plugin/common/exceptions.py
@@ -0,0 +1,78 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+class IntegrationException(Exception):
+    """Base Tempest Exception.
+
+    To use this class correctly, inherit from it and define
+    a 'message' class attribute. That message will be interpolated
+    with the keyword arguments provided to the constructor.
+    """
+    message = "An unknown exception occurred"
+
+    def __init__(self, *args, **kwargs):
+        super(IntegrationException, self).__init__()
+        try:
+            self._error_string = self.message % kwargs
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+        if len(args) > 0:
+            # If there is a non-kwarg parameter, assume it's the error
+            # message or reason description and tack it on to the end
+            # of the exception message
+            # Convert all arguments into their string representations...
+            args = ["%s" % arg for arg in args]
+            self._error_string = (self._error_string +
+                                  "\nDetails: %s" % '\n'.join(args))
+
+    def __str__(self):
+        return self._error_string
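+
+    # Example: subclasses format ``message`` with the constructor's keyword
+    # arguments, so e.g. BuildErrorException(server_id='1234') renders as
+    # "Server 1234 failed to build and is in ERROR status".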
+
+
+class InvalidCredentials(IntegrationException):
+    message = "Invalid Credentials"
+
+
+class TimeoutException(IntegrationException):
+    message = "Request timed out"
+
+
+class BuildErrorException(IntegrationException):
+    message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+class StackBuildErrorException(IntegrationException):
+    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+               "due to '%(stack_status_reason)s'")
+
+
+class StackResourceBuildErrorException(IntegrationException):
+    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
+               "in %(resource_status)s status due to "
+               "'%(resource_status_reason)s'")
+
+
+class SSHTimeout(IntegrationException):
+    message = ("Connection to the %(host)s via SSH timed out.\n"
+               "User: %(user)s, Password: %(password)s")
+
+
+class SSHExecCommandFailed(IntegrationException):
+    """Raised when remotely executed command returns nonzero status."""
+    message = ("Command '%(command)s', exit status: %(exit_status)d, "
+               "Error:\n%(strerror)s")
+
+
+class ServerUnreachable(IntegrationException):
+    message = "The server is not reachable via the configured network"
diff --git a/heat_tempest_plugin/common/remote_client.py b/heat_tempest_plugin/common/remote_client.py
new file mode 100644
index 0000000..f23622b
--- /dev/null
+++ b/heat_tempest_plugin/common/remote_client.py
@@ -0,0 +1,202 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+import select
+import socket
+import time
+
+from oslo_log import log as logging
+import paramiko
+import six
+
+from heat_tempest_plugin.common import exceptions
+
+LOG = logging.getLogger(__name__)
+
+
+class Client(object):
+
+    def __init__(self, host, username, password=None, timeout=300, pkey=None,
+                 channel_timeout=10, look_for_keys=False, key_filename=None):
+        self.host = host
+        self.username = username
+        self.password = password
+        if isinstance(pkey, six.string_types):
+            pkey = paramiko.RSAKey.from_private_key(
+                six.moves.cStringIO(str(pkey)))
+        self.pkey = pkey
+        self.look_for_keys = look_for_keys
+        self.key_filename = key_filename
+        self.timeout = int(timeout)
+        self.channel_timeout = float(channel_timeout)
+        self.buf_size = 1024
+
+    def _get_ssh_connection(self, sleep=1.5, backoff=1):
+        """Returns an ssh connection to the specified host."""
+        bsleep = sleep
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(
+            paramiko.AutoAddPolicy())
+        _start_time = time.time()
+        if self.pkey is not None:
+            LOG.info("Creating ssh connection to '%s' as '%s'"
+                     " with public key authentication",
+                     self.host, self.username)
+        else:
+            LOG.info("Creating ssh connection to '%s' as '%s'"
+                     " with password %s",
+                     self.host, self.username, str(self.password))
+        attempts = 0
+        while True:
+            try:
+                ssh.connect(self.host, username=self.username,
+                            password=self.password,
+                            look_for_keys=self.look_for_keys,
+                            key_filename=self.key_filename,
+                            timeout=self.channel_timeout, pkey=self.pkey)
+                LOG.info("ssh connection to %s@%s successfuly created",
+                         self.username, self.host)
+                return ssh
+            except (socket.error,
+                    paramiko.SSHException) as e:
+                if self._is_timed_out(_start_time):
+                    LOG.exception("Failed to establish authenticated ssh"
+                                  " connection to %s@%s after %d attempts",
+                                  self.username, self.host, attempts)
+                    raise exceptions.SSHTimeout(host=self.host,
+                                                user=self.username,
+                                                password=self.password)
+                bsleep += backoff
+                attempts += 1
+                LOG.warning("Failed to establish authenticated ssh"
+                            " connection to %s@%s (%s). Number attempts: %s."
+                            " Retry after %d seconds.",
+                            self.username, self.host, e, attempts, bsleep)
+                time.sleep(bsleep)
+
+    def _is_timed_out(self, start_time):
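+        # Equivalent to: (time.time() - start_time) > self.timeout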
+        return (time.time() - self.timeout) > start_time
+
+    def exec_command(self, cmd):
+        """Execute the specified command on the server.
+
+        Note that this method reads the whole command output into memory,
+        so it shouldn't be used for large outputs.
+
+        :returns: data read from standard output of the command.
+        :raises: SSHExecCommandFailed if the command returns a nonzero
+                 status. The exception contains the exit status and stderr.
+        """
+        ssh = self._get_ssh_connection()
+        transport = ssh.get_transport()
+        channel = transport.open_session()
+        channel.fileno()  # Register event pipe
+        channel.exec_command(cmd)
+        channel.shutdown_write()
+        out_data = []
+        err_data = []
+        poll = select.poll()
+        poll.register(channel, select.POLLIN)
+        start_time = time.time()
+
+        while True:
+            ready = poll.poll(self.channel_timeout)
+            if not any(ready):
+                if not self._is_timed_out(start_time):
+                    continue
+                raise exceptions.TimeoutException(
+                    "Command: '{0}' executed on host '{1}'.".format(
+                        cmd, self.host))
+            if not ready[0]:  # If there is nothing to read.
+                continue
+            out_chunk = err_chunk = None
+            if channel.recv_ready():
+                out_chunk = channel.recv(self.buf_size)
+                out_data += out_chunk,
+            if channel.recv_stderr_ready():
+                err_chunk = channel.recv_stderr(self.buf_size)
+                err_data += err_chunk,
+            if channel.closed and not err_chunk and not out_chunk:
+                break
+        exit_status = channel.recv_exit_status()
+        if 0 != exit_status:
+            raise exceptions.SSHExecCommandFailed(
+                command=cmd, exit_status=exit_status,
+                strerror=''.join(err_data))
+        return ''.join(out_data)
+
+    def test_connection_auth(self):
+        """Raises an exception when we can not connect to server via ssh."""
+        connection = self._get_ssh_connection()
+        connection.close()
+
+
+class RemoteClient(object):
+
+    # NOTE(afazekas): It should always get an address instead of server
+    def __init__(self, server, username, password=None, pkey=None,
+                 conf=None):
+        self.conf = conf
+        ssh_timeout = self.conf.ssh_timeout
+        network = self.conf.network_for_ssh
+        ip_version = self.conf.ip_version_for_ssh
+        ssh_channel_timeout = self.conf.ssh_channel_timeout
+        if isinstance(server, six.string_types):
+            ip_address = server
+        else:
+            addresses = server['addresses'][network]
+            for address in addresses:
+                if address['version'] == ip_version:
+                    ip_address = address['addr']
+                    break
+            else:
+                raise exceptions.ServerUnreachable()
+        self.ssh_client = Client(ip_address, username, password,
+                                 ssh_timeout, pkey=pkey,
+                                 channel_timeout=ssh_channel_timeout)
+
+    def exec_command(self, cmd):
+        return self.ssh_client.exec_command(cmd)
+
+    def validate_authentication(self):
+        """Validate ssh connection and authentication.
+
+        This method raises an Exception when the validation fails.
+        """
+        self.ssh_client.test_connection_auth()
+
+    def get_partitions(self):
+        # Return the contents of /proc/partitions
+        command = 'cat /proc/partitions'
+        output = self.exec_command(command)
+        return output
+
+    def get_boot_time(self):
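+        # The first field of /proc/uptime is the number of seconds since boot.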
+        cmd = 'cut -f1 -d. /proc/uptime'
+        boot_secs = self.exec_command(cmd)
+        boot_time = time.time() - int(boot_secs)
+        return time.localtime(boot_time)
+
+    def write_to_console(self, message):
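+        # Escape $ and ` so they survive the nested shell quoting below.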
+        message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
+        # usually to /dev/ttyS0
+        cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
+        return self.exec_command(cmd)
+
+    def ping_host(self, host):
+        cmd = 'ping -c1 -w1 %s' % host
+        return self.exec_command(cmd)
+
+    def get_ip_list(self):
+        cmd = "/bin/ip address"
+        return self.exec_command(cmd)
diff --git a/heat_tempest_plugin/common/test.py b/heat_tempest_plugin/common/test.py
new file mode 100644
index 0000000..64fd1a0
--- /dev/null
+++ b/heat_tempest_plugin/common/test.py
@@ -0,0 +1,706 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import random
+import re
+import subprocess
+import time
+
+import fixtures
+from heatclient import exc as heat_exceptions
+from keystoneauth1 import exceptions as kc_exceptions
+from neutronclient.common import exceptions as network_exceptions
+from oslo_log import log as logging
+from oslo_utils import timeutils
+import six
+from six.moves import urllib
+import testscenarios
+import testtools
+
+from heat_tempest_plugin.common import exceptions
+from heat_tempest_plugin.common import remote_client
+from heat_tempest_plugin import config
+from heat_tempest_plugin.services import clients
+
+LOG = logging.getLogger(__name__)
+_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
+
+
+def call_until_true(duration, sleep_for, func, *args, **kwargs):
+    """Call the function until it returns True or the duration elapsed.
+
+    Call the given function until it returns True (and return True) or
+    until the specified duration (in seconds) elapses (and return
+    False).
+
+    :param func: A zero argument callable that returns True on success.
+    :param duration: The number of seconds for which to attempt a
+        successful call of the function.
+    :param sleep_for: The number of seconds to sleep after an unsuccessful
+                      invocation of the function.
+    """
+    now = time.time()
+    timeout = now + duration
+    while now < timeout:
+        if func(*args, **kwargs):
+            return True
+        LOG.debug("Sleeping for %d seconds", sleep_for)
+        time.sleep(sleep_for)
+        now = time.time()
+    return False
+
+
+def rand_name(name=''):
+    randbits = six.text_type(random.randint(1, 0x7fffffff))
+    if name:
+        return name + '-' + randbits
+    else:
+        return randbits
+
+
+def requires_convergence(test_method):
+    '''Decorator for convergence-only tests.
+
+    The decorated test will be skipped when convergence is disabled.
+    '''
+    convergence_enabled = (
+        config.CONF.orchestration_plugin.convergence_engine_enabled)
+    skipper = testtools.skipUnless(convergence_enabled,
+                                   "Convergence-only tests are disabled")
+    return skipper(test_method)
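+
+# Example (with a hypothetical test method):
+#
+#     @requires_convergence
+#     def test_stack_update_replace(self):
+#         ...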
+
+
+class HeatIntegrationTest(testscenarios.WithScenarios,
+                          testtools.TestCase):
+
+    def setUp(self):
+        super(HeatIntegrationTest, self).setUp()
+
+        self.conf = config.CONF.orchestration_plugin
+
+        self.assertIsNotNone(self.conf.auth_url,
+                             'No auth_url configured')
+        self.assertIsNotNone(self.conf.username,
+                             'No username configured')
+        self.assertIsNotNone(self.conf.password,
+                             'No password configured')
+        self.setup_clients(self.conf)
+        self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
+        self.updated_time = {}
+        if self.conf.disable_ssl_certificate_validation:
+            self.verify_cert = False
+        else:
+            self.verify_cert = self.conf.ca_file or True
+
+    def setup_clients(self, conf, admin_credentials=False):
+        self.manager = clients.ClientManager(conf, admin_credentials)
+        self.identity_client = self.manager.identity_client
+        self.orchestration_client = self.manager.orchestration_client
+        self.compute_client = self.manager.compute_client
+        self.network_client = self.manager.network_client
+        self.volume_client = self.manager.volume_client
+        self.object_client = self.manager.object_client
+        self.metering_client = self.manager.metering_client
+
+        self.client = self.orchestration_client
+
+    def setup_clients_for_admin(self):
+        self.setup_clients(self.conf, True)
+
+    def get_remote_client(self, server_or_ip, username, private_key=None):
+        if isinstance(server_or_ip, six.string_types):
+            ip = server_or_ip
+        else:
+            network_name_for_ssh = self.conf.network_for_ssh
+            ip = server_or_ip.networks[network_name_for_ssh][0]
+        if private_key is None:
+            private_key = self.keypair.private_key
+        linux_client = remote_client.RemoteClient(ip, username,
+                                                  pkey=private_key,
+                                                  conf=self.conf)
+        try:
+            linux_client.validate_authentication()
+        except exceptions.SSHTimeout:
+            LOG.exception('ssh connection to %s failed', ip)
+            raise
+
+        return linux_client
+
+    def check_connectivity(self, check_ip):
+        def try_connect(ip):
+            try:
+                urllib.request.urlopen('http://%s/' % ip)
+                return True
+            except IOError:
+                return False
+
+        timeout = self.conf.connectivity_timeout
+        elapsed_time = 0
+        while not try_connect(check_ip):
+            time.sleep(10)
+            elapsed_time += 10
+            if elapsed_time > timeout:
+                raise exceptions.TimeoutException()
+
+    def _log_console_output(self, servers=None):
+        if not servers:
+            servers = self.compute_client.servers.list()
+        for server in servers:
+            LOG.info('Console output for %s', server.id)
+            LOG.info(server.get_console_output())
+
+    def _load_template(self, base_file, file_name, sub_dir=None):
+        sub_dir = sub_dir or ''
+        filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
+                                sub_dir, file_name)
+        with open(filepath) as f:
+            return f.read()
+
+    def create_keypair(self, client=None, name=None):
+        if client is None:
+            client = self.compute_client
+        if name is None:
+            name = rand_name('heat-keypair')
+        keypair = client.keypairs.create(name)
+        self.assertEqual(keypair.name, name)
+
+        def delete_keypair():
+            keypair.delete()
+
+        self.addCleanup(delete_keypair)
+        return keypair
+
+    def assign_keypair(self):
+        if self.conf.keypair_name:
+            self.keypair = None
+            self.keypair_name = self.conf.keypair_name
+        else:
+            self.keypair = self.create_keypair()
+            self.keypair_name = self.keypair.id
+
+    @classmethod
+    def _stack_rand_name(cls):
+        return rand_name(cls.__name__)
+
+    def _get_network(self, net_name=None):
+        if net_name is None:
+            net_name = self.conf.fixed_network_name
+        networks = self.network_client.list_networks()
+        for net in networks['networks']:
+            if net['name'] == net_name:
+                return net
+
+    def is_network_extension_supported(self, extension_alias):
+        try:
+            self.network_client.show_extension(extension_alias)
+        except network_exceptions.NeutronClientException:
+            return False
+        return True
+
+    def is_service_available(self, service_type):
+        try:
+            self.identity_client.get_endpoint_url(
+                service_type, self.conf.region)
+        except kc_exceptions.EndpointNotFound:
+            return False
+        else:
+            return True
+
+    @staticmethod
+    def _stack_output(stack, output_key, validate_errors=True):
+        """Return a stack output value for a given key."""
+        value = None
+        for o in stack.outputs:
+            if validate_errors and 'output_error' in o:
+                # scan for errors in the stack output.
+                raise ValueError(
+                    'Unexpected output errors in %s : %s' % (
+                        output_key, o['output_error']))
+            if o['output_key'] == output_key:
+                value = o['output_value']
+        return value
+
+    def _ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.wait()
+            return (proc.returncode == 0) == should_succeed
+
+        return call_until_true(
+            self.conf.build_timeout, 1, ping)
+
+    def _wait_for_all_resource_status(self, stack_identifier,
+                                      status, failure_pattern='^.*_FAILED$',
+                                      success_on_not_found=False):
+        for res in self.client.resources.list(stack_identifier):
+            self._wait_for_resource_status(
+                stack_identifier, res.resource_name,
+                status, failure_pattern=failure_pattern,
+                success_on_not_found=success_on_not_found)
+
+    def _wait_for_resource_status(self, stack_identifier, resource_name,
+                                  status, failure_pattern='^.*_FAILED$',
+                                  success_on_not_found=False):
+        """Waits for a Resource to reach a given status."""
+        fail_regexp = re.compile(failure_pattern)
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                res = self.client.resources.get(
+                    stack_identifier, resource_name)
+            except heat_exceptions.HTTPNotFound:
+                if success_on_not_found:
+                    return
+                # ignore this, as the resource may not have
+                # been created yet
+            else:
+                if res.resource_status == status:
+                    return
+                wait_for_action = status.split('_')[0]
+                resource_action = res.resource_status.split('_')[0]
+                if (resource_action == wait_for_action and
+                        fail_regexp.search(res.resource_status)):
+                    raise exceptions.StackResourceBuildErrorException(
+                        resource_name=res.resource_name,
+                        stack_identifier=stack_identifier,
+                        resource_status=res.resource_status,
+                        resource_status_reason=res.resource_status_reason)
+            time.sleep(build_interval)
+
+        message = ('Resource %s failed to reach %s status within '
+                   'the required time (%s s).' %
+                   (resource_name, status, build_timeout))
+        raise exceptions.TimeoutException(message)
+
+    def verify_resource_status(self, stack_identifier, resource_name,
+                               status='CREATE_COMPLETE'):
+        try:
+            res = self.client.resources.get(stack_identifier, resource_name)
+        except heat_exceptions.HTTPNotFound:
+            return False
+        return res.resource_status == status
+
+    def _verify_status(self, stack, stack_identifier, status, fail_regexp):
+        if stack.stack_status == status:
+            # Handle UPDATE_COMPLETE/FAILED case: Make sure we don't
+            # wait for a stale UPDATE_COMPLETE/FAILED status.
+            if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
+                if self.updated_time.get(
+                        stack_identifier) != stack.updated_time:
+                    self.updated_time[stack_identifier] = stack.updated_time
+                    return True
+            elif status == 'DELETE_COMPLETE' and stack.deletion_time is None:
+                # Wait for deletion_time to be filled, so that we have more
+                # confidence the operation is finished.
+                return False
+            else:
+                return True
+
+        wait_for_action = status.split('_')[0]
+        if (stack.action == wait_for_action and
+                fail_regexp.search(stack.stack_status)):
+            # Handle UPDATE_COMPLETE/UPDATE_FAILED case.
+            if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
+                if self.updated_time.get(
+                        stack_identifier) != stack.updated_time:
+                    self.updated_time[stack_identifier] = stack.updated_time
+                    raise exceptions.StackBuildErrorException(
+                        stack_identifier=stack_identifier,
+                        stack_status=stack.stack_status,
+                        stack_status_reason=stack.stack_status_reason)
+            else:
+                raise exceptions.StackBuildErrorException(
+                    stack_identifier=stack_identifier,
+                    stack_status=stack.stack_status,
+                    stack_status_reason=stack.stack_status_reason)
+
+    def _wait_for_stack_status(self, stack_identifier, status,
+                               failure_pattern=None,
+                               success_on_not_found=False,
+                               signal_required=False,
+                               resources_to_signal=None):
+        """Waits for a Stack to reach a given status.
+
+        Note this compares the full $action_$status, e.g. CREATE_COMPLETE,
+        not just COMPLETE, which is exposed via the status property of
+        Stack in heatclient.
+        """
+        if failure_pattern:
+            fail_regexp = re.compile(failure_pattern)
+        elif 'FAILED' in status:
+            # If we're looking for e.g. CREATE_FAILED, COMPLETE is unexpected.
+            fail_regexp = re.compile('^.*_COMPLETE$')
+        else:
+            fail_regexp = re.compile('^.*_FAILED$')
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                stack = self.client.stacks.get(stack_identifier,
+                                               resolve_outputs=False)
+            except heat_exceptions.HTTPNotFound:
+                if success_on_not_found:
+                    return
+                # ignore this, as the resource may not have
+                # been created yet
+            else:
+                if self._verify_status(stack, stack_identifier, status,
+                                       fail_regexp):
+                    return
+            if signal_required:
+                self.signal_resources(resources_to_signal)
+            time.sleep(build_interval)
+
+        message = ('Stack %s failed to reach %s status within '
+                   'the required time (%s s).' %
+                   (stack_identifier, status, build_timeout))
+        raise exceptions.TimeoutException(message)
+
+    def _stack_delete(self, stack_identifier):
+        try:
+            self._handle_in_progress(self.client.stacks.delete,
+                                     stack_identifier)
+        except heat_exceptions.HTTPNotFound:
+            pass
+        self._wait_for_stack_status(
+            stack_identifier, 'DELETE_COMPLETE',
+            success_on_not_found=True)
+
+    def _handle_in_progress(self, fn, *args, **kwargs):
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                fn(*args, **kwargs)
+            except heat_exceptions.HTTPConflict as ex:
+                # FIXME(sirushtim): Wait a little for the stack lock to be
+                # released and hopefully, the stack should be usable again.
+                if ex.error['error']['type'] != 'ActionInProgress':
+                    raise ex
+
+                time.sleep(build_interval)
+            else:
+                break
+
+    def update_stack(self, stack_identifier, template=None, environment=None,
+                     files=None, parameters=None, tags=None,
+                     expected_status='UPDATE_COMPLETE',
+                     disable_rollback=True,
+                     existing=False):
+        env = environment or {}
+        env_files = files or {}
+        parameters = parameters or {}
+
+        self.updated_time[stack_identifier] = self.client.stacks.get(
+            stack_identifier, resolve_outputs=False).updated_time
+
+        self._handle_in_progress(
+            self.client.stacks.update,
+            stack_id=stack_identifier,
+            template=template,
+            files=env_files,
+            disable_rollback=disable_rollback,
+            parameters=parameters,
+            environment=env,
+            tags=tags,
+            existing=existing)
+
+        kwargs = {'stack_identifier': stack_identifier,
+                  'status': expected_status}
+        if expected_status in ['ROLLBACK_COMPLETE']:
+            # To trigger rollback you would intentionally fail the stack
+            # Hence check for rollback failures
+            kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
+
+        self._wait_for_stack_status(**kwargs)
+
+    def cancel_update_stack(self, stack_identifier,
+                            expected_status='ROLLBACK_COMPLETE'):
+
+        stack_name = stack_identifier.split('/')[0]
+
+        self.updated_time[stack_identifier] = self.client.stacks.get(
+            stack_identifier, resolve_outputs=False).updated_time
+
+        self.client.actions.cancel_update(stack_name)
+
+        kwargs = {'stack_identifier': stack_identifier,
+                  'status': expected_status}
+        if expected_status in ['ROLLBACK_COMPLETE']:
+            # To trigger rollback you would intentionally fail the stack
+            # Hence check for rollback failures
+            kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
+
+        self._wait_for_stack_status(**kwargs)
+
+    def preview_update_stack(self, stack_identifier, template,
+                             environment=None, files=None, parameters=None,
+                             tags=None, disable_rollback=True,
+                             show_nested=False):
+        env = environment or {}
+        env_files = files or {}
+        parameters = parameters or {}
+
+        return self.client.stacks.preview_update(
+            stack_id=stack_identifier,
+            template=template,
+            files=env_files,
+            disable_rollback=disable_rollback,
+            parameters=parameters,
+            environment=env,
+            tags=tags,
+            show_nested=show_nested
+        )
+
+    def assert_resource_is_a_stack(self, stack_identifier, res_name,
+                                   wait=False):
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            time.sleep(build_interval)
+            try:
+                nested_identifier = self._get_nested_identifier(
+                    stack_identifier, res_name)
+            except Exception:
+                # We may have to wait, if the create is in-progress
+                if wait:
+                    time.sleep(build_interval)
+                else:
+                    raise
+            else:
+                return nested_identifier
+
+    def _get_nested_identifier(self, stack_identifier, res_name):
+        rsrc = self.client.resources.get(stack_identifier, res_name)
+        nested_link = [l for l in rsrc.links if l['rel'] == 'nested']
+        nested_href = nested_link[0]['href']
+        nested_id = nested_href.split('/')[-1]
+        nested_identifier = '/'.join(nested_href.split('/')[-2:])
+        self.assertEqual(rsrc.physical_resource_id, nested_id)
+
+        nested_stack = self.client.stacks.get(nested_id, resolve_outputs=False)
+        nested_identifier2 = '%s/%s' % (nested_stack.stack_name,
+                                        nested_stack.id)
+        self.assertEqual(nested_identifier, nested_identifier2)
+        parent_id = stack_identifier.split("/")[-1]
+        self.assertEqual(parent_id, nested_stack.parent)
+        return nested_identifier
+
+    def group_nested_identifier(self, stack_identifier,
+                                group_name):
+        # Get the nested stack identifier from a group resource
+        rsrc = self.client.resources.get(stack_identifier, group_name)
+        physical_resource_id = rsrc.physical_resource_id
+
+        nested_stack = self.client.stacks.get(physical_resource_id,
+                                              resolve_outputs=False)
+        nested_identifier = '%s/%s' % (nested_stack.stack_name,
+                                       nested_stack.id)
+        parent_id = stack_identifier.split("/")[-1]
+        self.assertEqual(parent_id, nested_stack.parent)
+        return nested_identifier
+
+    def list_group_resources(self, stack_identifier,
+                             group_name, minimal=True):
+        nested_identifier = self.group_nested_identifier(stack_identifier,
+                                                         group_name)
+        if minimal:
+            return self.list_resources(nested_identifier)
+        return self.client.resources.list(nested_identifier)
+
+    def list_resources(self, stack_identifier):
+        resources = self.client.resources.list(stack_identifier)
+        return dict((r.resource_name, r.resource_type) for r in resources)
+
+    def get_resource_stack_id(self, r):
+        stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
+        return stack_link['href'].split("/")[-1]
+
+    def get_physical_resource_id(self, stack_identifier, resource_name):
+        try:
+            resource = self.client.resources.get(
+                stack_identifier, resource_name)
+            return resource.physical_resource_id
+        except Exception:
+            raise Exception('Resource (%s) not found in stack (%s)!' %
+                            (resource_name, stack_identifier))
+
+    def get_stack_output(self, stack_identifier, output_key,
+                         validate_errors=True):
+        stack = self.client.stacks.get(stack_identifier)
+        return self._stack_output(stack, output_key, validate_errors)
+
+    def check_input_values(self, group_resources, key, value):
+        # Check inputs for deployment and derived config
+        for r in group_resources:
+            d = self.client.software_deployments.get(
+                r.physical_resource_id)
+            self.assertEqual({key: value}, d.input_values)
+            c = self.client.software_configs.get(
+                d.config_id)
+            foo_input_c = [i for i in c.inputs if i.get('name') == key][0]
+            self.assertEqual(value, foo_input_c.get('value'))
+
+    def signal_resources(self, resources):
+        # Signal all IN_PROGRESS resources
+        for r in resources:
+            if 'IN_PROGRESS' in r.resource_status:
+                stack_id = self.get_resource_stack_id(r)
+                self.client.resources.signal(stack_id, r.resource_name)
+
+    def stack_create(self, stack_name=None, template=None, files=None,
+                     parameters=None, environment=None, tags=None,
+                     expected_status='CREATE_COMPLETE',
+                     disable_rollback=True, enable_cleanup=True,
+                     environment_files=None, timeout=None):
+        name = stack_name or self._stack_rand_name()
+        templ = template or self.template
+        templ_files = files or {}
+        params = parameters or {}
+        env = environment or {}
+        timeout_mins = timeout or self.conf.build_timeout
+        self.client.stacks.create(
+            stack_name=name,
+            template=templ,
+            files=templ_files,
+            disable_rollback=disable_rollback,
+            parameters=params,
+            environment=env,
+            tags=tags,
+            environment_files=environment_files,
+            timeout_mins=timeout_mins
+        )
+        if expected_status not in ['ROLLBACK_COMPLETE'] and enable_cleanup:
+            self.addCleanup(self._stack_delete, name)
+
+        stack = self.client.stacks.get(name, resolve_outputs=False)
+        stack_identifier = '%s/%s' % (name, stack.id)
+        kwargs = {'stack_identifier': stack_identifier,
+                  'status': expected_status}
+        if expected_status:
+            if expected_status in ['ROLLBACK_COMPLETE']:
+                # To trigger rollback you would intentionally fail the stack
+                # Hence check for rollback failures
+                kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
+            self._wait_for_stack_status(**kwargs)
+        return stack_identifier
+
+    def stack_adopt(self, stack_name=None, files=None,
+                    parameters=None, environment=None, adopt_data=None,
+                    wait_for_status='ADOPT_COMPLETE'):
+        if (self.conf.skip_test_stack_action_list and
+                'ADOPT' in self.conf.skip_test_stack_action_list):
+            self.skipTest('Testing Stack adopt disabled in conf, skipping')
+        name = stack_name or self._stack_rand_name()
+        templ_files = files or {}
+        params = parameters or {}
+        env = environment or {}
+        self.client.stacks.create(
+            stack_name=name,
+            files=templ_files,
+            disable_rollback=True,
+            parameters=params,
+            environment=env,
+            adopt_stack_data=adopt_data,
+        )
+        self.addCleanup(self._stack_delete, name)
+        stack = self.client.stacks.get(name, resolve_outputs=False)
+        stack_identifier = '%s/%s' % (name, stack.id)
+        self._wait_for_stack_status(stack_identifier, wait_for_status)
+        return stack_identifier
+
+    def stack_abandon(self, stack_id):
+        if (self.conf.skip_test_stack_action_list and
+                'ABANDON' in self.conf.skip_test_stack_action_list):
+            self.addCleanup(self._stack_delete, stack_id)
+            self.skipTest('Testing Stack abandon disabled in conf, skipping')
+        info = self.client.stacks.abandon(stack_id=stack_id)
+        return info
+
+    def stack_snapshot(self, stack_id,
+                       wait_for_status='SNAPSHOT_COMPLETE'):
+        snapshot = self.client.stacks.snapshot(stack_id=stack_id)
+        self._wait_for_stack_status(stack_id, wait_for_status)
+        return snapshot['id']
+
+    def stack_restore(self, stack_id, snapshot_id,
+                      wait_for_status='RESTORE_COMPLETE'):
+        self.client.stacks.restore(stack_id, snapshot_id)
+        self._wait_for_stack_status(stack_id, wait_for_status)
+
+    def stack_suspend(self, stack_identifier):
+        if (self.conf.skip_test_stack_action_list and
+                'SUSPEND' in self.conf.skip_test_stack_action_list):
+            self.addCleanup(self._stack_delete, stack_identifier)
+            self.skipTest('Testing Stack suspend disabled in conf, skipping')
+        self._handle_in_progress(self.client.actions.suspend, stack_identifier)
+        # improve debugging by first checking the resource's state.
+        self._wait_for_all_resource_status(stack_identifier,
+                                           'SUSPEND_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
+
+    def stack_resume(self, stack_identifier):
+        if (self.conf.skip_test_stack_action_list and
+                'RESUME' in self.conf.skip_test_stack_action_list):
+            self.addCleanup(self._stack_delete, stack_identifier)
+            self.skipTest('Testing Stack resume disabled in conf, skipping')
+        self._handle_in_progress(self.client.actions.resume, stack_identifier)
+        # improve debugging by first checking the resource's state.
+        self._wait_for_all_resource_status(stack_identifier,
+                                           'RESUME_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'RESUME_COMPLETE')
+
+    def wait_for_event_with_reason(self, stack_identifier, reason,
+                                   rsrc_name=None, num_expected=1):
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                rsrc_events = self.client.events.list(stack_identifier,
+                                                      resource_name=rsrc_name)
+            except heat_exceptions.HTTPNotFound:
+                LOG.debug("No events yet found for %s", rsrc_name)
+            else:
+                matched = [e for e in rsrc_events
+                           if e.resource_status_reason == reason]
+                if len(matched) == num_expected:
+                    return matched
+            time.sleep(build_interval)
+
+    def check_autoscale_complete(self, stack_id, expected_num, parent_stack,
+                                 policy):
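+        # Scaling is complete when the nested group stack has the expected
+        # number of resources, all of them are CREATE/UPDATE_COMPLETE, and
+        # the policy metadata no longer reports scaling_in_progress.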
+        res_list = self.client.resources.list(stack_id)
+        all_res_complete = all(res.resource_status in ('UPDATE_COMPLETE',
+                                                       'CREATE_COMPLETE')
+                               for res in res_list)
+        all_res = len(res_list) == expected_num
+        if all_res and all_res_complete:
+            metadata = self.client.resources.metadata(parent_stack, policy)
+            return not metadata.get('scaling_in_progress')
+        return False
diff --git a/heat_tempest_plugin/config.py b/heat_tempest_plugin/config.py
new file mode 100644
index 0000000..65b870d
--- /dev/null
+++ b/heat_tempest_plugin/config.py
@@ -0,0 +1,172 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+CONF = None
+
+service_available_group = cfg.OptGroup(name="service_available",
+                                       title="Available OpenStack Services")
+
+ServiceAvailableGroup = [
+    cfg.BoolOpt("orchestration_plugin",
+                default=True,
+                help="Whether or not heat is expected to be available"),
+]
+
+heat_group = cfg.OptGroup(name="orchestration_plugin",
+                          title="Heat Service Options")
+
+HeatGroup = [
+    cfg.StrOpt("catalog_type",
+               default="orchestration",
+               help="Catalog type of the orchestration service."),
+    cfg.StrOpt('username',
+               help="Username to use for non admin API requests."),
+    cfg.StrOpt('password',
+               help="Non admin API key to use when authenticating.",
+               secret=True),
+    cfg.StrOpt('admin_username',
+               help="Username to use for admin API requests."),
+    cfg.StrOpt('admin_password',
+               help="Admin API key to use when authentication.",
+               secret=True),
+    cfg.StrOpt('project_name',
+               help="Project name to use for API requests.",
+               deprecated_opts=[cfg.DeprecatedOpt(
+                   'tenant_name', group='orchestration_plugin')]),
+    cfg.StrOpt('admin_project_name',
+               default='admin',
+               help="Admin project name to use for admin API requests.",
+               deprecated_opts=[cfg.DeprecatedOpt(
+                   'admin_tenant_name', group='orchestration_plugin')]),
+    cfg.StrOpt('auth_url',
+               help="Full URI of the OpenStack Identity API (Keystone)."),
+    cfg.StrOpt('auth_version',
+               help="OpenStack Identity API version."),
+    cfg.StrOpt('user_domain_name',
+               help="User domain name, if keystone v3 auth_url "
+                    "is used"),
+    cfg.StrOpt('project_domain_name',
+               help="Project domain name, if keystone v3 auth_url "
+                    "is used"),
+    cfg.StrOpt('user_domain_id',
+               help="User domain id, if keystone v3 auth_url "
+                    "is used"),
+    cfg.StrOpt('project_domain_id',
+               help="Project domain id, if keystone v3 auth_url "
+                    "is used"),
+    cfg.StrOpt('region',
+               help="The region name to use"),
+    cfg.StrOpt('instance_type',
+               help="Instance type for tests. Needs to be big enough for a "
+                    "full OS plus the test workload"),
+    cfg.StrOpt('minimal_instance_type',
+               help="Instance type enough for simplest cases."),
+    cfg.StrOpt('image_ref',
+               help="Name of image to use for tests which boot servers."),
+    cfg.StrOpt('keypair_name',
+               help="Name of existing keypair to launch servers with."),
+    cfg.StrOpt('minimal_image_ref',
+               help="Name of minimal (e.g cirros) image to use when "
+                    "launching test instances."),
+    cfg.BoolOpt('disable_ssl_certificate_validation',
+                default=False,
+                help="Set to True if using self-signed SSL certificates."),
+    cfg.StrOpt('ca_file',
+               help="CA certificate to pass for servers that have "
+                    "https endpoint."),
+    cfg.IntOpt('build_interval',
+               default=4,
+               help="Time in seconds between build status checks."),
+    cfg.IntOpt('build_timeout',
+               default=1200,
+               help="Timeout in seconds to wait for a stack to build."),
+    cfg.StrOpt('network_for_ssh',
+               default='heat-net',
+               help="Network used for SSH connections."),
+    cfg.StrOpt('fixed_network_name',
+               default='heat-net',
+               help="Visible fixed network name "),
+    cfg.StrOpt('floating_network_name',
+               default='public',
+               help="Visible floating network name "),
+    cfg.StrOpt('boot_config_env',
+               default=('heat_tempest_plugin/tests/scenario/templates'
+                        '/boot_config_none_env.yaml'),
+               help="Path to environment file which defines the "
+                    "resource type Heat::InstallConfigAgent. Needs to "
+                    "be appropriate for the image_ref."),
+    cfg.StrOpt('fixed_subnet_name',
+               default='heat-subnet',
+               help="Visible fixed sub-network name "),
+    cfg.IntOpt('ssh_timeout',
+               default=300,
+               help="Timeout in seconds to wait for authentication to "
+                    "succeed."),
+    cfg.IntOpt('ip_version_for_ssh',
+               default=4,
+               help="IP version used for SSH connections."),
+    cfg.IntOpt('ssh_channel_timeout',
+               default=60,
+               help="Timeout in seconds to wait for output from ssh "
+                    "channel."),
+    cfg.IntOpt('tenant_network_mask_bits',
+               default=28,
+               help="The mask bits for tenant ipv4 subnets"),
+    cfg.BoolOpt('skip_scenario_tests',
+                default=False,
+                help="Skip all scenario tests"),
+    cfg.BoolOpt('skip_functional_tests',
+                default=False,
+                help="Skip all functional tests"),
+    cfg.ListOpt('skip_functional_test_list',
+                help="List of functional test class or class.method "
+                     "names to skip ex. AutoscalingGroupTest, "
+                     "InstanceGroupBasicTest.test_size_updates_work"),
+    cfg.ListOpt('skip_scenario_test_list',
+                help="List of scenario test class or class.method "
+                     "names to skip ex. NeutronLoadBalancerTest, "
+                     "AodhAlarmTest.test_alarm"),
+    cfg.ListOpt('skip_test_stack_action_list',
+                help="List of stack actions in tests to skip "
+                     "ex. ABANDON, ADOPT, SUSPEND, RESUME"),
+    cfg.BoolOpt('convergence_engine_enabled',
+                default=True,
+                help="Test features that are only present for stacks with "
+                     "convergence enabled."),
+    cfg.IntOpt('volume_size',
+               default=1,
+               help='Default size in GB for volumes created by volume tests'),
+    cfg.IntOpt('connectivity_timeout',
+               default=120,
+               help="Timeout in seconds to wait for connectivity to "
+                    "server."),
+    cfg.IntOpt('sighup_timeout',
+               default=120,
+               help="Timeout in seconds to wait for adding or removing child "
+                    "process after receiving of sighup signal"),
+    cfg.IntOpt('sighup_config_edit_retries',
+               default=10,
+               help='Number of retries for editing the config file during '
+                    'SIGHUP. If another worker is already editing the config '
+                    'file, it may be busy; wait and retry the edit.'),
+    cfg.StrOpt('heat_config_notify_script',
+               default='heat-config-notify',
+               help="Path to the script heat-config-notify"),
+
+]
+
+
+def list_opts():
+    yield heat_group.name, HeatGroup
+    yield service_available_group.name, ServiceAvailableGroup
diff --git a/heat_tempest_plugin/plugin.py b/heat_tempest_plugin/plugin.py
new file mode 100644
index 0000000..0938b43
--- /dev/null
+++ b/heat_tempest_plugin/plugin.py
@@ -0,0 +1,41 @@
+# Copyright 2015
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+from tempest import config
+from tempest.test_discover import plugins
+
+from heat_tempest_plugin import config as heat_config
+
+
+class HeatTempestPlugin(plugins.TempestPlugin):
+    def load_tests(self):
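+        # Tempest expects load_tests() to return a (test dir, top-level dir)
+        # tuple that it uses for test discovery.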
+        base_path = os.path.split(os.path.dirname(
+            os.path.abspath(__file__)))[0]
+        test_dir = "heat_tempest_plugin"
+        full_test_dir = os.path.join(base_path, test_dir)
+        return full_test_dir, base_path
+
+    def register_opts(self, conf):
+        config.register_opt_group(conf, heat_config.service_available_group,
+                                  heat_config.ServiceAvailableGroup)
+        config.register_opt_group(conf, heat_config.heat_group,
+                                  heat_config.HeatGroup)
+        heat_config.CONF = config.CONF
+
+    def get_opt_lists(self):
+        return [(heat_config.heat_group.name,
+                 heat_config.HeatGroup)]
diff --git a/heat_tempest_plugin/services/__init__.py b/heat_tempest_plugin/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/services/__init__.py
diff --git a/heat_tempest_plugin/services/clients.py b/heat_tempest_plugin/services/clients.py
new file mode 100644
index 0000000..ffd5b78
--- /dev/null
+++ b/heat_tempest_plugin/services/clients.py
@@ -0,0 +1,199 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+from ceilometerclient import client as ceilometer_client
+from cinderclient import client as cinder_client
+from heatclient import client as heat_client
+from keystoneauth1 import exceptions as kc_exceptions
+from keystoneauth1.identity.generic import password
+from keystoneauth1 import session
+from neutronclient.v2_0 import client as neutron_client
+from novaclient import client as nova_client
+from swiftclient import client as swift_client
+
+
+class KeystoneWrapperClient(object):
+    """Wrapper object for keystone client
+
+    This wraps the keystone client so we can encapsulate certain
+    added properties like auth_token, project_id, etc.
+    """
+    def __init__(self, auth_plugin, verify=True):
+        self.auth_plugin = auth_plugin
+        self.session = session.Session(
+            auth=auth_plugin,
+            verify=verify)
+
+    @property
+    def auth_token(self):
+        return self.auth_plugin.get_token(self.session)
+
+    @property
+    def auth_ref(self):
+        return self.auth_plugin.get_access(self.session)
+
+    @property
+    def project_id(self):
+        return self.auth_plugin.get_project_id(self.session)
+
+    def get_endpoint_url(self, service_type, region=None):
+        kwargs = {
+            'service_type': service_type,
+            'region_name': region}
+        return self.auth_ref.service_catalog.url_for(**kwargs)
+
+
+class ClientManager(object):
+    """Provides access to the official python clients for calling various APIs.
+
+    Manager that provides access to the official python clients for
+    calling various OpenStack APIs.
+    """
+
+    CINDERCLIENT_VERSION = '2'
+    HEATCLIENT_VERSION = '1'
+    NOVA_API_VERSION = '2.1'
+    CEILOMETER_VERSION = '2'
+
+    def __init__(self, conf, admin_credentials=False):
+        self.conf = conf
+        self.admin_credentials = admin_credentials
+
+        self.auth_version = self.conf.auth_version
+        if not self.auth_version:
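+            # Fall back to the version embedded in auth_url,
+            # e.g. 'http://host:5000/v3' -> '3'.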
+            self.auth_version = self.conf.auth_url.split('/v')[1]
+        self.insecure = self.conf.disable_ssl_certificate_validation
+        self.ca_file = self.conf.ca_file
+
+        self.identity_client = self._get_identity_client()
+        self.orchestration_client = self._get_orchestration_client()
+        self.compute_client = self._get_compute_client()
+        self.network_client = self._get_network_client()
+        self.volume_client = self._get_volume_client()
+        self.object_client = self._get_object_client()
+        self.metering_client = self._get_metering_client()
+
+    def _username(self):
+        if self.admin_credentials:
+            return self.conf.admin_username
+        return self.conf.username
+
+    def _password(self):
+        if self.admin_credentials:
+            return self.conf.admin_password
+        return self.conf.password
+
+    def _project_name(self):
+        if self.admin_credentials:
+            return self.conf.admin_project_name
+        return self.conf.project_name
+
+    def _get_orchestration_client(self):
+        endpoint = os.environ.get('HEAT_URL')
+        if os.environ.get('OS_NO_CLIENT_AUTH') == 'True':
+            token = None
+        else:
+            token = self.identity_client.auth_token
+        try:
+            if endpoint is None:
+                endpoint = self.identity_client.get_endpoint_url(
+                    'orchestration', self.conf.region)
+        except kc_exceptions.EndpointNotFound:
+            return None
+        else:
+            return heat_client.Client(
+                self.HEATCLIENT_VERSION,
+                endpoint,
+                token=token,
+                username=self._username(),
+                password=self._password())
+
+    def _get_identity_client(self):
+        user_domain_id = self.conf.user_domain_id
+        project_domain_id = self.conf.project_domain_id
+        user_domain_name = self.conf.user_domain_name
+        project_domain_name = self.conf.project_domain_name
+        kwargs = {
+            'username': self._username(),
+            'password': self._password(),
+            'project_name': self._project_name(),
+            'auth_url': self.conf.auth_url
+        }
+        # keystone v2 can't handle domain details, so only include
+        # them when authenticating against v3
+        if self.auth_version == '3':
+            kwargs.update({
+                'user_domain_id': user_domain_id,
+                'project_domain_id': project_domain_id,
+                'user_domain_name': user_domain_name,
+                'project_domain_name': project_domain_name})
+        auth = password.Password(**kwargs)
+        if self.insecure:
+            verify_cert = False
+        else:
+            verify_cert = self.ca_file or True
+
+        return KeystoneWrapperClient(auth, verify_cert)
+
+    def _get_compute_client(self):
+        # Create our default Nova client to use in testing
+        return nova_client.Client(
+            self.NOVA_API_VERSION,
+            session=self.identity_client.session,
+            service_type='compute',
+            endpoint_type='publicURL',
+            region_name=self.conf.region,
+            os_cache=False,
+            http_log_debug=True)
+
+    def _get_network_client(self):
+
+        return neutron_client.Client(
+            session=self.identity_client.session,
+            service_type='network',
+            region_name=self.conf.region,
+            endpoint_type='publicURL')
+
+    def _get_volume_client(self):
+        return cinder_client.Client(
+            self.CINDERCLIENT_VERSION,
+            session=self.identity_client.session,
+            endpoint_type='publicURL',
+            region_name=self.conf.region,
+            http_log_debug=True)
+
+    def _get_object_client(self):
+        args = {
+            'auth_version': self.auth_version,
+            'session': self.identity_client.session,
+            'os_options': {'endpoint_type': 'publicURL',
+                           'region_name': self.conf.region,
+                           'service_type': 'object-store'},
+        }
+        return swift_client.Connection(**args)
+
+    def _get_metering_client(self):
+        try:
+            endpoint = self.identity_client.get_endpoint_url('metering',
+                                                             self.conf.region)
+        except kc_exceptions.EndpointNotFound:
+            return None
+        else:
+            args = {
+                'session': self.identity_client.session,
+                'region_name': self.conf.region,
+                'endpoint_type': 'publicURL',
+                'service_type': 'metering',
+            }
+            return ceilometer_client.Client(self.CEILOMETER_VERSION,
+                                            endpoint, **args)
diff --git a/heat_tempest_plugin/tests/__init__.py b/heat_tempest_plugin/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/tests/__init__.py
diff --git a/heat_tempest_plugin/tests/api/__init__.py b/heat_tempest_plugin/tests/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/__init__.py
diff --git a/heat_tempest_plugin/tests/api/gabbits/environments.yaml b/heat_tempest_plugin/tests/api/gabbits/environments.yaml
new file mode 100644
index 0000000..17ac476
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/gabbits/environments.yaml
@@ -0,0 +1,55 @@
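+# OS_TOKEN and PREFIX are exported to the environment by
+# tests/api/test_heat_api.py when it builds these gabbi tests.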
+defaults:
+  request_headers:
+    X-Auth-Token: $ENVIRON['OS_TOKEN']
+
+tests:
+- name: environment with parameter
+  POST: /stacks
+  request_headers:
+    content-type: application/json
+  data:
+    files: {}
+    disable_rollback: true
+    parameters: {}
+    stack_name: $ENVIRON['PREFIX']-envstack
+    environment:
+      parameters:
+        test_val: test
+    template:
+      heat_template_version: '2016-04-08'
+      parameters:
+        test_val:
+          type: string
+      resources:
+        test:
+          type: OS::Heat::TestResource
+          properties:
+            value: {get_param: test_val}
+      outputs:
+        output_value:
+          value: {get_attr: [test, output]}
+
+  status: 201
+  response_headers:
+    location: //stacks/$ENVIRON['PREFIX']-envstack/[a-f0-9-]+/
+
+- name: poll for envstack CREATE_COMPLETE
+  GET: $LOCATION
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: CREATE_COMPLETE
+
+- name: get stack output
+  GET: $LAST_URL/outputs/output_value
+  redirects: True
+  status: 200
+  response_json_paths:
+    $.output.output_value: test
+
+- name: delete envstack
+  DELETE: /stacks/$ENVIRON['PREFIX']-envstack
+  redirects: True
+  status: 204
diff --git a/heat_tempest_plugin/tests/api/gabbits/resources.yaml b/heat_tempest_plugin/tests/api/gabbits/resources.yaml
new file mode 100644
index 0000000..41da444
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/gabbits/resources.yaml
@@ -0,0 +1,90 @@
+defaults:
+  request_headers:
+    X-Auth-Token: $ENVIRON['OS_TOKEN']
+
+tests:
+- name: create stack with resources
+  POST: /stacks
+  request_headers:
+    content-type: application/json
+  data:
+    files: {}
+    disable_rollback: true
+    parameters: {}
+    stack_name: $ENVIRON['PREFIX']-rsrcstack
+    template:
+      heat_template_version: '2016-04-08'
+      parameters:
+        test_val:
+          type: string
+          default: test
+      resources:
+        test:
+          type: OS::Heat::TestResource
+          properties:
+            value: {get_param: test_val}
+
+  status: 201
+  response_headers:
+    location: //stacks/$ENVIRON['PREFIX']-rsrcstack/[a-f0-9-]+/
+
+- name: poll for rsrcstack CREATE_COMPLETE
+  GET: $LOCATION
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: CREATE_COMPLETE
+
+- name: list resources
+  GET: $LAST_URL/resources
+  request_headers:
+    content-type: application/json
+  status: 200
+  response_json_paths:
+    $.resources[0].logical_resource_id: test
+    $.resources[0].resource_status: CREATE_COMPLETE
+
+- name: list filtered resources
+  GET: $LAST_URL
+  request_headers:
+    content-type: application/json
+  query_parameters:
+    type: OS::Nova::Server
+  status: 200
+  response_json_paths:
+    $.resources: []
+
+- name: show resource
+  GET: $LAST_URL/test
+  request_headers:
+    content-type: application/json
+  status: 200
+  response_json_paths:
+    $.resource.attributes.output: test
+
+- name: mark resource unhealthy
+  PATCH: $LAST_URL
+  request_headers:
+    content-type: application/json
+  data:
+    mark_unhealthy: true
+    resource_status_reason: 'resource deleted'
+  status: 200
+
+- name: show unhealthy resource
+  GET: $LAST_URL
+  status: 200
+  response_json_paths:
+    $.resource.resource_status: CHECK_FAILED
+    $.resource.resource_status_reason: 'resource deleted'
+
+- name: signal resource
+  POST: $LAST_URL/signal
+  status: 400
+
+- name: delete stack with resources
+  DELETE: /stacks/$ENVIRON['PREFIX']-rsrcstack
+  redirects: True
+  status: 204
diff --git a/heat_tempest_plugin/tests/api/gabbits/resourcetypes.yaml b/heat_tempest_plugin/tests/api/gabbits/resourcetypes.yaml
new file mode 100644
index 0000000..0730cc8
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/gabbits/resourcetypes.yaml
@@ -0,0 +1,24 @@
+defaults:
+  request_headers:
+    X-Auth-Token: $ENVIRON['OS_TOKEN']
+
+tests:
+- name: list resource types
+  GET: /resource_types
+  status: 200
+
+- name: show resource type
+  GET: /resource_types/OS::Heat::TestResource
+  status: 200
+  response_json_paths:
+    $.support_status.status: SUPPORTED
+    $.properties.wait_secs.default: 0
+
+- name: resource type template
+  GET: /resource_types/OS::Heat::TestResource/template
+  query_parameters:
+    template_type: hot
+  status: 200
+  response_json_paths:
+    $.resources.TestResource.type: OS::Heat::TestResource
+    $.heat_template_version: '2016-10-14'
diff --git a/heat_tempest_plugin/tests/api/gabbits/stacks.yaml b/heat_tempest_plugin/tests/api/gabbits/stacks.yaml
new file mode 100644
index 0000000..cb67e71
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/gabbits/stacks.yaml
@@ -0,0 +1,162 @@
+defaults:
+  request_headers:
+    X-Auth-Token: $ENVIRON['OS_TOKEN']
+
+tests:
+- name: stack list
+  GET: /stacks
+  status: 200
+  response_headers:
+    content-type: application/json
+
+- name: create empty stack
+  POST: /stacks
+  request_headers:
+    content-type: application/json
+  data:
+    files: {}
+    disable_rollback: true
+    parameters: {}
+    stack_name: $ENVIRON['PREFIX']-empty
+    environment: {}
+    template:
+      heat_template_version: '2016-04-08'
+
+  status: 201
+  response_headers:
+    location: //stacks/$ENVIRON['PREFIX']-empty/[a-f0-9-]+/
+
+
+- name: poll for empty CREATE_COMPLETE
+  GET: $LOCATION
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: CREATE_COMPLETE
+
+- name: show empty stack
+  GET: $LAST_URL
+  redirects: True
+  status: 200
+
+- name: delete empty stack
+  DELETE: $LAST_URL
+  redirects: True
+  status: 204
+
+- name: create stack
+  POST: /stacks
+  request_headers:
+    content-type: application/json
+  data:
+    files: {}
+    disable_rollback: true
+    parameters: {'test_val': value}
+    stack_name: $ENVIRON['PREFIX']-stack
+    template:
+      heat_template_version: pike
+      parameters:
+        test_val:
+          type: string
+      resources:
+        test:
+          type: OS::Heat::TestResource
+          properties:
+            value: {get_param: test_val}
+      outputs:
+        output_value:
+          value: {get_attr: [test, output]}
+
+  status: 201
+  response_headers:
+    location: //stacks/$ENVIRON['PREFIX']-stack/[a-f0-9-]+/
+
+- name: poll for stack CREATE_COMPLETE
+  GET: $LOCATION
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: CREATE_COMPLETE
+
+- name: show stack
+  GET: $LAST_URL
+  redirects: True
+  status: 200
+
+- name: update stack
+  PUT: $LAST_URL
+  request_headers:
+    content-type: application/json
+  data:
+    files: {}
+    disable_rollback: true
+    parameters: {'test_val': new_value}
+    stack_name: $ENVIRON['PREFIX']-stack
+    template:
+      heat_template_version: pike
+      parameters:
+        test_val:
+          type: string
+      resources:
+        test:
+          type: OS::Heat::TestResource
+          properties:
+            value: {get_param: test_val}
+            action_wait_secs:
+              update: 1
+      outputs:
+        output_value:
+          value: {get_attr: [test, output]}
+
+  status: 202
+
+- name: poll for stack UPDATE_COMPLETE
+  GET: $LAST_URL
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: UPDATE_COMPLETE
+
+- name: patch update stack
+  PATCH: $LAST_URL
+  request_headers:
+    content-type: application/json
+  data:
+    parameters: {'test_val': new_patched_value}
+
+  status: 202
+
+- name: poll for stack patch UPDATE_COMPLETE
+  GET: $LAST_URL
+  redirects: True
+  poll:
+    count: 5
+    delay: 1.0
+  response_json_paths:
+    $.stack.stack_status: UPDATE_COMPLETE
+    $.stack.updated_time: /^(?!$HISTORY['poll for stack UPDATE_COMPLETE'].$RESPONSE['$.stack.updated_time'])/
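+    # the negative lookahead asserts that updated_time differs from the
+    # value recorded after the previous (PUT) update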
+
+- name: list stack outputs
+  GET: $LAST_URL/outputs
+  redirects: True
+  status: 200
+  response_json_paths:
+    $.outputs[0].output_key: output_value
+
+- name: get stack output
+  GET: $LAST_URL/output_value
+  redirects: True
+  status: 200
+  response_json_paths:
+    $.output.output_value: new_patched_value
+
+- name: delete stack
+  DELETE: /stacks/$ENVIRON['PREFIX']-stack
+  redirects: True
+  status: 204
diff --git a/heat_tempest_plugin/tests/api/gabbits/templates.yaml b/heat_tempest_plugin/tests/api/gabbits/templates.yaml
new file mode 100644
index 0000000..7b67054
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/gabbits/templates.yaml
@@ -0,0 +1,37 @@
+defaults:
+  request_headers:
+    X-Auth-Token: $ENVIRON['OS_TOKEN']
+
+tests:
+- name: list template versions
+  GET: /template_versions
+  status: 200
+  response_json_paths:
+    $.template_versions[?(@.version='heat_template_version.2017-02-24')].type: hot
+
+- name: list template functions
+  GET: /template_versions/heat_template_version.2016-10-14/functions
+  status: 200
+  response_json_paths:
+    $.template_functions[?(@.functions='get_file')].description:
+      A function for including a file inline.
+
+- name: template validate
+  POST: /validate
+  request_headers:
+    content-type: application/json
+  data:
+    template:
+      heat_template_version: '2016-04-08'
+      parameters:
+        test_val:
+          type: string
+      resources:
+        test:
+          type: OS::Heat::TestResource
+          properties:
+            value: {get_param: test_val}
+      outputs:
+        output_value:
+          value: {get_attr: [test, output]}
+  status: 200
diff --git a/heat_tempest_plugin/tests/api/test_heat_api.py b/heat_tempest_plugin/tests/api/test_heat_api.py
new file mode 100644
index 0000000..091edb3
--- /dev/null
+++ b/heat_tempest_plugin/tests/api/test_heat_api.py
@@ -0,0 +1,44 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise the Heat API with gabbi.  """
+
+import os
+
+from gabbi import driver
+from six.moves.urllib import parse as urlparse
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin import config
+from heat_tempest_plugin.services import clients
+
+TESTS_DIR = 'gabbits'
+
+
+def load_tests(loader, tests, pattern):
+    """Provide a TestSuite to the discovery process."""
+    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+
+    conf = config.CONF.orchestration_plugin
+    if conf.auth_url is None:
+        # It's not configured, let's not load tests
+        return
+    manager = clients.ClientManager(conf)
+    endpoint = manager.identity_client.get_endpoint_url(
+        'orchestration', conf.region)
+    host = urlparse.urlparse(endpoint).hostname
+    os.environ['OS_TOKEN'] = manager.identity_client.auth_token
+    os.environ['PREFIX'] = test.rand_name('api')
+
+    return driver.build_tests(test_dir, loader, host=host,
+                              url=endpoint, test_loader_name=__name__)
diff --git a/heat_tempest_plugin/tests/functional/__init__.py b/heat_tempest_plugin/tests/functional/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/__init__.py
diff --git a/heat_tempest_plugin/tests/functional/functional_base.py b/heat_tempest_plugin/tests/functional/functional_base.py
new file mode 100644
index 0000000..c9d82b6
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/functional_base.py
@@ -0,0 +1,32 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_utils import reflection
+
+from heat_tempest_plugin.common import test
+
+
+class FunctionalTestsBase(test.HeatIntegrationTest):
+
+    def setUp(self):
+        super(FunctionalTestsBase, self).setUp()
+        self.check_skip()
+
+    def check_skip(self):
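+        # Skip when functional tests are disabled entirely, or when the test
+        # class or ClassName.test_method is listed in
+        # skip_functional_test_list.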
+        test_cls_name = reflection.get_class_name(self, fully_qualified=False)
+        test_method_name = '.'.join([test_cls_name, self._testMethodName])
+        test_skipped = (self.conf.skip_functional_test_list and (
+            test_cls_name in self.conf.skip_functional_test_list or
+            test_method_name in self.conf.skip_functional_test_list))
+
+        if self.conf.skip_functional_tests or test_skipped:
+            self.skipTest('Test disabled in conf, skipping')
diff --git a/heat_tempest_plugin/tests/functional/test_admin_actions.py b/heat_tempest_plugin/tests/functional/test_admin_actions.py
new file mode 100644
index 0000000..757b03c
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_admin_actions.py
@@ -0,0 +1,101 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+# Simple stack
+test_template = {
+    'heat_template_version': '2013-05-23',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1'
+            }
+        }
+    }
+}
+
+# Nested stack
+rsg_template = {
+    'heat_template_version': '2013-05-23',
+    'resources': {
+        'random_group': {
+            'type': 'OS::Heat::ResourceGroup',
+            'properties': {
+                'count': 2,
+                'resource_def': {
+                    'type': 'OS::Heat::RandomString',
+                    'properties': {
+                        'length': 30,
+                        'salt': 'initial'
+                    }
+                }
+            }
+        }
+    }
+}
+
+
+class AdminActionsTest(functional_base.FunctionalTestsBase):
+
+    def setUp(self):
+        super(AdminActionsTest, self).setUp()
+        if not self.conf.admin_username or not self.conf.admin_password:
+            self.skipTest('No admin creds found, skipping')
+
+    def create_stack_setup_admin_client(self, template=test_template):
+        # Create the stack with the default user
+        self.stack_identifier = self.stack_create(template=template)
+
+        # Setup admin clients
+        self.setup_clients_for_admin()
+
+    def test_admin_simple_stack_actions(self):
+        self.create_stack_setup_admin_client()
+
+        updated_template = test_template.copy()
+        props = updated_template['resources']['test1']['properties']
+        props['value'] = 'new_value'
+
+        # Update, suspend and resume stack
+        self.update_stack(self.stack_identifier,
+                          template=updated_template)
+        self.stack_suspend(self.stack_identifier)
+        self.stack_resume(self.stack_identifier)
+
+        # List stack resources
+        initial_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(self.stack_identifier))
+        # Delete stack
+        self._stack_delete(self.stack_identifier)
+
+    def test_admin_complex_stack_actions(self):
+        self.create_stack_setup_admin_client(template=rsg_template)
+
+        updated_template = rsg_template.copy()
+        props = updated_template['resources']['random_group']['properties']
+        props['count'] = 3
+
+        # Update, suspend and resume stack
+        self.update_stack(self.stack_identifier,
+                          template=updated_template)
+        self.stack_suspend(self.stack_identifier)
+        self.stack_resume(self.stack_identifier)
+
+        # List stack resources
+        resources = {'random_group': 'OS::Heat::ResourceGroup'}
+        self.assertEqual(resources,
+                         self.list_resources(self.stack_identifier))
+        # Delete stack
+        self._stack_delete(self.stack_identifier)
diff --git a/heat_tempest_plugin/tests/functional/test_autoscaling.py b/heat_tempest_plugin/tests/functional/test_autoscaling.py
new file mode 100644
index 0000000..d266646
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_autoscaling.py
@@ -0,0 +1,752 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import json
+
+from heatclient import exc
+from oslo_log import log as logging
+import six
+from testtools import matchers
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AutoscalingGroupTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template to create multiple instances.",
+  "Parameters" : {"size": {"Type": "String", "Default": "1"},
+                  "AZ": {"Type": "String", "Default": "nova"},
+                  "image": {"Type": "String"},
+                  "flavor": {"Type": "String"},
+                  "user_data": {"Type": "String", "Default": "jsconfig data"}},
+  "Resources": {
+    "JobServerGroup": {
+      "Type" : "AWS::AutoScaling::AutoScalingGroup",
+      "Properties" : {
+        "AvailabilityZones" : [{"Ref": "AZ"}],
+        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
+        "MinSize" : {"Ref": "size"},
+        "MaxSize" : "20"
+      }
+    },
+
+    "JobServerConfig" : {
+      "Type" : "AWS::AutoScaling::LaunchConfiguration",
+      "Metadata": {"foo": "bar"},
+      "Properties": {
+        "ImageId"           : {"Ref": "image"},
+        "InstanceType"      : {"Ref": "flavor"},
+        "SecurityGroups"    : [ "sg-1" ],
+        "UserData"          : {"Ref": "user_data"}
+      }
+    }
+  },
+  "Outputs": {
+    "InstanceList": {"Value": {
+      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
+    "JobServerConfigRef": {"Value": {
+      "Ref": "JobServerConfig"}}
+  }
+}
+'''
+
+    instance_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list, default: "x,y"}
+
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    properties:
+      salt: {get_param: UserData}
+outputs:
+  PublicIp: {value: {get_attr: [random1, value]}}
+  AvailabilityZone: {value: 'not-used11'}
+  PrivateDnsName: {value: 'not-used12'}
+  PublicDnsName: {value: 'not-used13'}
+  PrivateIp: {value: 'not-used14'}
+'''
+
+    # This is designed to fail.
+    bad_instance_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list, default: "x,y"}
+
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    depends_on: waiter
+  ready_poster:
+    type: AWS::CloudFormation::WaitConditionHandle
+  waiter:
+    type: AWS::CloudFormation::WaitCondition
+    properties:
+      Handle: {get_resource: ready_poster}
+      Timeout: 1
+outputs:
+  PublicIp:
+    value: {get_attr: [random1, value]}
+'''
+
+    def setUp(self):
+        super(AutoscalingGroupTest, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.instance_type:
+            raise self.skipException("No flavor configured to test")
+
+    def assert_instance_count(self, stack, expected_count):
+        inst_list = self._stack_output(stack, 'InstanceList')
+        self.assertEqual(expected_count, len(inst_list.split(',')))
+
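+    # Each COMPLETE/FAILED resource in the nested stack decrements the
+    # corresponding counter; both counters must end up at exactly zero.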
+    def _assert_instance_state(self, nested_identifier,
+                               num_complete, num_failed):
+        for res in self.client.resources.list(nested_identifier):
+            if 'COMPLETE' in res.resource_status:
+                num_complete = num_complete - 1
+            elif 'FAILED' in res.resource_status:
+                num_failed = num_failed - 1
+        self.assertEqual(0, num_failed)
+        self.assertEqual(0, num_complete)
+
+
+class AutoscalingGroupBasicTest(AutoscalingGroupTest):
+
+    def test_basic_create_works(self):
+        """Make sure the working case is good.
+
+        Note this also covers test_override_aws_ec2_instance: if
+        AWS::EC2::Instance is overridden, AutoScalingGroup will
+        automatically use the overridden resource type.
+        """
+
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 4,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files, environment=env)
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 4)
+
+    def test_size_updates_work(self):
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 2)
+
+        # Increase min size to 5
+        env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+                'parameters': {'size': 5,
+                               'image': self.conf.minimal_image_ref,
+                               'flavor': self.conf.instance_type}}
+        self.update_stack(stack_identifier, self.template,
+                          environment=env2, files=files)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 5)
+
+    def test_update_group_replace(self):
+        """Test case for ensuring non-updatable props case a replacement.
+
+        Make sure that during a group update the non-updatable
+        properties cause a replacement.
+        """
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry':
+               {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': '1',
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
+        orig_asg_id = rsrc.physical_resource_id
+
+        env2 = {'resource_registry':
+                {'AWS::EC2::Instance': 'provider.yaml'},
+                'parameters': {'size': '1',
+                               'AZ': 'wibble',
+                               'image': self.conf.minimal_image_ref,
+                               'flavor': self.conf.instance_type,
+                               'user_data': 'new data'}}
+        self.update_stack(stack_identifier, self.template,
+                          environment=env2, files=files)
+
+        # replacement will cause the resource physical_resource_id to change.
+        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
+        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)
+
+    def test_create_instance_error_causes_group_error(self):
+        """Test create failing a resource in the instance group.
+
+        If a resource in an instance group fails to be created, the instance
+        group itself will fail and the broken inner resource will remain.
+        """
+        stack_name = self._stack_rand_name()
+        files = {'provider.yaml': self.bad_instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        self.client.stacks.create(
+            stack_name=stack_name,
+            template=self.template,
+            files=files,
+            disable_rollback=True,
+            parameters={},
+            environment=env
+        )
+        self.addCleanup(self._stack_delete, stack_name)
+        stack = self.client.stacks.get(stack_name)
+        stack_identifier = '%s/%s' % (stack_name, stack.id)
+        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        self._assert_instance_state(nested_ident, 0, 2)
+
+    def test_update_instance_error_causes_group_error(self):
+        """Test update failing a resource in the instance group.
+
+        If a resource in an instance group fails to be created during an
+        update, the instance group itself will fail and the broken inner
+        resource will remain.
+        """
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 2)
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        self._assert_instance_state(nested_ident, 2, 0)
+        initial_list = [res.resource_name
+                        for res in self.client.resources.list(nested_ident)]
+
+        env['parameters']['size'] = 3
+        files2 = {'provider.yaml': self.bad_instance_template}
+        self.client.stacks.update(
+            stack_id=stack_identifier,
+            template=self.template,
+            files=files2,
+            disable_rollback=True,
+            parameters={},
+            environment=env
+        )
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
+
+        # assert that there are 3 bad instances
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+
+        # 2 resources should be in update failed, and one create failed.
+        for res in self.client.resources.list(nested_ident):
+            if res.resource_name in initial_list:
+                self._wait_for_resource_status(nested_ident,
+                                               res.resource_name,
+                                               'UPDATE_FAILED')
+            else:
+                self._wait_for_resource_status(nested_ident,
+                                               res.resource_name,
+                                               'CREATE_FAILED')
+
+    def test_group_suspend_resume(self):
+
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 4,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files, environment=env)
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+
+        self.stack_suspend(stack_identifier)
+        self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')
+
+        self.stack_resume(stack_identifier)
+        self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')
+
+
+class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):
+
+    def ig_tmpl_with_updt_policy(self):
+        templ = json.loads(copy.deepcopy(self.template))
+        up = {"AutoScalingRollingUpdate": {
+            "MinInstancesInService": "1",
+            "MaxBatchSize": "2",
+            "PauseTime": "PT1S"}}
+        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
+        return templ
+
+    def update_instance_group(self, updt_template,
+                              num_updates_expected_on_updt,
+                              num_creates_expected_on_updt,
+                              num_deletes_expected_on_updt):
+
+        # setup stack from the initial template
+        files = {'provider.yaml': self.instance_template}
+        size = 10
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': size,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+        stack_name = self._stack_rand_name()
+        stack_identifier = self.stack_create(
+            stack_name=stack_name,
+            template=self.ig_tmpl_with_updt_policy(),
+            files=files,
+            environment=env)
+        stack = self.client.stacks.get(stack_identifier)
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+
+        # test that the launch configuration's physical resource name is used
+        conf_name = self._stack_output(stack, 'JobServerConfigRef')
+        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
+        self.assertThat(conf_name,
+                        matchers.MatchesRegex(conf_name_pattern))
+
+        # test the number of instances created
+        self.assert_instance_count(stack, size)
+        # saves info from initial list of instances for comparison later
+        init_instances = self.client.resources.list(nested_ident)
+        init_names = [inst.resource_name for inst in init_instances]
+
+        # test stack update
+        self.update_stack(stack_identifier, updt_template,
+                          environment=env, files=files)
+        updt_stack = self.client.stacks.get(stack_identifier)
+
+        # test that the launch configuration is replaced
+        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
+        self.assertThat(updt_conf_name,
+                        matchers.MatchesRegex(conf_name_pattern))
+        self.assertNotEqual(conf_name, updt_conf_name)
+
+        # test that the group size is the same
+        updt_instances = self.client.resources.list(nested_ident)
+        updt_names = [inst.resource_name for inst in updt_instances]
+        self.assertEqual(len(init_names), len(updt_names))
+        for res in updt_instances:
+            self.assertEqual('UPDATE_COMPLETE', res.resource_status)
+
+        # test that the appropriate number of instance names are the same
+        matched_names = set(updt_names) & set(init_names)
+        self.assertEqual(num_updates_expected_on_updt, len(matched_names))
+
+        # test that the appropriate number of new instances are created
+        self.assertEqual(num_creates_expected_on_updt,
+                         len(set(updt_names) - set(init_names)))
+
+        # test that the appropriate number of instances are deleted
+        self.assertEqual(num_deletes_expected_on_updt,
+                         len(set(init_names) - set(updt_names)))
+
+        # test that the older instances are the ones being deleted
+        if num_deletes_expected_on_updt > 0:
+            deletes_expected = init_names[:num_deletes_expected_on_updt]
+            for name in deletes_expected:
+                self.assertNotIn(name, updt_names)
+
+    def test_instance_group_update_replace(self):
+        """Test simple update replace.
+
+        Test update replace with no conflict in batch size and minimum
+        instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        grp = updt_template['Resources']['JobServerGroup']
+        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '1'
+        policy['MaxBatchSize'] = '3'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=10,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0)
+
+    def test_instance_group_update_replace_with_adjusted_capacity(self):
+        """Test update replace with capacity adjustment.
+
+        Test update replace with capacity adjustment due to conflict in batch
+        size and minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        grp = updt_template['Resources']['JobServerGroup']
+        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '8'
+        policy['MaxBatchSize'] = '4'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=8,
+                                   num_creates_expected_on_updt=2,
+                                   num_deletes_expected_on_updt=2)
+
+    def test_instance_group_update_replace_huge_batch_size(self):
+        """Test update replace with a huge batch size."""
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '0'
+        policy['MaxBatchSize'] = '20'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=10,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0)
+
+    def test_instance_group_update_replace_huge_min_in_service(self):
+        """Update replace with huge number of minimum instances in service."""
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '20'
+        policy['MaxBatchSize'] = '1'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=9,
+                                   num_creates_expected_on_updt=1,
+                                   num_deletes_expected_on_updt=1)
+
+    def test_instance_group_update_no_replace(self):
+        """Test simple update only and no replace.
+
+        Test simple update only and no replace (i.e. updated instance flavor
+        in Launch Configuration) with no conflict in batch size and
+        minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '1'
+        policy['MaxBatchSize'] = '3'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['InstanceType'] = self.conf.minimal_instance_type
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=10,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0)
+
+    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
+        """Test update only and no replace with capacity adjustment.
+
+        Test update only and no replace (i.e. updated instance flavor in
+        Launch Configuration) with capacity adjustment due to conflict in
+        batch size and minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
+        policy['MinInstancesInService'] = '8'
+        policy['MaxBatchSize'] = '4'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['InstanceType'] = self.conf.minimal_instance_type
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=8,
+                                   num_creates_expected_on_updt=2,
+                                   num_deletes_expected_on_updt=2)
+
+
+class AutoScalingSignalTest(AutoscalingGroupTest):
+
+    template = '''
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template to create multiple instances.",
+  "Parameters" : {"size": {"Type": "String", "Default": "1"},
+                  "AZ": {"Type": "String", "Default": "nova"},
+                  "image": {"Type": "String"},
+                  "flavor": {"Type": "String"}},
+  "Resources": {
+    "custom_lb": {
+      "Type": "AWS::EC2::Instance",
+      "Properties": {
+        "ImageId": {"Ref": "image"},
+        "InstanceType": {"Ref": "flavor"},
+        "UserData": "foo",
+        "SecurityGroups": [ "sg-1" ],
+        "Tags": []
+      },
+      "Metadata": {
+        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
+      }
+    },
+    "JobServerGroup": {
+      "Type" : "AWS::AutoScaling::AutoScalingGroup",
+      "Properties" : {
+        "AvailabilityZones" : [{"Ref": "AZ"}],
+        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
+        "DesiredCapacity" : {"Ref": "size"},
+        "MinSize" : "0",
+        "MaxSize" : "20"
+      }
+    },
+    "JobServerConfig" : {
+      "Type" : "AWS::AutoScaling::LaunchConfiguration",
+      "Metadata": {"foo": "bar"},
+      "Properties": {
+        "ImageId"           : {"Ref": "image"},
+        "InstanceType"      : {"Ref": "flavor"},
+        "SecurityGroups"    : [ "sg-1" ],
+        "UserData"          : "jsconfig data"
+      }
+    },
+    "ScaleUpPolicy" : {
+      "Type" : "AWS::AutoScaling::ScalingPolicy",
+      "Properties" : {
+        "AdjustmentType" : "ChangeInCapacity",
+        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
+        "Cooldown" : "0",
+        "ScalingAdjustment": "1"
+      }
+    },
+    "ScaleDownPolicy" : {
+      "Type" : "AWS::AutoScaling::ScalingPolicy",
+      "Properties" : {
+        "AdjustmentType" : "ChangeInCapacity",
+        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
+        "Cooldown" : "0",
+        "ScalingAdjustment" : "-2"
+      }
+    }
+  },
+  "Outputs": {
+    "InstanceList": {"Value": {
+      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
+  }
+}
+'''
+
+    lb_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list, default: "x,y"}
+
+resources:
+outputs:
+  PublicIp: {value: "not-used"}
+  AvailabilityZone: {value: 'not-used1'}
+  PrivateDnsName: {value: 'not-used2'}
+  PublicDnsName: {value: 'not-used3'}
+  PrivateIp: {value: 'not-used4'}
+
+'''
+
+    def setUp(self):
+        super(AutoScalingSignalTest, self).setUp()
+        self.build_timeout = self.conf.build_timeout
+        self.build_interval = self.conf.build_interval
+        self.files = {'provider.yaml': self.instance_template,
+                      'lb.yaml': self.lb_template}
+        self.env = {'resource_registry':
+                    {'resources':
+                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
+                     'AWS::EC2::Instance': 'provider.yaml'},
+                    'parameters': {'size': 2,
+                                   'image': self.conf.minimal_image_ref,
+                                   'flavor': self.conf.instance_type}}
+
+    def check_instance_count(self, stack_identifier, expected):
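+        # The group size is verified in two places: the IP list copied into
+        # the custom_lb metadata and the InstanceList stack output.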
+        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
+        actual_md = len(md['IPs'].split(','))
+        if actual_md != expected:
+            LOG.warning('check_instance_count exp:%d, meta:%s',
+                        expected, md['IPs'])
+            return False
+
+        stack = self.client.stacks.get(stack_identifier)
+        inst_list = self._stack_output(stack, 'InstanceList')
+        actual = len(inst_list.split(','))
+        if actual != expected:
+            LOG.warning('check_instance_count exp:%d, act:%s',
+                        expected, inst_list)
+        return actual == expected
+
+    def test_scaling_meta_update(self):
+        """Use heatclient to signal the up and down policy.
+
+        Then confirm that the metadata in the custom_lb is updated each
+        time.
+        """
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=self.files,
+                                             environment=self.env)
+
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        # Scale up one, Trigger alarm
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 3))
+
+        # Scale down two, Trigger alarm
+        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 1))
+
+    def test_signal_with_policy_update(self):
+        """Prove that an updated policy is used in the next signal."""
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=self.files,
+                                             environment=self.env)
+
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        # Scale up one, Trigger alarm
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 3))
+
+        # increase the adjustment to "+2" and remove the DesiredCapacity
+        # so we don't go from 3 to 2.
+        new_template = self.template.replace(
+            '"ScalingAdjustment": "1"',
+            '"ScalingAdjustment": "2"').replace(
+                '"DesiredCapacity" : {"Ref": "size"},', '')
+
+        self.update_stack(stack_identifier, template=new_template,
+                          environment=self.env, files=self.files)
+
+        # Scale up two, Trigger alarm
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 5))
+
+    def test_signal_during_suspend(self):
+        """Prove that a signal will fail when the stack is in suspend."""
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=self.files,
+                                             environment=self.env)
+
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+
+        # suspend the top level stack.
+        self.client.actions.suspend(stack_id=stack_identifier)
+
+        # Wait for stack to reach SUSPEND_COMPLETE
+        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
+
+        # Send a signal and an exception will raise
+        ex = self.assertRaises(exc.BadRequest,
+                               self.client.resources.signal,
+                               stack_identifier, 'ScaleUpPolicy')
+
+        error_msg = 'Signal resource during SUSPEND is not supported'
+        self.assertIn(error_msg, six.text_type(ex))
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Cannot signal resource during SUSPEND',
+            rsrc_name='ScaleUpPolicy')
+        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)
+
+        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
+        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
+        # still 2 instances.
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
diff --git a/heat_tempest_plugin/tests/functional/test_aws_stack.py b/heat_tempest_plugin/tests/functional/test_aws_stack.py
new file mode 100644
index 0000000..03beb1f
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_aws_stack.py
@@ -0,0 +1,201 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import hashlib
+import json
+import random
+
+from six.moves.urllib import parse
+from swiftclient import utils as swiftclient_utils
+import yaml
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class AwsStackTest(functional_base.FunctionalTestsBase):
+    test_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: AWS::CloudFormation::Stack
+    Properties:
+      TemplateURL: the.yaml
+      Parameters:
+        KeyName: foo
+Outputs:
+  output_foo:
+    Value: {"Fn::GetAtt": [the_nested, Outputs.Foo]}
+'''
+
+    nested_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  KeyName:
+    Type: String
+Outputs:
+  Foo:
+    Value: bar
+'''
+
+    update_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  KeyName:
+    Type: String
+Outputs:
+  Foo:
+    Value: foo
+'''
+
+    nested_with_res_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  KeyName:
+    Type: String
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+Outputs:
+  Foo:
+    Value: {"Fn::GetAtt": [NestedResource, value]}
+'''
+
+    def setUp(self):
+        super(AwsStackTest, self).setUp()
+        if not self.is_service_available('object-store'):
+            self.skipTest('object-store service not available, skipping')
+        self.object_container_name = test.rand_name()
+        self.project_id = self.identity_client.project_id
+        self.swift_key = hashlib.sha224(
+            str(random.getrandbits(256)).encode('ascii')).hexdigest()[:32]
+        key_header = 'x-container-meta-temp-url-key'
+        self.object_client.put_container(self.object_container_name,
+                                         {key_header: self.swift_key})
+        self.addCleanup(self.object_client.delete_container,
+                        self.object_container_name)
+
+    def publish_template(self, contents, cleanup=True):
+        oc = self.object_client
+
+        # post the object
+        oc.put_object(self.object_container_name, 'template.yaml', contents)
+        if cleanup:
+            self.addCleanup(oc.delete_object,
+                            self.object_container_name,
+                            'template.yaml')
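+        # Generate a Swift TempURL for the uploaded object so the Heat
+        # engine can fetch the nested template without Swift credentials.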
+        path = '/v1/AUTH_%s/%s/%s' % (self.project_id,
+                                      self.object_container_name,
+                                      'template.yaml')
+        timeout = self.conf.build_timeout * 10
+        tempurl = swiftclient_utils.generate_temp_url(path, timeout,
+                                                      self.swift_key, 'GET')
+        sw_url = parse.urlparse(oc.url)
+        return '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)
+
+    def test_nested_stack_create(self):
+        url = self.publish_template(self.nested_template)
+        self.template = self.test_template.replace('the.yaml', url)
+        stack_identifier = self.stack_create(template=self.template)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_resource_is_a_stack(stack_identifier, 'the_nested')
+        self.assertEqual('bar', self._stack_output(stack, 'output_foo'))
+
+    def test_nested_stack_create_with_timeout(self):
+        url = self.publish_template(self.nested_template)
+        self.template = self.test_template.replace('the.yaml', url)
+        timeout_template = yaml.safe_load(self.template)
+        props = timeout_template['Resources']['the_nested']['Properties']
+        props['TimeoutInMinutes'] = '50'
+
+        stack_identifier = self.stack_create(
+            template=timeout_template)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_resource_is_a_stack(stack_identifier, 'the_nested')
+        self.assertEqual('bar', self._stack_output(stack, 'output_foo'))
+
+    def test_nested_stack_adopt_ok(self):
+        url = self.publish_template(self.nested_with_res_template)
+        self.template = self.test_template.replace('the.yaml', url)
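+        # The adopt data maps pre-existing resource IDs (and their
+        # resource data) into the stack, so Heat adopts them instead of
+        # creating new resources.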
+        adopt_data = {
+            "resources": {
+                "the_nested": {
+                    "resource_id": "test-res-id",
+                    "resources": {
+                        "NestedResource": {
+                            "type": "OS::Heat::RandomString",
+                            "resource_id": "test-nested-res-id",
+                            "resource_data": {"value": "goopie"}
+                        }
+                    }
+                }
+            },
+            "environment": {"parameters": {}},
+            "template": yaml.safe_load(self.template)
+        }
+
+        stack_identifier = self.stack_adopt(adopt_data=json.dumps(adopt_data))
+
+        self.assert_resource_is_a_stack(stack_identifier, 'the_nested')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('goopie', self._stack_output(stack, 'output_foo'))
+
+    def test_nested_stack_adopt_fail(self):
+        url = self.publish_template(self.nested_with_res_template)
+        self.template = self.test_template.replace('the.yaml', url)
+        adopt_data = {
+            "resources": {
+                "the_nested": {
+                    "resource_id": "test-res-id",
+                    "resources": {
+                    }
+                }
+            },
+            "environment": {"parameters": {}},
+            "template": yaml.safe_load(self.template)
+        }
+
+        stack_identifier = self.stack_adopt(adopt_data=json.dumps(adopt_data),
+                                            wait_for_status='ADOPT_FAILED')
+        rsrc = self.client.resources.get(stack_identifier, 'the_nested')
+        self.assertEqual('ADOPT_FAILED', rsrc.resource_status)
+
+    def test_nested_stack_update(self):
+        url = self.publish_template(self.nested_template)
+        self.template = self.test_template.replace('the.yaml', url)
+        stack_identifier = self.stack_create(template=self.template)
+        original_nested_id = self.assert_resource_is_a_stack(
+            stack_identifier, 'the_nested')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('bar', self._stack_output(stack, 'output_foo'))
+
+        new_template = yaml.safe_load(self.template)
+        props = new_template['Resources']['the_nested']['Properties']
+        props['TemplateURL'] = self.publish_template(self.update_template,
+                                                     cleanup=False)
+
+        self.update_stack(stack_identifier, new_template)
+
+        # Expect the physical resource name to stay the same after update,
+        # proving the nested stack was updated in place, not replaced.
+        new_nested_id = self.assert_resource_is_a_stack(
+            stack_identifier, 'the_nested')
+        self.assertEqual(original_nested_id, new_nested_id)
+        updt_stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('foo', self._stack_output(updt_stack, 'output_foo'))
+
+    def test_nested_stack_suspend_resume(self):
+        url = self.publish_template(self.nested_template)
+        self.template = self.test_template.replace('the.yaml', url)
+        stack_identifier = self.stack_create(template=self.template)
+        self.stack_suspend(stack_identifier)
+        self.stack_resume(stack_identifier)
diff --git a/heat_tempest_plugin/tests/functional/test_cancel_update.py b/heat_tempest_plugin/tests/functional/test_cancel_update.py
new file mode 100644
index 0000000..4a8938a
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_cancel_update.py
@@ -0,0 +1,61 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class CancelUpdateTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: '2013-05-23'
+parameters:
+ InstanceType:
+   type: string
+ ImageId:
+   type: string
+ network:
+   type: string
+resources:
+ port:
+   type: OS::Neutron::Port
+   properties:
+     network: {get_param: network}
+ Server:
+   type: OS::Nova::Server
+   properties:
+     flavor_update_policy: REPLACE
+     image: {get_param: ImageId}
+     flavor: {get_param: InstanceType}
+     networks:
+       - port: {get_resource: port}
+'''
+
+    def setUp(self):
+        super(CancelUpdateTest, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.minimal_instance_type:
+            raise self.skipException("No minimal flavor configured to test.")
+
+    def test_cancel_update_server_with_port(self):
+        parameters = {'InstanceType': self.conf.minimal_instance_type,
+                      'ImageId': self.conf.minimal_image_ref,
+                      'network': self.conf.fixed_network_name}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             parameters=parameters)
+        parameters['InstanceType'] = self.conf.instance_type
+        self.update_stack(stack_identifier, self.template,
+                          parameters=parameters,
+                          expected_status='UPDATE_IN_PROGRESS')
+
+        self.cancel_update_stack(stack_identifier)
diff --git a/heat_tempest_plugin/tests/functional/test_conditional_exposure.py b/heat_tempest_plugin/tests/functional/test_conditional_exposure.py
new file mode 100644
index 0000000..dd4329b
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_conditional_exposure.py
@@ -0,0 +1,157 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heatclient import exc
+import keystoneclient
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ServiceBasedExposureTest(functional_base.FunctionalTestsBase):
+    # NOTE(pas-ha) if we ever decide to install Sahara on the Heat
+    # functional gate, this must be changed to another service that is
+    # not installed but is supported in principle.
+    unavailable_service = 'Sahara'
+    unavailable_template = """
+heat_template_version: 2015-10-15
+parameters:
+  instance_type:
+    type: string
+resources:
+  not_available:
+    type: OS::Sahara::NodeGroupTemplate
+    properties:
+      plugin_name: fake
+      hadoop_version: 0.1
+      flavor: {get_param: instance_type}
+      node_processes: []
+"""
+
+    def setUp(self):
+        super(ServiceBasedExposureTest, self).setUp()
+        # check that Sahara endpoint is available
+        if self._is_sahara_deployed():
+            self.skipTest("Sahara is actually deployed, "
+                          "can not run negative tests on "
+                          "Sahara resources availability.")
+
+    def _is_sahara_deployed(self):
+        try:
+            self.identity_client.get_endpoint_url('data-processing',
+                                                  self.conf.region)
+        except keystoneclient.exceptions.EndpointNotFound:
+            return False
+        return True
+
+    def test_unavailable_resources_not_listed(self):
+        resources = self.client.resource_types.list()
+        self.assertFalse(any(self.unavailable_service in r.resource_type
+                             for r in resources))
+
+    def test_unavailable_resources_not_created(self):
+        stack_name = self._stack_rand_name()
+        parameters = {'instance_type': self.conf.minimal_instance_type}
+        ex = self.assertRaises(exc.HTTPBadRequest,
+                               self.client.stacks.create,
+                               stack_name=stack_name,
+                               parameters=parameters,
+                               template=self.unavailable_template)
+        self.assertIn('ResourceTypeUnavailable', ex.message.decode('utf-8'))
+        self.assertIn('OS::Sahara::NodeGroupTemplate',
+                      ex.message.decode('utf-8'))
+
+
+class RoleBasedExposureTest(functional_base.FunctionalTestsBase):
+
+    fl_tmpl = """
+heat_template_version: 2015-10-15
+
+resources:
+  not4everyone:
+    type: OS::Nova::Flavor
+    properties:
+      ram: 20000
+      vcpus: 10
+"""
+
+    cvt_tmpl = """
+heat_template_version: 2015-10-15
+
+resources:
+  cvt:
+    type: OS::Cinder::VolumeType
+    properties:
+      name: cvt_test
+"""
+
+    host_aggr_tmpl = """
+heat_template_version: 2015-10-15
+parameters:
+  az:
+    type: string
+    default: nova
+resources:
+  cvt:
+    type: OS::Nova::HostAggregate
+    properties:
+      name: aggregate_test
+      availability_zone: {get_param: az}
+"""
+
+    scenarios = [
+        ('r_nova_flavor', dict(
+            stack_name='s_nova_flavor',
+            template=fl_tmpl,
+            forbidden_r_type="OS::Nova::Flavor",
+            test_creation=True)),
+        ('r_nova_host_aggregate', dict(
+            stack_name='s_nova_host_aggregate',
+            template=host_aggr_tmpl,
+            forbidden_r_type="OS::Nova::HostAggregate",
+            test_creation=True)),
+        ('r_cinder_vtype', dict(
+            stack_name='s_cinder_vtype',
+            template=cvt_tmpl,
+            forbidden_r_type="OS::Cinder::VolumeType",
+            test_creation=True)),
+        ('r_cinder_vtype_encrypt', dict(
+            forbidden_r_type="OS::Cinder::EncryptedVolumeType",
+            test_creation=False)),
+        ('r_neutron_qos', dict(
+            forbidden_r_type="OS::Neutron::QoSPolicy",
+            test_creation=False)),
+        ('r_neutron_qos_bandwidth_limit', dict(
+            forbidden_r_type="OS::Neutron::QoSBandwidthLimitRule",
+            test_creation=False)),
+        ('r_manila_share_type', dict(
+            forbidden_r_type="OS::Manila::ShareType",
+            test_creation=False))
+    ]
+
+    def test_non_admin_forbidden_create_resources(self):
+        """Fail to create resource w/o admin role.
+
+        Integration tests job runs as normal OpenStack user,
+        and the resources above are configured to require
+        admin role in default policy file of Heat.
+        """
+        if self.test_creation:
+            ex = self.assertRaises(exc.Forbidden,
+                                   self.client.stacks.create,
+                                   stack_name=self.stack_name,
+                                   template=self.template)
+            self.assertIn(self.forbidden_r_type, ex.message.decode('utf-8'))
+
+    def test_forbidden_resource_not_listed(self):
+        resources = self.client.resource_types.list()
+        self.assertNotIn(self.forbidden_r_type,
+                         (r.resource_type for r in resources))
diff --git a/heat_tempest_plugin/tests/functional/test_conditions.py b/heat_tempest_plugin/tests/functional/test_conditions.py
new file mode 100644
index 0000000..106a4ca
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_conditions.py
@@ -0,0 +1,619 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+cfn_template = '''
+AWSTemplateFormatVersion: 2010-09-09
+Parameters:
+  env_type:
+    Default: test
+    Type: String
+    AllowedValues: [prod, test]
+  zone:
+    Type: String
+    Default: beijing
+Conditions:
+  Prod: {"Fn::Equals" : [{Ref: env_type}, "prod"]}
+  Test:
+    Fn::Not:
+    - Fn::Equals:
+      - Ref: env_type
+      - prod
+  Beijing_Prod:
+    Fn::And:
+    - Fn::Equals:
+      - Ref: env_type
+      - prod
+    - Fn::Equals:
+      - Ref: zone
+      - beijing
+  Xian_Zone:
+    Fn::Equals:
+    - Ref: zone
+    - xian
+  Xianyang_Zone:
+    Fn::Equals:
+    - Ref: zone
+    - xianyang
+  Fujian_Zone:
+    Fn::Or:
+    - Fn::Equals:
+      - Ref: zone
+      - fuzhou
+    - Fn::Equals:
+      - Ref: zone
+      - xiamen
+  Fujian_Prod:
+    Fn::And:
+    - Fujian_Zone
+    - Prod
+  Shannxi_Provice:
+    Fn::Or:
+    - Xian_Zone
+    - Xianyang_Zone
+  Not_Shannxi:
+    Fn::Not: [Shannxi_Provice]
+Resources:
+  test_res:
+    Type: OS::Heat::TestResource
+    Properties:
+      value: {"Fn::If": ["Prod", "env_is_prod", "env_is_test"]}
+  prod_res:
+    Type: OS::Heat::TestResource
+    Properties:
+      value: prod_res
+    Condition: Prod
+  test_res1:
+    Type: OS::Heat::TestResource
+    Properties:
+      value: just in test env
+    Condition: Test
+  beijing_prod_res:
+    Type: OS::Heat::TestResource
+    Properties:
+      value: beijing_prod_res
+    Condition: Beijing_Prod
+  fujian_res:
+    Type: OS::Heat::TestResource
+    Condition: Fujian_Zone
+    Properties:
+      value: fujian_res
+  fujian_prod_res:
+    Type: OS::Heat::TestResource
+    Condition: Fujian_Prod
+    Properties:
+      value: fujian_prod_res
+  shannxi_res:
+    Type: OS::Heat::TestResource
+    Condition: Shannxi_Provice
+    Properties:
+      value: shannxi_res
+  not_shannxi_res:
+    Type: OS::Heat::TestResource
+    Condition: Not_Shannxi
+    Properties:
+      value: not_shannxi_res
+Outputs:
+  res_value:
+    Value: {"Fn::GetAtt": [prod_res, output]}
+    Condition: Prod
+  test_res_value:
+    Value: {"Fn::GetAtt": [test_res, output]}
+  prod_resource:
+    Value: {"Fn::If": [Prod, {Ref: prod_res}, 'no_prod_res']}
+  test_res1_value:
+    Value: {"Fn::If": [Test, {"Fn::GetAtt": [test_res1, output]},
+                       'no_test_res1']}
+  beijing_prod_res:
+    Value: {"Fn::If": [Beijing_Prod, {Ref: beijing_prod_res}, 'no_prod_res']}
+'''
+
+hot_template = '''
+heat_template_version: 2016-10-14
+parameters:
+  env_type:
+    default: test
+    type: string
+    constraints:
+      - allowed_values: [prod, test]
+  zone:
+    type: string
+    default: beijing
+conditions:
+  prod: {equals : [{get_param: env_type}, "prod"]}
+  test:
+    not:
+      equals:
+      - get_param: env_type
+      - prod
+  beijing_prod:
+    and:
+    - equals:
+      - get_param: zone
+      - beijing
+    - equals:
+      - get_param: env_type
+      - prod
+  xian_zone:
+    equals:
+    - get_param: zone
+    - xian
+  xianyang_zone:
+    equals:
+    - get_param: zone
+    - xianyang
+  fujian_zone:
+    or:
+    - equals:
+      - get_param: zone
+      - fuzhou
+    - equals:
+      - get_param: zone
+      - xiamen
+  fujian_prod:
+    and:
+    - fujian_zone
+    - prod
+  shannxi_provice:
+    or:
+    - xian_zone
+    - xianyang_zone
+  not_shannxi:
+    not: shannxi_provice
+resources:
+  test_res:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: ["prod", "env_is_prod", "env_is_test"]}
+  prod_res:
+    type: OS::Heat::TestResource
+    properties:
+      value: prod_res
+    condition: prod
+  test_res1:
+    type: OS::Heat::TestResource
+    properties:
+      value: just in test env
+    condition: test
+  beijing_prod_res:
+    type: OS::Heat::TestResource
+    properties:
+      value: beijing_prod_res
+    condition: beijing_prod
+  fujian_res:
+    type: OS::Heat::TestResource
+    condition: fujian_zone
+    properties:
+      value: fujian_res
+  fujian_prod_res:
+    type: OS::Heat::TestResource
+    condition: fujian_prod
+    properties:
+      value: fujian_prod_res
+  shannxi_res:
+    type: OS::Heat::TestResource
+    condition: shannxi_provice
+    properties:
+      value: shannxi_res
+  not_shannxi_res:
+    type: OS::Heat::TestResource
+    condition: not_shannxi
+    properties:
+      value: not_shannxi_res
+outputs:
+  res_value:
+    value: {get_attr: [prod_res, output]}
+    condition: prod
+  test_res_value:
+    value: {get_attr: [test_res, output]}
+  prod_resource:
+    value: {if: [prod, {get_resource: prod_res}, 'no_prod_res']}
+  test_res1_value:
+    value: {if: [test, {get_attr: [test_res1, output]}, 'no_test_res1']}
+  beijing_prod_res:
+    value: {if: [beijing_prod, {get_resource: beijing_prod_res},
+                 'no_prod_res']}
+'''
+
+before_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+  env_type:
+    default: test
+    type: string
+conditions:
+  cd1: {equals : [{get_param: env_type}, "prod"]}
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd1, 'prod', 'test']}
+'''
+
+after_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+  env_type:
+    default: prod
+    type: string
+conditions:
+  cd2: {equals : [{get_param: env_type}, "prod"]}
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd2, 'prod', 'test']}
+  test2:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd2, 'prod', 'test']}
+'''
+
+fail_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+  env_type:
+    default: prod
+    type: string
+conditions:
+  cd3: {equals : [{get_param: env_type}, "prod"]}
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd3, 'prod', 'test']}
+  test2:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd3, 'prod', 'test']}
+  test_fail:
+    type: OS::Heat::TestResource
+    properties:
+      fail: True
+    depends_on: [test, test2]
+'''
+
+recover_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+  env_type:
+    default: prod
+    type: string
+conditions:
+  cd3: {equals : [{get_param: env_type}, "prod"]}
+resources:
+  test2:
+    type: OS::Heat::TestResource
+    properties:
+      value: {if: [cd3, 'prod', 'test']}
+  test_fail:
+    type: OS::Heat::TestResource
+    properties:
+      fail: False
+    depends_on: [test2]
+'''
+
+
+class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
+
+    def res_assert_for_prod(self, resources, bj_prod=True, fj_zone=False,
+                            shannxi_provice=False):
+        res_names = [res.resource_name for res in resources]
+        if bj_prod:
+            self.assertEqual(4, len(resources))
+            self.assertIn('beijing_prod_res', res_names)
+            self.assertIn('not_shannxi_res', res_names)
+        elif fj_zone:
+            self.assertEqual(5, len(resources))
+            self.assertIn('fujian_res', res_names)
+            self.assertNotIn('beijing_prod_res', res_names)
+            self.assertIn('not_shannxi_res', res_names)
+            self.assertIn('fujian_prod_res', res_names)
+        elif shannxi_provice:
+            self.assertEqual(3, len(resources))
+            self.assertIn('shannxi_res', res_names)
+        else:
+            self.assertEqual(3, len(resources))
+            self.assertIn('not_shannxi_res', res_names)
+        self.assertIn('prod_res', res_names)
+        self.assertIn('test_res', res_names)
+
+    def res_assert_for_test(self, resources, fj_zone=False,
+                            shannxi_provice=False):
+        res_names = [res.resource_name for res in resources]
+
+        if fj_zone:
+            self.assertEqual(4, len(resources))
+            self.assertIn('fujian_res', res_names)
+            self.assertIn('not_shannxi_res', res_names)
+        elif shannxi_provice:
+            self.assertEqual(3, len(resources))
+            self.assertNotIn('fujian_res', res_names)
+            self.assertIn('shannxi_res', res_names)
+        else:
+            self.assertEqual(3, len(resources))
+            self.assertIn('not_shannxi_res', res_names)
+        self.assertIn('test_res', res_names)
+        self.assertIn('test_res1', res_names)
+        self.assertNotIn('prod_res', res_names)
+
+    def output_assert_for_prod(self, stack_id, bj_prod=True):
+        output = self.client.stacks.output_show(stack_id,
+                                                'res_value')['output']
+        self.assertEqual('prod_res', output['output_value'])
+
+        test_res_value = self.client.stacks.output_show(
+            stack_id, 'test_res_value')['output']
+        self.assertEqual('env_is_prod', test_res_value['output_value'])
+
+        prod_resource = self.client.stacks.output_show(
+            stack_id, 'prod_resource')['output']
+        self.assertNotEqual('no_prod_res', prod_resource['output_value'])
+
+        test_res_output = self.client.stacks.output_show(
+            stack_id, 'test_res1_value')['output']
+        self.assertEqual('no_test_res1', test_res_output['output_value'])
+
+        beijing_prod_res = self.client.stacks.output_show(
+            stack_id, 'beijing_prod_res')['output']
+        if bj_prod:
+            self.assertNotEqual('no_prod_res',
+                                beijing_prod_res['output_value'])
+        else:
+            self.assertEqual('no_prod_res', beijing_prod_res['output_value'])
+
+    def output_assert_for_test(self, stack_id):
+        output = self.client.stacks.output_show(stack_id,
+                                                'res_value')['output']
+        self.assertIsNone(output['output_value'])
+
+        test_res_value = self.client.stacks.output_show(
+            stack_id, 'test_res_value')['output']
+        self.assertEqual('env_is_test', test_res_value['output_value'])
+
+        prod_resource = self.client.stacks.output_show(
+            stack_id, 'prod_resource')['output']
+        self.assertEqual('no_prod_res', prod_resource['output_value'])
+
+        test_res_output = self.client.stacks.output_show(
+            stack_id, 'test_res1_value')['output']
+        self.assertEqual('just in test env',
+                         test_res_output['output_value'])
+
+        beijing_prod_res = self.client.stacks.output_show(
+            stack_id, 'beijing_prod_res')['output']
+        self.assertEqual('no_prod_res', beijing_prod_res['output_value'])
+
+    def test_stack_create_update_cfn_template_test_to_prod(self):
+        stack_identifier = self.stack_create(template=cfn_template)
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'zone': 'fuzhou'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, fj_zone=True)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, shannxi_provice=True)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'env_type': 'prod'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources)
+        self.output_assert_for_prod(stack_identifier)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'shanghai'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, False)
+        self.output_assert_for_prod(stack_identifier, False)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'xiamen'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+        self.output_assert_for_prod(stack_identifier, False)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+                                 shannxi_provice=True)
+        self.output_assert_for_prod(stack_identifier, False)
+
+    def test_stack_create_update_cfn_template_prod_to_test(self):
+        parms = {'env_type': 'prod'}
+        stack_identifier = self.stack_create(template=cfn_template,
+                                             parameters=parms)
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources)
+        self.output_assert_for_prod(stack_identifier)
+
+        parms = {'zone': 'xiamen',
+                 'env_type': 'prod'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+        self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+        parms = {'zone': 'xianyang',
+                 'env_type': 'prod'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+                                 shannxi_provice=True)
+        self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+        parms = {'zone': 'shanghai',
+                 'env_type': 'prod'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+                                 shannxi_provice=False)
+        self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+        parms = {'env_type': 'test'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'env_type': 'test',
+                 'zone': 'fuzhou'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, fj_zone=True)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'env_type': 'test',
+                 'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=cfn_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, fj_zone=False,
+                                 shannxi_provice=True)
+        self.output_assert_for_test(stack_identifier)
+
+    def test_stack_create_update_hot_template_test_to_prod(self):
+        stack_identifier = self.stack_create(template=hot_template)
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, shannxi_provice=True)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'env_type': 'prod'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources)
+        self.output_assert_for_prod(stack_identifier)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'shanghai'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, False)
+        self.output_assert_for_prod(stack_identifier, False)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, False, shannxi_provice=True)
+        self.output_assert_for_prod(stack_identifier, False)
+
+    def test_stack_create_update_hot_template_prod_to_test(self):
+        parms = {'env_type': 'prod'}
+        stack_identifier = self.stack_create(template=hot_template,
+                                             parameters=parms)
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources)
+        self.output_assert_for_prod(stack_identifier)
+
+        parms = {'env_type': 'prod',
+                 'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_prod(resources, False, shannxi_provice=True)
+        self.output_assert_for_prod(stack_identifier, False)
+
+        parms = {'env_type': 'test'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources)
+        self.output_assert_for_test(stack_identifier)
+
+        parms = {'env_type': 'test',
+                 'zone': 'xianyang'}
+        self.update_stack(stack_identifier,
+                          template=hot_template,
+                          parameters=parms)
+
+        resources = self.client.resources.list(stack_identifier)
+        self.res_assert_for_test(resources, fj_zone=False,
+                                 shannxi_provice=True)
+        self.output_assert_for_test(stack_identifier)
+
+    def test_condition_rename(self):
+        stack_identifier = self.stack_create(template=before_rename_tmpl)
+        self.update_stack(stack_identifier, template=after_rename_tmpl)
+        self.update_stack(stack_identifier, template=fail_rename_tmpl,
+                          expected_status='UPDATE_FAILED')
+        self.update_stack(stack_identifier, template=recover_rename_tmpl)
diff --git a/heat_tempest_plugin/tests/functional/test_create_update.py b/heat_tempest_plugin/tests/functional/test_create_update.py
new file mode 100644
index 0000000..46b1837
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_create_update.py
@@ -0,0 +1,710 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import copy
+import json
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+test_template_one_resource = {
+    'heat_template_version': 'pike',
+    'description': 'Test template to create one instance.',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 1,
+                'action_wait_secs': {'create': 1},
+                'client_name': 'nova',
+                'entity_name': 'servers',
+            }
+        }
+    }
+}
+
+test_template_two_resource = {
+    'heat_template_version': 'pike',
+    'description': 'Test template to create two instances.',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0,
+                'action_wait_secs': {'update': 1}
+            }
+        },
+        'test2': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0
+            }
+        }
+    }
+}
+
+
+def _change_rsrc_properties(template, rsrcs, values):
+    """Return a copy of template with properties of rsrcs overridden."""
+    modified_template = copy.deepcopy(template)
+    for rsrc_name in rsrcs:
+        rsrc_prop = modified_template['resources'][
+            rsrc_name]['properties']
+        for prop in rsrc_prop:
+            if prop in values:
+                rsrc_prop[prop] = values[prop]
+    return modified_template
+
+
+class CreateStackTest(functional_base.FunctionalTestsBase):
+    def test_create_rollback(self):
+        values = {'fail': True, 'value': 'test_create_rollback'}
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'], values)
+
+        self.stack_create(
+            template=template,
+            expected_status='ROLLBACK_COMPLETE',
+            disable_rollback=False)
+
+
+class UpdateStackTest(functional_base.FunctionalTestsBase):
+
+    provider_template = {
+        'heat_template_version': '2013-05-23',
+        'description': 'foo',
+        'resources': {
+            'test1': {
+                'type': 'My::TestResource'
+            }
+        }
+    }
+
+    provider_group_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  count:
+    type: number
+    default: 2
+resources:
+  test_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: {get_param: count}
+      resource_def:
+        type: My::TestResource
+'''
+
+    update_userdata_template = '''
+heat_template_version: 2014-10-16
+parameters:
+  flavor:
+    type: string
+  user_data:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+
+resources:
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      networks: [{network: {get_param: network} }]
+      user_data_format: SOFTWARE_CONFIG
+      user_data: {get_param: user_data}
+'''
+
+    fail_param_template = '''
+heat_template_version: 2014-10-16
+parameters:
+  do_fail:
+    type: boolean
+    default: False
+resources:
+  aresource:
+    type: OS::Heat::TestResource
+    properties:
+      value: Test
+      fail: {get_param: do_fail}
+      wait_secs: 1
+'''
+
+    def test_stack_update_nochange(self):
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_no_change'})
+        stack_identifier = self.stack_create(
+            template=template)
+        expected_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+        # Update with no changes, resources should be unchanged
+        self.update_stack(stack_identifier, template)
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+    def test_stack_in_place_update(self):
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_in_place'})
+        stack_identifier = self.stack_create(
+            template=template)
+        expected_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+        resource = self.client.resources.list(stack_identifier)
+        initial_phy_id = resource[0].physical_resource_id
+
+        tmpl_update = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_in_place_update'})
+        # Update the Value
+        self.update_stack(stack_identifier, tmpl_update)
+        resource = self.client.resources.list(stack_identifier)
+        # By default the update is in place (same physical resource ID)
+        self.assertEqual(initial_phy_id,
+                         resource[0].physical_resource_id)
+
+    def test_stack_update_replace(self):
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_replace'})
+        stack_identifier = self.stack_create(
+            template=template)
+        expected_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+        resource = self.client.resources.list(stack_identifier)
+        initial_phy_id = resource[0].physical_resource_id
+
+        # Update the value and also set update_replace prop
+        tmpl_update = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_in_place_update', 'update_replace': True})
+        self.update_stack(stack_identifier, tmpl_update)
+        resource = self.client.resources.list(stack_identifier)
+        # update_replace forces replacement (new physical resource ID)
+        self.assertNotEqual(initial_phy_id,
+                            resource[0].physical_resource_id)
+
+    def test_stack_update_add_remove(self):
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_add_remove'})
+        stack_identifier = self.stack_create(
+            template=template)
+        initial_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        tmpl_update = _change_rsrc_properties(
+            test_template_two_resource, ['test1', 'test2'],
+            {'value': 'test_add_remove_update'})
+        # Add one resource via a stack update
+        self.update_stack(stack_identifier, tmpl_update)
+        updated_resources = {'test1': 'OS::Heat::TestResource',
+                             'test2': 'OS::Heat::TestResource'}
+        self.assertEqual(updated_resources,
+                         self.list_resources(stack_identifier))
+
+        # Then remove it by updating with the original template
+        self.update_stack(stack_identifier, template)
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+    def test_stack_update_rollback(self):
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_update_rollback'})
+        stack_identifier = self.stack_create(
+            template=template)
+        initial_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        tmpl_update = _change_rsrc_properties(
+            test_template_two_resource, ['test1', 'test2'],
+            {'value': 'test_update_rollback', 'fail': True})
+        # Update the stack with a change that makes the resource fail
+        self.update_stack(stack_identifier, tmpl_update,
+                          expected_status='ROLLBACK_COMPLETE',
+                          disable_rollback=False)
+        # Since the stack update failed, only the original resource is present
+        updated_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(updated_resources,
+                         self.list_resources(stack_identifier))
+
+    def test_stack_update_from_failed(self):
+        # Prove it's possible to update from an UPDATE_FAILED state
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'],
+                                           {'value': 'test_update_failed'})
+        stack_identifier = self.stack_create(
+            template=template)
+        initial_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        tmpl_update = _change_rsrc_properties(
+            test_template_one_resource, ['test1'], {'fail': True})
+        # Update with a bad template; the update should fail
+        self.update_stack(stack_identifier, tmpl_update,
+                          expected_status='UPDATE_FAILED')
+        # but then passing a good template should succeed
+        self.update_stack(stack_identifier, test_template_two_resource)
+        updated_resources = {'test1': 'OS::Heat::TestResource',
+                             'test2': 'OS::Heat::TestResource'}
+        self.assertEqual(updated_resources,
+                         self.list_resources(stack_identifier))
+
+    def test_stack_update_provider(self):
+        template = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_provider_template'})
+        files = {'provider.template': json.dumps(template)}
+        env = {'resource_registry':
+               {'My::TestResource': 'provider.template'}}
+        stack_identifier = self.stack_create(
+            template=self.provider_template,
+            files=files,
+            environment=env
+        )
+
+        initial_resources = {'test1': 'My::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Prove the resource is backed by a nested stack, save the ID
+        nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
+                                                            'test1')
+        nested_id = nested_identifier.split('/')[-1]
+
+        # Then check the expected resources are in the nested stack
+        nested_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+        tmpl_update = _change_rsrc_properties(
+            test_template_two_resource, ['test1', 'test2'],
+            {'value': 'test_provider_template'})
+        # Add one resource via a stack update by changing the nested stack
+        files['provider.template'] = json.dumps(tmpl_update)
+        self.update_stack(stack_identifier, self.provider_template,
+                          environment=env, files=files)
+
+        # Parent resources should be unchanged and the nested stack
+        # should have been updated in-place without replacement
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+        rsrc = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual(rsrc.physical_resource_id, nested_id)
+
+        # Then check the expected resources are in the nested stack
+        nested_resources = {'test1': 'OS::Heat::TestResource',
+                            'test2': 'OS::Heat::TestResource'}
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+
+    def test_stack_update_alias_type(self):
+        env = {'resource_registry':
+               {'My::TestResource': 'OS::Heat::RandomString',
+                'My::TestResource2': 'OS::Heat::RandomString'}}
+        stack_identifier = self.stack_create(
+            template=self.provider_template,
+            environment=env
+        )
+        p_res = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual('My::TestResource', p_res.resource_type)
+
+        initial_resources = {'test1': 'My::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+        res = self.client.resources.get(stack_identifier, 'test1')
+        # Modify the type of the resource alias to My::TestResource2
+        tmpl_update = copy.deepcopy(self.provider_template)
+        tmpl_update['resources']['test1']['type'] = 'My::TestResource2'
+        self.update_stack(stack_identifier, tmpl_update, environment=env)
+        res_a = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual(res.physical_resource_id, res_a.physical_resource_id)
+        self.assertEqual(res.attributes['value'], res_a.attributes['value'])
+
+    def test_stack_update_alias_changes(self):
+        env = {'resource_registry':
+               {'My::TestResource': 'OS::Heat::RandomString'}}
+        stack_identifier = self.stack_create(
+            template=self.provider_template,
+            environment=env
+        )
+        p_res = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual('My::TestResource', p_res.resource_type)
+
+        initial_resources = {'test1': 'My::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+        res = self.client.resources.get(stack_identifier, 'test1')
+        # Modify the resource alias to point to a different type
+        env = {'resource_registry':
+               {'My::TestResource': 'OS::Heat::TestResource'}}
+        self.update_stack(stack_identifier, template=self.provider_template,
+                          environment=env)
+        res_a = self.client.resources.get(stack_identifier, 'test1')
+        self.assertNotEqual(res.physical_resource_id,
+                            res_a.physical_resource_id)
+
+    def test_stack_update_provider_type(self):
+        template = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_provider_template'})
+        files = {'provider.template': json.dumps(template)}
+        env = {'resource_registry':
+               {'My::TestResource': 'provider.template',
+                'My::TestResource2': 'provider.template'}}
+        stack_identifier = self.stack_create(
+            template=self.provider_template,
+            files=files,
+            environment=env
+        )
+        p_res = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual('My::TestResource', p_res.resource_type)
+
+        initial_resources = {'test1': 'My::TestResource'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Prove the resource is backed by a nested stack, save the ID
+        nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
+                                                            'test1')
+        nested_id = nested_identifier.split('/')[-1]
+
+        # Then check the expected resources are in the nested stack
+        nested_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+        n_res = self.client.resources.get(nested_identifier, 'test1')
+
+        # Modify the type of the provider resource to My::TestResource2
+        tmpl_update = copy.deepcopy(self.provider_template)
+        tmpl_update['resources']['test1']['type'] = 'My::TestResource2'
+        self.update_stack(stack_identifier, tmpl_update,
+                          environment=env, files=files)
+        p_res = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual('My::TestResource2', p_res.resource_type)
+
+        # Parent resources should be unchanged and the nested stack
+        # should have been updated in-place without replacement
+        self.assertEqual({u'test1': u'My::TestResource2'},
+                         self.list_resources(stack_identifier))
+        rsrc = self.client.resources.get(stack_identifier, 'test1')
+        self.assertEqual(rsrc.physical_resource_id, nested_id)
+
+        # Then check the expected resources are in the nested stack
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+        n_res2 = self.client.resources.get(nested_identifier, 'test1')
+        self.assertEqual(n_res.physical_resource_id,
+                         n_res2.physical_resource_id)
+
+    def test_stack_update_provider_group(self):
+        """Test two-level nested update."""
+
+        # Create a ResourceGroup (which creates a nested stack),
+        # containing provider resources (which create a nested
+        # stack), thus exercising an update which traverses
+        # two levels of nesting.
+        template = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_provider_group_template'})
+        files = {'provider.template': json.dumps(template)}
+        env = {'resource_registry':
+               {'My::TestResource': 'provider.template'}}
+
+        stack_identifier = self.stack_create(
+            template=self.provider_group_template,
+            files=files,
+            environment=env
+        )
+
+        initial_resources = {'test_group': 'OS::Heat::ResourceGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Prove the resource is backed by a nested stack, save the ID
+        nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
+                                                            'test_group')
+
+        # Then check the expected resources are in the nested stack
+        nested_resources = {'0': 'My::TestResource',
+                            '1': 'My::TestResource'}
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+
+        for n_rsrc in nested_resources:
+            rsrc = self.client.resources.get(nested_identifier, n_rsrc)
+            provider_stack = self.client.stacks.get(rsrc.physical_resource_id)
+            provider_identifier = '%s/%s' % (provider_stack.stack_name,
+                                             provider_stack.id)
+            provider_resources = {u'test1': u'OS::Heat::TestResource'}
+            self.assertEqual(provider_resources,
+                             self.list_resources(provider_identifier))
+
+        tmpl_update = _change_rsrc_properties(
+            test_template_two_resource, ['test1', 'test2'],
+            {'value': 'test_provider_group_template'})
+        # Add one resource via a stack update by changing the nested stack
+        files['provider.template'] = json.dumps(tmpl_update)
+        self.update_stack(stack_identifier, self.provider_group_template,
+                          environment=env, files=files)
+
+        # Parent resources should be unchanged and the nested stack
+        # should have been updated in-place without replacement
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Resource group stack should also be unchanged (but updated)
+        nested_stack = self.client.stacks.get(nested_identifier)
+        self.assertEqual('UPDATE_COMPLETE', nested_stack.stack_status)
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+
+        for n_rsrc in nested_resources:
+            rsrc = self.client.resources.get(nested_identifier, n_rsrc)
+            provider_stack = self.client.stacks.get(rsrc.physical_resource_id)
+            provider_identifier = '%s/%s' % (provider_stack.stack_name,
+                                             provider_stack.id)
+            provider_resources = {'test1': 'OS::Heat::TestResource',
+                                  'test2': 'OS::Heat::TestResource'}
+            self.assertEqual(provider_resources,
+                             self.list_resources(provider_identifier))
+
+    def test_stack_update_with_replacing_userdata(self):
+        """Test case for updating userdata of instance.
+
+        Confirm that we can update userdata of instance during updating stack
+        by the user of member role.
+
+        Make sure that a resource that inherits from StackUser can be deleted
+        during updating stack.
+        """
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.minimal_instance_type:
+            raise self.skipException("No flavor configured to test")
+
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'image': self.conf.minimal_image_ref,
+                 'network': self.conf.fixed_network_name,
+                 'user_data': ''}
+
+        stack_identifier = self.stack_create(
+            template=self.update_userdata_template,
+            parameters=parms
+        )
+
+        parms_updated = parms
+        parms_updated['user_data'] = 'two'
+        self.update_stack(
+            stack_identifier,
+            template=self.update_userdata_template,
+            parameters=parms_updated)
+
+    def test_stack_update_provider_group_patch(self):
+        """Test a two-level nested update with PATCH."""
+        template = _change_rsrc_properties(
+            test_template_one_resource, ['test1'],
+            {'value': 'test_provider_group_template'})
+        files = {'provider.template': json.dumps(template)}
+        env = {'resource_registry':
+               {'My::TestResource': 'provider.template'}}
+
+        stack_identifier = self.stack_create(
+            template=self.provider_group_template,
+            files=files,
+            environment=env
+        )
+
+        initial_resources = {'test_group': 'OS::Heat::ResourceGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Prove the resource is backed by a nested stack, save the ID
+        nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
+                                                            'test_group')
+
+        # Then check the expected resources are in the nested stack
+        nested_resources = {'0': 'My::TestResource',
+                            '1': 'My::TestResource'}
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+
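+        # With existing=True the update is sent as a PATCH, so the previously
+        # supplied template, environment and files are reused and only the
+        # changed parameters need to be provided.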
+        # Increase the count, passing only the parameter, no env or template
+        params = {'count': 3}
+        self.update_stack(stack_identifier, parameters=params, existing=True)
+
+        # Parent resources should be unchanged and the nested stack
+        # should have been updated in-place without replacement
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Resource group stack should also be unchanged (but updated)
+        nested_stack = self.client.stacks.get(nested_identifier)
+        self.assertEqual('UPDATE_COMPLETE', nested_stack.stack_status)
+        # The update should have added one resource to the group, so add it
+        # to the expected set
+        nested_resources['2'] = 'My::TestResource'
+        self.assertEqual(nested_resources,
+                         self.list_resources(nested_identifier))
+
+    def test_stack_update_from_failed_patch(self):
+        """Test a PATCH update from a failed state."""
+
+        # Start with empty template
+        stack_identifier = self.stack_create(
+            template='heat_template_version: 2014-10-16')
+
+        # Update with a good template, but bad parameter
+        self.update_stack(stack_identifier,
+                          template=self.fail_param_template,
+                          parameters={'do_fail': True},
+                          expected_status='UPDATE_FAILED')
+
+        # PATCH update, only providing the parameter
+        self.update_stack(stack_identifier,
+                          parameters={'do_fail': False},
+                          existing=True)
+        self.assertEqual({u'aresource': u'OS::Heat::TestResource'},
+                         self.list_resources(stack_identifier))
+
+    def test_stack_update_with_new_env(self):
+        """Update handles new resource types in the environment.
+
+        If a new resource type is introduced by an update and that update
+        fails, retrying the update must still be able to resolve the type
+        from the environment.
+        """
+        stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+
+        # Update with a new resource and make the update fail
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'], {'fail': True})
+        template['resources']['test2'] = {'type': 'My::TestResource'}
+        template['resources']['test1']['depends_on'] = 'test2'
+        env = {'resource_registry':
+               {'My::TestResource': 'OS::Heat::TestResource'}}
+        self.update_stack(stack_identifier,
+                          template=template,
+                          environment=env,
+                          expected_status='UPDATE_FAILED')
+
+        # Fixing the template should fix the stack
+        template = _change_rsrc_properties(template,
+                                           ['test1'], {'fail': False})
+        self.update_stack(stack_identifier,
+                          template=template,
+                          environment=env)
+        self.assertEqual({'test1': 'OS::Heat::TestResource',
+                          'test2': 'My::TestResource'},
+                         self.list_resources(stack_identifier))
+
+    def test_stack_update_with_new_version(self):
+        """Update handles a new template version after a failure.
+
+        If a stack update fails while changing the template version, a
+        subsequent update is able to handle the new version properly.
+        """
+        stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+
+        # Update with a new function and make the update fail
+        template = _change_rsrc_properties(test_template_two_resource,
+                                           ['test1'], {'fail': True})
+
+        template['heat_template_version'] = '2015-10-15'
+        template['resources']['test2']['properties']['value'] = {
+            'list_join': [',', ['a'], ['b']]}
+        self.update_stack(stack_identifier,
+                          template=template,
+                          expected_status='UPDATE_FAILED')
+
+        template = _change_rsrc_properties(template,
+                                           ['test2'], {'value': 'Test2'})
+        self.update_stack(stack_identifier,
+                          template=template,
+                          expected_status='UPDATE_FAILED')
+        self._stack_delete(stack_identifier)
+
+    def test_stack_update_with_old_version(self):
+        """Update handles the old template version after a failure.
+
+        If a stack update fails while changing the template version, a
+        subsequent update is able to handle the old version properly.
+        """
+        template = _change_rsrc_properties(
+            test_template_one_resource,
+            ['test1'], {'value': {'list_join': [',', ['a'], ['b']]}})
+        template['heat_template_version'] = '2015-10-15'
+        stack_identifier = self.stack_create(
+            template=template)
+
+        # Update with a new function and make the update fail
+        template = _change_rsrc_properties(test_template_one_resource,
+                                           ['test1'], {'fail': True})
+        self.update_stack(stack_identifier,
+                          template=template,
+                          expected_status='UPDATE_FAILED')
+        self._stack_delete(stack_identifier)
+
+    def test_stack_update_with_conditions(self):
+        """Update manages newly added conditions.
+
+        When a new resource that uses a new condition is added during an
+        update, the stack handles the condition correctly and does not fail
+        to load it while the update is still in progress.
+        """
+        stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+
+        updated_template = copy.deepcopy(test_template_two_resource)
+        updated_template['conditions'] = {'cond1': True}
+        updated_template['resources']['test3'] = {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': {'if': ['cond1', 'val3', 'val4']}
+            }
+        }
+        test2_props = updated_template['resources']['test2']['properties']
+        test2_props['action_wait_secs'] = {'create': 30}
+
+        self.update_stack(stack_identifier,
+                          template=updated_template,
+                          expected_status='UPDATE_IN_PROGRESS')
+
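+        # test2 is delayed by action_wait_secs, so the update stays in
+        # progress long enough to check that test3 (which uses the new
+        # condition) already shows up in the resource list.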
+        def check_resources():
+            resources = self.list_resources(stack_identifier)
+            if len(resources) < 2:
+                return False
+            self.assertIn('test3', resources)
+            return True
+
+        self.assertTrue(test.call_until_true(20, 2, check_resources))
diff --git a/heat_tempest_plugin/tests/functional/test_create_update_neutron_port.py b/heat_tempest_plugin/tests/functional/test_create_update_neutron_port.py
new file mode 100644
index 0000000..72cd47c
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_create_update_neutron_port.py
@@ -0,0 +1,101 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+test_template = '''
+heat_template_version: 2015-04-30
+description: Test template to create a port with ip_address.
+parameters:
+  mac:
+    type: string
+    default: 00-00-00-00-BB-BB
+resources:
+  net:
+    type: OS::Neutron::Net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      enable_dhcp: false
+      network: { get_resource: net }
+      cidr: 11.11.11.0/24
+  port:
+    type: OS::Neutron::Port
+    properties:
+      network: {get_resource: net}
+      mac_address: {get_param: mac}
+      fixed_ips:
+        - subnet: {get_resource: subnet}
+          ip_address: 11.11.11.11
+  test:
+    depends_on: port
+    type: OS::Heat::TestResource
+    properties:
+      value: Test1
+      fail: False
+outputs:
+  port_ip:
+    value: {get_attr: [port, fixed_ips, 0, ip_address]}
+  mac_address:
+    value: {get_attr: [port, mac_address]}
+'''
+
+
+class UpdatePortTest(functional_base.FunctionalTestsBase):
+
+    def get_port_id_and_outputs(self, stack_identifier):
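+        """Return the port's physical id plus the stack IP/MAC outputs."""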
+        resources = self.client.resources.list(stack_identifier)
+        port_id = [res.physical_resource_id for res in resources
+                   if res.resource_name == 'port']
+        stack = self.client.stacks.get(stack_identifier)
+        port_ip = self._stack_output(stack, 'port_ip')
+        port_mac = self._stack_output(stack, 'mac_address')
+        return port_id[0], port_ip, port_mac
+
+    def test_update_remove_ip(self):
+        # create with defined ip_address
+        stack_identifier = self.stack_create(template=test_template)
+        _id, _ip, _mac = self.get_port_id_and_outputs(stack_identifier)
+
+        # remove ip_address property and update stack
+        templ_no_ip = test_template.replace('ip_address: 11.11.11.11', '')
+        self.update_stack(stack_identifier, templ_no_ip)
+
+        new_id, new_ip, new_mac = self.get_port_id_and_outputs(
+            stack_identifier)
+        # port should be updated with the same id
+        self.assertEqual(_id, new_id)
+        self.assertEqual(_mac, new_mac)
+
+    def test_update_with_mac_address(self):
+        if not self.conf.admin_username or not self.conf.admin_password:
+            self.skipTest('No admin creds found, skipping')
+
+        # Setup admin clients for updating mac_address
+        self.setup_clients_for_admin()
+
+        # Create with default mac_address and defined ip_address
+        stack_identifier = self.stack_create(template=test_template)
+        _id, _ip, _mac = self.get_port_id_and_outputs(stack_identifier)
+
+        # Update with another 'mac' parameter
+        parameters = {'mac': '00-00-00-00-AA-AA'}
+        self.update_stack(stack_identifier, test_template,
+                          parameters=parameters)
+
+        new_id, new_ip, new_mac = self.get_port_id_and_outputs(
+            stack_identifier)
+        # mac_address should be different
+        self.assertEqual(_id, new_id)
+        self.assertEqual(_ip, new_ip)
+        self.assertNotEqual(_mac, new_mac)
diff --git a/heat_tempest_plugin/tests/functional/test_create_update_neutron_subnet.py b/heat_tempest_plugin/tests/functional/test_create_update_neutron_subnet.py
new file mode 100644
index 0000000..345a740
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_create_update_neutron_subnet.py
@@ -0,0 +1,127 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+test_template = '''
+heat_template_version: 2015-04-30
+description: Test template to create/update subnet with allocation_pools.
+resources:
+  net:
+    type: OS::Neutron::Net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: net }
+      cidr: 11.11.11.0/24
+      gateway_ip: 11.11.11.5
+      allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]
+outputs:
+  alloc_pools:
+    value: {get_attr: [subnet, allocation_pools]}
+  gateway_ip:
+    value: {get_attr: [subnet, gateway_ip]}
+'''
+
+
+class UpdateSubnetTest(functional_base.FunctionalTestsBase):
+
+    def get_outputs(self, stack_identifier, output_key):
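+        """Return the value of the given stack output."""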
+        stack = self.client.stacks.get(stack_identifier)
+        output = self._stack_output(stack, output_key)
+        return output
+
+    def test_update_allocation_pools(self):
+        stack_identifier = self.stack_create(template=test_template)
+        alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
+                         alloc_pools)
+
+        # Update allocation_pools with a new range
+        templ_other_pool = test_template.replace(
+            'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
+            'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.100}]')
+        self.update_stack(stack_identifier, templ_other_pool)
+        new_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        # the new pools should be the new range
+        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.100'}],
+                         new_alloc_pools)
+
+    def test_update_allocation_pools_to_empty(self):
+        stack_identifier = self.stack_create(template=test_template)
+        alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
+                         alloc_pools)
+
+        # Update allocation_pools with []
+        templ_empty_pools = test_template.replace(
+            'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
+            'allocation_pools: []')
+        self.update_stack(stack_identifier, templ_empty_pools)
+        new_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        # new_alloc_pools should be []
+        self.assertEqual([], new_alloc_pools)
+
+    def test_update_to_no_allocation_pools(self):
+        stack_identifier = self.stack_create(template=test_template)
+        alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
+                         alloc_pools)
+
+        # Remove the allocation_pools from template
+        templ_no_pools = test_template.replace(
+            'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
+            '')
+        self.update_stack(stack_identifier, templ_no_pools)
+        last_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
+        # last_alloc_pools should be []
+        self.assertEqual([], last_alloc_pools)
+
+    def test_update_gateway_ip(self):
+        stack_identifier = self.stack_create(template=test_template)
+        gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        self.assertEqual('11.11.11.5', gw_ip)
+
+        # Update gateway_ip
+        templ_other_gw_ip = test_template.replace(
+            'gateway_ip: 11.11.11.5', 'gateway_ip: 11.11.11.9')
+        self.update_stack(stack_identifier, templ_other_gw_ip)
+        new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        # the gateway_ip should be the new one
+        self.assertEqual('11.11.11.9', new_gw_ip)
+
+    def test_update_gateway_ip_to_empty(self):
+        stack_identifier = self.stack_create(template=test_template)
+        gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        self.assertEqual('11.11.11.5', gw_ip)
+
+        # Update gateway_ip to null (resolves to '')
+        templ_empty_gw_ip = test_template.replace(
+            'gateway_ip: 11.11.11.5', 'gateway_ip: null')
+        self.update_stack(stack_identifier, templ_empty_gw_ip)
+        new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        # new gateway_ip should be None
+        self.assertIsNone(new_gw_ip)
+
+    def test_update_to_no_gateway_ip(self):
+        stack_identifier = self.stack_create(template=test_template)
+        gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        self.assertEqual('11.11.11.5', gw_ip)
+
+        # Remove the gateway from template
+        templ_no_gw_ip = test_template.replace(
+            'gateway_ip: 11.11.11.5', '')
+        self.update_stack(stack_identifier, templ_no_gw_ip)
+        new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
+        # new gateway_ip should be None
+        self.assertIsNone(new_gw_ip)
diff --git a/heat_tempest_plugin/tests/functional/test_create_update_neutron_trunk.py b/heat_tempest_plugin/tests/functional/test_create_update_neutron_trunk.py
new file mode 100644
index 0000000..d572405
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_create_update_neutron_trunk.py
@@ -0,0 +1,275 @@
+# Copyright (c) 2017 Ericsson.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import yaml
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+test_template = '''
+heat_template_version: pike
+description: Test template to create, update, delete trunk.
+resources:
+  parent_net:
+    type: OS::Neutron::Net
+  trunk_net_one:
+    type: OS::Neutron::Net
+  trunk_net_two:
+    type: OS::Neutron::Net
+  parent_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: parent_net }
+      cidr: 10.0.0.0/16
+  trunk_subnet_one:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: trunk_net_one }
+      cidr: 10.10.0.0/16
+  trunk_subnet_two:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: trunk_net_two }
+      cidr: 10.20.0.0/16
+  parent_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: parent_net }
+      name: trunk_parent_port
+  sub_port_one:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: trunk_net_one }
+      name: trunk_sub_port_one
+  sub_port_two:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: trunk_net_two }
+      name: trunk_sub_port_two
+  trunk:
+    type: OS::Neutron::Trunk
+    properties:
+      name: test_trunk
+      port: { get_resource: parent_port }
+      sub_ports:
+outputs:
+  trunk_parent_port:
+    value: { get_attr: [trunk, port_id] }
+'''
+
+
+class UpdateTrunkTest(functional_base.FunctionalTestsBase):
+
+    @staticmethod
+    def _sub_ports_dict_to_set(sub_ports):
+        new_sub_ports = copy.deepcopy(sub_ports)
+
+        # NOTE(lajos katona): In the template the sub port has to be given
+        # with the key 'port', but trunk_details returns it with 'port_id'.
+        # On top of that, trunk_details also contains the mac_address, which
+        # is of no use here. So make sure that the dictionaries (input from
+        # the template or output from trunk_details) have the same keys:
+        if any('mac_address' in d for d in new_sub_ports):
+            for sp in new_sub_ports:
+                sp['port'] = sp['port_id']
+                del sp['port_id']
+                del sp['mac_address']
+
+        # NOTE(lajos katona): Both trunk_details['sub_ports'] and the input
+        # to the template are lists whose ordering is not guaranteed, so
+        # convert them to sets before comparing.
+        sub_ports_set = {frozenset(d.items()) for d in new_sub_ports}
+        return sub_ports_set
+
+    def test_add_first_sub_port(self):
+        stack_identifier = self.stack_create(template=test_template)
+
+        parsed_template = yaml.safe_load(test_template)
+        new_sub_port = [{'port': {'get_resource': 'sub_port_one'},
+                         'segmentation_id': 10,
+                         'segmentation_type': 'vlan'}]
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'] = new_sub_port
+        updated_template = yaml.safe_dump(parsed_template)
+        self.update_stack(stack_identifier, updated_template)
+
+        # Fix the port_id in the template for assertion
+        new_sub_port[0]['port'] = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_one')
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_sub_port = parent_port['trunk_details']['sub_ports']
+
+        self.assertEqual(self._sub_ports_dict_to_set(new_sub_port),
+                         self._sub_ports_dict_to_set(trunk_sub_port))
+
+    def test_add_a_second_sub_port(self):
+        parsed_template = yaml.safe_load(test_template)
+        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+                      'segmentation_type': 'vlan',
+                      'segmentation_id': 10}, ]
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'] = sub_ports
+        template_with_sub_ports = yaml.safe_dump(parsed_template)
+
+        stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+        new_sub_port = {'port': {'get_resource': 'sub_port_two'},
+                        'segmentation_id': 20,
+                        'segmentation_type': 'vlan'}
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'].append(new_sub_port)
+
+        updated_template = yaml.safe_dump(parsed_template)
+
+        self.update_stack(stack_identifier, updated_template)
+
+        # Fix the port_ids in the templates for assertion
+        sub_ports[0]['port'] = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_one')
+        new_sub_port['port'] = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_two')
+        expected_sub_ports = [sub_ports[0], new_sub_port]
+
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+        self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
+                         self._sub_ports_dict_to_set(trunk_sub_ports))
+
+    def test_remove_sub_port_from_trunk(self):
+        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+                      'segmentation_type': 'vlan',
+                      'segmentation_id': 10},
+                     {'port': {'get_resource': 'sub_port_two'},
+                      'segmentation_type': 'vlan',
+                      'segmentation_id': 20}]
+        parsed_template = yaml.safe_load(test_template)
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'] = sub_ports
+        template_with_sub_ports = yaml.safe_dump(parsed_template)
+
+        stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+        sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_two'},
+                                  'segmentation_type': 'vlan',
+                                  'segmentation_id': 20}
+        parsed_template['resources']['trunk'][
+            'properties']['sub_ports'].remove(sub_port_to_be_removed)
+        updated_template = yaml.safe_dump(parsed_template)
+
+        self.update_stack(stack_identifier, updated_template)
+
+        # Fix the port_ids in the templates for assertion
+        sub_ports[0]['port'] = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_one')
+        expected_sub_ports = [sub_ports[0]]
+
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+        self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
+                         self._sub_ports_dict_to_set(trunk_sub_ports))
+
+    def test_remove_last_sub_port_from_trunk(self):
+        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+                      'segmentation_type': 'vlan',
+                      'segmentation_id': 10}]
+        parsed_template = yaml.safe_load(test_template)
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'] = sub_ports
+
+        template_with_sub_ports = yaml.safe_dump(parsed_template)
+        stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+        sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_one'},
+                                  'segmentation_type': 'vlan',
+                                  'segmentation_id': 10}
+
+        parsed_template['resources']['trunk'][
+            'properties']['sub_ports'] = []
+        updated_template = yaml.safe_dump(parsed_template)
+
+        self.update_stack(stack_identifier, updated_template)
+
+        sub_port_to_be_removed['port'] = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_one')
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+        self.assertNotEqual(
+            self._sub_ports_dict_to_set([sub_port_to_be_removed]),
+            self._sub_ports_dict_to_set(trunk_sub_ports))
+        self.assertFalse(trunk_sub_ports,
+                         'The returned sub_ports (%s) in trunk_details are '
+                         'not empty!' % trunk_sub_ports)
+
+    def test_update_existing_sub_port_on_trunk(self):
+        sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+                      'segmentation_type': 'vlan',
+                      'segmentation_id': 10}]
+        parsed_template = yaml.safe_load(test_template)
+        parsed_template['resources']['trunk']['properties'][
+            'sub_ports'] = sub_ports
+
+        template_with_sub_ports = yaml.safe_dump(parsed_template)
+        stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+        sub_port_id = self.get_physical_resource_id(
+            stack_identifier, 'sub_port_one')
+        parsed_template['resources']['trunk']['properties']['sub_ports'][0][
+            'segmentation_id'] = 99
+        updated_template = yaml.safe_dump(parsed_template)
+
+        self.update_stack(stack_identifier, updated_template)
+        updated_sub_port = {'port': sub_port_id,
+                            'segmentation_type': 'vlan',
+                            'segmentation_id': 99}
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+        self.assertEqual(self._sub_ports_dict_to_set([updated_sub_port]),
+                         self._sub_ports_dict_to_set(trunk_sub_ports))
+
+    def test_update_trunk_name_and_description(self):
+        new_name = 'pineapple'
+        new_description = 'This is a test trunk'
+
+        stack_identifier = self.stack_create(template=test_template)
+        parsed_template = yaml.safe_load(test_template)
+        parsed_template['resources']['trunk']['properties']['name'] = new_name
+        parsed_template['resources']['trunk']['properties'][
+            'description'] = new_description
+        updated_template = yaml.safe_dump(parsed_template)
+        self.update_stack(stack_identifier, template=updated_template)
+
+        parent_id = self.get_stack_output(
+            stack_identifier, 'trunk_parent_port')
+        parent_port = self.network_client.show_port(parent_id)['port']
+        trunk_id = parent_port['trunk_details']['trunk_id']
+
+        trunk = self.network_client.show_trunk(trunk_id)['trunk']
+        self.assertEqual(new_name, trunk['name'])
+        self.assertEqual(new_description, trunk['description'])
diff --git a/heat_tempest_plugin/tests/functional/test_default_parameters.py b/heat_tempest_plugin/tests/functional/test_default_parameters.py
new file mode 100644
index 0000000..24aa6e4
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_default_parameters.py
@@ -0,0 +1,92 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import yaml
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class DefaultParametersTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2013-05-23
+parameters:
+  length:
+    type: string
+    default: 40
+resources:
+  random1:
+    type: nested_random.yaml
+  random2:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: length}
+outputs:
+  random1:
+    value: {get_attr: [random1, random1_value]}
+  random2:
+    value: {get_resource: random2}
+'''
+    nested_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  length:
+    type: string
+    default: 50
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: length}
+outputs:
+  random1_value:
+    value: {get_resource: random1}
+'''
+
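+    # 'param' is passed as a stack parameter (it applies only to the root
+    # template), 'default' is passed as a parameter_default (it is also
+    # propagated to the nested stack), and 'temp_def' controls whether the
+    # nested template keeps its own default. expect1/expect2 are the
+    # expected lengths of the random1 (nested) and random2 (root) strings.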
+    scenarios = [
+        ('none', dict(param=None, default=None, temp_def=True,
+                      expect1=50, expect2=40)),
+        ('default', dict(param=None, default=12, temp_def=True,
+                         expect1=12, expect2=12)),
+        ('both', dict(param=15, default=12, temp_def=True,
+                      expect1=12, expect2=15)),
+        ('no_temp_default', dict(param=None, default=12, temp_def=False,
+                                 expect1=12, expect2=12)),
+    ]
+
+    def test_defaults(self):
+        env = {'parameters': {}, 'parameter_defaults': {}}
+        if self.param:
+            env['parameters'] = {'length': self.param}
+        if self.default:
+            env['parameter_defaults'] = {'length': self.default}
+
+        if not self.temp_def:
+            # remove the default from the parameter in the nested template.
+            ntempl = yaml.safe_load(self.nested_template)
+            del ntempl['parameters']['length']['default']
+            nested_template = yaml.safe_dump(ntempl)
+        else:
+            nested_template = self.nested_template
+
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'nested_random.yaml': nested_template},
+            environment=env
+        )
+
+        stack = self.client.stacks.get(stack_identifier)
+        for out in stack.outputs:
+            if out['output_key'] == 'random1':
+                self.assertEqual(self.expect1, len(out['output_value']))
+            if out['output_key'] == 'random2':
+                self.assertEqual(self.expect2, len(out['output_value']))
diff --git a/heat_tempest_plugin/tests/functional/test_delete.py b/heat_tempest_plugin/tests/functional/test_delete.py
new file mode 100644
index 0000000..20266d6
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_delete.py
@@ -0,0 +1,42 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class DeleteInProgressTest(functional_base.FunctionalTestsBase):
+
+    root_template = '''
+heat_template_version: 2013-05-23
+resources:
+    rg:
+        type: OS::Heat::ResourceGroup
+        properties:
+            count: 125
+            resource_def:
+                type: empty.yaml
+'''
+
+    empty_template = '''
+heat_template_version: 2013-05-23
+resources:
+'''
+
+    def test_delete_nested_stacks_create_in_progress(self):
+        files = {'empty.yaml': self.empty_template}
+        identifier = self.stack_create(template=self.root_template,
+                                       files=files,
+                                       expected_status='CREATE_IN_PROGRESS')
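+        # Allow the nested stack creation to get underway, then delete the
+        # parent stack while it is still CREATE_IN_PROGRESS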
+        time.sleep(20)
+        self._stack_delete(identifier)
diff --git a/heat_tempest_plugin/tests/functional/test_encrypted_parameter.py b/heat_tempest_plugin/tests/functional/test_encrypted_parameter.py
new file mode 100644
index 0000000..bfbd77b
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_encrypted_parameter.py
@@ -0,0 +1,65 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class EncryptedParametersTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2014-10-16
+parameters:
+  image:
+    type: string
+  flavor:
+    type: string
+  network:
+    type: string
+  foo:
+    type: string
+    description: 'parameter with encryption turned on'
+    hidden: true
+    default: secret
+resources:
+  server_with_encrypted_property:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: foo }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      networks: [{network: {get_param: network} }]
+outputs:
+  encrypted_foo_param:
+    description: 'encrypted param'
+    value: { get_param: foo }
+'''
+
+    def test_db_encryption(self):
+        # Create a stack with the value of 'foo' to be encrypted
+        foo_param = 'my_encrypted_foo'
+        parameters = {
+            "image": self.conf.minimal_image_ref,
+            "flavor": self.conf.minimal_instance_type,
+            'network': self.conf.fixed_network_name,
+            "foo": foo_param
+        }
+
+        stack_identifier = self.stack_create(
+            template=self.template,
+            parameters=parameters
+        )
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Verify the output value for 'foo' parameter
+        for out in stack.outputs:
+            if out['output_key'] == 'encrypted_foo_param':
+                self.assertEqual(foo_param, out['output_value'])
diff --git a/heat_tempest_plugin/tests/functional/test_encryption_vol_type.py b/heat_tempest_plugin/tests/functional/test_encryption_vol_type.py
new file mode 100644
index 0000000..2b98022
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_encryption_vol_type.py
@@ -0,0 +1,87 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+test_encryption_vol_type = {
+    'heat_template_version': '2015-04-30',
+    'description': 'Test template to create an encrypted volume type.',
+    'resources': {
+        'my_volume_type': {
+            'type': 'OS::Cinder::VolumeType',
+            'properties': {
+                'name': 'LUKS'
+            }
+        },
+        'my_encrypted_vol_type': {
+            'type': 'OS::Cinder::EncryptedVolumeType',
+            'properties': {
+                'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
+                'control_location': 'front-end',
+                'cipher': 'aes-xts-plain64',
+                'key_size': 512,
+                'volume_type': {'get_resource': 'my_volume_type'}
+            }
+        }
+    }
+}
+
+
+class EncryptionVolTypeTest(functional_base.FunctionalTestsBase):
+    def setUp(self):
+        super(EncryptionVolTypeTest, self).setUp()
+        if not self.conf.admin_username or not self.conf.admin_password:
+            self.skipTest('No admin creds found, skipping')
+        # Cinder's security policy limits usage of volume types to
+        # administrators only, so switch to the admin clients.
+        self.setup_clients_for_admin()
+
+    def check_stack(self, sid):
+        vt = 'my_volume_type'
+        e_vt = 'my_encrypted_vol_type'
+
+        # check if only two resources are present.
+        expected_resources = {vt: 'OS::Cinder::VolumeType',
+                              e_vt: 'OS::Cinder::EncryptedVolumeType'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(sid))
+
+        e_vt_obj = self.client.resources.get(sid, e_vt)
+        my_encrypted_vol_type_tmpl_prop = test_encryption_vol_type[
+            'resources']['my_encrypted_vol_type']['properties']
+
+        # Check that the physical resource specs were created in accordance
+        # with the template.
+        phy_rsrc_specs = self.volume_client.volume_encryption_types.get(
+            e_vt_obj.physical_resource_id)
+        self.assertEqual(my_encrypted_vol_type_tmpl_prop['key_size'],
+                         phy_rsrc_specs.key_size)
+        self.assertEqual(my_encrypted_vol_type_tmpl_prop['provider'],
+                         phy_rsrc_specs.provider)
+        self.assertEqual(my_encrypted_vol_type_tmpl_prop['cipher'],
+                         phy_rsrc_specs.cipher)
+        self.assertEqual(my_encrypted_vol_type_tmpl_prop['control_location'],
+                         phy_rsrc_specs.control_location)
+
+    def test_create_update(self):
+        stack_identifier = self.stack_create(
+            template=test_encryption_vol_type)
+        self.check_stack(stack_identifier)
+
+        # Change some properties and trigger update.
+        my_encrypted_vol_type_tmpl_prop = test_encryption_vol_type[
+            'resources']['my_encrypted_vol_type']['properties']
+        my_encrypted_vol_type_tmpl_prop['key_size'] = 256
+        my_encrypted_vol_type_tmpl_prop['cipher'] = 'aes-cbc-essiv'
+        self.update_stack(stack_identifier, test_encryption_vol_type)
+        self.check_stack(stack_identifier)
diff --git a/heat_tempest_plugin/tests/functional/test_env_merge.py b/heat_tempest_plugin/tests/functional/test_env_merge.py
new file mode 100644
index 0000000..68ec035
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_env_merge.py
@@ -0,0 +1,95 @@
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+TEMPLATE = '''
+    heat_template_version: 2015-04-30
+    parameters:
+      p0:
+        type: string
+        default: CORRECT
+      p1:
+        type: string
+        default: INCORRECT
+      p2:
+        type: string
+        default: INCORRECT
+    resources:
+      r1:
+        type: test::R1
+      r2:
+        type: test::R2
+      r3a:
+        type: test::R3
+      r3b:
+        type: test::R3
+'''
+
+ENV_1 = '''
+    parameters:
+      p1: CORRECT
+      p2: INCORRECT-E1
+    resource_registry:
+      test::R1: OS::Heat::RandomString
+      test::R2: BROKEN
+      test::R3: OS::Heat::None
+'''
+
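+# ENV_2 is listed after ENV_1 in environment_files, so its parameters and
+# resource_registry entries take precedence on merging; the nested
+# 'resources:' section overrides the registry only for the resource named
+# r3b.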
+ENV_2 = '''
+    parameters:
+      p2: CORRECT
+    resource_registry:
+      test::R2: OS::Heat::RandomString
+      resources:
+        r3b:
+          test::R3: OS::Heat::RandomString
+'''
+
+
+class EnvironmentMergingTests(functional_base.FunctionalTestsBase):
+
+    def test_server_environment_merging(self):
+
+        # Setup
+        files = {'env1.yaml': ENV_1, 'env2.yaml': ENV_2}
+        environment_files = ['env1.yaml', 'env2.yaml']
+
+        # Test
+        stack_id = self.stack_create(stack_name='env_merge',
+                                     template=TEMPLATE,
+                                     files=files,
+                                     environment_files=environment_files)
+
+        # Verify
+
+        # Since there is no way to show the merged environment, the registry
+        # overriding is partially verified by the absence of an error: if
+        # merging were not working, test::R2 would remain mapped to BROKEN
+        # from env1.
+
+        # Sanity check
+        resources = self.list_resources(stack_id)
+        self.assertEqual(4, len(resources))
+
+        # Verify the parameters are correctly set
+        stack = self.client.stacks.get(stack_id)
+        self.assertEqual('CORRECT', stack.parameters['p0'])
+        self.assertEqual('CORRECT', stack.parameters['p1'])
+        self.assertEqual('CORRECT', stack.parameters['p2'])
+
+        # Verify that r3b has been overridden into a RandomString
+        # by checking to see that it has a value
+        r3b = self.client.resources.get(stack_id, 'r3b')
+        r3b_attrs = r3b.attributes
+        self.assertIn('value', r3b_attrs)
diff --git a/heat_tempest_plugin/tests/functional/test_event_sinks.py b/heat_tempest_plugin/tests/functional/test_event_sinks.py
new file mode 100644
index 0000000..a6e59eb
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_event_sinks.py
@@ -0,0 +1,79 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+from zaqarclient.queues.v2 import client as zaqarclient
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ZaqarEventSinkTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: "2013-05-23"
+resources:
+  test_resource:
+    type: OS::Heat::TestResource
+    properties:
+      value: ok
+'''
+
+    def test_events(self):
+        queue_id = str(uuid.uuid4())
+        environment = {'event_sinks': [{'type': 'zaqar-queue',
+                                        'target': queue_id,
+                                        'ttl': 120}]}
+        stack_identifier = self.stack_create(
+            template=self.template,
+            environment=environment)
+        stack_name, stack_id = stack_identifier.split('/')
+        conf = {
+            'auth_opts': {
+                'backend': 'keystone',
+                'options': {
+                    'os_username': self.conf.username,
+                    'os_password': self.conf.password,
+                    'os_project_name': self.conf.project_name,
+                    'os_auth_url': self.conf.auth_url,
+                    'os_user_domain_id': self.conf.user_domain_id,
+                    'os_project_domain_id': self.conf.project_domain_id,
+                    'os_user_domain_name': self.conf.user_domain_name,
+                    'os_project_domain_name': self.conf.project_domain_name
+                }
+            }
+        }
+
+        zaqar = zaqarclient.Client(conf=conf)
+        queue = zaqar.queue(queue_id)
+
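+        # Expect four CREATE events in the queue: IN_PROGRESS and COMPLETE
+        # for both the stack itself and the test_resource.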
+        def validate_messages():
+            messages = list(queue.messages())
+            if len(messages) < 4:
+                return False
+
+            types = [m.body['type'] for m in messages]
+            self.assertEqual(['os.heat.event'] * 4, types)
+            resources = set([m.body['payload'][
+                'resource_name'] for m in messages])
+            self.assertEqual(set([stack_name, 'test_resource']), resources)
+            stack_ids = [m.body['payload']['stack_id'] for m in messages]
+            self.assertEqual([stack_id] * 4, stack_ids)
+            statuses = [m.body['payload']['resource_status'] for m in messages]
+            statuses.sort()
+            self.assertEqual(['COMPLETE', 'COMPLETE',
+                              'IN_PROGRESS', 'IN_PROGRESS'], statuses)
+            actions = [m.body['payload']['resource_action'] for m in messages]
+            self.assertEqual(['CREATE'] * 4, actions)
+            return True
+
+        self.assertTrue(test.call_until_true(20, 0, validate_messages))
diff --git a/heat_tempest_plugin/tests/functional/test_external_ref.py b/heat_tempest_plugin/tests/functional/test_external_ref.py
new file mode 100644
index 0000000..c54e453
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_external_ref.py
@@ -0,0 +1,83 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ExternalReferencesTest(functional_base.FunctionalTestsBase):
+
+    TEMPLATE = '''
+heat_template_version: 2016-10-14
+resources:
+  test1:
+    type: OS::Heat::TestResource
+'''
+    TEMPLATE_WITH_EX_REF = '''
+heat_template_version: 2016-10-14
+resources:
+  test1:
+    type: OS::Heat::TestResource
+    external_id: foobar
+outputs:
+  str:
+    value: {get_resource: test1}
+'''
+
+    def test_create_with_external_ref(self):
+        stack_name = self._stack_rand_name()
+        stack_identifier = self.stack_create(
+            stack_name=stack_name,
+            template=self.TEMPLATE_WITH_EX_REF,
+            files={},
+            disable_rollback=True,
+            parameters={},
+            environment={}
+        )
+
+        stack = self.client.stacks.get(stack_identifier)
+
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        expected_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(
+            [{'description': 'No description given',
+              'output_key': 'str',
+              'output_value': 'foobar'}], stack.outputs)
+
+    def test_update_with_external_ref(self):
+        stack_name = self._stack_rand_name()
+        stack_identifier = self.stack_create(
+            stack_name=stack_name,
+            template=self.TEMPLATE,
+            files={},
+            disable_rollback=True,
+            parameters={},
+            environment={}
+        )
+        stack = self.client.stacks.get(stack_identifier)
+
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        expected_resources = {'test1': 'OS::Heat::TestResource'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual([], stack.outputs)
+
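+        # Heat does not allow switching an existing, Heat-managed resource
+        # to an external_id reference, so this update is expected to fail.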
+        stack_name = stack_identifier.split('/')[0]
+        kwargs = {'stack_id': stack_identifier, 'stack_name': stack_name,
+                  'template': self.TEMPLATE_WITH_EX_REF, 'files': {},
+                  'disable_rollback': True, 'parameters': {}, 'environment': {}
+                  }
+        self.client.stacks.update(**kwargs)
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
diff --git a/heat_tempest_plugin/tests/functional/test_heat_autoscaling.py b/heat_tempest_plugin/tests/functional/test_heat_autoscaling.py
new file mode 100644
index 0000000..a83c7fe
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_heat_autoscaling.py
@@ -0,0 +1,215 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class HeatAutoscalingTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2014-10-16
+
+resources:
+  random_group:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      cooldown: 0
+      desired_capacity: 3
+      max_size: 5
+      min_size: 2
+      resource:
+        type: OS::Heat::RandomString
+
+  scale_up_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: random_group }
+      scaling_adjustment: 1
+
+  scale_down_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: random_group }
+      scaling_adjustment: -1
+
+outputs:
+  all_values:
+    value: {get_attr: [random_group, outputs_list, value]}
+  value_0:
+    value: {get_attr: [random_group, resource.0.value]}
+  value_1:
+    value: {get_attr: [random_group, resource.1.value]}
+  value_2:
+    value: {get_attr: [random_group, resource.2.value]}
+  asg_size:
+    value: {get_attr: [random_group, current_size]}
+'''
+
+    template_nested = '''
+heat_template_version: 2014-10-16
+
+resources:
+  random_group:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 3
+      max_size: 5
+      min_size: 2
+      resource:
+        type: randomstr.yaml
+
+outputs:
+  all_values:
+    value: {get_attr: [random_group, outputs_list, random_str]}
+  value_0:
+    value: {get_attr: [random_group, resource.0.random_str]}
+  value_1:
+    value: {get_attr: [random_group, resource.1.random_str]}
+  value_2:
+    value: {get_attr: [random_group, resource.2.random_str]}
+'''
+
+    template_randomstr = '''
+heat_template_version: 2013-05-23
+
+resources:
+  random_str:
+    type: OS::Heat::RandomString
+
+outputs:
+  random_str:
+    value: {get_attr: [random_str, value]}
+'''
+
+    def _assert_output_values(self, stack_id):
+        stack = self.client.stacks.get(stack_id)
+        all_values = self._stack_output(stack, 'all_values')
+        self.assertEqual(3, len(all_values))
+        self.assertEqual(all_values[0], self._stack_output(stack, 'value_0'))
+        self.assertEqual(all_values[1], self._stack_output(stack, 'value_1'))
+        self.assertEqual(all_values[2], self._stack_output(stack, 'value_2'))
+
+    def test_asg_scale_up_max_size(self):
+        stack_id = self.stack_create(template=self.template,
+                                     expected_status='CREATE_COMPLETE')
+        stack = self.client.stacks.get(stack_id)
+        asg_size = self._stack_output(stack, 'asg_size')
+        # Ensure that initial desired capacity is met
+        self.assertEqual(3, asg_size)
+
+        # send scale up signals and ensure that asg honors max_size
+        asg = self.client.resources.get(stack_id, 'random_group')
+        max_size = 5
+        for num in range(asg_size+1, max_size+2):
+            expected_resources = num if num <= max_size else max_size
+            self.client.resources.signal(stack_id, 'scale_up_policy')
+            self.assertTrue(
+                test.call_until_true(self.conf.build_timeout,
+                                     self.conf.build_interval,
+                                     self.check_autoscale_complete,
+                                     asg.physical_resource_id,
+                                     expected_resources, stack_id,
+                                     'scale_up_policy'))
+
+    def test_asg_scale_down_min_size(self):
+        stack_id = self.stack_create(template=self.template,
+                                     expected_status='CREATE_COMPLETE')
+        stack = self.client.stacks.get(stack_id)
+        asg_size = self._stack_output(stack, 'asg_size')
+        # Ensure that initial desired capacity is met
+        self.assertEqual(3, asg_size)
+
+        # send scale down signals and ensure that asg honors min_size
+        asg = self.client.resources.get(stack_id, 'random_group')
+        min_size = 2
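+        # range(asg_size-1, 0, -1) sends one signal per step, ending below
+        # min_size; the expected count is clamped at min_size for that
+        # final signal, which verifies the floor.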
+        for num in range(asg_size-1, 0, -1):
+            expected_resources = num if num >= min_size else min_size
+            self.client.resources.signal(stack_id, 'scale_down_policy')
+            self.assertTrue(
+                test.call_until_true(self.conf.build_timeout,
+                                     self.conf.build_interval,
+                                     self.check_autoscale_complete,
+                                     asg.physical_resource_id,
+                                     expected_resources, stack_id,
+                                     'scale_down_policy'))
+
+    def test_asg_cooldown(self):
+        cooldown_tmpl = self.template.replace('cooldown: 0',
+                                              'cooldown: 60')
+        stack_id = self.stack_create(template=cooldown_tmpl,
+                                     expected_status='CREATE_COMPLETE')
+        stack = self.client.stacks.get(stack_id)
+        asg_size = self._stack_output(stack, 'asg_size')
+        # Ensure that initial desired capacity is met
+        self.assertEqual(3, asg_size)
+
+        # send scale up signal.
+        # Since cooldown is in effect, number of resources should not change
+        asg = self.client.resources.get(stack_id, 'random_group')
+        expected_resources = 3
+        self.client.resources.signal(stack_id, 'scale_up_policy')
+        self.assertTrue(
+            test.call_until_true(self.conf.build_timeout,
+                                 self.conf.build_interval,
+                                 self.check_autoscale_complete,
+                                 asg.physical_resource_id,
+                                 expected_resources, stack_id,
+                                 'scale_up_policy'))
+
+    def test_path_attrs(self):
+        stack_id = self.stack_create(template=self.template)
+        expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup',
+                              'scale_up_policy': 'OS::Heat::ScalingPolicy',
+                              'scale_down_policy': 'OS::Heat::ScalingPolicy'}
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+        self._assert_output_values(stack_id)
+
+    def test_path_attrs_nested(self):
+        files = {'randomstr.yaml': self.template_randomstr}
+        stack_id = self.stack_create(template=self.template_nested,
+                                     files=files)
+        expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup'}
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+        self._assert_output_values(stack_id)
+
+
+class AutoScalingGroupUpdateWithNoChanges(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2013-05-23
+
+resources:
+  test_group:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 0
+      max_size: 0
+      min_size: 0
+      resource:
+        type: OS::Heat::RandomString
+  test_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: test_group }
+      scaling_adjustment: 1
+'''
+
+    def test_as_group_update_without_resource_changes(self):
+        stack_identifier = self.stack_create(template=self.template)
+        new_template = self.template.replace(
+            'scaling_adjustment: 1',
+            'scaling_adjustment: 2')
+
+        self.update_stack(stack_identifier, template=new_template)
diff --git a/heat_tempest_plugin/tests/functional/test_hooks.py b/heat_tempest_plugin/tests/functional/test_hooks.py
new file mode 100644
index 0000000..78c58f3
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_hooks.py
@@ -0,0 +1,281 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import yaml
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class HooksTest(functional_base.FunctionalTestsBase):
+
+    def setUp(self):
+        super(HooksTest, self).setUp()
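+        # foo_step1 -> foo_step2 -> foo_step3 form a linear dependency
+        # chain, so a hook on foo_step2 pauses the stack partway through
+        # a create or update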
+        self.template = {'heat_template_version': '2014-10-16',
+                         'resources': {
+                             'foo_step1': {'type': 'OS::Heat::RandomString'},
+                             'foo_step2': {'type': 'OS::Heat::RandomString',
+                                           'depends_on': 'foo_step1'},
+                             'foo_step3': {'type': 'OS::Heat::RandomString',
+                                           'depends_on': 'foo_step2'}}}
+
+    def test_hook_pre_create(self):
+        env = {'resource_registry':
+               {'resources':
+                {'foo_step2':
+                 {'hooks': 'pre-create'}}}}
+        # Note we don't wait for CREATE_COMPLETE, because we need to
+        # signal to clear the hook before create will complete
+        stack_identifier = self.stack_create(
+            template=self.template,
+            environment=env,
+            expected_status='CREATE_IN_PROGRESS')
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step1', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'INIT_COMPLETE')
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='CREATE paused until Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
+        self.client.resources.signal(stack_identifier, 'foo_step2',
+                                     data={'unset_hook': 'pre-create'})
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+    def test_hook_pre_update_nochange(self):
+        env = {'resource_registry':
+               {'resources':
+                {'foo_step2':
+                 {'hooks': 'pre-update'}}}}
+        stack_identifier = self.stack_create(
+            template=self.template,
+            environment=env)
+        res_before = self.client.resources.get(stack_identifier, 'foo_step2')
+        # Note we don't wait for UPDATE_COMPLETE, because we need to
+        # signal to clear the hook before update will complete
+        self.update_stack(
+            stack_identifier,
+            template=self.template,
+            environment=env,
+            expected_status='UPDATE_IN_PROGRESS')
+
+        # Note when a hook is specified, the resource status doesn't change
+        # when we hit the hook, so we look for the event, then assert the
+        # state is unchanged.
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='UPDATE paused until Hook pre-update is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self.client.resources.signal(stack_identifier, 'foo_step2',
+                                     data={'unset_hook': 'pre-update'})
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-update is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+        res_after = self.client.resources.get(stack_identifier, 'foo_step2')
+        self.assertEqual(res_before.physical_resource_id,
+                         res_after.physical_resource_id)
+
+    def test_hook_pre_update_replace(self):
+        env = {'resource_registry':
+               {'resources':
+                {'foo_step2':
+                 {'hooks': 'pre-update'}}}}
+        stack_identifier = self.stack_create(
+            template=self.template,
+            environment=env)
+        res_before = self.client.resources.get(stack_identifier, 'foo_step2')
+        # Note we don't wait for UPDATE_COMPLETE, because we need to
+        # signal to clear the hook before update will complete
+        self.template['resources']['foo_step2']['properties'] = {'length': 10}
+        self.update_stack(
+            stack_identifier,
+            template=self.template,
+            environment=env,
+            expected_status='UPDATE_IN_PROGRESS')
+
+        # Note when a hook is specified, the resource status doesn't change
+        # when we hit the hook, so we look for the event, then assert the
+        # state is unchanged.
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='UPDATE paused until Hook pre-update is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self.client.resources.signal(stack_identifier, 'foo_step2',
+                                     data={'unset_hook': 'pre-update'})
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-update is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+        res_after = self.client.resources.get(stack_identifier, 'foo_step2')
+        self.assertNotEqual(res_before.physical_resource_id,
+                            res_after.physical_resource_id)
+
+    def test_hook_pre_update_in_place(self):
+        env = {'resource_registry':
+               {'resources':
+                {'rg':
+                 {'hooks': 'pre-update'}}}}
+        template = {'heat_template_version': '2014-10-16',
+                    'resources': {
+                        'rg': {
+                            'type': 'OS::Heat::ResourceGroup',
+                            'properties': {
+                                'count': 1,
+                                'resource_def': {
+                                    'type': 'OS::Heat::RandomString'}}}}}
+        # The hook is pre-update, so create completes normally; we don't
+        # wait for UPDATE_COMPLETE on the update below, because we need to
+        # signal to clear the hook before the update will complete
+        stack_identifier = self.stack_create(
+            template=template,
+            environment=env)
+        res_before = self.client.resources.get(stack_identifier, 'rg')
+        template['resources']['rg']['properties']['count'] = 2
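+        # Changing 'count' is handled as an in-place update of the
+        # ResourceGroup, so its physical resource id should not change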
+        self.update_stack(
+            stack_identifier,
+            template=template,
+            environment=env,
+            expected_status='UPDATE_IN_PROGRESS')
+
+        # Note when a hook is specified, the resource status doesn't change
+        # when we hit the hook, so we look for the event, then assert the
+        # state is unchanged.
+        self._wait_for_resource_status(
+            stack_identifier, 'rg', 'CREATE_COMPLETE')
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='UPDATE paused until Hook pre-update is cleared',
+            rsrc_name='rg')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self.client.resources.signal(stack_identifier, 'rg',
+                                     data={'unset_hook': 'pre-update'})
+
+        ev = self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-update is cleared',
+            rsrc_name='rg')
+        self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+        res_after = self.client.resources.get(stack_identifier, 'rg')
+        self.assertEqual(res_before.physical_resource_id,
+                         res_after.physical_resource_id)
+
+    def test_hook_pre_create_nested(self):
+        files = {'nested.yaml': yaml.safe_dump(self.template)}
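+        # The hook is registered via the nested stack's resource path, so
+        # the signal to clear it must go to the nested stack, not the parent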
+        env = {'resource_registry':
+               {'resources':
+                {'nested':
+                 {'foo_step2':
+                  {'hooks': 'pre-create'}}}}}
+        template = {'heat_template_version': '2014-10-16',
+                    'resources': {
+                        'nested': {'type': 'nested.yaml'}}}
+        # Note we don't wait for CREATE_COMPLETE, because we need to
+        # signal to clear the hook before create will complete
+        stack_identifier = self.stack_create(
+            template=template,
+            environment=env,
+            files=files,
+            expected_status='CREATE_IN_PROGRESS')
+        self._wait_for_resource_status(stack_identifier, 'nested',
+                                       'CREATE_IN_PROGRESS')
+        nested_identifier = self.assert_resource_is_a_stack(
+            stack_identifier, 'nested', wait=True)
+        self._wait_for_resource_status(
+            nested_identifier, 'foo_step1', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            nested_identifier, 'foo_step2', 'INIT_COMPLETE')
+        ev = self.wait_for_event_with_reason(
+            nested_identifier,
+            reason='CREATE paused until Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
+        self.client.resources.signal(nested_identifier, 'foo_step2',
+                                     data={'unset_hook': 'pre-create'})
+        ev = self.wait_for_event_with_reason(
+            nested_identifier,
+            reason='Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
+        self._wait_for_resource_status(
+            nested_identifier, 'foo_step2', 'CREATE_COMPLETE')
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+    def test_hook_pre_create_wildcard(self):
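+        # The 'foo_*' wildcard applies the pre-create hook to every
+        # resource, so each one must be signalled in dependency order for
+        # the stack to reach CREATE_COMPLETE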
+        env = {'resource_registry':
+               {'resources':
+                {'foo_*':
+                 {'hooks': 'pre-create'}}}}
+        # Note we don't wait for CREATE_COMPLETE, because we need to
+        # signal to clear the hook before create will complete
+        stack_identifier = self.stack_create(
+            template=self.template,
+            environment=env,
+            expected_status='CREATE_IN_PROGRESS')
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step1', 'INIT_COMPLETE')
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='CREATE paused until Hook pre-create is cleared',
+            rsrc_name='foo_step1')
+        self.client.resources.signal(stack_identifier, 'foo_step1',
+                                     data={'unset_hook': 'pre-create'})
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-create is cleared',
+            rsrc_name='foo_step1')
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step2', 'INIT_COMPLETE')
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='CREATE paused until Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self.client.resources.signal(stack_identifier, 'foo_step2',
+                                     data={'unset_hook': 'pre-create'})
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-create is cleared',
+            rsrc_name='foo_step2')
+        self._wait_for_resource_status(
+            stack_identifier, 'foo_step3', 'INIT_COMPLETE')
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='CREATE paused until Hook pre-create is cleared',
+            rsrc_name='foo_step3')
+        self.client.resources.signal(stack_identifier, 'foo_step3',
+                                     data={'unset_hook': 'pre-create'})
+        self.wait_for_event_with_reason(
+            stack_identifier,
+            reason='Hook pre-create is cleared',
+            rsrc_name='foo_step3')
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
diff --git a/heat_tempest_plugin/tests/functional/test_immutable_parameters.py b/heat_tempest_plugin/tests/functional/test_immutable_parameters.py
new file mode 100644
index 0000000..f031c01
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_immutable_parameters.py
@@ -0,0 +1,141 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+from heatclient import exc as heat_exceptions
+
+
+class ImmutableParametersTest(functional_base.FunctionalTestsBase):
+
+    template_param_has_no_immutable_field = '''
+heat_template_version: 2014-10-16
+parameters:
+  param1:
+    type: string
+    default: default_value
+outputs:
+  param1_output:
+    description: 'parameter 1 details'
+    value: { get_param: param1 }
+'''
+
+    template_param_has_immutable_field = '''
+heat_template_version: 2014-10-16
+parameters:
+  param1:
+    type: string
+    default: default_value
+    immutable: false
+outputs:
+  param1_output:
+    description: 'parameter 1 details'
+    value: { get_param: param1 }
+'''
+
+    def test_no_immutable_param_field(self):
+        param1_create_value = 'value1'
+        create_parameters = {"param1": param1_create_value}
+
+        stack_identifier = self.stack_create(
+            template=self.template_param_has_no_immutable_field,
+            parameters=create_parameters
+        )
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Verify the value of the parameter
+        self.assertEqual(param1_create_value,
+                         self._stack_output(stack, 'param1_output'))
+
+        param1_update_value = 'value2'
+        update_parameters = {"param1": param1_update_value}
+
+        self.update_stack(
+            stack_identifier,
+            template=self.template_param_has_no_immutable_field,
+            parameters=update_parameters)
+
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Verify the value of the updated parameter
+        self.assertEqual(param1_update_value,
+                         self._stack_output(stack, 'param1_output'))
+
+    def test_immutable_param_field_allowed(self):
+        param1_create_value = 'value1'
+        create_parameters = {"param1": param1_create_value}
+
+        stack_identifier = self.stack_create(
+            template=self.template_param_has_immutable_field,
+            parameters=create_parameters
+        )
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Verify the value of the parameter
+        self.assertEqual(param1_create_value,
+                         self._stack_output(stack, 'param1_output'))
+
+        param1_update_value = 'value2'
+        update_parameters = {"param1": param1_update_value}
+
+        self.update_stack(
+            stack_identifier,
+            template=self.template_param_has_immutable_field,
+            parameters=update_parameters)
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Verify the value of the updated parameter
+        self.assertEqual(param1_update_value,
+                         self._stack_output(stack, 'param1_output'))
+
+        # Ensure stack is not in a failed state
+        self.assertEqual('UPDATE_COMPLETE', stack.stack_status)
+
+    def test_immutable_param_field_error(self):
+        param1_create_value = 'value1'
+        create_parameters = {"param1": param1_create_value}
+
+        # Toggle the immutable field to preclude updating
+        immutable_true = self.template_param_has_immutable_field.replace(
+            'immutable: false', 'immutable: true')
+
+        stack_identifier = self.stack_create(
+            template=immutable_true,
+            parameters=create_parameters
+        )
+        stack = self.client.stacks.get(stack_identifier)
+
+        param1_update_value = 'value2'
+        update_parameters = {"param1": param1_update_value}
+
+        # Verify the value of the parameter
+        self.assertEqual(param1_create_value,
+                         self._stack_output(stack, 'param1_output'))
+
+        # Attempt to update the stack with a new parameter value; the
+        # immutable parameter must cause the update to be rejected
+        exc = self.assertRaises(
+            heat_exceptions.HTTPBadRequest,
+            self.update_stack,
+            stack_identifier,
+            template=immutable_true,
+            parameters=update_parameters)
+        exp = ('The following parameters are immutable and may not be '
+               'updated: param1')
+        self.assertIn(exp, str(exc))
+
+        stack = self.client.stacks.get(stack_identifier)
+
+        # Ensure stack is not in a failed state
+        self.assertEqual('CREATE_COMPLETE', stack.stack_status)
+
+        # Ensure immutable parameter has not changed
+        self.assertEqual(param1_create_value,
+                         self._stack_output(stack, 'param1_output'))
diff --git a/heat_tempest_plugin/tests/functional/test_instance_group.py b/heat_tempest_plugin/tests/functional/test_instance_group.py
new file mode 100644
index 0000000..c0bb7e8
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_instance_group.py
@@ -0,0 +1,500 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import json
+
+from testtools import matchers
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class InstanceGroupTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template to create multiple instances.",
+  "Parameters" : {"size": {"Type": "String", "Default": "1"},
+                  "AZ": {"Type": "String", "Default": "nova"},
+                  "image": {"Type": "String"},
+                  "flavor": {"Type": "String"},
+                  "user_data": {"Type": "String", "Default": "jsconfig data"}},
+  "Resources": {
+    "JobServerGroup": {
+      "Type": "OS::Heat::InstanceGroup",
+      "Properties": {
+        "LaunchConfigurationName" : {"Ref": "JobServerConfig"},
+        "Size" : {"Ref": "size"},
+        "AvailabilityZones" : [{"Ref": "AZ"}]
+      }
+    },
+
+    "JobServerConfig" : {
+      "Type" : "AWS::AutoScaling::LaunchConfiguration",
+      "Metadata": {"foo": "bar"},
+      "Properties": {
+        "ImageId"           : {"Ref": "image"},
+        "InstanceType"      : {"Ref": "flavor"},
+        "SecurityGroups"    : [ "sg-1" ],
+        "UserData"          : {"Ref": "user_data"}
+      }
+    }
+  },
+  "Outputs": {
+    "InstanceList": {"Value": {
+      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
+    "JobServerConfigRef": {"Value": {
+      "Ref": "JobServerConfig"}}
+  }
+}
+'''
+
+    instance_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list}
+
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    properties:
+      salt: {get_param: UserData}
+outputs:
+  PublicIp:
+    value: {get_attr: [random1, value]}
+'''
+
+    # This template is designed to fail: nothing signals the wait condition
+    # handle, so the 1 second Timeout expires and the group's instances
+    # fail to create.
+    bad_instance_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list}
+
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    depends_on: waiter
+  ready_poster:
+    type: AWS::CloudFormation::WaitConditionHandle
+  waiter:
+    type: AWS::CloudFormation::WaitCondition
+    properties:
+      Handle: {Ref: ready_poster}
+      Timeout: 1
+outputs:
+  PublicIp:
+    value: {get_attr: [random1, value]}
+'''
+
+    def setUp(self):
+        super(InstanceGroupTest, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.instance_type:
+            raise self.skipException("No flavor configured to test")
+
+    def assert_instance_count(self, stack, expected_count):
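+        # The InstanceList output is a comma-delimited string, so the
+        # number of entries is the current size of the group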
+        inst_list = self._stack_output(stack, 'InstanceList')
+        self.assertEqual(expected_count, len(inst_list.split(',')))
+
+    def _assert_instance_state(self, nested_identifier,
+                               num_complete, num_failed):
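+        # Every nested resource must account for exactly one expected
+        # COMPLETE or FAILED state; both counters have to reach zero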
+        for res in self.client.resources.list(nested_identifier):
+            if 'COMPLETE' in res.resource_status:
+                num_complete = num_complete - 1
+            elif 'FAILED' in res.resource_status:
+                num_failed = num_failed - 1
+        self.assertEqual(0, num_failed)
+        self.assertEqual(0, num_complete)
+
+
+class InstanceGroupBasicTest(InstanceGroupTest):
+
+    def test_basic_create_works(self):
+        """Make sure the working case is good.
+
+        Note this combines test_override_aws_ec2_instance into this test as
+        well, which is:
+        If AWS::EC2::Instance is overridden, InstanceGroup will automatically
+        use that overridden resource type.
+        """
+
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 4,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files, environment=env)
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'OS::Heat::InstanceGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 4)
+
+    def test_size_updates_work(self):
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 2)
+
+        # Increase the group size to 5
+        env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+                'parameters': {'size': 5,
+                               'image': self.conf.minimal_image_ref,
+                               'flavor': self.conf.instance_type}}
+        self.update_stack(stack_identifier, self.template,
+                          environment=env2, files=files)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 5)
+
+    def test_update_group_replace(self):
+        """Test case for ensuring non-updatable props case a replacement.
+
+        Make sure that during a group update the non-updatable properties cause
+        a replacement.
+        """
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry':
+               {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 1,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
+        orig_asg_id = rsrc.physical_resource_id
+
+        env2 = {'resource_registry':
+                {'AWS::EC2::Instance': 'provider.yaml'},
+                'parameters': {'size': '2',
+                               'AZ': 'wibble',
+                               'image': self.conf.minimal_image_ref,
+                               'flavor': self.conf.instance_type,
+                               'user_data': 'new data'}}
+        self.update_stack(stack_identifier, self.template,
+                          environment=env2, files=files)
+
+        # replacement will cause the resource physical_resource_id to change.
+        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
+        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)
+
+    def test_create_instance_error_causes_group_error(self):
+        """Test create failing a resource in the instance group.
+
+        If a resource in an instance group fails to be created, the instance
+        group itself will fail and the broken inner resource will remain.
+        """
+        stack_name = self._stack_rand_name()
+        files = {'provider.yaml': self.bad_instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        self.client.stacks.create(
+            stack_name=stack_name,
+            template=self.template,
+            files=files,
+            disable_rollback=True,
+            parameters={},
+            environment=env
+        )
+        self.addCleanup(self._stack_delete, stack_name)
+        stack = self.client.stacks.get(stack_name)
+        stack_identifier = '%s/%s' % (stack_name, stack.id)
+        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'OS::Heat::InstanceGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        self._assert_instance_state(nested_ident, 0, 2)
+
+    def test_update_instance_error_causes_group_error(self):
+        """Test update failing a resource in the instance group.
+
+        If a resource in an instance group fails to be created during an
+        update, the instance group itself will fail and the broken inner
+        resource will remain.
+        """
+        files = {'provider.yaml': self.instance_template}
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': 2,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.instance_type}}
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=files,
+                                             environment=env)
+        initial_resources = {
+            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
+            'JobServerGroup': 'OS::Heat::InstanceGroup'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        stack = self.client.stacks.get(stack_identifier)
+        self.assert_instance_count(stack, 2)
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        self._assert_instance_state(nested_ident, 2, 0)
+        initial_list = [res.resource_name
+                        for res in self.client.resources.list(nested_ident)]
+
+        env['parameters']['size'] = 3
+        files2 = {'provider.yaml': self.bad_instance_template}
+        self.client.stacks.update(
+            stack_id=stack_identifier,
+            template=self.template,
+            files=files2,
+            disable_rollback=True,
+            parameters={},
+            environment=env
+        )
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        # assert that there are 3 bad instances: the 2 pre-existing
+        # resources should be in UPDATE_FAILED, and the new one in
+        # CREATE_FAILED
+        for res in self.client.resources.list(nested_ident):
+            if res.resource_name in initial_list:
+                self._wait_for_resource_status(nested_ident,
+                                               res.resource_name,
+                                               'UPDATE_FAILED')
+            else:
+                self._wait_for_resource_status(nested_ident,
+                                               res.resource_name,
+                                               'CREATE_FAILED')
+
+
+class InstanceGroupUpdatePolicyTest(InstanceGroupTest):
+
+    def ig_tmpl_with_updt_policy(self):
+        templ = json.loads(copy.deepcopy(self.template))
+        up = {"RollingUpdate": {
+            "MinInstancesInService": "1",
+            "MaxBatchSize": "2",
+            "PauseTime": "PT1S"}}
+        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
+        return templ
+
+    def update_instance_group(self, updt_template,
+                              num_updates_expected_on_updt,
+                              num_creates_expected_on_updt,
+                              num_deletes_expected_on_updt,
+                              update_replace):
+
+        # setup stack from the initial template
+        files = {'provider.yaml': self.instance_template}
+        size = 5
+        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
+               'parameters': {'size': size,
+                              'image': self.conf.minimal_image_ref,
+                              'flavor': self.conf.minimal_instance_type}}
+        stack_name = self._stack_rand_name()
+        stack_identifier = self.stack_create(
+            stack_name=stack_name,
+            template=self.ig_tmpl_with_updt_policy(),
+            files=files,
+            environment=env)
+        stack = self.client.stacks.get(stack_identifier)
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+
+        # test that physical resource name of launch configuration is used
+        conf_name = self._stack_output(stack, 'JobServerConfigRef')
+        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
+        self.assertThat(conf_name,
+                        matchers.MatchesRegex(conf_name_pattern))
+
+        # test the number of instances created
+        self.assert_instance_count(stack, size)
+        # saves info from initial list of instances for comparison later
+        init_instances = self.client.resources.list(nested_ident)
+        init_names = [inst.resource_name for inst in init_instances]
+
+        # test stack update
+        self.update_stack(stack_identifier, updt_template,
+                          environment=env, files=files)
+        updt_stack = self.client.stacks.get(stack_identifier)
+
+        # test that the launch configuration is replaced
+        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
+        self.assertThat(updt_conf_name,
+                        matchers.MatchesRegex(conf_name_pattern))
+        self.assertNotEqual(conf_name, updt_conf_name)
+
+        # test that the group size is unchanged
+        updt_instances = self.client.resources.list(nested_ident)
+        updt_names = [inst.resource_name for inst in updt_instances]
+        self.assertEqual(len(init_names), len(updt_names))
+        for res in updt_instances:
+            self.assertEqual('UPDATE_COMPLETE', res.resource_status)
+
+        # test that the appropriate number of instance names are the same
+        matched_names = set(updt_names) & set(init_names)
+        self.assertEqual(num_updates_expected_on_updt, len(matched_names))
+
+        # test that the appropriate number of new instances are created
+        self.assertEqual(num_creates_expected_on_updt,
+                         len(set(updt_names) - set(init_names)))
+
+        # test that the appropriate number of instances are deleted
+        self.assertEqual(num_deletes_expected_on_updt,
+                         len(set(init_names) - set(updt_names)))
+
+        # test that the older instances are the ones being deleted
+        if num_deletes_expected_on_updt > 0:
+            for deleted_name in init_names[:num_deletes_expected_on_updt]:
+                self.assertNotIn(deleted_name, updt_names)
+
+    def test_instance_group_update_replace(self):
+        """Test simple update replace with no conflict.
+
+        Test simple update replace with no conflict in batch size and
+        minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        grp = updt_template['Resources']['JobServerGroup']
+        policy = grp['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '1'
+        policy['MaxBatchSize'] = '3'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=5,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0,
+                                   update_replace=True)
+
+    def test_instance_group_update_replace_with_adjusted_capacity(self):
+        """Test update replace with capacity adjustment.
+
+        Test update replace with capacity adjustment due to conflict in
+        batch size and minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        grp = updt_template['Resources']['JobServerGroup']
+        policy = grp['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '4'
+        policy['MaxBatchSize'] = '4'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=2,
+                                   num_creates_expected_on_updt=3,
+                                   num_deletes_expected_on_updt=3,
+                                   update_replace=True)
+
+    def test_instance_group_update_replace_huge_batch_size(self):
+        """Test update replace with a huge batch size."""
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '0'
+        policy['MaxBatchSize'] = '20'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=5,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0,
+                                   update_replace=True)
+
+    def test_instance_group_update_replace_huge_min_in_service(self):
+        """Update replace with huge number of minimum instances in service."""
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '20'
+        policy['MaxBatchSize'] = '2'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['UserData'] = 'new data'
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=3,
+                                   num_creates_expected_on_updt=2,
+                                   num_deletes_expected_on_updt=2,
+                                   update_replace=True)
+
+    def test_instance_group_update_no_replace(self):
+        """Test simple update only and no replace with no conflict.
+
+        Test simple update only and no replace (i.e. updated instance flavor
+        in Launch Configuration) with no conflict in batch size and
+        minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '1'
+        policy['MaxBatchSize'] = '3'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['InstanceType'] = self.conf.instance_type
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=5,
+                                   num_creates_expected_on_updt=0,
+                                   num_deletes_expected_on_updt=0,
+                                   update_replace=False)
+
+    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
+        """Test update only and no replace with capacity adjustment.
+
+        Test update only and no replace (i.e. updated instance flavor in
+        Launch Configuration) with capacity adjustment due to conflict in
+        batch size and minimum instances in service.
+        """
+        updt_template = self.ig_tmpl_with_updt_policy()
+        group = updt_template['Resources']['JobServerGroup']
+        policy = group['UpdatePolicy']['RollingUpdate']
+        policy['MinInstancesInService'] = '4'
+        policy['MaxBatchSize'] = '4'
+        policy['PauseTime'] = 'PT0S'
+        config = updt_template['Resources']['JobServerConfig']
+        config['Properties']['InstanceType'] = self.conf.instance_type
+
+        self.update_instance_group(updt_template,
+                                   num_updates_expected_on_updt=2,
+                                   num_creates_expected_on_updt=3,
+                                   num_deletes_expected_on_updt=3,
+                                   update_replace=False)
diff --git a/heat_tempest_plugin/tests/functional/test_lbaasv2.py b/heat_tempest_plugin/tests/functional/test_lbaasv2.py
new file mode 100644
index 0000000..e33c5b9
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_lbaasv2.py
@@ -0,0 +1,155 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class LoadBalancerv2Test(functional_base.FunctionalTestsBase):
+
+    create_template = '''
+heat_template_version: 2016-04-08
+parameters:
+  subnet:
+    type: string
+resources:
+  loadbalancer:
+    type: OS::Neutron::LBaaS::LoadBalancer
+    properties:
+      description: aLoadBalancer
+      vip_subnet: { get_param: subnet }
+  listener:
+    type: OS::Neutron::LBaaS::Listener
+    properties:
+      description: aListener
+      loadbalancer: { get_resource: loadbalancer }
+      protocol: HTTP
+      protocol_port: 80
+      connection_limit: 5555
+  pool:
+    type: OS::Neutron::LBaaS::Pool
+    properties:
+      description: aPool
+      lb_algorithm: ROUND_ROBIN
+      protocol: HTTP
+      listener: { get_resource: listener }
+  poolmember:
+    type: OS::Neutron::LBaaS::PoolMember
+    properties:
+      address: 1.1.1.1
+      pool: { get_resource: pool }
+      protocol_port: 1111
+      subnet: { get_param: subnet }
+      weight: 255
+  # pm2
+  healthmonitor:
+    type: OS::Neutron::LBaaS::HealthMonitor
+    properties:
+      delay: 3
+      type: HTTP
+      timeout: 3
+      max_retries: 3
+      pool: { get_resource: pool }
+outputs:
+  loadbalancer:
+    value: { get_attr: [ loadbalancer, show ] }
+  pool:
+    value: { get_attr: [ pool, show ] }
+  poolmember:
+    value: { get_attr: [ poolmember, show ] }
+  listener:
+    value: { get_attr: [ listener, show ] }
+  healthmonitor:
+    value: { get_attr: [ healthmonitor, show ] }
+'''
+
+    add_member = '''
+  poolmember2:
+    type: OS::Neutron::LBaaS::PoolMember
+    properties:
+      address: 2.2.2.2
+      pool: { get_resource: pool }
+      protocol_port: 2222
+      subnet: { get_param: subnet }
+      weight: 222
+'''
+
+    def setUp(self):
+        super(LoadBalancerv2Test, self).setUp()
+        if not self.is_network_extension_supported('lbaasv2'):
+            self.skipTest('LBaasv2 extension not available, skipping')
+
+    def test_create_update_loadbalancer(self):
+        parameters = {
+            'subnet': self.conf.fixed_subnet_name,
+        }
+        stack_identifier = self.stack_create(template=self.create_template,
+                                             parameters=parameters)
+        stack = self.client.stacks.get(stack_identifier)
+        output = self._stack_output(stack, 'loadbalancer')
+        self.assertEqual('ONLINE', output['operating_status'])
+
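+        # Update via string substitution; this relies on each literal
+        # (ROUND_ROBIN, 3, 255, 5555 and the descriptions) appearing only
+        # in the properties that are meant to change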
+        template = self.create_template.replace('ROUND_ROBIN', 'SOURCE_IP')
+        template = template.replace('3', '6')
+        template = template.replace('255', '256')
+        template = template.replace('5555', '7777')
+        template = template.replace('aLoadBalancer', 'updatedLoadBalancer')
+        template = template.replace('aPool', 'updatedPool')
+        template = template.replace('aListener', 'updatedListener')
+        self.update_stack(stack_identifier, template=template,
+                          parameters=parameters)
+        stack = self.client.stacks.get(stack_identifier)
+
+        output = self._stack_output(stack, 'loadbalancer')
+        self.assertEqual('ONLINE', output['operating_status'])
+        self.assertEqual('updatedLoadBalancer', output['description'])
+        output = self._stack_output(stack, 'pool')
+        self.assertEqual('SOURCE_IP', output['lb_algorithm'])
+        self.assertEqual('updatedPool', output['description'])
+        output = self._stack_output(stack, 'poolmember')
+        self.assertEqual(256, output['weight'])
+        output = self._stack_output(stack, 'healthmonitor')
+        self.assertEqual(6, output['delay'])
+        self.assertEqual(6, output['timeout'])
+        self.assertEqual(6, output['max_retries'])
+        output = self._stack_output(stack, 'listener')
+        self.assertEqual(7777, output['connection_limit'])
+        self.assertEqual('updatedListener', output['description'])
+
+    def test_add_delete_poolmember(self):
+        parameters = {
+            'subnet': self.conf.fixed_subnet_name,
+        }
+        stack_identifier = self.stack_create(template=self.create_template,
+                                             parameters=parameters)
+        stack = self.client.stacks.get(stack_identifier)
+        output = self._stack_output(stack, 'loadbalancer')
+        self.assertEqual('ONLINE', output['operating_status'])
+        output = self._stack_output(stack, 'pool')
+        self.assertEqual(1, len(output['members']))
+        # add pool member
+        template = self.create_template.replace('# pm2', self.add_member)
+        self.update_stack(stack_identifier, template=template,
+                          parameters=parameters)
+        stack = self.client.stacks.get(stack_identifier)
+        output = self._stack_output(stack, 'loadbalancer')
+        self.assertEqual('ONLINE', output['operating_status'])
+        output = self._stack_output(stack, 'pool')
+        self.assertEqual(2, len(output['members']))
+        # delete pool member
+        self.update_stack(stack_identifier, template=self.create_template,
+                          parameters=parameters)
+        stack = self.client.stacks.get(stack_identifier)
+        output = self._stack_output(stack, 'loadbalancer')
+        self.assertEqual('ONLINE', output['operating_status'])
+        output = self._stack_output(stack, 'pool')
+        self.assertEqual(1, len(output['members']))
diff --git a/heat_tempest_plugin/tests/functional/test_nested_get_attr.py b/heat_tempest_plugin/tests/functional/test_nested_get_attr.py
new file mode 100644
index 0000000..57f65e2
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_nested_get_attr.py
@@ -0,0 +1,165 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Using nested get_attr functions isn't a good idea - in particular, this
+# actually working depends on correct dependencies between the two resources
+# whose attributes are being fetched, and these dependencies are non-local to
+# where the get_attr calls are used. Nevertheless, it did sort-of work, and
+# this test will help keep it that way.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+initial_template = '''
+heat_template_version: ocata
+resources:
+  dict_resource:
+    type: OS::Heat::Value
+    properties:
+      value:
+        blarg: wibble
+        foo: bar
+        baz: quux
+        fred: barney
+    # These dependencies are required because we only want to read the
+    # attribute values for a given resource once, and therefore we do so in
+    # dependency order. This is necessarily true for a convergence traversal,
+    # but also happens when we're fetching the resource attributes e.g. to show
+    # the output values. The key1/key2 attribute values must be stored before
+    # we attempt to calculate the dep_attrs for dict_resource in order to
+    # correctly determine which attributes of dict_resource are used.
+    depends_on:
+      - key1
+      - key2
+      - indirect_key3_dep
+  key1:
+    type: OS::Heat::Value
+    properties:
+      value: blarg
+  key2:
+    type: OS::Heat::Value
+    properties:
+      value: foo
+  key3:
+    type: OS::Heat::Value
+    properties:
+      value: fred
+  value1:
+    type: OS::Heat::Value
+    properties:
+      value:
+        get_attr:
+          - dict_resource
+          - value
+          - {get_attr: [key1, value]}
+  indirect_key3_dep:
+    type: OS::Heat::Value
+    properties:
+      value: ignored
+    depends_on: key3
+outputs:
+  value1:
+    value: {get_attr: [value1, value]}
+  value2:
+    value: {get_attr: [dict_resource, value, {get_attr: [key2, value]}]}
+  value3:
+    value: {get_attr: [dict_resource, value, {get_attr: [key3, value]}]}
+'''
+
+update_template = '''
+heat_template_version: ocata
+resources:
+  dict_resource:
+    type: OS::Heat::Value
+    properties:
+      value:
+        blarg: wibble
+        foo: bar
+        baz: quux
+        fred: barney
+    depends_on:
+      - key1
+      - key2
+      - indirect_key3_dep
+      - key4
+  key1:
+    type: OS::Heat::Value
+    properties:
+      value: foo
+  key2:
+    type: OS::Heat::Value
+    properties:
+      value: fred
+  key3:
+    type: OS::Heat::Value
+    properties:
+      value: blarg
+  key4:
+    type: OS::Heat::Value
+    properties:
+      value: baz
+  value1:
+    type: OS::Heat::Value
+    properties:
+      value:
+        get_attr:
+          - dict_resource
+          - value
+          - {get_attr: [key1, value]}
+  value4:
+    type: OS::Heat::Value
+    properties:
+      value:
+        get_attr:
+          - dict_resource
+          - value
+          - {get_attr: [key4, value]}
+  indirect_key3_dep:
+    type: OS::Heat::Value
+    properties:
+      value: ignored
+    depends_on: key3
+outputs:
+  value1:
+    value: {get_attr: [value1, value]}
+  value2:
+    value: {get_attr: [dict_resource, value, {get_attr: [key2, value]}]}
+  value3:
+    value: {get_attr: [dict_resource, value, {get_attr: [key3, value]}]}
+  value4:
+    value: {get_attr: [value4, value]}
+'''
+
+
+class NestedGetAttrTest(functional_base.FunctionalTestsBase):
+    def assertOutput(self, value, stack_identifier, key):
+        op = self.client.stacks.output_show(stack_identifier, key)['output']
+        self.assertEqual(key, op['output_key'])
+        if 'output_error' in op:
+            raise Exception(op['output_error'])
+        self.assertEqual(value, op['output_value'])
+
+    def test_nested_get_attr_create(self):
+        stack_identifier = self.stack_create(template=initial_template)
+
+        self.assertOutput('wibble', stack_identifier, 'value1')
+        self.assertOutput('bar', stack_identifier, 'value2')
+        self.assertOutput('barney', stack_identifier, 'value3')
+
+    def test_nested_get_attr_update(self):
+        stack_identifier = self.stack_create(template=initial_template)
+        self.update_stack(stack_identifier, template=update_template)
+
+        self.assertOutput('bar', stack_identifier, 'value1')
+        self.assertOutput('barney', stack_identifier, 'value2')
+        self.assertOutput('wibble', stack_identifier, 'value3')
+        self.assertOutput('quux', stack_identifier, 'value4')
diff --git a/heat_tempest_plugin/tests/functional/test_notifications.py b/heat_tempest_plugin/tests/functional/test_notifications.py
new file mode 100644
index 0000000..7a74cdb
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_notifications.py
@@ -0,0 +1,194 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import kombu
+from oslo_config import cfg
+from oslo_messaging._drivers import common
+from oslo_messaging import transport
+import requests
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+BASIC_NOTIFICATIONS = [
+    'orchestration.stack.create.start',
+    'orchestration.stack.create.end',
+    'orchestration.stack.update.start',
+    'orchestration.stack.update.end',
+    'orchestration.stack.suspend.start',
+    'orchestration.stack.suspend.end',
+    'orchestration.stack.resume.start',
+    'orchestration.stack.resume.end',
+    'orchestration.stack.delete.start',
+    'orchestration.stack.delete.end'
+]
+
+ASG_NOTIFICATIONS = [
+    'orchestration.autoscaling.start',
+    'orchestration.autoscaling.end'
+]
+
+
+def get_url(conf):
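+    # Build the AMQP broker URL from the oslo.messaging rabbit options,
+    # e.g. 'amqp://guest:secret@localhost:5672/' (illustrative values).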
+    conf = conf.oslo_messaging_rabbit
+    return 'amqp://%s:%s@%s:%s/' % (conf.rabbit_userid,
+                                    conf.rabbit_password,
+                                    conf.rabbit_host,
+                                    conf.rabbit_port)
+
+
+class NotificationHandler(object):
+    def __init__(self, stack_id, events=None):
+        self._notifications = []
+        self.stack_id = stack_id
+        self.events = events
+
+    def process_message(self, body, message):
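+        # Record the event type of any notification emitted for the watched
+        # stack (optionally filtered by self.events), then ack the message.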
+        notification = common.deserialize_msg(body)
+        if notification['payload']['stack_name'] == self.stack_id:
+            if self.events is not None:
+                if notification['event_type'] in self.events:
+                    self.notifications.append(notification['event_type'])
+            else:
+                self.notifications.append(notification['event_type'])
+        message.ack()
+
+    def clear(self):
+        self._notifications = []
+
+    @property
+    def notifications(self):
+        return self._notifications
+
+
+class NotificationTest(functional_base.FunctionalTestsBase):
+
+    basic_template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+'''
+    update_basic_template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+  random2:
+    type: OS::Heat::RandomString
+'''
+
+    asg_template = '''
+heat_template_version: 2013-05-23
+resources:
+  asg:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      resource:
+        type: OS::Heat::RandomString
+      min_size: 1
+      desired_capacity: 2
+      max_size: 3
+
+  scale_up_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: {get_resource: asg}
+      cooldown: 0
+      scaling_adjustment: 1
+
+  scale_down_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: {get_resource: asg}
+      cooldown: 0
+      scaling_adjustment: '-1'
+
+outputs:
+  scale_up_url:
+    value: {get_attr: [scale_up_policy, alarm_url]}
+  scale_dn_url:
+    value: {get_attr: [scale_down_policy, alarm_url]}
+'''
+
+    def setUp(self):
+        super(NotificationTest, self).setUp()
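+        # Bind an exclusive queue to the 'heat' notification exchange so the
+        # test can observe notifications emitted by the engine.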
+        self.exchange = kombu.Exchange('heat', 'topic', durable=False)
+        queue = kombu.Queue(exchange=self.exchange,
+                            routing_key='notifications.info',
+                            exclusive=True)
+        self.conn = kombu.Connection(get_url(
+            transport.get_transport(cfg.CONF).conf))
+        self.ch = self.conn.channel()
+        self.queue = queue(self.ch)
+        self.queue.declare()
+
+    def consume_events(self, handler, count):
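+        # Predicate used with test.call_until_true: drain one batch of
+        # events and report whether the handler has seen 'count' of them.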
+        self.conn.drain_events()
+        return len(handler.notifications) == count
+
+    def test_basic_notifications(self):
+        # disable cleanup so we can call _stack_delete() directly.
+        stack_identifier = self.stack_create(template=self.basic_template,
+                                             enable_cleanup=False)
+        self.update_stack(stack_identifier,
+                          template=self.update_basic_template)
+        self.stack_suspend(stack_identifier)
+        self.stack_resume(stack_identifier)
+        self._stack_delete(stack_identifier)
+
+        handler = NotificationHandler(stack_identifier.split('/')[0])
+
+        with self.conn.Consumer(self.queue,
+                                callbacks=[handler.process_message],
+                                auto_declare=False):
+            try:
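+                # Drain everything already queued; drain_events raises a
+                # timeout exception once no more events arrive within one
+                # second, which ends the loop.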
+                while True:
+                    self.conn.drain_events(timeout=1)
+            except Exception:
+                pass
+
+        for n in BASIC_NOTIFICATIONS:
+            self.assertIn(n, handler.notifications)
+
+    def test_asg_notifications(self):
+        stack_identifier = self.stack_create(template=self.asg_template)
+
+        for output in self.client.stacks.get(stack_identifier).outputs:
+            if output['output_key'] == 'scale_dn_url':
+                scale_down_url = output['output_value']
+            else:
+                scale_up_url = output['output_value']
+
+        notifications = []
+        handler = NotificationHandler(stack_identifier.split('/')[0],
+                                      ASG_NOTIFICATIONS)
+
+        with self.conn.Consumer(self.queue,
+                                callbacks=[handler.process_message],
+                                auto_declare=False):
+
+            requests.post(scale_up_url, verify=self.verify_cert)
+            self.assertTrue(
+                test.call_until_true(20, 0, self.consume_events, handler, 2))
+            notifications += handler.notifications
+
+            handler.clear()
+            requests.post(scale_down_url, verify=self.verify_cert)
+            self.assertTrue(
+                test.call_until_true(20, 0, self.consume_events, handler, 2))
+            notifications += handler.notifications
+
+        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[0]))
+        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[1]))
diff --git a/heat_tempest_plugin/tests/functional/test_nova_server_networks.py b/heat_tempest_plugin/tests/functional/test_nova_server_networks.py
new file mode 100644
index 0000000..c0baeb4
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_nova_server_networks.py
@@ -0,0 +1,149 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+server_with_sub_fixed_ip_template = '''
+heat_template_version: 2016-04-08
+description: Test template to test nova server with subnet and fixed_ip.
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+resources:
+  net:
+    type: OS::Neutron::Net
+    properties:
+      name: my_net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: {get_resource: net}
+      cidr: 11.11.11.0/24
+  security_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: the_sg
+      description: Ping and SSH
+      rules:
+      - protocol: icmp
+      - protocol: tcp
+        port_range_min: 22
+        port_range_max: 22
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      networks:
+        - subnet: {get_resource: subnet}
+          fixed_ip: 11.11.11.11
+      security_groups:
+        - {get_resource: security_group}
+outputs:
+  networks:
+    value: {get_attr: [server, networks]}
+'''
+
+server_with_port_template = '''
+heat_template_version: 2016-04-08
+description: Test template to test nova server with port.
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+resources:
+  net:
+    type: OS::Neutron::Net
+    properties:
+      name: server_with_port_net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: {get_resource: net}
+      cidr: 11.11.11.0/24
+  port:
+    type: OS::Neutron::Port
+    properties:
+      network: {get_resource: net}
+      fixed_ips:
+        - subnet: {get_resource: subnet}
+          ip_address: 11.11.11.11
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      networks:
+        - port: {get_resource: port}
+'''
+
+
+class CreateServerTest(functional_base.FunctionalTestsBase):
+
+    def get_outputs(self, stack_identifier, output_key):
+        stack = self.client.stacks.get(stack_identifier)
+        return self._stack_output(stack, output_key)
+
+    def test_create_server_with_subnet_fixed_ip_sec_group(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'image': self.conf.minimal_image_ref}
+        stack_identifier = self.stack_create(
+            template=server_with_sub_fixed_ip_template,
+            stack_name='server_with_sub_ip',
+            parameters=parms)
+
+        networks = self.get_outputs(stack_identifier, 'networks')
+        self.assertEqual(['11.11.11.11'], networks['my_net'])
+
+        server_resource = self.client.resources.get(
+            stack_identifier, 'server')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+        self.assertEqual([{"name": "the_sg"}], server.security_groups)
+
+    def test_create_update_server_with_subnet(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'image': self.conf.minimal_image_ref}
+        template = server_with_sub_fixed_ip_template.replace(
+            'fixed_ip: 11.11.11.11',
+            'fixed_ip: 11.11.11.22').replace(
+            'name: my_net', 'name: your_net')
+        stack_identifier = self.stack_create(
+            template=template,
+            stack_name='create_server_with_sub_ip',
+            parameters=parms)
+        networks = self.get_outputs(stack_identifier, 'networks')
+        self.assertEqual(['11.11.11.22'], networks['your_net'])
+
+        # Update the server with only the subnet (no fixed_ip); Heat then
+        # avoids passing both port_id and net_id when attaching the
+        # interface, so the update succeeds.
+        template_only_subnet = template.replace(
+            'fixed_ip: 11.11.11.22', '')
+        self.update_stack(stack_identifier,
+                          template_only_subnet,
+                          parameters=parms)
+        new_networks = self.get_outputs(stack_identifier, 'networks')
+        self.assertNotEqual(['11.11.11.22'], new_networks['your_net'])
+
+    def test_create_server_with_port(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'image': self.conf.minimal_image_ref}
+        # We just want to make sure we can create the server; no need to
+        # assert anything.
+        self.stack_create(
+            template=server_with_port_template,
+            stack_name='server_with_port',
+            parameters=parms)
diff --git a/heat_tempest_plugin/tests/functional/test_os_wait_condition.py b/heat_tempest_plugin/tests/functional/test_os_wait_condition.py
new file mode 100644
index 0000000..e19be4f
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_os_wait_condition.py
@@ -0,0 +1,107 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class OSWaitCondition(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2013-05-23
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+  timeout:
+    type: number
+    default: 60
+resources:
+  instance1:
+    type: OS::Nova::Server
+    properties:
+      flavor: {get_param: flavor}
+      image: {get_param: image}
+      networks:
+      - network: {get_param: network}
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: '#!/bin/sh
+
+            wc_notify --data-binary ''{"status": "SUCCESS"}''
+
+            # signals with reason
+
+            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
+            "signal2"}''
+
+            # signals with data
+
+            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
+            "signal3", "data": "data3"}''
+
+            wc_notify --data-binary ''{"status": "SUCCESS", "reason":
+            "signal4", "data": "data4"}''
+
+            # check signals with the same number
+
+            wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''
+
+            wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''
+
+            # loop for 20 signals without reasons and data
+
+            for i in `seq 1 20`; do wc_notify --data-binary ''{"status":
+            "SUCCESS"}'' & done
+
+            wait
+            '
+          params:
+            wc_notify:
+              get_attr: [wait_handle, curl_cli]
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    depends_on: instance1
+    properties:
+      count: 25
+      handle: {get_resource: wait_handle}
+      timeout: {get_param: timeout}
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+outputs:
+  curl_cli:
+    value:
+      get_attr: [wait_handle, curl_cli]
+  wc_data:
+    value:
+      get_attr: [wait_condition, data]
+'''
+
+    def setUp(self):
+        super(OSWaitCondition, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.minimal_instance_type:
+            raise self.skipException("No minimal flavor configured to test")
+
+    def test_create_stack_with_multi_signal_waitcondition(self):
+        params = {'flavor': self.conf.minimal_instance_type,
+                  'image': self.conf.minimal_image_ref,
+                  'network': self.conf.fixed_network_name,
+                  'timeout': 120}
+        self.stack_create(template=self.template, parameters=params)
diff --git a/heat_tempest_plugin/tests/functional/test_preview.py b/heat_tempest_plugin/tests/functional/test_preview.py
new file mode 100644
index 0000000..b1779dc
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_preview.py
@@ -0,0 +1,237 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+from heatclient import exc
+import six
+
+
+class StackPreviewTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2015-04-30
+parameters:
+  incomming:
+    type: string
+resources:
+  one:
+    type: OS::Heat::TestResource
+    properties:
+      value: fred
+  two:
+    type: OS::Heat::TestResource
+    properties:
+      value: {get_param: incomming}
+    depends_on: one
+outputs:
+  main_out:
+    value: {get_attr: [two, output]}
+    '''
+    env = '''
+parameters:
+  incomming: abc
+    '''
+
+    def setUp(self):
+        super(StackPreviewTest, self).setUp()
+        self.client = self.orchestration_client
+        self.project_id = self.identity_client.project_id
+
+    def _assert_resource(self, res, stack_name):
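+        # A previewed resource is reported as INIT/COMPLETE with empty
+        # status fields and no timestamps, since nothing has been created
+        # yet.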
+        self.assertEqual(stack_name, res['stack_name'])
+        self.assertEqual('INIT', res['resource_action'])
+        self.assertEqual('COMPLETE', res['resource_status'])
+        for field in ('resource_status_reason', 'physical_resource_id',
+                      'description'):
+            self.assertIn(field, res)
+            self.assertEqual('', res[field])
+        # 'creation_time' and 'updated_time' are None for a preview
+        for field in ('creation_time', 'updated_time'):
+            self.assertIn(field, res)
+            self.assertIsNone(res[field])
+        self.assertIn('output', res['attributes'])
+
+        # resource_identity
+        self.assertEqual(stack_name,
+                         res['resource_identity']['stack_name'])
+        self.assertEqual('None', res['resource_identity']['stack_id'])
+        self.assertEqual(self.project_id,
+                         res['resource_identity']['tenant'])
+        self.assertEqual('/resources/%s' % res['resource_name'],
+                         res['resource_identity']['path'])
+        # stack_identity
+        self.assertEqual(stack_name,
+                         res['stack_identity']['stack_name'])
+        self.assertEqual('None', res['stack_identity']['stack_id'])
+        self.assertEqual(self.project_id,
+                         res['stack_identity']['tenant'])
+        self.assertEqual('', res['stack_identity']['path'])
+
+    def _assert_results(self, result, stack_name):
+        # stack-level fields
+        self.assertEqual(stack_name, result['stack_name'])
+        self.assertTrue(result['disable_rollback'])
+        self.assertEqual('None', result['id'])
+        self.assertIsNone(result['parent'])
+        self.assertEqual('No description', result['template_description'])
+
+        # parameters
+        self.assertEqual('None', result['parameters']['OS::stack_id'])
+        self.assertEqual(stack_name, result['parameters']['OS::stack_name'])
+        self.assertEqual('abc', result['parameters']['incomming'])
+
+    def test_basic_pass(self):
+        stack_name = self._stack_rand_name()
+        result = self.client.stacks.preview(
+            template=self.template,
+            stack_name=stack_name,
+            disable_rollback=True,
+            environment=self.env).to_dict()
+
+        self._assert_results(result, stack_name)
+        for res in result['resources']:
+            self._assert_resource(res, stack_name)
+            self.assertEqual('OS::Heat::TestResource',
+                             res['resource_type'])
+
+            # common properties
+            self.assertFalse(res['properties']['fail'])
+            self.assertEqual(0, res['properties']['wait_secs'])
+            self.assertFalse(res['properties']['update_replace'])
+
+            if res['resource_name'] == 'one':
+                self.assertEqual('fred', res['properties']['value'])
+                self.assertEqual(['two'], res['required_by'])
+            if res['resource_name'] == 'two':
+                self.assertEqual('abc', res['properties']['value'])
+                self.assertEqual([], res['required_by'])
+
+    def test_basic_fail(self):
+        stack_name = self._stack_rand_name()
+
+        # break the template so it fails validation.
+        wont_work = self.template.replace('get_param: incomming',
+                                          'get_param: missing')
+        excp = self.assertRaises(exc.HTTPBadRequest,
+                                 self.client.stacks.preview,
+                                 template=wont_work,
+                                 stack_name=stack_name,
+                                 disable_rollback=True,
+                                 environment=self.env)
+
+        self.assertIn('Property error: : resources.two.properties.value: '
+                      ': The Parameter (missing) was not provided.',
+                      six.text_type(excp))
+
+    def test_nested_pass(self):
+        """Nested stacks need to recurse down the stacks."""
+        main_template = '''
+heat_template_version: 2015-04-30
+parameters:
+  incomming:
+    type: string
+resources:
+  main:
+    type: nested.yaml
+    properties:
+      value: {get_param: incomming}
+outputs:
+  main_out:
+    value: {get_attr: [main, output]}
+    '''
+        nested_template = '''
+heat_template_version: 2015-04-30
+parameters:
+  value:
+    type: string
+resources:
+  nested:
+    type: OS::Heat::TestResource
+    properties:
+      value: {get_param: value}
+outputs:
+  output:
+    value: {get_attr: [nested, output]}
+'''
+        stack_name = self._stack_rand_name()
+        result = self.client.stacks.preview(
+            disable_rollback=True,
+            stack_name=stack_name,
+            template=main_template,
+            files={'nested.yaml': nested_template},
+            environment=self.env).to_dict()
+
+        self._assert_results(result, stack_name)
+
+        # Nested stacks are returned as a nested list of their resources.
+        res = result['resources'][0][0]
+        nested_stack_name = '%s-%s' % (stack_name,
+                                       res['parent_resource'])
+
+        self._assert_resource(res, nested_stack_name)
+        self.assertEqual('OS::Heat::TestResource',
+                         res['resource_type'])
+
+        self.assertFalse(res['properties']['fail'])
+        self.assertEqual(0, res['properties']['wait_secs'])
+        self.assertFalse(res['properties']['update_replace'])
+
+        self.assertEqual('abc', res['properties']['value'])
+        self.assertEqual([], res['required_by'])
+
+    def test_res_group_with_nested_template(self):
+        main_template = '''
+heat_template_version: 2015-04-30
+resources:
+  fixed_network:
+    type: "OS::Neutron::Net"
+  rg:
+    type: "OS::Heat::ResourceGroup"
+    properties:
+      count: 1
+      resource_def:
+        type: nested.yaml
+        properties:
+          fixed_network_id: {get_resource: fixed_network}
+    '''
+        nested_template = '''
+heat_template_version: 2015-04-30
+
+parameters:
+  fixed_network_id:
+    type: string
+resources:
+  port:
+    type: "OS::Neutron::Port"
+    properties:
+      network_id:
+          get_param: fixed_network_id
+
+'''
+        stack_name = self._stack_rand_name()
+        result = self.client.stacks.preview(
+            disable_rollback=True,
+            stack_name=stack_name,
+            template=main_template,
+            files={'nested.yaml': nested_template}).to_dict()
+
+        resource_names = []
+
+        def get_resource_names(resources):
+            for item in resources:
+                if isinstance(item, dict):
+                    resource_names.append(item['resource_name'])
+                else:
+                    get_resource_names(item)
+        get_resource_names(result['resources'])
+        # ensure that both the fixed_network and the port are present
+        self.assertIn('fixed_network', resource_names)
+        self.assertIn('port', resource_names)
diff --git a/heat_tempest_plugin/tests/functional/test_preview_update.py b/heat_tempest_plugin/tests/functional/test_preview_update.py
new file mode 100644
index 0000000..a4d293f
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_preview_update.py
@@ -0,0 +1,298 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+test_template_one_resource = {
+    'heat_template_version': '2013-05-23',
+    'description': 'Test template to create one instance.',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0
+            }
+        }
+    }
+}
+
+test_template_two_resource = {
+    'heat_template_version': '2013-05-23',
+    'description': 'Test template to create two instances.',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0
+            }
+        },
+        'test2': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0
+            }
+        }
+    }
+}
+
+
+class UpdatePreviewBase(functional_base.FunctionalTestsBase):
+
+    def assert_empty_sections(self, changes, empty_sections):
+        for section in empty_sections:
+            self.assertEqual([], changes[section])
+
+
+class UpdatePreviewStackTest(UpdatePreviewBase):
+
+    def test_add_resource(self):
+        self.stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+        result = self.preview_update_stack(self.stack_identifier,
+                                           test_template_two_resource)
+        changes = result['resource_changes']
+
+        unchanged = changes['unchanged'][0]['resource_name']
+        self.assertEqual('test1', unchanged)
+
+        added = changes['added'][0]['resource_name']
+        self.assertEqual('test2', added)
+
+        self.assert_empty_sections(changes, ['updated', 'replaced', 'deleted'])
+
+    def test_no_change(self):
+        self.stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+        result = self.preview_update_stack(self.stack_identifier,
+                                           test_template_one_resource)
+        changes = result['resource_changes']
+
+        unchanged = changes['unchanged'][0]['resource_name']
+        self.assertEqual('test1', unchanged)
+
+        self.assert_empty_sections(
+            changes, ['updated', 'replaced', 'deleted', 'added'])
+
+    def test_update_resource(self):
+        self.stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+        test_template_updated_resource = {
+            'heat_template_version': '2013-05-23',
+            'description': 'Test template to create one instance.',
+            'resources': {
+                'test1': {
+                    'type': 'OS::Heat::TestResource',
+                    'properties': {
+                        'value': 'Test1 foo',
+                        'fail': False,
+                        'update_replace': False,
+                        'wait_secs': 0
+                    }
+                }
+            }
+        }
+
+        result = self.preview_update_stack(self.stack_identifier,
+                                           test_template_updated_resource)
+        changes = result['resource_changes']
+
+        updated = changes['updated'][0]['resource_name']
+        self.assertEqual('test1', updated)
+
+        self.assert_empty_sections(
+            changes, ['added', 'unchanged', 'replaced', 'deleted'])
+
+    def test_replaced_resource(self):
+        self.stack_identifier = self.stack_create(
+            template=test_template_one_resource)
+        new_template = {
+            'heat_template_version': '2013-05-23',
+            'description': 'Test template to create one instance.',
+            'resources': {
+                'test1': {
+                    'type': 'OS::Heat::TestResource',
+                    'properties': {
+                        'update_replace': True,
+                    }
+                }
+            }
+        }
+
+        result = self.preview_update_stack(self.stack_identifier, new_template)
+        changes = result['resource_changes']
+
+        replaced = changes['replaced'][0]['resource_name']
+        self.assertEqual('test1', replaced)
+
+        self.assert_empty_sections(
+            changes, ['added', 'unchanged', 'updated', 'deleted'])
+
+    def test_delete_resource(self):
+        self.stack_identifier = self.stack_create(
+            template=test_template_two_resource)
+        result = self.preview_update_stack(self.stack_identifier,
+                                           test_template_one_resource)
+        changes = result['resource_changes']
+
+        unchanged = changes['unchanged'][0]['resource_name']
+        self.assertEqual('test1', unchanged)
+
+        deleted = changes['deleted'][0]['resource_name']
+        self.assertEqual('test2', deleted)
+
+        self.assert_empty_sections(changes, ['updated', 'replaced', 'added'])
+
+
+class UpdatePreviewStackTestNested(UpdatePreviewBase):
+    template_nested_parent = '''
+heat_template_version: 2016-04-08
+resources:
+  nested1:
+    type: nested1.yaml
+'''
+
+    template_nested1 = '''
+heat_template_version: 2016-04-08
+resources:
+  nested2:
+    type: nested2.yaml
+'''
+
+    template_nested2 = '''
+heat_template_version: 2016-04-08
+resources:
+  random:
+    type: OS::Heat::RandomString
+'''
+
+    template_nested2_2 = '''
+heat_template_version: 2016-04-08
+resources:
+  random:
+    type: OS::Heat::RandomString
+  random2:
+    type: OS::Heat::RandomString
+'''
+
+    def _get_by_resource_name(self, changes, name, action):
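+        # Exactly one entry with this resource_name is expected in the
+        # given change section; return it.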
+        filtered_l = [x for x in changes[action]
+                      if x['resource_name'] == name]
+        self.assertEqual(1, len(filtered_l))
+        return filtered_l[0]
+
+    def test_nested_resources_nochange(self):
+        files = {'nested1.yaml': self.template_nested1,
+                 'nested2.yaml': self.template_nested2}
+        self.stack_identifier = self.stack_create(
+            template=self.template_nested_parent, files=files)
+        result = self.preview_update_stack(
+            self.stack_identifier,
+            template=self.template_nested_parent,
+            files=files, show_nested=True)
+        changes = result['resource_changes']
+
+        # The nested random resource should be unchanged, but we always
+        # update nested stacks even when there are no changes
+        self.assertEqual(1, len(changes['unchanged']))
+        self.assertEqual('random', changes['unchanged'][0]['resource_name'])
+        self.assertEqual('nested2', changes['unchanged'][0]['parent_resource'])
+
+        self.assertEqual(2, len(changes['updated']))
+        u_nested1 = self._get_by_resource_name(changes, 'nested1', 'updated')
+        self.assertNotIn('parent_resource', u_nested1)
+        u_nested2 = self._get_by_resource_name(changes, 'nested2', 'updated')
+        self.assertEqual('nested1', u_nested2['parent_resource'])
+
+        self.assert_empty_sections(changes, ['replaced', 'deleted', 'added'])
+
+    def test_nested_resources_add(self):
+        files = {'nested1.yaml': self.template_nested1,
+                 'nested2.yaml': self.template_nested2}
+        self.stack_identifier = self.stack_create(
+            template=self.template_nested_parent, files=files)
+        files['nested2.yaml'] = self.template_nested2_2
+        result = self.preview_update_stack(
+            self.stack_identifier,
+            template=self.template_nested_parent,
+            files=files, show_nested=True)
+        changes = result['resource_changes']
+
+        # The nested random resource should be unchanged, but we always
+        # update nested stacks even when there are no changes
+        self.assertEqual(1, len(changes['unchanged']))
+        self.assertEqual('random', changes['unchanged'][0]['resource_name'])
+        self.assertEqual('nested2', changes['unchanged'][0]['parent_resource'])
+
+        self.assertEqual(1, len(changes['added']))
+        self.assertEqual('random2', changes['added'][0]['resource_name'])
+        self.assertEqual('nested2', changes['added'][0]['parent_resource'])
+
+        self.assert_empty_sections(changes, ['replaced', 'deleted'])
+
+    def test_nested_resources_delete(self):
+        files = {'nested1.yaml': self.template_nested1,
+                 'nested2.yaml': self.template_nested2_2}
+        self.stack_identifier = self.stack_create(
+            template=self.template_nested_parent, files=files)
+        files['nested2.yaml'] = self.template_nested2
+        result = self.preview_update_stack(
+            self.stack_identifier,
+            template=self.template_nested_parent,
+            files=files, show_nested=True)
+        changes = result['resource_changes']
+
+        # The nested random resource should be unchanged, but we always
+        # update nested stacks even when there are no changes
+        self.assertEqual(1, len(changes['unchanged']))
+        self.assertEqual('random', changes['unchanged'][0]['resource_name'])
+        self.assertEqual('nested2', changes['unchanged'][0]['parent_resource'])
+
+        self.assertEqual(1, len(changes['deleted']))
+        self.assertEqual('random2', changes['deleted'][0]['resource_name'])
+        self.assertEqual('nested2', changes['deleted'][0]['parent_resource'])
+
+        self.assert_empty_sections(changes, ['replaced', 'added'])
+
+    def test_nested_resources_replace(self):
+        files = {'nested1.yaml': self.template_nested1,
+                 'nested2.yaml': self.template_nested2}
+        self.stack_identifier = self.stack_create(
+            template=self.template_nested_parent, files=files)
+        parent_none = self.template_nested_parent.replace(
+            'nested1.yaml', 'OS::Heat::None')
+        result = self.preview_update_stack(
+            self.stack_identifier,
+            template=parent_none,
+            show_nested=True)
+        changes = result['resource_changes']
+
+        # Replacing nested1 with OS::Heat::None replaces that resource and
+        # deletes everything beneath it
+        self.assertEqual(1, len(changes['replaced']))
+        self.assertEqual('nested1', changes['replaced'][0]['resource_name'])
+
+        self.assertEqual(2, len(changes['deleted']))
+        d_random = self._get_by_resource_name(changes, 'random', 'deleted')
+        self.assertEqual('nested2', d_random['parent_resource'])
+        d_nested2 = self._get_by_resource_name(changes, 'nested2', 'deleted')
+        self.assertEqual('nested1', d_nested2['parent_resource'])
+
+        self.assert_empty_sections(changes, ['updated', 'unchanged', 'added'])
diff --git a/heat_tempest_plugin/tests/functional/test_purge.py b/heat_tempest_plugin/tests/functional/test_purge.py
new file mode 100644
index 0000000..1dd4ff8
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_purge.py
@@ -0,0 +1,51 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+
+from oslo_concurrency import processutils
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class PurgeTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2014-10-16
+parameters:
+resources:
+  test_resource:
+    type: OS::Heat::TestResource
+'''
+
+    def test_purge(self):
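+        # 'heat-manage purge_deleted 0' should remove soft-deleted stacks
+        # immediately, so the stack disappears from show_deleted listings.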
+        stack_identifier = self.stack_create(template=self.template)
+        self._stack_delete(stack_identifier)
+        stacks = dict((stack.id, stack) for stack in
+                      self.client.stacks.list(show_deleted=True))
+        self.assertIn(stack_identifier.split('/')[1], stacks)
+        time.sleep(1)
+        cmd = "heat-manage purge_deleted 0"
+        processutils.execute(cmd, shell=True)
+        stacks = dict((stack.id, stack) for stack in
+                      self.client.stacks.list(show_deleted=True))
+        self.assertNotIn(stack_identifier.split('/')[1], stacks)
+
+        # Test with tags
+        stack_identifier = self.stack_create(template=self.template,
+                                             tags="foo,bar")
+        self._stack_delete(stack_identifier)
+        time.sleep(1)
+        cmd = "heat-manage purge_deleted 0"
+        processutils.execute(cmd, shell=True)
+        stacks = dict((stack.id, stack) for stack in
+                      self.client.stacks.list(show_deleted=True))
+        self.assertNotIn(stack_identifier.split('/')[1], stacks)
diff --git a/heat_tempest_plugin/tests/functional/test_reload_on_sighup.py b/heat_tempest_plugin/tests/functional/test_reload_on_sighup.py
new file mode 100644
index 0000000..81da958
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_reload_on_sighup.py
@@ -0,0 +1,142 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+import subprocess
+import time
+
+import eventlet
+
+from oslo_concurrency import processutils
+from six.moves import configparser
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ReloadOnSighupTest(functional_base.FunctionalTestsBase):
+
+    def setUp(self):
+        self.config_file = "/etc/heat/heat.conf"
+        super(ReloadOnSighupTest, self).setUp()
+
+    def _is_mod_wsgi_daemon(self, service):
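+        # mod_wsgi truncates daemon process names, so look for the first
+        # characters of 'wsgi:<service>' (underscores as dashes) in ps.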
+        process = ''.join(['wsgi:',
+                           service[:9]]).replace('_', '-').encode('utf-8')
+        s = subprocess.Popen(["ps", "ax"], stdout=subprocess.PIPE)
+        for x in s.stdout:
+            if re.search(process, x):
+                return True
+
+    def _set_config_value(self, service, key, value):
+        config = configparser.ConfigParser()
+
+        # NOTE(prazumovsky): With several workers, one thread may open
+        # self.config_file for writing (which truncates it) at the same
+        # moment another thread reads the file to set a config option.
+        # The reader then sees an empty file and configparser raises
+        # NoSectionError, so retry until the first thread has finished
+        # writing the file.
+        retries_count = self.conf.sighup_config_edit_retries
+        while True:
+            config.read(self.config_file)
+            try:
+                config.set(service, key, str(value))
+            except configparser.NoSectionError:
+                if retries_count <= 0:
+                    raise
+                retries_count -= 1
+                eventlet.sleep(1)
+            else:
+                break
+
+        with open(self.config_file, 'w') as f:
+            config.write(f)
+
+    def _get_config_value(self, service, key):
+        config = configparser.ConfigParser()
+        config.read(self.config_file)
+        val = config.get(service, key)
+        return val
+
+    def _get_heat_api_pids(self, service):
+        # get the pids of all heat-api processes
+        if service == "heat_api":
+            process = "heat-api|grep -Ev 'grep|cloudwatch|cfn'"
+        else:
+            process = "%s|grep -Ev 'grep'" % service.replace('_', '-')
+        cmd = "ps -ef|grep %s|awk '{print $2}'" % process
+        out, err = processutils.execute(cmd, shell=True)
+        self.assertIsNotNone(out, "heat-api service not running. %s" % err)
+        # list() so the pids can be reused in the set operations below
+        pids = list(filter(None, out.split('\n')))
+
+        # get the parent pids of all heat-api processes
+        cmd = "ps -ef|grep %s|awk '{print $3}'" % process
+        out, _ = processutils.execute(cmd, shell=True)
+        parent_pids = list(filter(None, out.split('\n')))
+
+        heat_api_parent = list(set(pids) & set(parent_pids))[0]
+        heat_api_children = list(set(pids) - set(parent_pids))
+
+        return heat_api_parent, heat_api_children
+
+    def _change_config(self, service, old_workers, new_workers):
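+        # Change the worker count, send SIGHUP to the parent process and
+        # check that the parent pid survives while every child process is
+        # respawned with the new worker count.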
+        pre_reload_parent, pre_reload_children = self._get_heat_api_pids(
+            service)
+        self.assertEqual(old_workers, len(pre_reload_children))
+
+        # change the config values
+        self._set_config_value(service, 'workers', new_workers)
+        cmd = "kill -HUP %s" % pre_reload_parent
+        processutils.execute(cmd, shell=True)
+
+        # wait till heat-api reloads
+        start_time = time.time()
+        while time.time() - start_time < self.conf.sighup_timeout:
+            post_reload_parent, post_reload_children = self._get_heat_api_pids(
+                service)
+            intersect = set(post_reload_children) & set(pre_reload_children)
+            if (new_workers == len(post_reload_children)
+                and pre_reload_parent == post_reload_parent
+                    and intersect == set()):
+                break
+            eventlet.sleep(1)
+        self.assertEqual(pre_reload_parent, post_reload_parent)
+        self.assertEqual(new_workers, len(post_reload_children))
+        # test if all child processes are newly created
+        self.assertEqual(set(post_reload_children) & set(pre_reload_children),
+                         set())
+
+    def _reload(self, service):
+        old_workers = int(self._get_config_value(service, 'workers'))
+        new_workers = old_workers + 1
+        self.addCleanup(self._set_config_value, service, 'workers',
+                        old_workers)
+
+        self._change_config(service, old_workers, new_workers)
+        # revert all the changes made
+        self._change_config(service, new_workers, old_workers)
+
+    def _reload_on_sighup(self, service):
+        if not self._is_mod_wsgi_daemon(service):
+            self._reload(service)
+        else:
+            self.skipTest('Skipping test; service is running under httpd.')
+
+    def test_api_reload_on_sighup(self):
+        self._reload_on_sighup('heat_api')
+
+    def test_api_cfn_reload_on_sighup(self):
+        self._reload_on_sighup('heat_api_cfn')
+
+    def test_api_cloudwatch_on_sighup(self):
+        self._reload_on_sighup('heat_api_cloudwatch')
diff --git a/heat_tempest_plugin/tests/functional/test_remote_stack.py b/heat_tempest_plugin/tests/functional/test_remote_stack.py
new file mode 100644
index 0000000..4c53933
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_remote_stack.py
@@ -0,0 +1,144 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heatclient import exc
+import six
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class RemoteStackTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2013-05-23
+resources:
+  my_stack:
+    type: OS::Heat::Stack
+    properties:
+      context:
+        region_name: RegionOne
+      template:
+        get_file: remote_stack.yaml
+outputs:
+  key:
+    value: {get_attr: [my_stack, outputs]}
+'''
+
+    remote_template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+outputs:
+  remote_key:
+    value: {get_attr: [random1, value]}
+'''
+
+    def setUp(self):
+        super(RemoteStackTest, self).setUp()
+        # replacing the template region with the one from the config
+        self.template = self.template.replace('RegionOne',
+                                              self.conf.region)
+
+    def test_remote_stack_alone(self):
+        stack_id = self.stack_create(template=self.remote_template)
+        expected_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+        stack = self.client.stacks.get(stack_id)
+        output_value = self._stack_output(stack, 'remote_key')
+        self.assertEqual(32, len(output_value))
+
+    def test_stack_create(self):
+        files = {'remote_stack.yaml': self.remote_template}
+        stack_id = self.stack_create(files=files)
+
+        expected_resources = {'my_stack': 'OS::Heat::Stack'}
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+
+        stack = self.client.stacks.get(stack_id)
+        output = self._stack_output(stack, 'key')
+        parent_output_value = output['remote_key']
+        self.assertEqual(32, len(parent_output_value))
+
+        rsrc = self.client.resources.get(stack_id, 'my_stack')
+        remote_id = rsrc.physical_resource_id
+        rstack = self.client.stacks.get(remote_id)
+        self.assertEqual(remote_id, rstack.id)
+        remote_output_value = self._stack_output(rstack, 'remote_key')
+        self.assertEqual(32, len(remote_output_value))
+        self.assertEqual(parent_output_value, remote_output_value)
+
+        remote_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(remote_resources, self.list_resources(remote_id))
+
+    def test_stack_create_bad_region(self):
+        tmpl_bad_region = self.template.replace(self.conf.region, 'DARKHOLE')
+        files = {'remote_stack.yaml': self.remote_template}
+        kwargs = {
+            'template': tmpl_bad_region,
+            'files': files
+        }
+        ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
+
+        error_msg = ('ERROR: Cannot establish connection to Heat endpoint '
+                     'at region "DARKHOLE" due to "publicURL endpoint for '
+                     'orchestration service in DARKHOLE region not found"')
+        self.assertEqual(error_msg, six.text_type(ex))
+
+    def test_stack_resource_validation_fail(self):
+        tmpl_bad_format = self.remote_template.replace('resources', 'resource')
+        files = {'remote_stack.yaml': tmpl_bad_format}
+        kwargs = {'files': files}
+        ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
+
+        error_msg = ('ERROR: Failed validating stack template using Heat '
+                     'endpoint at region "%s" due to '
+                     '"ERROR: The template section is '
+                     'invalid: resource"') % self.conf.region
+        self.assertEqual(error_msg, six.text_type(ex))
+
+    def test_stack_update(self):
+        files = {'remote_stack.yaml': self.remote_template}
+        stack_id = self.stack_create(files=files)
+
+        expected_resources = {'my_stack': 'OS::Heat::Stack'}
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+
+        rsrc = self.client.resources.get(stack_id, 'my_stack')
+        physical_resource_id = rsrc.physical_resource_id
+        rstack = self.client.stacks.get(physical_resource_id)
+        self.assertEqual(physical_resource_id, rstack.id)
+
+        remote_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(remote_resources,
+                         self.list_resources(rstack.id))
+        # do an update
+        update_template = self.remote_template.replace('random1', 'random2')
+        files = {'remote_stack.yaml': update_template}
+        self.update_stack(stack_id, self.template, files=files)
+
+        # check if the remote stack is still there with the same ID
+        self.assertEqual(expected_resources, self.list_resources(stack_id))
+        rsrc = self.client.resources.get(stack_id, 'my_stack')
+        physical_resource_id = rsrc.physical_resource_id
+        rstack = self.client.stacks.get(physical_resource_id)
+        self.assertEqual(physical_resource_id, rstack.id)
+
+        remote_resources = {'random2': 'OS::Heat::RandomString'}
+        self.assertEqual(remote_resources,
+                         self.list_resources(rstack.id))
+
+    def test_stack_suspend_resume(self):
+        files = {'remote_stack.yaml': self.remote_template}
+        stack_id = self.stack_create(files=files)
+        self.stack_suspend(stack_id)
+        self.stack_resume(stack_id)
diff --git a/heat_tempest_plugin/tests/functional/test_replace_deprecated.py b/heat_tempest_plugin/tests/functional/test_replace_deprecated.py
new file mode 100644
index 0000000..0eee3f1
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_replace_deprecated.py
@@ -0,0 +1,92 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import yaml
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ReplaceDeprecatedResourceTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: "2013-05-23"
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+
+resources:
+  config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      config: xxxx
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      networks: [{network: {get_param: network} }]
+      user_data_format: SOFTWARE_CONFIG
+  dep:
+    type: OS::Heat::SoftwareDeployments
+    properties:
+        config: {get_resource: config}
+        servers: {'0': {get_resource: server}}
+        signal_transport: NO_SIGNAL
+outputs:
+  server:
+    value: {get_resource: server}
+'''
+
+    deployment_group_snippet = '''
+type: OS::Heat::SoftwareDeploymentGroup
+properties:
+  config: {get_resource: config}
+  servers: {'0': {get_resource: server}}
+  signal_transport: NO_SIGNAL
+'''
+    enable_cleanup = True
+
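+    # Updating 'dep' from the deprecated OS::Heat::SoftwareDeployments type
+    # to OS::Heat::SoftwareDeploymentGroup must not replace the server.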
+    def test_replace_software_deployments(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'network': self.conf.fixed_network_name,
+                 'image': self.conf.minimal_image_ref
+                 }
+        deployments_template = yaml.safe_load(self.template)
+        stack_identifier = self.stack_create(
+            parameters=parms,
+            template=deployments_template,
+            enable_cleanup=self.enable_cleanup)
+        expected_resources = {'config': 'OS::Heat::SoftwareConfig',
+                              'dep': 'OS::Heat::SoftwareDeployments',
+                              'server': 'OS::Nova::Server'}
+        resource = self.client.resources.get(stack_identifier, 'server')
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+        initial_phy_id = resource.physical_resource_id
+        resources = deployments_template['resources']
+        resources['dep'] = yaml.safe_load(self.deployment_group_snippet)
+        self.update_stack(
+            stack_identifier,
+            deployments_template,
+            parameters=parms)
+        resource = self.client.resources.get(stack_identifier, 'server')
+        self.assertEqual(initial_phy_id,
+                         resource.physical_resource_id)
+        expected_new_resources = {'config': 'OS::Heat::SoftwareConfig',
+                                  'dep': 'OS::Heat::SoftwareDeploymentGroup',
+                                  'server': 'OS::Nova::Server'}
+        self.assertEqual(expected_new_resources,
+                         self.list_resources(stack_identifier))
diff --git a/heat_tempest_plugin/tests/functional/test_resource_chain.py b/heat_tempest_plugin/tests/functional/test_resource_chain.py
new file mode 100644
index 0000000..06ec8ff
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_resource_chain.py
@@ -0,0 +1,167 @@
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+TEMPLATE_SIMPLE = '''
+heat_template_version: 2016-04-08
+parameters:
+  string-length:
+    type: number
+resources:
+  my-chain:
+    type: OS::Heat::ResourceChain
+    properties:
+      resources: ['OS::Heat::RandomString', 'OS::Heat::RandomString']
+      resource_properties:
+        length: { get_param: string-length }
+outputs:
+  resource-ids:
+    value: { get_attr: [my-chain, refs] }
+  resource-0-value:
+    value: { get_attr: [my-chain, resource.0, value] }
+  all-resource-attrs:
+    value: { get_attr: [my-chain, attributes, value] }
+'''
+
+TEMPLATE_PARAM_DRIVEN = '''
+heat_template_version: 2016-04-08
+parameters:
+  chain-types:
+    type: comma_delimited_list
+resources:
+  my-chain:
+    type: OS::Heat::ResourceChain
+    properties:
+      resources: { get_param: chain-types }
+'''
+
+
+class ResourceChainTests(functional_base.FunctionalTestsBase):
+
+    def test_create(self):
+        # Test
+        params = {'string-length': 8}
+        stack_id = self.stack_create(template=TEMPLATE_SIMPLE,
+                                     parameters=params)
+
+        # Verify
+        stack = self.client.stacks.get(stack_id)
+        self.assertIsNotNone(stack)
+
+        # Top-level resource for chain
+        expected = {'my-chain': 'OS::Heat::ResourceChain'}
+        found = self.list_resources(stack_id)
+        self.assertEqual(expected, found)
+
+        # Nested stack exists and has two resources
+        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
+        expected = {'0': 'OS::Heat::RandomString',
+                    '1': 'OS::Heat::RandomString'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
+
+        # Outputs
+        resource_ids = self._stack_output(stack, 'resource-ids')
+        self.assertIsNotNone(resource_ids)
+        self.assertEqual(2, len(resource_ids))
+
+        resource_value = self._stack_output(stack, 'resource-0-value')
+        self.assertIsNotNone(resource_value)
+        self.assertEqual(8, len(resource_value))  # from parameter
+
+        resource_attrs = self._stack_output(stack, 'all-resource-attrs')
+        self.assertIsNotNone(resource_attrs)
+        self.assertIsInstance(resource_attrs, dict)
+        self.assertEqual(2, len(resource_attrs))
+        self.assertEqual(8, len(resource_attrs['0']))
+        self.assertEqual(8, len(resource_attrs['1']))
+
+    def test_update(self):
+        # Setup
+        params = {'string-length': 8}
+        stack_id = self.stack_create(template=TEMPLATE_SIMPLE,
+                                     parameters=params)
+
+        update_tmpl = '''
+        heat_template_version: 2016-04-08
+        parameters:
+          string-length:
+            type: number
+        resources:
+          my-chain:
+            type: OS::Heat::ResourceChain
+            properties:
+              resources: ['OS::Heat::None']
+        '''
+
+        # Test
+        self.update_stack(stack_id, template=update_tmpl, parameters=params)
+
+        # Verify
+        # Nested stack only has the None resource
+        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
+        expected = {'0': 'OS::Heat::None'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
+
+    def test_update_resources(self):
+        params = {'chain-types': 'OS::Heat::None'}
+
+        stack_id = self.stack_create(template=TEMPLATE_PARAM_DRIVEN,
+                                     parameters=params)
+
+        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
+        expected = {'0': 'OS::Heat::None'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
+
+        params = {'chain-types': 'OS::Heat::None,OS::Heat::None'}
+        self.update_stack(stack_id, template=TEMPLATE_PARAM_DRIVEN,
+                          parameters=params)
+
+        expected = {'0': 'OS::Heat::None', '1': 'OS::Heat::None'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
+
+    def test_resources_param_driven(self):
+        # Setup
+        params = {'chain-types':
+                  'OS::Heat::None,OS::Heat::RandomString,OS::Heat::None'}
+
+        # Test
+        stack_id = self.stack_create(template=TEMPLATE_PARAM_DRIVEN,
+                                     parameters=params)
+
+        # Verify
+        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
+        expected = {'0': 'OS::Heat::None',
+                    '1': 'OS::Heat::RandomString',
+                    '2': 'OS::Heat::None'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
+
+    def test_resources_env_defined(self):
+        # Setup
+        env = {'parameters': {'chain-types': 'OS::Heat::None'}}
+
+        # Test
+        stack_id = self.stack_create(template=TEMPLATE_PARAM_DRIVEN,
+                                     environment=env)
+
+        # Verify
+        nested_id = self.group_nested_identifier(stack_id, 'my-chain')
+        expected = {'0': 'OS::Heat::None'}
+        found = self.list_resources(nested_id)
+        self.assertEqual(expected, found)
diff --git a/heat_tempest_plugin/tests/functional/test_resource_group.py b/heat_tempest_plugin/tests/functional/test_resource_group.py
new file mode 100644
index 0000000..aea1bda
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_resource_group.py
@@ -0,0 +1,695 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import json
+
+from heatclient import exc
+import six
+import yaml
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ResourceGroupTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2013-05-23
+resources:
+  random_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 0
+      resource_def:
+        type: My::RandomString
+        properties:
+          length: 30
+          salt: initial
+outputs:
+  random1:
+    value: {get_attr: [random_group, resource.0.value]}
+  random2:
+    value: {get_attr: [random_group, resource.1.value]}
+  all_values:
+    value: {get_attr: [random_group, value]}
+'''
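+    # 'My::RandomString' is mapped to a concrete type via the
+    # resource_registry in each test's environment; the outputs read
+    # individual members via 'resource.N.value' and the whole group via
+    # the aggregate 'value' attribute.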
+
+    def test_resource_group_zero_novalidate(self):
+        # Nested resources should be validated only when count > 0.
+        # This allows features to be disabled via count=0 without
+        # triggering validation of nested resource custom constraints,
+        # e.g. images, in the nested schema.
+        nested_template_fail = '''
+heat_template_version: 2013-05-23
+parameters:
+  length:
+    type: string
+    default: 50
+  salt:
+    type: string
+    default: initial
+resources:
+  random:
+    type: OS::Heat::RandomString
+    properties:
+      length: BAD
+'''
+
+        files = {'provider.yaml': nested_template_fail}
+        env = {'resource_registry':
+               {'My::RandomString': 'provider.yaml'}}
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files=files,
+            environment=env
+        )
+
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+
+        # Check we created an empty nested stack
+        nested_identifier = self.group_nested_identifier(stack_identifier,
+                                                         'random_group')
+        self.assertEqual({}, self.list_resources(nested_identifier))
+
+        # Prove validation works for non-zero create/update
+        template_two_nested = self.template.replace("count: 0", "count: 2")
+        expected_err = ("resources.random_group<nested_stack>.resources."
+                        "0<provider.yaml>.resources.random: : "
+                        "Value 'BAD' is not an integer")
+        ex = self.assertRaises(exc.HTTPBadRequest, self.update_stack,
+                               stack_identifier, template_two_nested,
+                               environment=env, files=files)
+        self.assertIn(expected_err, six.text_type(ex))
+
+        ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create,
+                               template=template_two_nested,
+                               environment=env, files=files)
+        self.assertIn(expected_err, six.text_type(ex))
+
+    def _validate_resources(self, stack_identifier, expected_count):
+        resources = self.list_group_resources(stack_identifier,
+                                              'random_group')
+        self.assertEqual(expected_count, len(resources))
+        expected_resources = dict(
+            (str(idx), 'My::RandomString')
+            for idx in range(expected_count))
+
+        self.assertEqual(expected_resources, resources)
+
+    def test_create(self):
+        def validate_output(stack, output_key, length):
+            output_value = self._stack_output(stack, output_key)
+            self.assertEqual(length, len(output_value))
+            return output_value
+        # verify that the resources in the resource group are identically
+        # configured, and that resource names and outputs are appropriate.
+        env = {'resource_registry':
+               {'My::RandomString': 'OS::Heat::RandomString'}}
+        create_template = self.template.replace("count: 0", "count: 2")
+        stack_identifier = self.stack_create(template=create_template,
+                                             environment=env)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+
+        # validate count, type and name of resources in a resource group.
+        self._validate_resources(stack_identifier, 2)
+
+        # validate outputs
+        stack = self.client.stacks.get(stack_identifier)
+        outputs = []
+        outputs.append(validate_output(stack, 'random1', 30))
+        outputs.append(validate_output(stack, 'random2', 30))
+        self.assertEqual(outputs, self._stack_output(stack, 'all_values'))
+
+    def test_update_increase_decrease_count(self):
+        # create stack with resource group count 2
+        env = {'resource_registry':
+               {'My::RandomString': 'OS::Heat::RandomString'}}
+        create_template = self.template.replace("count: 0", "count: 2")
+        stack_identifier = self.stack_create(template=create_template,
+                                             environment=env)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+        # verify that the resource group has 2 resources
+        self._validate_resources(stack_identifier, 2)
+
+        # increase the resource group count to 5
+        update_template = self.template.replace("count: 0", "count: 5")
+        self.update_stack(stack_identifier, update_template, environment=env)
+        # verify that the resource group has 5 resources
+        self._validate_resources(stack_identifier, 5)
+
+        # decrease the resource group count to 3
+        update_template = self.template.replace("count: 0", "count: 3")
+        self.update_stack(stack_identifier, update_template, environment=env)
+        # verify that the resource group has 3 resources
+        self._validate_resources(stack_identifier, 3)
+
+    def test_update_removal_policies(self):
+        rp_template = '''
+heat_template_version: 2014-10-16
+resources:
+  random_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 5
+      removal_policies: []
+      resource_def:
+        type: OS::Heat::RandomString
+'''
+
+        # create stack with resource group, initial count 5
+        stack_identifier = self.stack_create(template=rp_template)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+        group_resources = self.list_group_resources(stack_identifier,
+                                                    'random_group')
+        expected_resources = {u'0': u'OS::Heat::RandomString',
+                              u'1': u'OS::Heat::RandomString',
+                              u'2': u'OS::Heat::RandomString',
+                              u'3': u'OS::Heat::RandomString',
+                              u'4': u'OS::Heat::RandomString'}
+        self.assertEqual(expected_resources, group_resources)
+
+        # Remove three, specifying the middle resources to be removed
+        update_template = rp_template.replace(
+            'removal_policies: []',
+            'removal_policies: [{resource_list: [\'1\', \'2\', \'3\']}]')
+        self.update_stack(stack_identifier, update_template)
+        group_resources = self.list_group_resources(stack_identifier,
+                                                    'random_group')
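+        # The removed members are not re-created under their old indices;
+        # with count still 5, the three replacements appear as '5', '6'
+        # and '7'.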
+        expected_resources = {u'0': u'OS::Heat::RandomString',
+                              u'4': u'OS::Heat::RandomString',
+                              u'5': u'OS::Heat::RandomString',
+                              u'6': u'OS::Heat::RandomString',
+                              u'7': u'OS::Heat::RandomString'}
+        self.assertEqual(expected_resources, group_resources)
+
+    def test_props_update(self):
+        """Test update of resource_def properties behaves as expected."""
+
+        env = {'resource_registry':
+               {'My::RandomString': 'OS::Heat::RandomString'}}
+        template_one = self.template.replace("count: 0", "count: 1")
+        stack_identifier = self.stack_create(template=template_one,
+                                             environment=env)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+
+        initial_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual({'0': 'My::RandomString'},
+                         self.list_resources(initial_nested_ident))
+        # get the resource id
+        res = self.client.resources.get(initial_nested_ident, '0')
+        initial_res_id = res.physical_resource_id
+
+        # change the salt (this should replace the RandomString but
+        # not the nested stack or resource group).
+        template_salt = template_one.replace("salt: initial", "salt: more")
+        self.update_stack(stack_identifier, template_salt, environment=env)
+        updated_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual(initial_nested_ident, updated_nested_ident)
+
+        # compare the resource id, we expect a change.
+        res = self.client.resources.get(updated_nested_ident, '0')
+        updated_res_id = res.physical_resource_id
+        self.assertNotEqual(initial_res_id, updated_res_id)
+
+    def test_update_nochange(self):
+        """Test update with no properties change."""
+
+        env = {'resource_registry':
+               {'My::RandomString': 'OS::Heat::RandomString'}}
+        template_one = self.template.replace("count: 0", "count: 2")
+        stack_identifier = self.stack_create(template=template_one,
+                                             environment=env)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+
+        initial_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual({'0': 'My::RandomString', '1': 'My::RandomString'},
+                         self.list_resources(initial_nested_ident))
+        # get the output
+        stack0 = self.client.stacks.get(stack_identifier)
+        initial_rand = self._stack_output(stack0, 'random1')
+
+        template_copy = copy.deepcopy(template_one)
+        self.update_stack(stack_identifier, template_copy, environment=env)
+        updated_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual(initial_nested_ident, updated_nested_ident)
+
+        # compare the random number, we expect no change.
+        stack1 = self.client.stacks.get(stack_identifier)
+        updated_rand = self._stack_output(stack1, 'random1')
+        self.assertEqual(initial_rand, updated_rand)
+
+    def test_update_nochange_resource_needs_update(self):
+        """Test update when the resource definition has changed.
+
+        Test the scenario where the ResourceGroup update happens without
+        any changed properties. This can happen if the definition of a
+        contained provider resource changes (the files map changes); the
+        group and the underlying nested stack should then end up updated.
+        """
+
+        random_templ1 = '''
+heat_template_version: 2013-05-23
+parameters:
+  length:
+    type: string
+    default: not-used
+  salt:
+    type: string
+    default: not-used
+resources:
+  random1:
+    type: OS::Heat::RandomString
+    properties:
+      salt: initial
+outputs:
+  value:
+    value: {get_attr: [random1, value]}
+'''
+        files1 = {'my_random.yaml': random_templ1}
+
+        random_templ2 = random_templ1.replace('salt: initial',
+                                              'salt: more')
+        files2 = {'my_random.yaml': random_templ2}
+
+        env = {'resource_registry':
+               {'My::RandomString': 'my_random.yaml'}}
+
+        template_one = self.template.replace("count: 0", "count: 2")
+        stack_identifier = self.stack_create(template=template_one,
+                                             environment=env,
+                                             files=files1)
+        self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
+                         self.list_resources(stack_identifier))
+        self.assertEqual(files1, self.client.stacks.files(stack_identifier))
+
+        initial_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual({'0': 'My::RandomString', '1': 'My::RandomString'},
+                         self.list_resources(initial_nested_ident))
+        # get the output
+        stack0 = self.client.stacks.get(stack_identifier)
+        initial_rand = self._stack_output(stack0, 'random1')
+
+        # change the environment so we use a different TemplateResource.
+        # note "files2".
+        self.update_stack(stack_identifier, template_one,
+                          environment=env, files=files2)
+        updated_nested_ident = self.group_nested_identifier(stack_identifier,
+                                                            'random_group')
+        self.assertEqual(initial_nested_ident, updated_nested_ident)
+        self.assertEqual(files2, self.client.stacks.files(stack_identifier))
+
+        # compare the output, we expect a change.
+        stack1 = self.client.stacks.get(stack_identifier)
+        updated_rand = self._stack_output(stack1, 'random1')
+        self.assertNotEqual(initial_rand, updated_rand)
+
+
+class ResourceGroupTestNullParams(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: 2013-05-23
+parameters:
+  param:
+    type: empty
+resources:
+  random_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 1
+      resource_def:
+        type: My::RandomString
+        properties:
+          param: {get_param: param}
+outputs:
+  val:
+    value: {get_attr: [random_group, val]}
+'''
+
+    nested_template_file = '''
+heat_template_version: 2013-05-23
+parameters:
+  param:
+    type: empty
+outputs:
+  val:
+    value: {get_param: param}
+'''
+
+    scenarios = [
+        ('string_empty', dict(
+            param='',
+            p_type='string',
+        )),
+        ('boolean_false', dict(
+            param=False,
+            p_type='boolean',
+        )),
+        ('number_zero', dict(
+            param=0,
+            p_type='number',
+        )),
+        ('comma_delimited_list', dict(
+            param=[],
+            p_type='comma_delimited_list',
+        )),
+        ('json_empty', dict(
+            param={},
+            p_type='json',
+        )),
+    ]
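+    # Each scenario runs as a separate test case (testscenarios-style,
+    # assuming the plugin's load_tests hook applies them): 'type: empty'
+    # in both templates is replaced with p_type and the matching "empty"
+    # value is passed as the 'param' parameter.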
+
+    def test_create_pass_zero_parameter(self):
+        templ = self.template.replace('type: empty',
+                                      'type: %s' % self.p_type)
+        n_t_f = self.nested_template_file.replace('type: empty',
+                                                  'type: %s' % self.p_type)
+        files = {'provider.yaml': n_t_f}
+        env = {'resource_registry':
+               {'My::RandomString': 'provider.yaml'}}
+        stack_identifier = self.stack_create(
+            template=templ,
+            files=files,
+            environment=env,
+            parameters={'param': self.param}
+        )
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(self.param, self._stack_output(stack, 'val')[0])
+
+
+class ResourceGroupAdoptTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do resource group adopt."""
+
+    main_template = '''
+heat_template_version: "2013-05-23"
+resources:
+  group1:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 2
+      resource_def:
+        type: OS::Heat::RandomString
+outputs:
+  test0:
+    value: {get_attr: [group1, resource.0.value]}
+  test1:
+    value: {get_attr: [group1, resource.1.value]}
+'''
+
+    def _yaml_to_json(self, yaml_templ):
+        return yaml.safe_load(yaml_templ)
+
+    def test_adopt(self):
+        data = {
+            "resources": {
+                "group1": {
+                    "status": "COMPLETE",
+                    "name": "group1",
+                    "resource_data": {},
+                    "metadata": {},
+                    "resource_id": "test-group1-id",
+                    "action": "CREATE",
+                    "type": "OS::Heat::ResourceGroup",
+                    "resources": {
+                        "0": {
+                            "status": "COMPLETE",
+                            "name": "0",
+                            "resource_data": {"value": "goopie"},
+                            "resource_id": "ID-0",
+                            "action": "CREATE",
+                            "type": "OS::Heat::RandomString",
+                            "metadata": {}
+                        },
+                        "1": {
+                            "status": "COMPLETE",
+                            "name": "1",
+                            "resource_data": {"value": "different"},
+                            "resource_id": "ID-1",
+                            "action": "CREATE",
+                            "type": "OS::Heat::RandomString",
+                            "metadata": {}
+                        }
+                    }
+                }
+            },
+            "environment": {"parameters": {}},
+            "template": yaml.safe_load(self.main_template)
+        }
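+        # Adopt the pre-existing resources described above rather than
+        # creating new ones; the outputs should reflect the 'value'
+        # resource_data supplied for each member.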
+        stack_identifier = self.stack_adopt(
+            adopt_data=json.dumps(data))
+
+        self.assert_resource_is_a_stack(stack_identifier, 'group1')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('goopie', self._stack_output(stack, 'test0'))
+        self.assertEqual('different', self._stack_output(stack, 'test1'))
+
+
+class ResourceGroupErrorResourceTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: "2013-05-23"
+resources:
+  group1:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 2
+      resource_def:
+        type: fail.yaml
+'''
+    nested_templ = '''
+heat_template_version: "2013-05-23"
+resources:
+  oops:
+    type: OS::Heat::TestResource
+    properties:
+      fail: true
+      wait_secs: 2
+'''
+
+    def test_fail(self):
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'fail.yaml': self.nested_templ},
+            expected_status='CREATE_FAILED',
+            enable_cleanup=False)
+        stack = self.client.stacks.get(stack_identifier)
+
+        self.assertEqual('CREATE_FAILED', stack.stack_status)
+        self.client.stacks.delete(stack_identifier)
+        self._wait_for_stack_status(
+            stack_identifier, 'DELETE_COMPLETE',
+            success_on_not_found=True)
+
+
+class ResourceGroupUpdatePolicyTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: '2015-04-30'
+resources:
+  random_group:
+    type: OS::Heat::ResourceGroup
+    update_policy:
+      rolling_update:
+        min_in_service: 1
+        max_batch_size: 2
+        pause_time: 1
+    properties:
+      count: 10
+      resource_def:
+        type: OS::Heat::TestResource
+        properties:
+          value: initial
+          update_replace: False
+'''
+
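+    # update_resource_group() compares physical resource IDs before and
+    # after an update to count how many group members were updated in
+    # place, newly created, or deleted under the rolling_update policy.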
+    def update_resource_group(self, update_template,
+                              updated, created, deleted):
+        stack_identifier = self.stack_create(template=self.template)
+        group_resources = self.list_group_resources(stack_identifier,
+                                                    'random_group',
+                                                    minimal=False)
+
+        init_names = [res.physical_resource_id for res in group_resources]
+
+        self.update_stack(stack_identifier, update_template)
+        group_resources = self.list_group_resources(stack_identifier,
+                                                    'random_group',
+                                                    minimal=False)
+
+        updt_names = [res.physical_resource_id for res in group_resources]
+
+        matched_names = set(updt_names) & set(init_names)
+
+        self.assertEqual(updated, len(matched_names))
+
+        self.assertEqual(created, len(set(updt_names) - set(init_names)))
+
+        self.assertEqual(deleted, len(set(init_names) - set(updt_names)))
+
+    def test_resource_group_update(self):
+        """Test rolling update with no conflict.
+
+        Simple rolling update with no conflict in batch size
+        and minimum instances in service.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '1'
+        policy['max_batch_size'] = '3'
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=10,
+                                   created=0,
+                                   deleted=0)
+
+    def test_resource_group_update_replace(self):
+        """Test rolling update(replace)with no conflict.
+
+        Simple rolling update replace with no conflict in batch size
+        and minimum instances in service.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '1'
+        policy['max_batch_size'] = '3'
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+        res_def['properties']['update_replace'] = True
+
+        self.update_resource_group(updt_template,
+                                   updated=0,
+                                   created=10,
+                                   deleted=10)
+
+    def test_resource_group_update_scaledown(self):
+        """Test rolling update with scaledown.
+
+        Simple rolling update with reduced size.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '1'
+        policy['max_batch_size'] = '3'
+        grp['properties']['count'] = 6
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=6,
+                                   created=0,
+                                   deleted=4)
+
+    def test_resource_group_update_scaleup(self):
+        """Test rolling update with scaleup.
+
+        Simple rolling update with increased size.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '1'
+        policy['max_batch_size'] = '3'
+        grp['properties']['count'] = 12
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=10,
+                                   created=2,
+                                   deleted=0)
+
+    def test_resource_group_update_adjusted(self):
+        """Test rolling update with enough available resources
+
+        Update  with capacity adjustment with enough resources.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '8'
+        policy['max_batch_size'] = '4'
+        grp['properties']['count'] = 6
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=6,
+                                   created=0,
+                                   deleted=4)
+
+    def test_resource_group_update_with_adjusted_capacity(self):
+        """Test rolling update with capacity adjustment.
+
+        Rolling update with capacity adjustment due to conflict in
+        batch size and minimum instances in service.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '8'
+        policy['max_batch_size'] = '4'
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=10,
+                                   created=0,
+                                   deleted=0)
+
+    def test_resource_group_update_huge_batch_size(self):
+        """Test rolling update with huge batch size.
+
+        Rolling update with a huge batch size (more than the
+        current group size).
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '0'
+        policy['max_batch_size'] = '20'
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+        self.update_resource_group(updt_template,
+                                   updated=10,
+                                   created=0,
+                                   deleted=0)
+
+    def test_resource_group_update_huge_min_in_service(self):
+        """Test rolling update with huge minimum capacity.
+
+        Rolling update with a huge minimum number of instances
+        in service.
+        """
+        updt_template = yaml.safe_load(copy.deepcopy(self.template))
+        grp = updt_template['resources']['random_group']
+        policy = grp['update_policy']['rolling_update']
+        policy['min_in_service'] = '20'
+        policy['max_batch_size'] = '1'
+        res_def = grp['properties']['resource_def']
+        res_def['properties']['value'] = 'updated'
+
+        self.update_resource_group(updt_template,
+                                   updated=10,
+                                   created=0,
+                                   deleted=0)
diff --git a/heat_tempest_plugin/tests/functional/test_resources_list.py b/heat_tempest_plugin/tests/functional/test_resources_list.py
new file mode 100644
index 0000000..0a182ad
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_resources_list.py
@@ -0,0 +1,50 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+test_template_depend = {
+    'heat_template_version': '2013-05-23',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+            }
+        },
+        'test2': {
+            'type': 'OS::Heat::TestResource',
+            'depends_on': ['test1'],
+            'properties': {
+                'value': 'Test2',
+            }
+        }
+    }
+}
+
+
+class ResourcesList(functional_base.FunctionalTestsBase):
+
+    def test_filtering_with_depend(self):
+        stack_identifier = self.stack_create(template=test_template_depend)
+        [test2] = self.client.resources.list(stack_identifier,
+                                             filters={'name': 'test2'})
+
+        self.assertEqual('CREATE_COMPLETE', test2.resource_status)
+
+    def test_required_by(self):
+        stack_identifier = self.stack_create(template=test_template_depend)
+        [test1] = self.client.resources.list(stack_identifier,
+                                             filters={'name': 'test1'})
+
+        self.assertEqual(['test2'], test1.required_by)
diff --git a/heat_tempest_plugin/tests/functional/test_simultaneous_update.py b/heat_tempest_plugin/tests/functional/test_simultaneous_update.py
new file mode 100644
index 0000000..0c562c0
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_simultaneous_update.py
@@ -0,0 +1,93 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import copy
+import time
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+_test_template = {
+    'heat_template_version': 'pike',
+    'description': 'Test template to create two resources.',
+    'resources': {
+        'test1': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0,
+            }
+        },
+        'test2': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': 'Test1',
+                'fail': False,
+                'update_replace': False,
+                'wait_secs': 0,
+                'action_wait_secs': {
+                    'create': 30,
+                }
+            },
+            'depends_on': ['test1']
+        }
+    }
+}
+
+
+def get_templates(fail=False, delay_s=None):
+    before = copy.deepcopy(_test_template)
+
+    after = copy.deepcopy(before)
+    for r in after['resources'].values():
+        r['properties']['value'] = 'Test2'
+
+    before_props = before['resources']['test2']['properties']
+    before_props['fail'] = fail
+    if delay_s is not None:
+        before_props['action_wait_secs']['create'] = delay_s
+
+    return before, after
+
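+# 'before' optionally makes test2 fail or slows its create; 'after' changes
+# 'value' on both resources so the update has real work to do. Each test
+# starts a create and issues an update while that create is still in
+# progress, which relies on convergence (hence requires_convergence).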
+
+class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
+
+    @test.requires_convergence
+    def test_retrigger_success(self):
+        before, after = get_templates()
+        stack_id = self.stack_create(template=before,
+                                     expected_status='CREATE_IN_PROGRESS')
+        time.sleep(10)
+
+        self.update_stack(stack_id, after)
+
+    @test.requires_convergence
+    def test_retrigger_failure(self):
+        before, after = get_templates(fail=True)
+        stack_id = self.stack_create(template=before,
+                                     expected_status='CREATE_IN_PROGRESS')
+        time.sleep(10)
+
+        self.update_stack(stack_id, after)
+
+    @test.requires_convergence
+    def test_retrigger_timeout(self):
+        before, after = get_templates(delay_s=70)
+        stack_id = self.stack_create(template=before,
+                                     expected_status='CREATE_IN_PROGRESS',
+                                     timeout=1)
+        time.sleep(50)
+
+        self.update_stack(stack_id, after)
diff --git a/heat_tempest_plugin/tests/functional/test_snapshot_restore.py b/heat_tempest_plugin/tests/functional/test_snapshot_restore.py
new file mode 100644
index 0000000..3616c8f
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_snapshot_restore.py
@@ -0,0 +1,76 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackSnapshotRestoreTest(functional_base.FunctionalTestsBase):
+
+    def setUp(self):
+        super(StackSnapshotRestoreTest, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No image configured to test")
+
+        if not self.conf.minimal_instance_type:
+            raise self.skipException(
+                "No minimal_instance_type configured to test")
+
+        self.assign_keypair()
+
+    def test_stack_snapshot_restore(self):
+        template = '''
+heat_template_version: ocata
+parameters:
+  keyname:
+    type: string
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+resources:
+  my_port:
+    type: OS::Neutron::Port
+    properties:
+      network: {get_param: network}
+  my_server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      key_name: {get_param: keyname}
+      networks: [{port: {get_resource: my_port} }]
+
+'''
+
+        def get_server_image(server_id):
+            server = self.compute_client.servers.get(server_id)
+            return server.image['id']
+
+        parameters = {'keyname': self.keypair_name,
+                      'flavor': self.conf.minimal_instance_type,
+                      'image': self.conf.minimal_image_ref,
+                      'network': self.conf.fixed_network_name}
+        stack_identifier = self.stack_create(template=template,
+                                             parameters=parameters)
+        server_resource = self.client.resources.get(
+            stack_identifier, 'my_server')
+        server_id = server_resource.physical_resource_id
+        prev_image_id = get_server_image(server_id)
+
+        # Do snapshot and restore
+        snapshot_id = self.stack_snapshot(stack_identifier)
+        self.stack_restore(stack_identifier, snapshot_id)
+
+        self.assertNotEqual(prev_image_id, get_server_image(server_id))
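+        # Restoring the snapshot rebuilds the server from the snapshot
+        # image, so its image ID should no longer match the original one.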
diff --git a/heat_tempest_plugin/tests/functional/test_software_config.py b/heat_tempest_plugin/tests/functional/test_software_config.py
new file mode 100644
index 0000000..2f31b29
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_software_config.py
@@ -0,0 +1,283 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import os
+import requests
+import subprocess
+import sys
+import tempfile
+import time
+import yaml
+
+from oslo_utils import timeutils
+
+from heat_tempest_plugin.common import exceptions
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ParallelDeploymentsTest(functional_base.FunctionalTestsBase):
+    server_template = '''
+heat_template_version: "2013-05-23"
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+resources:
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      user_data_format: SOFTWARE_CONFIG
+      networks: [{network: {get_param: network}}]
+outputs:
+  server:
+    value: {get_resource: server}
+'''
+
+    config_template = '''
+heat_template_version: "2013-05-23"
+parameters:
+  server:
+    type: string
+resources:
+  config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+'''
+
+    deployment_snippet = '''
+type: OS::Heat::SoftwareDeployments
+properties:
+  config: {get_resource: config}
+  servers: {'0': {get_param: server}}
+'''
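+    # deploy_config() below fills in the (deliberately large) config
+    # properties and appends one copy of deployment_snippet per requested
+    # deployment before creating the stack.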
+
+    enable_cleanup = True
+
+    def test_deployments_metadata(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'network': self.conf.fixed_network_name,
+                 'image': self.conf.minimal_image_ref}
+        stack_identifier = self.stack_create(
+            parameters=parms,
+            template=self.server_template,
+            enable_cleanup=self.enable_cleanup)
+        server_stack = self.client.stacks.get(stack_identifier)
+        server = server_stack.outputs[0]['output_value']
+
+        config_stacks = []
+        # create two batches of config stacks: 2 stacks with 5 deployments
+        # each, then 3 stacks with 3 deployments each
+        deploy_count = 0
+        deploy_count = self.deploy_many_configs(
+            stack_identifier,
+            server,
+            config_stacks,
+            2,
+            5,
+            deploy_count)
+        self.deploy_many_configs(
+            stack_identifier,
+            server,
+            config_stacks,
+            3,
+            3,
+            deploy_count)
+
+        self.signal_deployments(stack_identifier)
+        for config_stack in config_stacks:
+            self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')
+
+    def test_deployments_timeout_failed(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'network': self.conf.fixed_network_name,
+                 'image': self.conf.minimal_image_ref}
+        stack_identifier = self.stack_create(
+            parameters=parms,
+            template=self.server_template,
+            enable_cleanup=self.enable_cleanup)
+        server_stack = self.client.stacks.get(stack_identifier)
+        server = server_stack.outputs[0]['output_value']
+        config_stack = self.deploy_config(server, 3, 1)
+        self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
+        kwargs = {'server_id': server}
+
+        def check_deployment_status():
+            sd_list = self.client.software_deployments.list(**kwargs)
+            for sd in sd_list:
+                if sd.status != 'FAILED':
+                    return False
+            return True
+
+        self.assertTrue(test.call_until_true(
+            20, 0, check_deployment_status))
+
+    def deploy_many_configs(self, stack, server, config_stacks,
+                            stack_count, deploys_per_stack,
+                            deploy_count_start):
+        for a in range(stack_count):
+            config_stacks.append(
+                self.deploy_config(server, deploys_per_stack))
+
+        new_count = deploy_count_start + stack_count * deploys_per_stack
+        self.wait_for_deploy_metadata_set(stack, new_count)
+        return new_count
+
+    def deploy_config(self, server, deploy_count, timeout=None):
+        parms = {'server': server}
+        template = yaml.safe_load(self.config_template)
+        resources = template['resources']
+        resources['config']['properties'] = {'config': 'x' * 10000}
+        for a in range(deploy_count):
+            resources['dep_%s' % a] = yaml.safe_load(self.deployment_snippet)
+        return self.stack_create(
+            parameters=parms,
+            template=template,
+            enable_cleanup=self.enable_cleanup,
+            expected_status=None,
+            timeout=timeout)
+
+    def wait_for_deploy_metadata_set(self, stack, deploy_count):
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            server_metadata = self.client.resources.metadata(
+                stack, 'server')
+            if len(server_metadata['deployments']) == deploy_count:
+                return
+            time.sleep(build_interval)
+
+        message = ('Deployment resources failed to be created within '
+                   'the required time (%s s).' %
+                   (build_timeout))
+        raise exceptions.TimeoutException(message)
+
+    def signal_deployments(self, stack_identifier):
+        server_metadata = self.client.resources.metadata(
+            stack_identifier, 'server')
+        for dep in server_metadata['deployments']:
+            iv = dict((i['name'], i['value']) for i in dep['inputs'])
+            sigurl = iv.get('deploy_signal_id')
+            requests.post(sigurl, data='{}',
+                          headers={'content-type': 'application/json'},
+                          verify=self.verify_cert)
+
+
+class ZaqarSignalTransportTest(functional_base.FunctionalTestsBase):
+    server_template = '''
+heat_template_version: "2013-05-23"
+
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+
+resources:
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      user_data_format: SOFTWARE_CONFIG
+      software_config_transport: ZAQAR_MESSAGE
+      networks: [{network: {get_param: network}}]
+  config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      config: echo 'foo'
+  deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: config}
+      server: {get_resource: server}
+      signal_transport: ZAQAR_SIGNAL
+
+outputs:
+  data:
+    value: {get_attr: [deployment, deploy_stdout]}
+'''
+
+    conf_template = '''
+[zaqar]
+user_id = %(user_id)s
+password = %(password)s
+project_id = %(project_id)s
+auth_url = %(auth_url)s
+queue_id = %(queue_id)s
+    '''
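+    # test_signal_queues writes this os-collect-config configuration to a
+    # temporary file, runs os-collect-config once against the Zaqar queue,
+    # then feeds the collected deployment data to the configured
+    # heat-config-notify script to signal the deployment back to Heat.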
+
+    def test_signal_queues(self):
+        parms = {'flavor': self.conf.minimal_instance_type,
+                 'network': self.conf.fixed_network_name,
+                 'image': self.conf.minimal_image_ref}
+        stack_identifier = self.stack_create(
+            parameters=parms,
+            template=self.server_template,
+            expected_status=None)
+        metadata = self.wait_for_deploy_metadata_set(stack_identifier)
+        config = metadata['os-collect-config']['zaqar']
+        conf_content = self.conf_template % config
+        fd, temp_path = tempfile.mkstemp()
+        os.write(fd, conf_content.encode('utf-8'))
+        os.close(fd)
+        cmd = ['os-collect-config', '--one-time',
+               '--config-file=%s' % temp_path, 'zaqar']
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+        stdout_value = proc.communicate()[0]
+        data = json.loads(stdout_value.decode('utf-8'))
+        self.assertEqual(config, data['zaqar']['os-collect-config']['zaqar'])
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+        stdout_value = proc.communicate()[0]
+        data = json.loads(stdout_value.decode('utf-8'))
+
+        fd, temp_path = tempfile.mkstemp()
+        os.write(fd,
+                 json.dumps(data['zaqar']['deployments'][0]).encode('utf-8'))
+        os.close(fd)
+        cmd = [sys.executable, self.conf.heat_config_notify_script, temp_path]
+        proc = subprocess.Popen(cmd,
+                                stderr=subprocess.PIPE,
+                                stdin=subprocess.PIPE)
+        proc.communicate(
+            json.dumps({'deploy_stdout': 'here!'}).encode('utf-8'))
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('here!', stack.outputs[0]['output_value'])
+
+    def wait_for_deploy_metadata_set(self, stack):
+        build_timeout = self.conf.build_timeout
+        build_interval = self.conf.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            server_metadata = self.client.resources.metadata(
+                stack, 'server')
+            if server_metadata.get('deployments'):
+                return server_metadata
+            time.sleep(build_interval)
+
+        message = ('Deployment resources failed to be created within '
+                   'the required time (%s s).' %
+                   (build_timeout))
+        raise exceptions.TimeoutException(message)
diff --git a/heat_tempest_plugin/tests/functional/test_software_deployment_group.py b/heat_tempest_plugin/tests/functional/test_software_deployment_group.py
new file mode 100644
index 0000000..a298419
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_software_deployment_group.py
@@ -0,0 +1,150 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class SoftwareDeploymentGroupTest(functional_base.FunctionalTestsBase):
+    sd_template = '''
+heat_template_version: 2016-10-14
+
+parameters:
+  input:
+    type: string
+    default: foo_input
+
+resources:
+  config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+      - name: foo
+
+  deployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      config: {get_resource: config}
+      input_values:
+        foo: {get_param: input}
+      servers:
+        '0': dummy0
+        '1': dummy1
+        '2': dummy2
+        '3': dummy3
+'''
+
+    sd_template_with_upd_policy = '''
+heat_template_version: 2016-10-14
+
+parameters:
+  input:
+    type: string
+    default: foo_input
+
+resources:
+  config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+      - name: foo
+
+  deployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    update_policy:
+      rolling_update:
+        max_batch_size: 2
+        pause_time: 1
+    properties:
+      config: {get_resource: config}
+      input_values:
+        foo: {get_param: input}
+      servers:
+        '0': dummy0
+        '1': dummy1
+        '2': dummy2
+        '3': dummy3
+'''
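+    # The 'servers' maps use dummy IDs: no real servers are booted, and
+    # the tests signal the deployment resources themselves (via
+    # signal_required=True when waiting for the stack status).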
+    enable_cleanup = True
+
+    def deployment_crud(self, template):
+        stack_identifier = self.stack_create(
+            template=template,
+            enable_cleanup=self.enable_cleanup,
+            expected_status='CREATE_IN_PROGRESS')
+        self._wait_for_resource_status(
+            stack_identifier, 'deployment', 'CREATE_IN_PROGRESS')
+
+        # Wait for all deployment resources to become IN_PROGRESS, since only
+        # IN_PROGRESS resources get signalled
+        nested_identifier = self.assert_resource_is_a_stack(
+            stack_identifier, 'deployment')
+        self._wait_for_stack_status(nested_identifier, 'CREATE_IN_PROGRESS')
+        self._wait_for_all_resource_status(nested_identifier,
+                                           'CREATE_IN_PROGRESS')
+        group_resources = self.list_group_resources(
+            stack_identifier, 'deployment', minimal=False)
+
+        self.assertEqual(4, len(group_resources))
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE',
+                                    signal_required=True,
+                                    resources_to_signal=group_resources)
+
+        created_group_resources = self.list_group_resources(
+            stack_identifier, 'deployment', minimal=False)
+        self.assertEqual(4, len(created_group_resources))
+        self.check_input_values(created_group_resources, 'foo', 'foo_input')
+
+        self.update_stack(stack_identifier,
+                          template=template,
+                          environment={'parameters': {'input': 'input2'}},
+                          expected_status='UPDATE_IN_PROGRESS')
+        nested_identifier = self.assert_resource_is_a_stack(
+            stack_identifier, 'deployment')
+        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE',
+                                    signal_required=True,
+                                    resources_to_signal=group_resources)
+
+        self.check_input_values(created_group_resources, 'foo', 'input2')
+
+        # We explicitly test delete here, rather than relying on cleanup,
+        # and check that the nested stack is gone
+        self._stack_delete(stack_identifier)
+        self._wait_for_stack_status(
+            nested_identifier, 'DELETE_COMPLETE',
+            success_on_not_found=True)
+
+    def test_deployment_crud(self):
+        self.deployment_crud(self.sd_template)
+
+    def test_deployment_crud_with_rolling_update(self):
+        self.deployment_crud(self.sd_template_with_upd_policy)
+
+    def test_deployments_create_delete_in_progress(self):
+        stack_identifier = self.stack_create(
+            template=self.sd_template,
+            enable_cleanup=self.enable_cleanup,
+            expected_status='CREATE_IN_PROGRESS')
+        self._wait_for_resource_status(
+            stack_identifier, 'deployment', 'CREATE_IN_PROGRESS')
+        nested_identifier = self.assert_resource_is_a_stack(
+            stack_identifier, 'deployment')
+        group_resources = self.list_group_resources(
+            stack_identifier, 'deployment', minimal=False)
+
+        self.assertEqual(4, len(group_resources))
+        # Now test delete while the stacks are still IN_PROGRESS
+        self._stack_delete(stack_identifier)
+        self._wait_for_stack_status(
+            nested_identifier, 'DELETE_COMPLETE',
+            success_on_not_found=True)
diff --git a/heat_tempest_plugin/tests/functional/test_stack_events.py b/heat_tempest_plugin/tests/functional/test_stack_events.py
new file mode 100644
index 0000000..0db852e
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_stack_events.py
@@ -0,0 +1,109 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackEventsTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2014-10-16
+parameters:
+resources:
+  test_resource:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'test1'
+      fail: False
+      update_replace: False
+      wait_secs: 0
+outputs:
+  resource_id:
+    description: 'ID of resource'
+    value: { get_resource: test_resource }
+'''
+
+    def _verify_event_fields(self, event, event_characteristics):
+        self.assertIsNotNone(event_characteristics)
+        self.assertIsNotNone(event.event_time)
+        self.assertIsNotNone(event.links)
+        self.assertIsNotNone(event.logical_resource_id)
+        self.assertIsNotNone(event.resource_status)
+        self.assertIn(event.resource_status, event_characteristics[1])
+        self.assertIsNotNone(event.resource_status_reason)
+        self.assertIsNotNone(event.id)
+
+    def test_event(self):
+        parameters = {}
+
+        test_stack_name = self._stack_rand_name()
+        stack_identifier = self.stack_create(
+            stack_name=test_stack_name,
+            template=self.template,
+            parameters=parameters
+        )
+
+        expected_status = ['CREATE_IN_PROGRESS', 'CREATE_COMPLETE']
+        event_characteristics = {
+            test_stack_name: ('OS::Heat::Stack', expected_status),
+            'test_resource': ('OS::Heat::TestResource', expected_status)}
+
+        # List stack events
+        # API: GET /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/events
+        stack_events = self.client.events.list(stack_identifier)
+
+        for stack_event in stack_events:
+            # Key on an expected/valid resource name
+            self._verify_event_fields(
+                stack_event,
+                event_characteristics[stack_event.resource_name])
+
+            # Test the event filtering API based on this resource_name
+            # /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/events
+            resource_events = self.client.events.list(
+                stack_identifier,
+                stack_event.resource_name)
+
+            # Resource events are a subset of the original stack event list
+            self.assertLess(len(resource_events), len(stack_events))
+
+            # Get the event details for each resource event
+            for resource_event in resource_events:
+                # A resource_event should be in the original stack event list
+                self.assertIn(resource_event, stack_events)
+                # Given a filtered list, the resource names should be identical
+                self.assertEqual(
+                    resource_event.resource_name,
+                    stack_event.resource_name)
+                # Verify all fields, keying off the resource_name
+                self._verify_event_fields(
+                    resource_event,
+                    event_characteristics[resource_event.resource_name])
+
+                # Exercise the event details API
+                # /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/events/{event_id}
+                event_details = self.client.events.get(
+                    stack_identifier,
+                    resource_event.resource_name,
+                    resource_event.id)
+                self._verify_event_fields(
+                    event_details,
+                    event_characteristics[event_details.resource_name])
+                # The names should be identical to the non-detailed event
+                self.assertEqual(
+                    resource_event.resource_name,
+                    event_details.resource_name)
+                # Verify the extra field in the detail results
+                self.assertIsNotNone(event_details.resource_type)
+                self.assertEqual(
+                    event_characteristics[event_details.resource_name][0],
+                    event_details.resource_type)
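
For reference, the test above layers three event API levels: the stack-wide
event list, the per-resource event list, and the single-event detail call.
A minimal sketch of that layering, assuming an authenticated heatclient
`client` and an existing `stack_identifier` exactly as in the test:

    # Illustrative sketch only; `client` and `stack_identifier` are assumed.
    for ev in client.events.list(stack_identifier):
        # Filter the events down to a single resource ...
        for rev in client.events.list(stack_identifier, ev.resource_name):
            # ... then fetch the detailed view, which adds resource_type.
            detail = client.events.get(stack_identifier, rev.resource_name,
                                       rev.id)
            print(detail.resource_type, detail.resource_status)
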
diff --git a/heat_tempest_plugin/tests/functional/test_stack_outputs.py b/heat_tempest_plugin/tests/functional/test_stack_outputs.py
new file mode 100644
index 0000000..f629a97
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_stack_outputs.py
@@ -0,0 +1,155 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackOutputsTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2015-10-15
+resources:
+  test_resource_a:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'a'
+  test_resource_b:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'b'
+outputs:
+  resource_output_a:
+    description: 'Output of resource a'
+    value: { get_attr: [test_resource_a, output] }
+  resource_output_b:
+    description: 'Output of resource b'
+    value: { get_attr: [test_resource_b, output] }
+'''
+
+    def test_outputs(self):
+        stack_identifier = self.stack_create(
+            template=self.template
+        )
+        expected_list = [{u'output_key': u'resource_output_a',
+                          u'description': u'Output of resource a'},
+                         {u'output_key': u'resource_output_b',
+                          u'description': u'Output of resource b'}]
+
+        actual_list = self.client.stacks.output_list(
+            stack_identifier)['outputs']
+        sorted_actual_list = sorted(actual_list, key=lambda x: x['output_key'])
+        self.assertEqual(expected_list, sorted_actual_list)
+
+        expected_output_a = {
+            u'output_value': u'a', u'output_key': u'resource_output_a',
+            u'description': u'Output of resource a'}
+        expected_output_b = {
+            u'output_value': u'b', u'output_key': u'resource_output_b',
+            u'description': u'Output of resource b'}
+        actual_output_a = self.client.stacks.output_show(
+            stack_identifier, 'resource_output_a')['output']
+        actual_output_b = self.client.stacks.output_show(
+            stack_identifier, 'resource_output_b')['output']
+        self.assertEqual(expected_output_a, actual_output_a)
+        self.assertEqual(expected_output_b, actual_output_b)
+
+    before_template = '''
+heat_template_version: 2015-10-15
+resources:
+  test_resource_a:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'foo'
+outputs:
+'''
+
+    after_template = '''
+heat_template_version: 2015-10-15
+resources:
+  test_resource_a:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'foo'
+  test_resource_b:
+    type: OS::Heat::TestResource
+    properties:
+      value: {get_attr: [test_resource_a, output]}
+outputs:
+  output_value:
+    description: 'Output of resource b'
+    value: {get_attr: [test_resource_b, output]}
+'''
+
+    def test_outputs_update_new_resource(self):
+        stack_identifier = self.stack_create(template=self.before_template)
+        self.update_stack(stack_identifier, template=self.after_template)
+
+        expected_output_value = {
+            u'output_value': u'foo', u'output_key': u'output_value',
+            u'description': u'Output of resource b'}
+        actual_output_value = self.client.stacks.output_show(
+            stack_identifier, 'output_value')['output']
+        self.assertEqual(expected_output_value, actual_output_value)
+
+    nested_template = '''
+heat_template_version: 2015-10-15
+resources:
+  parent:
+    type: 1.yaml
+outputs:
+  resource_output_a:
+    value: { get_attr: [parent, resource_output_a] }
+    description: 'parent a'
+  resource_output_b:
+    value: { get_attr: [parent, resource_output_b] }
+    description: 'parent b'
+    '''
+    error_template = '''
+heat_template_version: 2015-10-15
+resources:
+  test_resource_a:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'a'
+  test_resource_b:
+    type: OS::Heat::TestResource
+    properties:
+      value: 'b'
+outputs:
+  resource_output_a:
+    description: 'Output of resource a'
+    value: { get_attr: [test_resource_a, output] }
+  resource_output_b:
+    description: 'Output of resource b'
+    value: { get_param: foo }
+'''
+
+    def test_output_error_nested(self):
+        stack_identifier = self.stack_create(
+            template=self.nested_template,
+            files={'1.yaml': self.error_template}
+        )
+        self.update_stack(stack_identifier, template=self.nested_template,
+                          files={'1.yaml': self.error_template})
+        expected_list = [{u'output_key': u'resource_output_a',
+                          u'output_value': u'a',
+                          u'description': u'parent a'},
+                         {u'output_key': u'resource_output_b',
+                          u'output_value': None,
+                          u'output_error': u'Error in parent output '
+                                           u'resource_output_b: The Parameter'
+                                           u' (foo) was not provided.',
+                          u'description': u'parent b'}]
+
+        actual_list = self.client.stacks.get(stack_identifier).outputs
+        sorted_actual_list = sorted(actual_list, key=lambda x: x['output_key'])
+        self.assertEqual(expected_list, sorted_actual_list)
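
The outputs tests above rely on the split between output_list, which returns
only keys and descriptions, and output_show, which resolves a single output's
value (or its output_error). A minimal sketch, assuming `client` and
`stack_identifier` as in the tests:

    # Illustrative sketch only; `client` and `stack_identifier` are assumed.
    for out in client.stacks.output_list(stack_identifier)['outputs']:
        detail = client.stacks.output_show(stack_identifier,
                                           out['output_key'])['output']
        # output_show adds output_value (and output_error when resolution
        # fails, as in the nested error test above).
        print(out['output_key'], detail.get('output_value'),
              detail.get('output_error'))
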
diff --git a/heat_tempest_plugin/tests/functional/test_stack_tags.py b/heat_tempest_plugin/tests/functional/test_stack_tags.py
new file mode 100644
index 0000000..a270950
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_stack_tags.py
@@ -0,0 +1,77 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackTagTest(functional_base.FunctionalTestsBase):
+
+    template = '''
+heat_template_version: 2014-10-16
+description:
+  foo
+parameters:
+  input:
+    type: string
+    default: test
+resources:
+  not-used:
+    type: OS::Heat::TestResource
+    properties:
+      wait_secs: 1
+      value: {get_param: input}
+'''
+
+    def test_stack_tag(self):
+        # Stack create with stack tags
+        tags = 'foo,bar'
+        stack_identifier = self.stack_create(
+            template=self.template,
+            tags=tags
+        )
+
+        # Ensure property tag is populated and matches given tags
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(['foo', 'bar'], stack.tags)
+
+        # Update tags
+        updated_tags = 'tag1,tag2'
+        self.update_stack(
+            stack_identifier,
+            template=self.template,
+            tags=updated_tags,
+            parameters={'input': 'next'})
+
+        # Ensure property tag is populated and matches updated tags
+        updated_stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(['tag1', 'tag2'], updated_stack.tags)
+
+        # Delete tags
+        self.update_stack(
+            stack_identifier,
+            template=self.template,
+            parameters={'input': 'none'}
+        )
+
+        # Ensure property tag is not populated
+        empty_tags_stack = self.client.stacks.get(stack_identifier)
+        self.assertIsNone(empty_tags_stack.tags)
+
+    def test_hidden_stack(self):
+        # Stack create with hidden stack tag
+        tags = 'foo,hidden'
+        self.stack_create(
+            template=self.template,
+            tags=tags)
+        # Ensure the hidden stack is not returned by the stack list
+        for stack in self.client.stacks.list():
+            self.assertNotIn('hidden', stack.tags, "Hidden stack can be seen")
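
Tags are supplied as a comma-separated string on create/update and come back
as a list on the stack object; stacks carrying the special 'hidden' tag are
omitted from the default stack list, which is what the test above asserts.
A minimal sketch, assuming an authenticated heatclient `client` and any valid
template string `template` (the tests' stack_create helper is assumed to pass
the tags argument through to this call):

    # Illustrative sketch only; `client` and `template` are assumed.
    client.stacks.create(stack_name='tagged-stack', template=template,
                         tags='foo,bar')
    stack = client.stacks.get('tagged-stack')
    print(stack.tags)  # ['foo', 'bar']
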
diff --git a/heat_tempest_plugin/tests/functional/test_swiftsignal_update.py b/heat_tempest_plugin/tests/functional/test_swiftsignal_update.py
new file mode 100644
index 0000000..604e592
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_swiftsignal_update.py
@@ -0,0 +1,46 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+test_template = '''
+heat_template_version: 2014-10-16
+
+resources:
+  signal_handle:
+    type: "OS::Heat::SwiftSignalHandle"
+
+outputs:
+  signal_curl:
+    value: { get_attr: ['signal_handle', 'curl_cli'] }
+    description: Swift signal cURL
+
+  signal_url:
+    value: { get_attr: ['signal_handle', 'endpoint'] }
+    description: Swift signal URL
+'''
+
+
+class SwiftSignalHandleUpdateTest(functional_base.FunctionalTestsBase):
+
+    def test_stack_update_same_template_replace_no_url(self):
+        if not self.is_service_available('object-store'):
+            self.skipTest('object-store service not available, skipping')
+        stack_identifier = self.stack_create(template=test_template)
+        stack = self.client.stacks.get(stack_identifier)
+        orig_url = self._stack_output(stack, 'signal_url')
+        orig_curl = self._stack_output(stack, 'signal_curl')
+        self.update_stack(stack_identifier, test_template)
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(orig_url, self._stack_output(stack, 'signal_url'))
+        self.assertEqual(orig_curl, self._stack_output(stack, 'signal_curl'))
diff --git a/heat_tempest_plugin/tests/functional/test_template_resource.py b/heat_tempest_plugin/tests/functional/test_template_resource.py
new file mode 100644
index 0000000..a7cc808
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_template_resource.py
@@ -0,0 +1,982 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from heatclient import exc as heat_exceptions
+import six
+import yaml
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class TemplateResourceTest(functional_base.FunctionalTestsBase):
+    """Prove that we can use the registry in a nested provider."""
+
+    template = '''
+heat_template_version: 2013-05-23
+resources:
+  secret1:
+    type: OS::Heat::RandomString
+outputs:
+  secret-out:
+    value: { get_attr: [secret1, value] }
+'''
+    nested_templ = '''
+heat_template_version: 2013-05-23
+resources:
+  secret2:
+    type: OS::Heat::RandomString
+outputs:
+  value:
+    value: { get_attr: [secret2, value] }
+'''
+
+    env_templ = '''
+resource_registry:
+  "OS::Heat::RandomString": nested.yaml
+'''
+
+    def test_nested_env(self):
+        main_templ = '''
+heat_template_version: 2013-05-23
+resources:
+  secret1:
+    type: My::NestedSecret
+outputs:
+  secret-out:
+    value: { get_attr: [secret1, value] }
+'''
+
+        nested_templ = '''
+heat_template_version: 2013-05-23
+resources:
+  secret2:
+    type: My::Secret
+outputs:
+  value:
+    value: { get_attr: [secret2, value] }
+'''
+
+        env_templ = '''
+resource_registry:
+  "My::Secret": "OS::Heat::RandomString"
+  "My::NestedSecret": nested.yaml
+'''
+
+        stack_identifier = self.stack_create(
+            template=main_templ,
+            files={'nested.yaml': nested_templ},
+            environment=env_templ)
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'secret1')
+        # prove that resource.parent_resource is populated.
+        sec2 = self.client.resources.get(nested_ident, 'secret2')
+        self.assertEqual('secret1', sec2.parent_resource)
+
+    def test_no_infinite_recursion(self):
+        """Prove that we can override a python resource.
+
+        And use that resource within the template resource.
+        """
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'nested.yaml': self.nested_templ},
+            environment=self.env_templ)
+        self.assert_resource_is_a_stack(stack_identifier, 'secret1')
+
+    def test_nested_stack_delete_then_delete_parent_stack(self):
+        """Check the robustness of stack deletion.
+
+        This tests that if you manually delete a nested
+        stack, the parent stack is still deletable.
+        """
+        # disable cleanup so we can call _stack_delete() directly.
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'nested.yaml': self.nested_templ},
+            environment=self.env_templ,
+            enable_cleanup=False)
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'secret1')
+
+        self._stack_delete(nested_ident)
+        self._stack_delete(stack_identifier)
+
+    def test_change_in_file_path(self):
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'nested.yaml': self.nested_templ},
+            environment=self.env_templ)
+        stack = self.client.stacks.get(stack_identifier)
+        secret_out1 = self._stack_output(stack, 'secret-out')
+
+        nested_templ_2 = '''
+heat_template_version: 2013-05-23
+resources:
+  secret2:
+    type: OS::Heat::RandomString
+outputs:
+  value:
+    value: freddy
+'''
+        env_templ_2 = '''
+resource_registry:
+  "OS::Heat::RandomString": new/nested.yaml
+'''
+        self.update_stack(stack_identifier,
+                          template=self.template,
+                          files={'new/nested.yaml': nested_templ_2},
+                          environment=env_templ_2)
+        stack = self.client.stacks.get(stack_identifier)
+        secret_out2 = self._stack_output(stack, 'secret-out')
+        self.assertNotEqual(secret_out1, secret_out2)
+        self.assertEqual('freddy', secret_out2)
+
+
+class NestedAttributesTest(functional_base.FunctionalTestsBase):
+    """Prove that we can use the template resource references."""
+
+    main_templ = '''
+heat_template_version: 2014-10-16
+resources:
+  secret2:
+    type: My::NestedSecret
+outputs:
+  old_way:
+    value: { get_attr: [secret2, nested_str]}
+  test_attr1:
+    value: { get_attr: [secret2, resource.secret1, value]}
+  test_attr2:
+    value: { get_attr: [secret2, resource.secret1.value]}
+  test_ref:
+    value: { get_resource: secret2 }
+'''
+
+    env_templ = '''
+resource_registry:
+  "My::NestedSecret": nested.yaml
+'''
+
+    def test_stack_ref(self):
+        nested_templ = '''
+heat_template_version: 2014-10-16
+resources:
+  secret1:
+    type: OS::Heat::RandomString
+outputs:
+  nested_str:
+    value: {get_attr: [secret1, value]}
+'''
+        stack_identifier = self.stack_create(
+            template=self.main_templ,
+            files={'nested.yaml': nested_templ},
+            environment=self.env_templ)
+        self.assert_resource_is_a_stack(stack_identifier, 'secret2')
+        stack = self.client.stacks.get(stack_identifier)
+        test_ref = self._stack_output(stack, 'test_ref')
+        self.assertIn('arn:openstack:heat:', test_ref)
+
+    def test_transparent_ref(self):
+        """Test using nested resource more transparently.
+
+        With the addition of OS::stack_id we can now use the nested resource
+        more transparently.
+        """
+
+        nested_templ = '''
+heat_template_version: 2014-10-16
+resources:
+  secret1:
+    type: OS::Heat::RandomString
+outputs:
+  OS::stack_id:
+    value: {get_resource: secret1}
+  nested_str:
+    value: {get_attr: [secret1, value]}
+'''
+        stack_identifier = self.stack_create(
+            template=self.main_templ,
+            files={'nested.yaml': nested_templ},
+            environment=self.env_templ)
+        self.assert_resource_is_a_stack(stack_identifier, 'secret2')
+        stack = self.client.stacks.get(stack_identifier)
+        test_ref = self._stack_output(stack, 'test_ref')
+        test_attr = self._stack_output(stack, 'old_way')
+
+        self.assertNotIn('arn:openstack:heat', test_ref)
+        self.assertEqual(test_attr, test_ref)
+
+    def test_nested_attributes(self):
+        nested_templ = '''
+heat_template_version: 2014-10-16
+resources:
+  secret1:
+    type: OS::Heat::RandomString
+outputs:
+  nested_str:
+    value: {get_attr: [secret1, value]}
+'''
+        stack_identifier = self.stack_create(
+            template=self.main_templ,
+            files={'nested.yaml': nested_templ},
+            environment=self.env_templ)
+        self.assert_resource_is_a_stack(stack_identifier, 'secret2')
+        stack = self.client.stacks.get(stack_identifier)
+        old_way = self._stack_output(stack, 'old_way')
+        test_attr1 = self._stack_output(stack, 'test_attr1')
+        test_attr2 = self._stack_output(stack, 'test_attr2')
+
+        self.assertEqual(old_way, test_attr1)
+        self.assertEqual(old_way, test_attr2)
+
+
+class TemplateResourceFacadeTest(functional_base.FunctionalTestsBase):
+    """Prove that we can use ResourceFacade in a HOT template."""
+
+    main_template = '''
+heat_template_version: 2013-05-23
+resources:
+  the_nested:
+    type: the.yaml
+    metadata:
+      foo: bar
+outputs:
+  value:
+    value: {get_attr: [the_nested, output]}
+'''
+
+    nested_templ = '''
+heat_template_version: 2013-05-23
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {"Fn::Select": [foo, {resource_facade: metadata}]}
+outputs:
+  output:
+    value: {get_attr: [test, output]}
+    '''
+
+    def test_metadata(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'the.yaml': self.nested_templ})
+        stack = self.client.stacks.get(stack_identifier)
+        value = self._stack_output(stack, 'value')
+        self.assertEqual('bar', value)
+
+
+class TemplateResourceUpdateTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do template resource updates."""
+
+    main_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: my_name
+      two: your_name
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    main_template_change_prop = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: updated_name
+      two: your_name
+
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    main_template_add_prop = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: my_name
+      two: your_name
+      three: third_name
+
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    main_template_remove_prop = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: my_name
+
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    initial_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: foo
+    Type: String
+  two:
+    Default: bar
+    Type: String
+
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: one}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+'''
+
+    prop_change_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: yikes
+    Type: String
+  two:
+    Default: foo
+    Type: String
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: two}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+'''
+
+    prop_add_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: yikes
+    Type: String
+  two:
+    Default: foo
+    Type: String
+  three:
+    Default: bar
+    Type: String
+
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: three}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+'''
+
+    prop_remove_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: yikes
+    Type: String
+
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: one}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+'''
+
+    attr_change_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: foo
+    Type: String
+  two:
+    Default: bar
+    Type: String
+
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: one}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+  something_else:
+    Value: just_a_string
+'''
+
+    content_change_tmpl = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: foo
+    Type: String
+  two:
+    Default: bar
+    Type: String
+
+Resources:
+  NestedResource:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: yum
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [NestedResource, value]}
+'''
+
+    EXPECTED = (UPDATE, NOCHANGE) = ('update', 'nochange')
+    scenarios = [
+        ('no_changes', dict(template=main_template,
+                            provider=initial_tmpl,
+                            expect=NOCHANGE)),
+        ('main_tmpl_change', dict(template=main_template_change_prop,
+                                  provider=initial_tmpl,
+                                  expect=UPDATE)),
+        ('provider_change', dict(template=main_template,
+                                 provider=content_change_tmpl,
+                                 expect=UPDATE)),
+        ('provider_props_change', dict(template=main_template,
+                                       provider=prop_change_tmpl,
+                                       expect=UPDATE)),
+        ('provider_props_add', dict(template=main_template_add_prop,
+                                    provider=prop_add_tmpl,
+                                    expect=UPDATE)),
+        ('provider_props_remove', dict(template=main_template_remove_prop,
+                                       provider=prop_remove_tmpl,
+                                       expect=NOCHANGE)),
+        ('provider_attr_change', dict(template=main_template,
+                                      provider=attr_change_tmpl,
+                                      expect=NOCHANGE)),
+    ]
+
+    def test_template_resource_update_template_schema(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'the.yaml': self.initial_tmpl})
+        stack = self.client.stacks.get(stack_identifier)
+        initial_id = self._stack_output(stack, 'identifier')
+        initial_val = self._stack_output(stack, 'value')
+
+        self.update_stack(stack_identifier,
+                          self.template,
+                          files={'the.yaml': self.provider})
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual(initial_id,
+                         self._stack_output(stack, 'identifier'))
+        if self.expect == self.NOCHANGE:
+            self.assertEqual(initial_val,
+                             self._stack_output(stack, 'value'))
+        else:
+            self.assertNotEqual(initial_val,
+                                self._stack_output(stack, 'value'))
+
+
+class TemplateResourceUpdateFailedTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do updates on a nested stack to fix a stack."""
+
+    main_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  keypair:
+    Type: OS::Nova::KeyPair
+    Properties:
+      name: replace-this
+      save_private_key: false
+  server:
+    Type: server_fail.yaml
+    DependsOn: keypair
+'''
+    nested_templ = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  RealRandom:
+    Type: OS::Heat::RandomString
+'''
+
+    def setUp(self):
+        super(TemplateResourceUpdateFailedTest, self).setUp()
+        self.assign_keypair()
+
+    def test_update_on_failed_create(self):
+        # create a stack with "server" dependent on "keypair", but
+        # keypair fails, so "server" is not created properly.
+        # We then fix the template and it should succeed.
+        broken_templ = self.main_template.replace('replace-this',
+                                                  self.keypair_name)
+        stack_identifier = self.stack_create(
+            template=broken_templ,
+            files={'server_fail.yaml': self.nested_templ},
+            expected_status='CREATE_FAILED')
+
+        fixed_templ = self.main_template.replace('replace-this',
+                                                 test.rand_name())
+        self.update_stack(stack_identifier,
+                          fixed_templ,
+                          files={'server_fail.yaml': self.nested_templ})
+
+
+class TemplateResourceAdoptTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do template resource adopt/abandon."""
+
+    main_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: my_name
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    nested_templ = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: foo
+    Type: String
+Resources:
+  RealRandom:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: one}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [RealRandom, value]}
+'''
+
+    def _yaml_to_json(self, yaml_templ):
+        return yaml.safe_load(yaml_templ)
+
+    def test_abandon(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'the.yaml': self.nested_templ},
+            enable_cleanup=False
+        )
+
+        info = self.stack_abandon(stack_id=stack_identifier)
+        self.assertEqual(self._yaml_to_json(self.main_template),
+                         info['template'])
+        self.assertEqual(self._yaml_to_json(self.nested_templ),
+                         info['resources']['the_nested']['template'])
+        # TODO(james combs): Implement separate test cases for export
+        # once export REST API is available.  Also test reverse order
+        # of invocation: export -> abandon AND abandon -> export
+
+    def test_adopt(self):
+        data = {
+            'resources': {
+                'the_nested': {
+                    "type": "the.yaml",
+                    "resources": {
+                        "RealRandom": {
+                            "type": "OS::Heat::RandomString",
+                            'resource_data': {'value': 'goopie'},
+                            'resource_id': 'froggy'
+                        }
+                    }
+                }
+            },
+            "environment": {"parameters": {}},
+            "template": yaml.safe_load(self.main_template)
+        }
+
+        stack_identifier = self.stack_adopt(
+            adopt_data=json.dumps(data),
+            files={'the.yaml': self.nested_templ})
+
+        self.assert_resource_is_a_stack(stack_identifier, 'the_nested')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('goopie', self._stack_output(stack, 'value'))
+
+
+class TemplateResourceCheckTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do template resource check."""
+
+    main_template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  the_nested:
+    Type: the.yaml
+    Properties:
+      one: my_name
+Outputs:
+  identifier:
+    Value: {Ref: the_nested}
+  value:
+    Value: {'Fn::GetAtt': [the_nested, the_str]}
+'''
+
+    nested_templ = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Parameters:
+  one:
+    Default: foo
+    Type: String
+Resources:
+  RealRandom:
+    Type: OS::Heat::RandomString
+    Properties:
+      salt: {Ref: one}
+Outputs:
+  the_str:
+    Value: {'Fn::GetAtt': [RealRandom, value]}
+'''
+
+    def test_check(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'the.yaml': self.nested_templ}
+        )
+
+        self.client.actions.check(stack_id=stack_identifier)
+        self._wait_for_stack_status(stack_identifier, 'CHECK_COMPLETE')
+
+
+class TemplateResourceErrorMessageTest(functional_base.FunctionalTestsBase):
+    """Prove that nested stack errors don't suck."""
+
+    template = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  victim:
+    Type: fail.yaml
+'''
+    nested_templ = '''
+HeatTemplateFormatVersion: '2012-12-12'
+Resources:
+  oops:
+    Type: OS::Heat::TestResource
+    Properties:
+      fail: true
+      wait_secs: 2
+'''
+
+    def test_fail(self):
+        stack_identifier = self.stack_create(
+            template=self.template,
+            files={'fail.yaml': self.nested_templ},
+            expected_status='CREATE_FAILED')
+        stack = self.client.stacks.get(stack_identifier)
+
+        exp_path = 'resources.victim.resources.oops'
+        exp_msg = 'Test Resource failed oops'
+        exp = 'Resource CREATE failed: ValueError: %s: %s' % (exp_path,
+                                                              exp_msg)
+        self.assertEqual(exp, stack.stack_status_reason)
+
+
+class TemplateResourceSuspendResumeTest(functional_base.FunctionalTestsBase):
+    """Prove that we can do template resource suspend/resume."""
+
+    main_template = '''
+heat_template_version: 2014-10-16
+parameters:
+resources:
+  the_nested:
+    type: the.yaml
+'''
+
+    nested_templ = '''
+heat_template_version: 2014-10-16
+resources:
+  test_random_string:
+    type: OS::Heat::RandomString
+'''
+
+    def test_suspend_resume(self):
+        """Basic test for template resource suspend resume."""
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'the.yaml': self.nested_templ}
+        )
+
+        self.stack_suspend(stack_identifier=stack_identifier)
+        self.stack_resume(stack_identifier=stack_identifier)
+
+
+class ValidateFacadeTest(functional_base.FunctionalTestsBase):
+    """Prove that nested stack errors don't suck."""
+
+    template = '''
+heat_template_version: 2015-10-15
+resources:
+  thisone:
+    type: OS::Thingy
+    properties:
+      one: pre
+      two: post
+outputs:
+  one:
+    value: {get_attr: [thisone, here-it-is]}
+'''
+    templ_facade = '''
+heat_template_version: 2015-04-30
+parameters:
+  one:
+    type: string
+  two:
+    type: string
+outputs:
+  here-it-is:
+    value: noop
+'''
+    env = '''
+resource_registry:
+  OS::Thingy: facade.yaml
+  resources:
+    thisone:
+      OS::Thingy: concrete.yaml
+'''
+
+    def setUp(self):
+        super(ValidateFacadeTest, self).setUp()
+        self.client = self.orchestration_client
+
+    def test_missing_param(self):
+        templ_missing_parameter = '''
+heat_template_version: 2015-04-30
+parameters:
+  one:
+    type: string
+resources:
+  str:
+    type: OS::Heat::RandomString
+outputs:
+  here-it-is:
+    value:
+      not-important
+'''
+        try:
+            self.stack_create(
+                template=self.template,
+                environment=self.env,
+                files={'facade.yaml': self.templ_facade,
+                       'concrete.yaml': templ_missing_parameter},
+                expected_status='CREATE_FAILED')
+        except heat_exceptions.HTTPBadRequest as exc:
+            exp = ('ERROR: Required property two for facade '
+                   'OS::Thingy missing in provider')
+            self.assertEqual(exp, six.text_type(exc))
+
+    def test_missing_output(self):
+        templ_missing_output = '''
+heat_template_version: 2015-04-30
+parameters:
+  one:
+    type: string
+  two:
+    type: string
+resources:
+  str:
+    type: OS::Heat::RandomString
+'''
+        try:
+            self.stack_create(
+                template=self.template,
+                environment=self.env,
+                files={'facade.yaml': self.templ_facade,
+                       'concrete.yaml': templ_missing_output},
+                expected_status='CREATE_FAILED')
+        except heat_exceptions.HTTPBadRequest as exc:
+            exp = ('ERROR: Attribute here-it-is for facade '
+                   'OS::Thingy missing in provider')
+            self.assertEqual(exp, six.text_type(exc))
+
+
+class TemplateResourceNewParamTest(functional_base.FunctionalTestsBase):
+
+    main_template = '''
+heat_template_version: 2013-05-23
+resources:
+  my_resource:
+    type: resource.yaml
+    properties:
+      value1: foo
+'''
+    nested_templ = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {get_param: value1}
+'''
+    main_template_update = '''
+heat_template_version: 2013-05-23
+resources:
+  my_resource:
+    type: resource.yaml
+    properties:
+      value1: foo
+      value2: foo
+'''
+    nested_templ_update_fail = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+  value2:
+    type: string
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      fail: True
+      value:
+        str_replace:
+          template: VAL1-VAL2
+          params:
+            VAL1: {get_param: value1}
+            VAL2: {get_param: value2}
+'''
+    nested_templ_update = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+  value2:
+    type: string
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value:
+        str_replace:
+          template: VAL1-VAL2
+          params:
+            VAL1: {get_param: value1}
+            VAL2: {get_param: value2}
+'''
+
+    def test_update(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            files={'resource.yaml': self.nested_templ})
+
+        # Make the update fail with the new parameter inserted.
+        self.update_stack(
+            stack_identifier,
+            self.main_template_update,
+            files={'resource.yaml': self.nested_templ_update_fail},
+            expected_status='UPDATE_FAILED')
+
+        # Fix the update, it should succeed now.
+        self.update_stack(
+            stack_identifier,
+            self.main_template_update,
+            files={'resource.yaml': self.nested_templ_update})
+
+
+class TemplateResourceRemovedParamTest(functional_base.FunctionalTestsBase):
+
+    main_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+    default: foo
+resources:
+  my_resource:
+    type: resource.yaml
+    properties:
+      value1: {get_param: value1}
+'''
+    nested_templ = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+    default: foo
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value: {get_param: value1}
+'''
+    main_template_update = '''
+heat_template_version: 2013-05-23
+resources:
+  my_resource:
+    type: resource.yaml
+'''
+    nested_templ_update = '''
+heat_template_version: 2013-05-23
+parameters:
+  value1:
+    type: string
+    default: foo
+  value2:
+    type: string
+    default: bar
+resources:
+  test:
+    type: OS::Heat::TestResource
+    properties:
+      value:
+        str_replace:
+          template: VAL1-VAL2
+          params:
+            VAL1: {get_param: value1}
+            VAL2: {get_param: value2}
+'''
+
+    def test_update(self):
+        stack_identifier = self.stack_create(
+            template=self.main_template,
+            environment={'parameters': {'value1': 'spam'}},
+            files={'resource.yaml': self.nested_templ})
+
+        self.update_stack(
+            stack_identifier,
+            self.main_template_update,
+            environment={'parameter_defaults': {'value2': 'egg'}},
+            files={'resource.yaml': self.nested_templ_update}, existing=True)
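
The template-resource tests above all follow the same wiring: the
environment's resource_registry maps a custom type either to a built-in
resource or to a provider template, and the provider template itself is
shipped in `files` under the path the registry references. A minimal sketch
of that wiring, assuming an authenticated heatclient `client` and reusing
main_templ and nested_templ from TemplateResourceTest.test_nested_env above:

    # Illustrative sketch only; `client`, main_templ and nested_templ are
    # assumed (the latter two as defined in test_nested_env).
    client.stacks.create(
        stack_name='provider-demo',
        template=main_templ,                   # declares My::NestedSecret
        files={'nested.yaml': nested_templ},   # provider implementation
        environment={'resource_registry': {
            'My::Secret': 'OS::Heat::RandomString',
            'My::NestedSecret': 'nested.yaml'}})
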
diff --git a/heat_tempest_plugin/tests/functional/test_template_validate.py b/heat_tempest_plugin/tests/functional/test_template_validate.py
new file mode 100644
index 0000000..4694a82
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_template_validate.py
@@ -0,0 +1,244 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import six
+
+from heatclient import exc
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackTemplateValidateTest(functional_base.FunctionalTestsBase):
+
+    random_template = '''
+heat_template_version: 2014-10-16
+description: the stack description
+parameters:
+  aparam:
+    type: number
+    default: 10
+    description: the param description
+resources:
+  myres:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: aparam}
+'''
+
+    parent_template = '''
+heat_template_version: 2014-10-16
+description: the parent template
+parameters:
+  pparam:
+    type: number
+    default: 5
+    description: the param description
+resources:
+  nres:
+    type: mynested.yaml
+    properties:
+      aparam: {get_param: pparam}
+'''
+
+    parent_template_noprop = '''
+heat_template_version: 2014-10-16
+description: the parent template
+resources:
+  nres:
+    type: mynested.yaml
+'''
+
+    random_template_groups = '''
+heat_template_version: 2014-10-16
+description: the stack description
+parameters:
+  aparam:
+    type: number
+    default: 10
+    description: the param description
+  bparam:
+    type: string
+    default: foo
+  cparam:
+    type: string
+    default: secret
+    hidden: true
+parameter_groups:
+- label: str_params
+  description: The string params
+  parameters:
+  - bparam
+  - cparam
+resources:
+  myres:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: aparam}
+'''
+
+    def test_template_validate_basic(self):
+        ret = self.client.stacks.validate(template=self.random_template)
+        expected = {'Description': 'the stack description',
+                    'Parameters': {
+                        'aparam': {'Default': 10,
+                                   'Description': 'the param description',
+                                   'Label': 'aparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_override_default(self):
+        env = {'parameters': {'aparam': 5}}
+        ret = self.client.stacks.validate(template=self.random_template,
+                                          environment=env)
+        expected = {'Description': 'the stack description',
+                    'Parameters': {
+                        'aparam': {'Default': 10,
+                                   'Value': 5,
+                                   'Description': 'the param description',
+                                   'Label': 'aparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_override_none(self):
+        env = {'resource_registry': {
+               'OS::Heat::RandomString': 'OS::Heat::None'}}
+        ret = self.client.stacks.validate(template=self.random_template,
+                                          environment=env)
+        expected = {'Description': 'the stack description',
+                    'Parameters': {
+                        'aparam': {'Default': 10,
+                                   'Description': 'the param description',
+                                   'Label': 'aparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_basic_required_param(self):
+        tmpl = self.random_template.replace('default: 10', '')
+        ret = self.client.stacks.validate(template=tmpl)
+        expected = {'Description': 'the stack description',
+                    'Parameters': {
+                        'aparam': {'Description': 'the param description',
+                                   'Label': 'aparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_fail_version(self):
+        fail_template = self.random_template.replace('2014-10-16', 'invalid')
+        ex = self.assertRaises(exc.HTTPBadRequest,
+                               self.client.stacks.validate,
+                               template=fail_template)
+        self.assertIn('The template version is invalid', six.text_type(ex))
+
+    def test_template_validate_parameter_groups(self):
+        ret = self.client.stacks.validate(template=self.random_template_groups)
+        expected = {'Description': 'the stack description',
+                    'ParameterGroups':
+                    [{'description': 'The string params',
+                      'label': 'str_params',
+                      'parameters': ['bparam', 'cparam']}],
+                    'Parameters':
+                    {'aparam':
+                     {'Default': 10,
+                      'Description': 'the param description',
+                      'Label': 'aparam',
+                      'NoEcho': 'false',
+                      'Type': 'Number'},
+                     'bparam':
+                     {'Default': 'foo',
+                      'Description': '',
+                      'Label': 'bparam',
+                      'NoEcho': 'false',
+                      'Type': 'String'},
+                     'cparam':
+                     {'Default': 'secret',
+                      'Description': '',
+                      'Label': 'cparam',
+                      'NoEcho': 'true',
+                      'Type': 'String'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_nested_off(self):
+        files = {'mynested.yaml': self.random_template}
+        ret = self.client.stacks.validate(template=self.parent_template,
+                                          files=files)
+        expected = {'Description': 'the parent template',
+                    'Parameters': {
+                        'pparam': {'Default': 5,
+                                   'Description': 'the param description',
+                                   'Label': 'pparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_nested_on(self):
+        files = {'mynested.yaml': self.random_template}
+        ret = self.client.stacks.validate(template=self.parent_template_noprop,
+                                          files=files,
+                                          show_nested=True)
+        expected = {'Description': 'the parent template',
+                    'Parameters': {},
+                    'NestedParameters': {
+                        'nres': {'Description': 'the stack description',
+                                 'Parameters': {'aparam': {'Default': 10,
+                                                           'Description':
+                                                           'the param '
+                                                           'description',
+                                                           'Label': 'aparam',
+                                                           'NoEcho': 'false',
+                                                           'Type': 'Number'}},
+                                 'Type': 'mynested.yaml'}}}
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_nested_on_multiple(self):
+        # parent_template -> nested_template -> random_template
+        nested_template = self.random_template.replace(
+            'OS::Heat::RandomString', 'mynested2.yaml')
+        files = {'mynested.yaml': nested_template,
+                 'mynested2.yaml': self.random_template}
+        ret = self.client.stacks.validate(template=self.parent_template,
+                                          files=files,
+                                          show_nested=True)
+
+        n_param2 = {'myres': {'Description': 'the stack description',
+                              'Parameters': {'aparam': {'Default': 10,
+                                                        'Description':
+                                                        'the param '
+                                                        'description',
+                                                        'Label': 'aparam',
+                                                        'NoEcho': 'false',
+                                                        'Type': 'Number'}},
+                              'Type': 'mynested2.yaml'}}
+        expected = {'Description': 'the parent template',
+                    'Parameters': {
+                        'pparam': {'Default': 5,
+                                   'Description': 'the param description',
+                                   'Label': 'pparam',
+                                   'NoEcho': 'false',
+                                   'Type': 'Number'}},
+                    'NestedParameters': {
+                        'nres': {'Description': 'the stack description',
+                                 'Parameters': {'aparam': {'Default': 10,
+                                                           'Description':
+                                                           'the param '
+                                                           'description',
+                                                           'Label': 'aparam',
+                                                           'Value': 5,
+                                                           'NoEcho': 'false',
+                                                           'Type': 'Number'}},
+                                 'NestedParameters': n_param2,
+                                 'Type': 'mynested.yaml'}}}
+        self.assertEqual(expected, ret)
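
The nested validation tests above show that stacks.validate only recurses
into provider templates when show_nested is set, and that provider templates
are passed through `files` just as they are for create. A minimal sketch,
assuming an authenticated heatclient `client` and the class attributes
defined on StackTemplateValidateTest:

    # Illustrative sketch only; `client` is assumed.
    result = client.stacks.validate(
        template=StackTemplateValidateTest.parent_template,
        files={'mynested.yaml': StackTemplateValidateTest.random_template},
        show_nested=True)
    print(result['Description'])                       # 'the parent template'
    print(result['NestedParameters']['nres']['Type'])  # 'mynested.yaml'
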
diff --git a/heat_tempest_plugin/tests/functional/test_templates.py b/heat_tempest_plugin/tests/functional/test_templates.py
new file mode 100644
index 0000000..bfcf8bf
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_templates.py
@@ -0,0 +1,72 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class TemplateAPITest(functional_base.FunctionalTestsBase):
+    """This will test the following template calls:
+
+    1. Get the template content for the specific stack
+    2. List template versions
+    3. List resource types
+    4. Show resource details for OS::Heat::TestResource
+    """
+
+    template = {
+        'heat_template_version': '2014-10-16',
+        'description': 'Test Template APIs',
+        'resources': {
+            'test1': {
+                'type': 'OS::Heat::TestResource',
+                'properties': {
+                    'update_replace': False,
+                    'wait_secs': 0,
+                    'value': 'Test1',
+                    'fail': False,
+                }
+            }
+        }
+    }
+
+    def test_get_stack_template(self):
+        stack_identifier = self.stack_create(
+            template=self.template
+        )
+        template_from_client = self.client.stacks.template(stack_identifier)
+        self.assertEqual(self.template, template_from_client)
+
+    def test_template_version(self):
+        template_versions = self.client.template_versions.list()
+        supported_template_versions = ["2013-05-23", "2014-10-16",
+                                       "2015-04-30", "2015-10-15",
+                                       "2012-12-12", "2010-09-09",
+                                       "2016-04-08", "2016-10-14", "newton",
+                                       "2017-02-24", "ocata",
+                                       "2017-09-01", "pike",
+                                       "2018-03-02", "queens"]
+        for template in template_versions:
+            self.assertIn(template.version.split(".")[1],
+                          supported_template_versions)
+
+    def test_resource_types(self):
+        resource_types = self.client.resource_types.list()
+        self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
+                            for resource in resource_types))
+
+    def test_show_resource_template(self):
+        resource_details = self.client.resource_types.get(
+            resource_type="OS::Heat::TestResource"
+        )
+        self.assertEqual("OS::Heat::TestResource",
+                         resource_details['resource_type'])
diff --git a/heat_tempest_plugin/tests/functional/test_translation.py b/heat_tempest_plugin/tests/functional/test_translation.py
new file mode 100644
index 0000000..484663b
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_translation.py
@@ -0,0 +1,117 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+template_subnet_old_network = """
+heat_template_version: 2016-10-14
+parameters:
+  net_cidr:
+    type: string
+resources:
+  net:
+    type: OS::Neutron::Net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      cidr: { get_param: net_cidr }
+      network_id: { get_resource: net }
+"""
+
+template_with_get_attr = """
+heat_template_version: 2016-10-14
+description: Test template to create/update subnet with translation
+parameters:
+  net_cidr:
+    type: string
+resources:
+  net:
+    type: OS::Neutron::Net
+  net_value:
+    type: OS::Heat::Value
+    properties:
+      value: { get_resource: net }
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_attr: [net_value, value] }
+      cidr: { get_param: net_cidr }
+"""
+
+template_value_from_nested_stack_main = """
+heat_template_version: 2016-10-14
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  public_net:
+    type: string
+resources:
+  network_settings:
+    type: network.yaml
+    properties:
+      public_net: { get_param: public_net }
+  server:
+    type: OS::Nova::Server
+    properties:
+      flavor: { get_param: flavor }
+      image: { get_param: image }
+      networks: { get_attr: [network_settings, networks] }
+"""
+
+template_value_from_nested_stack_network = """
+heat_template_version: 2016-10-14
+parameters:
+  public_net:
+    type: string
+outputs:
+  networks:
+    value:
+      - uuid: { get_param: public_net }
+"""
+
+
+class TestTranslation(functional_base.FunctionalTestsBase):
+
+    def test_create_update_subnet_old_network(self):
+        # Just create and update where network is translated properly.
+        env = {'parameters': {'net_cidr': '11.11.11.0/24'}}
+        stack_identifier = self.stack_create(
+            template=template_subnet_old_network,
+            environment=env)
+        env = {'parameters': {'net_cidr': '11.11.12.0/24'}}
+        self.update_stack(stack_identifier,
+                          template=template_subnet_old_network,
+                          environment=env)
+
+    def test_create_update_translation_with_get_attr(self):
+        # Check that create and update succeed when the translated value
+        # comes from a function (get_attr via OS::Heat::Value).
+        env = {'parameters': {'net_cidr': '11.11.11.0/24'}}
+        stack_identifier = self.stack_create(
+            template=template_with_get_attr,
+            environment=env)
+        env = {'parameters': {'net_cidr': '11.11.12.0/24'}}
+        self.update_stack(stack_identifier,
+                          template=template_with_get_attr,
+                          environment=env)
+
+    def test_value_from_nested_stack(self):
+        env = {'parameters': {
+            'flavor': self.conf.minimal_instance_type,
+            'image': self.conf.minimal_image_ref,
+            'public_net': self.conf.fixed_network_name
+        }}
+        self.stack_create(
+            template=template_value_from_nested_stack_main,
+            environment=env,
+            files={'network.yaml': template_value_from_nested_stack_network})
diff --git a/heat_tempest_plugin/tests/functional/test_unicode_template.py b/heat_tempest_plugin/tests/functional/test_unicode_template.py
new file mode 100644
index 0000000..d3363aa
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_unicode_template.py
@@ -0,0 +1,110 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackUnicodeTemplateTest(functional_base.FunctionalTestsBase):
+
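+    # Template using CJK (Chinese) text for the description, parameter,
+    # resource and output names, to exercise unicode handling in the API.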
+    random_template = u'''
+heat_template_version: 2014-10-16
+description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
+parameters:
+  \u53c2\u6570:
+    type: number
+    default: 10
+    label: \u6807\u7b7e
+    description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
+resources:
+  \u8d44\u6e90:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: \u53c2\u6570}
+outputs:
+  \u8f93\u51fa:
+    description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
+    value: {get_attr: [\u8d44\u6e90, value]}
+'''
+
+    def _assert_results(self, result):
+        self.assertTrue(result['disable_rollback'])
+        self.assertIsNone(result['parent'])
+        self.assertEqual(u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+                         result['template_description'])
+        self.assertEqual(u'10', result['parameters'][u'\u53c2\u6570'])
+
+    def _assert_preview_results(self, result):
+        self._assert_results(result)
+        res = result['resources'][0]
+        self.assertEqual('/resources/%s' % res['resource_name'],
+                         res['resource_identity']['path'])
+
+    def _assert_create_results(self, result):
+        self._assert_results(result)
+        output = result['outputs'][0]
+        self.assertEqual(u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+                         output['description'])
+        self.assertEqual(u'\u8f93\u51fa', output['output_key'])
+        self.assertIsNotNone(output['output_value'])
+
+    def _assert_resource_results(self, result):
+        self.assertEqual(u'\u8d44\u6e90', result['resource_name'])
+        self.assertEqual('OS::Heat::RandomString',
+                         result['resource_type'])
+
+    def test_template_validate_basic(self):
+        ret = self.client.stacks.validate(template=self.random_template)
+        expected = {
+            'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+            'Parameters': {
+                u'\u53c2\u6570': {
+                    'Default': 10,
+                    'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+                    'Label': u'\u6807\u7b7e',
+                    'NoEcho': 'false',
+                    'Type': 'Number'}
+            }
+        }
+        self.assertEqual(expected, ret)
+
+    def test_template_validate_override_default(self):
+        env = {'parameters': {u'\u53c2\u6570': 5}}
+        ret = self.client.stacks.validate(template=self.random_template,
+                                          environment=env)
+        expected = {
+            'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+            'Parameters': {
+                u'\u53c2\u6570': {
+                    'Default': 10,
+                    'Value': 5,
+                    'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
+                    'Label': u'\u6807\u7b7e',
+                    'NoEcho': 'false',
+                    'Type': 'Number'}
+            }
+        }
+        self.assertEqual(expected, ret)
+
+    def test_stack_preview(self):
+        result = self.client.stacks.preview(
+            template=self.random_template,
+            stack_name=self._stack_rand_name(),
+            disable_rollback=True).to_dict()
+        self._assert_preview_results(result)
+
+    def test_create_stack(self):
+        stack_identifier = self.stack_create(template=self.random_template)
+        stack = self.client.stacks.get(stack_identifier)
+        self._assert_create_results(stack.to_dict())
+        rl = self.client.resources.list(stack_identifier)
+        self.assertEqual(1, len(rl))
+        self._assert_resource_results(rl[0].to_dict())
diff --git a/heat_tempest_plugin/tests/functional/test_update_restricted.py b/heat_tempest_plugin/tests/functional/test_update_restricted.py
new file mode 100644
index 0000000..83cb280
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_update_restricted.py
@@ -0,0 +1,166 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import time
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+test_template = {
+    'heat_template_version': '2013-05-23',
+    'description': 'Test template to create one instance.',
+    'resources': {
+        'bar': {
+            'type': 'OS::Heat::TestResource',
+            'properties': {
+                'value': '1234',
+                'update_replace': False,
+            }
+        }
+    }
+}
+
+env_both_restrict = {u'resource_registry': {
+    u'resources': {
+        'bar': {'restricted_actions': ['update', 'replace']}
+    }
+}
+}
+
+env_replace_restrict = {u'resource_registry': {
+    u'resources': {
+        '*ar': {'restricted_actions': 'replace'}
+    }
+}
+}
+
+reason_update_restrict = 'update is restricted for resource.'
+reason_replace_restrict = 'replace is restricted for resource.'
+
+
+class UpdateRestrictedStackTest(functional_base.FunctionalTestsBase):
+
+    def _check_for_restriction_reason(self, events,
+                                      reason, num_expected=1):
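+        # Return True only when exactly num_expected events carry the
+        # given resource_status_reason.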
+        matched = [e for e in events
+                   if e.resource_status_reason == reason]
+        return len(matched) == num_expected
+
+    def test_update(self):
+        stack_identifier = self.stack_create(template=test_template)
+
+        # Deep copy to avoid mutating the shared module-level template.
+        update_template = copy.deepcopy(test_template)
+        props = update_template['resources']['bar']['properties']
+        props['value'] = '4567'
+
+        # check update fails - with 'both' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_both_restrict,
+                          expected_status='UPDATE_FAILED')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'CREATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_update_restrict))
+
+        # Ensure the timestamp changes, since this will be very quick
+        time.sleep(1)
+
+        # check update succeeds - with only 'replace' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_replace_restrict,
+                          expected_status='UPDATE_COMPLETE')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'UPDATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertFalse(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_update_restrict, 2))
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_replace_restrict, 0))
+
+    def test_replace(self):
+        stack_identifier = self.stack_create(template=test_template)
+
+        update_template = copy.deepcopy(test_template)
+        props = update_template['resources']['bar']['properties']
+        props['update_replace'] = True
+
+        # check replace fails - with 'both' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_both_restrict,
+                          expected_status='UPDATE_FAILED')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'CREATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_replace_restrict))
+
+        # Ensure the timestamp changes, since this will be very quick
+        time.sleep(1)
+
+        # check replace fails - with only 'replace' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_replace_restrict,
+                          expected_status='UPDATE_FAILED')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'CREATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_replace_restrict, 2))
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_update_restrict, 0))
+
+    def test_update_type_changed(self):
+        stack_identifier = self.stack_create(template=test_template)
+
+        update_template = copy.deepcopy(test_template)
+        rsrc = update_template['resources']['bar']
+        rsrc['type'] = 'OS::Heat::None'
+
+        # check replace fails - with 'both' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_both_restrict,
+                          expected_status='UPDATE_FAILED')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'CREATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_replace_restrict))
+
+        # Ensure the timestamp changes, since this will be very quick
+        time.sleep(1)
+
+        # check replace fails - with only 'replace' restricted
+        self.update_stack(stack_identifier, update_template,
+                          env_replace_restrict,
+                          expected_status='UPDATE_FAILED')
+
+        self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
+                                                    'CREATE_COMPLETE'))
+        resource_events = self.client.events.list(stack_identifier, 'bar')
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_replace_restrict, 2))
+        self.assertTrue(
+            self._check_for_restriction_reason(resource_events,
+                                               reason_update_restrict, 0))
diff --git a/heat_tempest_plugin/tests/functional/test_validation.py b/heat_tempest_plugin/tests/functional/test_validation.py
new file mode 100644
index 0000000..ab1762e
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_validation.py
@@ -0,0 +1,92 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class StackValidationTest(functional_base.FunctionalTestsBase):
+
+    def setUp(self):
+        super(StackValidationTest, self).setUp()
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No image configured to test")
+
+        if not self.conf.minimal_instance_type:
+            raise self.skipException(
+                "No minimal_instance_type configured to test")
+
+        self.assign_keypair()
+
+    def test_stack_validate_provider_references_parent_resource(self):
+        template = '''
+heat_template_version: 2014-10-16
+parameters:
+  keyname:
+    type: string
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+resources:
+  config:
+    type: My::Config
+    properties:
+        server: {get_resource: server}
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      key_name: {get_param: keyname}
+      networks: [{network: {get_param: network} }]
+      user_data_format: SOFTWARE_CONFIG
+
+'''
+        config_template = '''
+heat_template_version: 2014-10-16
+parameters:
+  server:
+    type: string
+resources:
+  config:
+    type: OS::Heat::SoftwareConfig
+
+  deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config:
+        get_resource: config
+      server:
+        get_param: server
+'''
+        files = {'provider.yaml': config_template}
+        env = {'resource_registry':
+               {'My::Config': 'provider.yaml'}}
+        parameters = {'keyname': self.keypair_name,
+                      'flavor': self.conf.minimal_instance_type,
+                      'image': self.conf.minimal_image_ref,
+                      'network': self.conf.fixed_network_name}
+        # Note we don't wait for CREATE_COMPLETE, because we're using a
+        # minimal image without the tools to apply the config.
+        # The point of the test is just to prove that validation won't
+        # falsely prevent stack creation from starting (ref bug #1407100).
+        # Note that we can be sure My::Config will stay IN_PROGRESS, as
+        # no signal is sent to the deployment.
+        self.stack_create(template=template,
+                          files=files,
+                          environment=env,
+                          parameters=parameters,
+                          expected_status='CREATE_IN_PROGRESS')
diff --git a/heat_tempest_plugin/tests/functional/test_waitcondition.py b/heat_tempest_plugin/tests/functional/test_waitcondition.py
new file mode 100644
index 0000000..84d8ef1
--- /dev/null
+++ b/heat_tempest_plugin/tests/functional/test_waitcondition.py
@@ -0,0 +1,72 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from keystoneclient.v3 import client as keystoneclient
+from zaqarclient.queues.v2 import client as zaqarclient
+
+from heat_tempest_plugin.tests.functional import functional_base
+
+
+class ZaqarWaitConditionTest(functional_base.FunctionalTestsBase):
+    template = '''
+heat_template_version: "2013-05-23"
+
+resources:
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: {get_resource: wait_handle}
+      timeout: 120
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+    properties:
+      signal_transport: ZAQAR_SIGNAL
+
+outputs:
+  wait_data:
+    value: {'Fn::Select': ['data_id', {get_attr: [wait_condition, data]}]}
+'''
+
+    def test_signal_queues(self):
+        stack_identifier = self.stack_create(
+            template=self.template,
+            expected_status=None)
+        self._wait_for_resource_status(stack_identifier, 'wait_handle',
+                                       'CREATE_COMPLETE')
+        resource = self.client.resources.get(stack_identifier, 'wait_handle')
+        signal = json.loads(resource.attributes['signal'])
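+        # The handle's 'signal' attribute carries the Zaqar queue id and the
+        # credentials needed to post to it; authenticate with them, locate
+        # the messaging endpoint and post the payload the wait condition
+        # expects.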
+        ks = keystoneclient.Client(
+            auth_url=signal['auth_url'],
+            user_id=signal['user_id'],
+            password=signal['password'],
+            project_id=signal['project_id'])
+        endpoint = ks.service_catalog.url_for(
+            service_type='messaging', endpoint_type='publicURL')
+        conf = {
+            'auth_opts': {
+                'backend': 'keystone',
+                'options': {
+                    'os_auth_token': ks.auth_token,
+                    'os_project_id': signal['project_id']
+                }
+            }
+        }
+
+        zaqar = zaqarclient.Client(endpoint, conf=conf)
+
+        queue = zaqar.queue(signal['queue_id'])
+        queue.post({'body': {'data': 'here!', 'id': 'data_id'}, 'ttl': 600})
+        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        stack = self.client.stacks.get(stack_identifier)
+        self.assertEqual('here!', stack.outputs[0]['output_value'])
diff --git a/heat_tempest_plugin/tests/scenario/__init__.py b/heat_tempest_plugin/tests/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/__init__.py
diff --git a/heat_tempest_plugin/tests/scenario/scenario_base.py b/heat_tempest_plugin/tests/scenario/scenario_base.py
new file mode 100644
index 0000000..5cfb5d1
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/scenario_base.py
@@ -0,0 +1,63 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_utils import reflection
+
+from heat_tempest_plugin.common import test
+
+
+class ScenarioTestsBase(test.HeatIntegrationTest):
+    """This class defines common parameters for scenario tests."""
+
+    def setUp(self):
+        super(ScenarioTestsBase, self).setUp()
+        self.check_skip()
+        self.sub_dir = 'templates'
+        self.assign_keypair()
+
+        if not self.conf.fixed_network_name:
+            raise self.skipException("No default network configured to test")
+        self.net = self._get_network()
+
+        if not self.conf.minimal_image_ref:
+            raise self.skipException("No minimal image configured to test")
+        if not self.conf.minimal_instance_type:
+            raise self.skipException("No minimal flavor configured to test")
+
+    def launch_stack(self, template_name, expected_status='CREATE_COMPLETE',
+                     parameters=None, **kwargs):
+        template = self._load_template(__file__, template_name, self.sub_dir)
+
+        parameters = parameters or {}
+
+        if kwargs.get('add_parameters'):
+            parameters.update(kwargs['add_parameters'])
+
+        stack_id = self.stack_create(
+            stack_name=kwargs.get('stack_name'),
+            template=template,
+            files=kwargs.get('files'),
+            parameters=parameters,
+            environment=kwargs.get('environment'),
+            expected_status=expected_status
+        )
+
+        return stack_id
+
+    def check_skip(self):
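+        # Skip when scenario tests are disabled globally, or when this test
+        # class or method appears in skip_scenario_test_list.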
+        test_cls_name = reflection.get_class_name(self, fully_qualified=False)
+        test_method_name = '.'.join([test_cls_name, self._testMethodName])
+        test_skipped = (self.conf.skip_scenario_test_list and (
+            test_cls_name in self.conf.skip_scenario_test_list or
+            test_method_name in self.conf.skip_scenario_test_list))
+        if self.conf.skip_scenario_tests or test_skipped:
+            self.skipTest('Test disabled in conf, skipping')
diff --git a/heat_tempest_plugin/tests/scenario/templates/app_server_lbv2_neutron.yaml b/heat_tempest_plugin/tests/scenario/templates/app_server_lbv2_neutron.yaml
new file mode 100644
index 0000000..f750a98
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/app_server_lbv2_neutron.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-10-15
+
+description: |
+  App server that is a member of a Neutron LBaaS v2 pool.
+
+parameters:
+
+  image:
+    type: string
+
+  flavor:
+    type: string
+
+  net:
+    type: string
+
+  sec_group:
+    type: string
+
+  pool:
+    type: string
+
+  app_port:
+    type: number
+
+  timeout:
+    type: number
+
+  subnet:
+    type: string
+
+resources:
+
+  config:
+    type: OS::Test::WebAppConfig
+    properties:
+      app_port: { get_param: app_port }
+      wc_curl_cli: { get_attr: [ handle, curl_cli ] }
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      networks:
+        - network: { get_param: net }
+      security_groups:
+        - { get_param: sec_group }
+      user_data_format: RAW
+      user_data: { get_resource: config }
+
+  handle:
+    type: OS::Heat::WaitConditionHandle
+
+  waiter:
+    type: OS::Heat::WaitCondition
+    depends_on: server
+    properties:
+      timeout: { get_param: timeout }
+      handle: { get_resource: handle }
+
+  pool_member:
+    type: OS::Neutron::LBaaS::PoolMember
+    depends_on: waiter
+    properties:
+      address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
+      pool: { get_param: pool }
+      protocol_port: { get_param: app_port }
+      subnet: { get_param: subnet }
diff --git a/heat_tempest_plugin/tests/scenario/templates/app_server_neutron.yaml b/heat_tempest_plugin/tests/scenario/templates/app_server_neutron.yaml
new file mode 100644
index 0000000..9cbf82a
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/app_server_neutron.yaml
@@ -0,0 +1,65 @@
+heat_template_version: 2015-10-15
+
+description: |
+  App server that is a member of a Neutron LBaaS pool.
+
+parameters:
+
+  image:
+    type: string
+
+  flavor:
+    type: string
+
+  net:
+    type: string
+
+  sec_group:
+    type: string
+
+  pool_id:
+    type: string
+
+  app_port:
+    type: number
+
+  timeout:
+    type: number
+
+resources:
+
+  config:
+    type: OS::Test::WebAppConfig
+    properties:
+      app_port: { get_param: app_port }
+      wc_curl_cli: { get_attr: [ handle, curl_cli ] }
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      networks:
+        - network: { get_param: net }
+      security_groups:
+        - { get_param: sec_group }
+      user_data_format: RAW
+      user_data: { get_resource: config }
+
+  handle:
+    type: OS::Heat::WaitConditionHandle
+
+  waiter:
+    type: OS::Heat::WaitCondition
+    depends_on: server
+    properties:
+      timeout: { get_param: timeout }
+      handle: { get_resource: handle }
+
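+  # The pool member is registered only after the wait condition receives
+  # the success signal from the server's boot configuration.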
+  pool_member:
+    type: OS::Neutron::PoolMember
+    depends_on: waiter
+    properties:
+      address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
+      pool_id: { get_param: pool_id }
+      protocol_port: { get_param: app_port }
diff --git a/heat_tempest_plugin/tests/scenario/templates/boot_config_none_env.yaml b/heat_tempest_plugin/tests/scenario/templates/boot_config_none_env.yaml
new file mode 100644
index 0000000..91d130c
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/boot_config_none_env.yaml
@@ -0,0 +1,5 @@
+# Defines a Heat::InstallConfigAgent config resource which performs no config.
+# This environment can be used when the image already has the required agents
+# installed and configured.
+resource_registry:
+  "Heat::InstallConfigAgent": "OS::Heat::SoftwareConfig"
\ No newline at end of file
diff --git a/heat_tempest_plugin/tests/scenario/templates/netcat-webapp.yaml b/heat_tempest_plugin/tests/scenario/templates/netcat-webapp.yaml
new file mode 100644
index 0000000..fdb0335
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/netcat-webapp.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2015-10-15
+
+description: |
+  Simplest possible web app, using netcat, that reports only the hostname.
+  Specifically tailored for the minimal Cirros image.
+
+parameters:
+
+  app_port:
+    type: number
+
+  wc_curl_cli:
+    type: string
+
+resources:
+
+  webapp_nc:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: ungrouped
+      config:
+        str_replace:
+          template: |
+            #! /bin/sh -v
+            Body=$(hostname)
+            Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+            while true ; do echo -e $Response | nc -llp PORT; done
+          params:
+            PORT: { get_param: app_port }
+            wc_notify: { get_param: wc_curl_cli }
+
+outputs:
+  OS::stack_id:
+    value: { get_resource: webapp_nc }
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_aodh_alarm.yaml b/heat_tempest_plugin/tests/scenario/templates/test_aodh_alarm.yaml
new file mode 100644
index 0000000..d4c9745
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_aodh_alarm.yaml
@@ -0,0 +1,36 @@
+heat_template_version: 2013-05-23
+resources:
+  asg:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      max_size: 5
+      min_size: 1
+      resource:
+        type: OS::Heat::RandomString
+  scaleup_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: {get_resource: asg}
+      cooldown: 0
+      scaling_adjustment: 1
+  alarm:
+    type: OS::Aodh::Alarm
+    properties:
+      description: Scale up when at least one test_meter sample is seen within a minute
+      meter_name: test_meter
+      statistic: count
+      comparison_operator: ge
+      threshold: 1
+      period: 60
+      evaluation_periods: 1
+      alarm_actions:
+        - str_replace:
+            template: trust+url
+            params:
+              url: {get_attr: [scaleup_policy, signal_url]}
+      matching_metadata:
+        metadata.metering.stack_id: {get_param: "OS::stack_id"}
+outputs:
+  asg_size:
+    value: {get_attr: [asg, current_size]}
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lb_neutron.yaml b/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lb_neutron.yaml
new file mode 100644
index 0000000..d47e787
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lb_neutron.yaml
@@ -0,0 +1,113 @@
+heat_template_version: 2015-04-30
+
+description: |
+  Template which tests Neutron load balancing of requests to members of
+  a Heat AutoScalingGroup.
+  Instances must be running a webserver on the given app_port that
+  produces an HTTP response which differs between servers but is
+  stable over time for a given server.
+
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  net:
+    type: string
+  subnet:
+    type: string
+  public_net:
+    type: string
+  app_port:
+    type: number
+    default: 8080
+  lb_port:
+    type: number
+    default: 80
+  timeout:
+    type: number
+    default: 600
+
+resources:
+
+  sec_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      rules:
+      - remote_ip_prefix: 0.0.0.0/0
+        protocol: tcp
+        port_range_min: { get_param: app_port }
+        port_range_max: { get_param: app_port }
+
+  asg:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 1
+      max_size: 2
+      min_size: 1
+      resource:
+        type: OS::Test::NeutronAppServer
+        properties:
+          image: { get_param: image }
+          flavor: { get_param: flavor }
+          net: { get_param: net}
+          sec_group: { get_resource: sec_group }
+          app_port: { get_param: app_port }
+          pool_id: { get_resource: pool }
+          timeout: { get_param: timeout }
+
+  scale_up:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: asg }
+      scaling_adjustment: 1
+
+  scale_down:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: asg }
+      scaling_adjustment: -1
+
+  health_monitor:
+    type: OS::Neutron::HealthMonitor
+    properties:
+      delay: 3
+      type: HTTP
+      timeout: 3
+      max_retries: 3
+
+  pool:
+    type: OS::Neutron::Pool
+    properties:
+      lb_method: ROUND_ROBIN
+      protocol: HTTP
+      subnet: { get_param: subnet }
+      monitors:
+      - { get_resource: health_monitor }
+      vip:
+        protocol_port: { get_param: lb_port }
+
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: public_net }
+      port_id:
+        { get_attr: [pool, vip, 'port_id'] }
+
+  loadbalancer:
+    type: OS::Neutron::LoadBalancer
+    properties:
+      pool_id: { get_resource: pool }
+      protocol_port: { get_param: app_port }
+
+outputs:
+  lburl:
+    description: URL of the loadbalanced app
+    value:
+      str_replace:
+        template: http://IP_ADDRESS:PORT
+        params:
+          IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
+          PORT: { get_param: lb_port }
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lbv2_neutron.yaml b/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lbv2_neutron.yaml
new file mode 100644
index 0000000..4702366
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_autoscaling_lbv2_neutron.yaml
@@ -0,0 +1,116 @@
+heat_template_version: 2015-04-30
+
+description: |
+  Template which tests Neutron LBaaS v2 load balancing of requests to
+  members of a Heat AutoScalingGroup.
+  Instances must be running a webserver on the given app_port that
+  produces an HTTP response which differs between servers but is
+  stable over time for a given server.
+
+parameters:
+  flavor:
+    type: string
+  image:
+    type: string
+  net:
+    type: string
+  subnet:
+    type: string
+  public_net:
+    type: string
+  app_port:
+    type: number
+    default: 8080
+  lb_port:
+    type: number
+    default: 80
+  timeout:
+    type: number
+    default: 600
+
+resources:
+
+  sec_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      rules:
+      - remote_ip_prefix: 0.0.0.0/0
+        protocol: tcp
+        port_range_min: { get_param: app_port }
+        port_range_max: { get_param: app_port }
+
+  asg:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 1
+      max_size: 2
+      min_size: 1
+      resource:
+        type: OS::Test::NeutronAppServer
+        properties:
+          image: { get_param: image }
+          flavor: { get_param: flavor }
+          net: { get_param: net}
+          sec_group: { get_resource: sec_group }
+          app_port: { get_param: app_port }
+          pool: { get_resource: pool }
+          subnet: { get_param: subnet }
+          timeout: { get_param: timeout }
+
+  scale_up:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: asg }
+      scaling_adjustment: 1
+
+  scale_down:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: asg }
+      scaling_adjustment: -1
+
+  health_monitor:
+    type: OS::Neutron::LBaaS::HealthMonitor
+    properties:
+      delay: 3
+      type: HTTP
+      timeout: 3
+      max_retries: 3
+      pool: { get_resource: pool }
+
+  pool:
+    type: OS::Neutron::LBaaS::Pool
+    properties:
+      lb_algorithm: ROUND_ROBIN
+      protocol: HTTP
+      listener: { get_resource: listener }
+
+  listener:
+    type: OS::Neutron::LBaaS::Listener
+    properties:
+      loadbalancer: { get_resource: loadbalancer }
+      protocol: HTTP
+      protocol_port: { get_param: lb_port }
+
+  loadbalancer:
+    type: OS::Neutron::LBaaS::LoadBalancer
+    properties:
+      vip_subnet: { get_param: subnet }
+
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: public_net }
+      port_id: { get_attr: [loadbalancer, vip_port_id] }
+
+outputs:
+  lburl:
+    description: URL of the loadbalanced app
+    value:
+      str_replace:
+        template: http://IP_ADDRESS:PORT
+        params:
+          IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
+          PORT: { get_param: lb_port }
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_base_resources.yaml b/heat_tempest_plugin/tests/scenario/templates/test_base_resources.yaml
new file mode 100644
index 0000000..bff6185
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_base_resources.yaml
@@ -0,0 +1,110 @@
+heat_template_version: 2014-10-16
+
+description: >
+  This HOT template just defines a single server.
+  It contains only base features, to verify basic heat support.
+
+parameters:
+  key_name:
+    type: string
+    default: key-01
+    description: Name of an existing key pair to use for the server
+  flavor:
+    type: string
+    description: Flavor for the server to be created
+    default: m1.small
+    constraints:
+      - custom_constraint: nova.flavor
+  image:
+    type: string
+    description: Image ID or image name to use for the server
+    constraints:
+      - custom_constraint: glance.image
+  vol_size:
+    type: number
+    description: The size of the Cinder volume
+    default: 1
+  private_net_name:
+    type: string
+    default: private-net-01
+    description: Name of private network to be created
+  private_net_cidr:
+    type: string
+    default: 192.168.101.0/24
+    description: Private network address (CIDR notation)
+  private_net_gateway:
+    type: string
+    default: 192.168.101.1
+    description: Private network gateway address
+  private_net_pool_start:
+    type: string
+    default: 192.168.101.2
+    description: Start of private network IP address allocation pool
+  private_net_pool_end:
+    type: string
+    default: 192.168.101.127
+    description: End of private network IP address allocation pool
+  echo_foo:
+    default: fooooo
+    type: string
+
+resources:
+  private_net:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: private_net_name }
+
+  private_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: { get_resource: private_net }
+      cidr: { get_param: private_net_cidr }
+      gateway_ip: { get_param: private_net_gateway }
+      allocation_pools:
+        - start: { get_param: private_net_pool_start }
+          end: { get_param: private_net_pool_end }
+
+  server_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: private_net }
+      fixed_ips:
+        - subnet_id: { get_resource: private_subnet }
+
+  key:
+    type: OS::Nova::KeyPair
+    properties:
+      name: { get_param: key_name }
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      key_name: { get_resource: key }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      networks:
+        - port: { get_resource: server_port }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            echo echo_foo
+          params:
+            echo_foo: { get_param: echo_foo }
+
+  vol:
+    type: OS::Cinder::Volume
+    properties:
+      size: { get_param: vol_size }
+
+  vol_att:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      instance_uuid: { get_resource: server }
+      volume_id: { get_resource: vol }
+      mountpoint: /dev/vdb
+
+outputs:
+  server_networks:
+    description: The networks of the deployed server
+    value: { get_attr: [server, networks] }
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_server_cfn_init.yaml b/heat_tempest_plugin/tests/scenario/templates/test_server_cfn_init.yaml
new file mode 100644
index 0000000..9f94717
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_server_cfn_init.yaml
@@ -0,0 +1,97 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which uses a wait condition to confirm that minimal
+  cfn-init and cfn-signal invocations have worked
+Parameters:
+  key_name:
+    Type: String
+  flavor:
+    Type: String
+  image:
+    Type: String
+  subnet:
+    Type: String
+  timeout:
+    Type: Number
+Resources:
+  CfnUser:
+    Type: AWS::IAM::User
+  SmokeSecurityGroup:
+    Type: AWS::EC2::SecurityGroup
+    Properties:
+      GroupDescription: Enable only ping and SSH access
+      SecurityGroupIngress:
+      - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
+      - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
+  SmokeKeys:
+    Type: AWS::IAM::AccessKey
+    Properties:
+      UserName: {Ref: CfnUser}
+
+  ElasticIp:
+    Type: AWS::EC2::EIP
+    Properties:
+      Domain: vpc
+
+  SmokeServerElasticIp:
+    Type: AWS::EC2::EIPAssociation
+    Properties:
+      EIP: {Ref: ElasticIp}
+      InstanceId: {Ref: SmokeServer}
+
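+  # cfn-init materialises the files below from the AWS::CloudFormation::Init
+  # metadata; cfn-signal then reports the /tmp/smoke-status content back via
+  # the wait condition.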
+  SmokeServer:
+    Type: AWS::EC2::Instance
+    Metadata:
+      AWS::CloudFormation::Init:
+        config:
+          files:
+            /tmp/smoke-status:
+              content: smoke test complete
+            /etc/cfn/cfn-credentials:
+              content:
+                Fn::Replace:
+                - SmokeKeys: {Ref: SmokeKeys}
+                  SecretAccessKey:
+                    'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
+                - |
+                  AWSAccessKeyId=SmokeKeys
+                  AWSSecretKey=SecretAccessKey
+              mode: '000400'
+              owner: root
+              group: root
+    Properties:
+      ImageId: {Ref: image}
+      InstanceType: {Ref: flavor}
+      KeyName: {Ref: key_name}
+      SubnetId: {Ref: subnet}
+      SecurityGroups:
+      - {Ref: SmokeSecurityGroup}
+      UserData:
+        Fn::Replace:
+        - WaitHandle: {Ref: WaitHandle}
+        - |
+          #!/bin/bash -v
+          /opt/aws/bin/cfn-init
+          /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
+              --id smoke_status "WaitHandle"
+  WaitHandle:
+    Type: AWS::CloudFormation::WaitConditionHandle
+  WaitCondition:
+    Type: AWS::CloudFormation::WaitCondition
+    DependsOn: SmokeServer
+    Properties:
+      Handle: {Ref: WaitHandle}
+      Timeout: {Ref: timeout}
+Outputs:
+  WaitConditionStatus:
+    Description: Contents of /tmp/smoke-status on SmokeServer
+    Value:
+      Fn::GetAtt: [WaitCondition, Data]
+  ElasticIp_Id:
+    Description: Elastic ip allocation id
+    Value:
+      Fn::GetAtt: [ElasticIp, AllocationId]
+  SmokeServerElasticIp:
+    Description: Elastic ip address of server
+    Value:
+      Ref: ElasticIp
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_server_signal.yaml b/heat_tempest_plugin/tests/scenario/templates/test_server_signal.yaml
new file mode 100644
index 0000000..4466a5e
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_server_signal.yaml
@@ -0,0 +1,107 @@
+heat_template_version: 2013-05-23
+description: |
+  Template which uses a wait condition to confirm that minimal
+  signalling works in a newly created network
+parameters:
+  key_name:
+    type: string
+  flavor:
+    type: string
+  image:
+    type: string
+  subnet_cidr:
+    type: string
+    default: 10.100.0.0/16
+  timeout:
+    type: number
+  public_net:
+    type: string
+    default: public
+  private_net:
+    type: string
+    default: heat-net
+  dns_servers:
+    type: comma_delimited_list
+    default: ["8.8.8.8", "8.8.4.4"]
+  user_data_format:
+    type: string
+    default: RAW
+resources:
+  sg:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: the_sg
+      description: Ping and SSH
+      rules:
+      - protocol: icmp
+      - protocol: tcp
+        port_range_min: 22
+        port_range_max: 22
+
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: {get_param: public_net}
+
+  network:
+    type: OS::Neutron::Net
+
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: {get_resource: network}
+      ip_version: 4
+      cidr: {get_param: subnet_cidr}
+      dns_nameservers: {get_param: dns_servers}
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: {get_param: public_net}
+
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: {get_resource: router}
+      subnet: {get_resource: subnet}
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      key_name: {get_param: key_name}
+      networks:
+      - subnet: {get_resource: subnet}
+      security_groups:
+      - {get_resource: sg}
+      user_data_format: {get_param: user_data_format}
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            wc_notify --data-binary '{"status": "SUCCESS", "data": "test complete"}'
+          params:
+            wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+
+  server_floating_ip_assoc:
+    type: OS::Neutron::FloatingIPAssociation
+    properties:
+      floatingip_id: {get_resource: floating_ip}
+      port_id: {get_attr: [server, addresses, {get_resource: network}, 0, port]}
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: {get_resource: wait_handle}
+      timeout: {get_param: timeout}
+
+outputs:
+  server_ip:
+    value: {get_attr: [floating_ip, floating_ip_address]}
+  wc_data:
+    value: {get_attr: [wait_condition, data]}
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_server_software_config.yaml b/heat_tempest_plugin/tests/scenario/templates/test_server_software_config.yaml
new file mode 100644
index 0000000..bf8fa9b
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_server_software_config.yaml
@@ -0,0 +1,173 @@
+heat_template_version: 2014-10-16
+parameters:
+  key_name:
+    type: string
+  flavor:
+    type: string
+  image:
+    type: string
+  network:
+    type: string
+  signal_transport:
+    type: string
+    default: CFN_SIGNAL
+  software_config_transport:
+    type: string
+    default: POLL_SERVER_CFN
+  dep1_foo:
+    default: fooooo
+    type: string
+  dep1_bar:
+    default: baaaaa
+    type: string
+  dep2a_bar:
+    type: string
+    default: barrr
+  dep3_foo:
+    default: fo
+    type: string
+  dep3_bar:
+    default: ba
+    type: string
+
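+# cfg1 and cfg3 exercise the script and puppet hooks; cfg2a writes a file via
+# cfn-init and cfg2b reads it back with a script. All deployments except
+# dep2a report back using the configurable signal transport.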
+resources:
+
+  the_sg:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: the_sg
+      description: Ping and SSH
+      rules:
+      - protocol: icmp
+      - protocol: tcp
+        port_range_min: 22
+        port_range_max: 22
+
+  cfg1:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+      - name: foo
+      - name: bar
+      outputs:
+      - name: result
+      config: {get_file: cfg1.sh}
+
+  cfg2a:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: cfn-init
+      inputs:
+      - name: bar
+      config:
+        config:
+          files:
+            /tmp/cfn-init-foo:
+              content:
+                get_input: bar
+              mode: '000644'
+
+  cfg2b:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      outputs:
+      - name: result
+      config: |
+        #!/bin/sh
+        echo -n "The file /tmp/cfn-init-foo contains `cat /tmp/cfn-init-foo` for server $deploy_server_id during $deploy_action" > $heat_outputs_path.result
+
+  cfg3:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: puppet
+      inputs:
+      - name: foo
+      - name: bar
+      outputs:
+      - name: result
+      config: {get_file: cfg3.pp}
+
+  dep1:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config:
+        get_resource: cfg1
+      server:
+        get_resource: server
+      input_values:
+        foo: {get_param: dep1_foo}
+        bar: {get_param: dep1_bar}
+      signal_transport: {get_param: signal_transport}
+
+  dep2a:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      name: 10_dep2a
+      signal_transport: NO_SIGNAL
+      config:
+        get_resource: cfg2a
+      server:
+        get_resource: server
+      input_values:
+        bar: {get_param: dep2a_bar}
+
+  dep2b:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      name: 20_dep2b
+      config:
+        get_resource: cfg2b
+      server:
+        get_resource: server
+      signal_transport: {get_param: signal_transport}
+
+  dep3:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config:
+        get_resource: cfg3
+      server:
+        get_resource: server
+      input_values:
+        foo: {get_param: dep3_foo}
+        bar: {get_param: dep3_bar}
+      signal_transport: {get_param: signal_transport}
+
+  cfg_user_data:
+    type: Heat::InstallConfigAgent
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      key_name: {get_param: key_name}
+      security_groups:
+      - {get_resource: the_sg}
+      networks:
+      - network: {get_param: network}
+      user_data_format: SOFTWARE_CONFIG
+      software_config_transport: {get_param: software_config_transport}
+      user_data: {get_attr: [cfg_user_data, config]}
+
+outputs:
+  res1:
+    value:
+      result: {get_attr: [dep1, result]}
+      stdout: {get_attr: [dep1, deploy_stdout]}
+      stderr: {get_attr: [dep1, deploy_stderr]}
+      status_code: {get_attr: [dep1, deploy_status_code]}
+  res2:
+    value:
+      result: {get_attr: [dep2b, result]}
+      stdout: {get_attr: [dep2b, deploy_stdout]}
+      stderr: {get_attr: [dep2b, deploy_stderr]}
+      status_code: {get_attr: [dep2b, deploy_status_code]}
+  res3:
+    value:
+      result: {get_attr: [dep3, result]}
+      stdout: {get_attr: [dep3, deploy_stdout]}
+      stderr: {get_attr: [dep3, deploy_stderr]}
+      status_code: {get_attr: [dep3, deploy_status_code]}
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_volumes_create_from_backup.yaml b/heat_tempest_plugin/tests/scenario/templates/test_volumes_create_from_backup.yaml
new file mode 100644
index 0000000..ab1edf8
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_volumes_create_from_backup.yaml
@@ -0,0 +1,118 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  key_name:
+    type: string
+    description: keypair to enable SSH access to the instance.
+
+  instance_type:
+    type: string
+    description: Type of the instance to be created.
+
+  image_id:
+    type: string
+    description: ID of the image to use for the instance to be created.
+
+  timeout:
+    type: number
+    description: Stack creation timeout
+
+  dev_name:
+    type: string
+    description: Expected device name for volume
+    default: vdb
+
+  rescan_timeout:
+    type: number
+    description: Max number of seconds to wait for volume after rescan
+    default: 120
+
+  backup_id:
+    type: string
+    description: backup_id to create volume from
+
+  network:
+    type: string
+
+  volume_description:
+    type: string
+    description: Description of volume
+    default: A volume description
+
+resources:
+  volume:
+    type: OS::Cinder::Volume
+    properties:
+      backup_id: { get_param: backup_id }
+      description: { get_param: volume_description }
+
+  volume_attachment:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      volume_id: { get_resource: volume }
+      instance_uuid: { get_resource: instance }
+
+  instance:
+    type: OS::Nova::Server
+    properties:
+      image: { get_param: image_id }
+      flavor: { get_param: instance_type }
+      key_name: { get_param: key_name }
+      networks:
+      - uuid: {get_param: network}
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            # Trigger rescan to ensure we see the attached volume
+            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
+            # Wait for the rescan as the volume doesn't appear immediately
+            for i in $(seq 1 rescan_timeout)
+            do
+              grep -q dev_name /proc/partitions && break
+              sleep 1
+            done
+            if grep -q dev_name /proc/partitions
+            then
+              mount /dev/dev_name /mnt
+              TESTDATA=$(cat /mnt/testfile)
+              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Volume Data:'$TESTDATA'", "UniqueId": "instance1"}' "wc_url"
+            else
+              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
+            fi
+          params:
+            wc_url: { get_resource: wait_handle }
+            dev_name: { get_param: dev_name }
+            rescan_timeout: { get_param: rescan_timeout }
+
+  wait_handle:
+    type: OS::Heat::UpdateWaitConditionHandle
+
+  wait_condition:
+    type: AWS::CloudFormation::WaitCondition
+    properties:
+      Count: 1
+      Handle: { get_resource: wait_handle }
+      Timeout: { get_param: timeout }
+
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
+
+  testfile_data:
+    description: Contents of /mnt/testfile from the mounted volume
+    value: { get_attr: ['wait_condition', 'Data'] }
diff --git a/heat_tempest_plugin/tests/scenario/templates/test_volumes_delete_snapshot.yaml b/heat_tempest_plugin/tests/scenario/templates/test_volumes_delete_snapshot.yaml
new file mode 100644
index 0000000..3893b52
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/templates/test_volumes_delete_snapshot.yaml
@@ -0,0 +1,124 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  key_name:
+    type: string
+    description: keypair to enable SSH access to the instance.
+
+  instance_type:
+    type: string
+    description: Type of the instance to be created.
+
+  image_id:
+    type: string
+    description: ID of the image to use for the instance to be created.
+
+  timeout:
+    type: number
+    description: Stack creation timeout
+
+  dev_name:
+    type: string
+    description: Expected device name for volume
+    default: vdb
+
+  test_string:
+    type: string
+    description: Test string which is written to volume
+    default: ateststring
+
+  rescan_timeout:
+    type: number
+    description: Max number of seconds to wait for volume after rescan
+    default: 120
+
+  network:
+    type: string
+
+  volume_description:
+    type: string
+    description: Description of volume
+    default: A volume description
+
+  volume_size:
+    type: number
+    description: Size of volume
+    default: 1
+
+resources:
+  volume:
+    deletion_policy: 'Snapshot'
+    type: OS::Cinder::Volume
+    properties:
+      size: {get_param: volume_size}
+      description: {get_param: volume_description}
+
+  volume_attachment:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      volume_id: { get_resource: volume }
+      instance_uuid: { get_resource: instance }
+
+  instance:
+    type: OS::Nova::Server
+    properties:
+      image: { get_param: image_id }
+      flavor: { get_param: instance_type }
+      key_name: { get_param: key_name }
+      networks:
+      - uuid: {get_param: network}
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            # Trigger rescan to ensure we see the attached volume
+            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
+            # Wait for the rescan as the volume doesn't appear immediately
+            for i in $(seq 1 rescan_timeout)
+            do
+              grep -q dev_name /proc/partitions && break
+              sleep 1
+            done
+            if grep -q dev_name /proc/partitions
+            then
+              mkfs.ext4 /dev/dev_name
+              mount /dev/dev_name /mnt
+              echo "test_string" > /mnt/testfile
+              umount /mnt
+              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Completed volume configuration.", "UniqueId": "instance1"}' "wc_url"
+            else
+              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
+            fi
+          params:
+            wc_url: { get_resource: wait_handle }
+            dev_name: { get_param: dev_name }
+            rescan_timeout: { get_param: rescan_timeout }
+            test_string: { get_param: test_string }
+
+  wait_handle:
+    type: OS::Heat::UpdateWaitConditionHandle
+
+  wait_condition:
+    type: AWS::CloudFormation::WaitCondition
+    properties:
+      Count: 1
+      Handle: { get_resource: wait_handle }
+      Timeout: { get_param: timeout }
+
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/heat_tempest_plugin/tests/scenario/test_aodh_alarm.py b/heat_tempest_plugin/tests/scenario/test_aodh_alarm.py
new file mode 100644
index 0000000..ec436de
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_aodh_alarm.py
@@ -0,0 +1,57 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+LOG = logging.getLogger(__name__)
+
+
+class AodhAlarmTest(scenario_base.ScenarioTestsBase):
+    """Class is responsible for testing of aodh usage."""
+    def setUp(self):
+        super(AodhAlarmTest, self).setUp()
+        self.template = self._load_template(__file__,
+                                            'test_aodh_alarm.yaml',
+                                            'templates')
+
+    def check_instance_count(self, stack_identifier, expected):
+        stack = self.client.stacks.get(stack_identifier)
+        actual = self._stack_output(stack, 'asg_size')
+        if actual != expected:
+            LOG.warning('check_instance_count exp:%d, act:%s',
+                        expected, actual)
+        return actual == expected
+
+    def test_alarm(self):
+        """Confirm we can create an alarm and trigger it."""
+
+        # 1. create the stack
+        stack_identifier = self.stack_create(template=self.template)
+
+        # 2. send ceilometer a metric (should cause the alarm to fire)
+        sample = {}
+        sample['counter_type'] = 'gauge'
+        sample['counter_name'] = 'test_meter'
+        sample['counter_volume'] = 1
+        sample['counter_unit'] = 'count'
+        sample['resource_metadata'] = {'metering.stack_id':
+                                       stack_identifier.split('/')[-1]}
+        sample['resource_id'] = 'shouldnt_matter'
+        self.metering_client.samples.create(**sample)
+
+        # 3. confirm we get a scale-up.
+        # Note: there is little point in waiting much longer than 60s plus
+        # the time needed to scale up.
+        self.assertTrue(test.call_until_true(
+            120, 2, self.check_instance_count, stack_identifier, 2))
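The final assertion relies on test.call_until_true to poll check_instance_count every 2 seconds for up to 120 seconds, succeeding as soon as the group reaches the expected size. The helper below is only a rough sketch of that polling contract, not the plugin's actual implementation, which may differ in detail.

    import time

    def call_until_true(duration, sleep_for, func, *args, **kwargs):
        # Call func repeatedly until it returns True or `duration` seconds
        # elapse, sleeping `sleep_for` seconds between attempts.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func(*args, **kwargs):
                return True
            time.sleep(sleep_for)
        return False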
diff --git a/heat_tempest_plugin/tests/scenario/test_autoscaling_lb.py b/heat_tempest_plugin/tests/scenario/test_autoscaling_lb.py
new file mode 100644
index 0000000..e576867
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_autoscaling_lb.py
@@ -0,0 +1,110 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import requests
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+
+class AutoscalingLoadBalancerTest(scenario_base.ScenarioTestsBase):
+    """The class is responsible for testing ASG + LBv1 scenario.
+
+    The very common use case tested is an autoscaling group
+    of some web application servers behind a loadbalancer.
+    """
+
+    def setUp(self):
+        super(AutoscalingLoadBalancerTest, self).setUp()
+        self.template_name = 'test_autoscaling_lb_neutron.yaml'
+        self.app_server_template_name = 'app_server_neutron.yaml'
+        self.webapp_template_name = 'netcat-webapp.yaml'
+        if not self.is_network_extension_supported('lbaas'):
+            self.skipTest('LBaaS v1 extension not available, skipping')
+
+    def check_num_responses(self, url, expected_num, retries=10):
+        resp = set()
+        for count in range(retries):
+            time.sleep(1)
+            try:
+                r = requests.get(url, verify=self.verify_cert)
+            except requests.exceptions.ConnectionError:
+                # The LB may not be up yet, let's retry
+                continue
+            # skip unsuccessful requests
+            if r.status_code == 200:
+                resp.add(r.text)
+        self.assertEqual(expected_num, len(resp))
+
+    def test_autoscaling_loadbalancer_neutron(self):
+        """Check work of AutoScaing and Neutron LBaaS v1 resource in Heat.
+
+        The scenario is the following:
+            1. Launch a stack with a load balancer and autoscaling group
+               of one server, wait until stack create is complete.
+            2. Check that there is only one distinctive response from
+               loadbalanced IP.
+            3. Signal the scale_up policy, wait until all resources in
+               autoscaling group are complete.
+            4. Check that now there are two distinctive responses from
+               loadbalanced IP.
+        """
+
+        parameters = {
+            'flavor': self.conf.minimal_instance_type,
+            'image': self.conf.minimal_image_ref,
+            'net': self.conf.fixed_network_name,
+            'subnet': self.conf.fixed_subnet_name,
+            'public_net': self.conf.floating_network_name,
+            'app_port': 8080,
+            'lb_port': 80,
+            'timeout': 600
+        }
+
+        app_server_template = self._load_template(
+            __file__, self.app_server_template_name, self.sub_dir
+        )
+        webapp_template = self._load_template(
+            __file__, self.webapp_template_name, self.sub_dir
+        )
+        files = {'appserver.yaml': app_server_template,
+                 'webapp.yaml': webapp_template}
+        env = {'resource_registry':
+               {'OS::Test::NeutronAppServer': 'appserver.yaml',
+                'OS::Test::WebAppConfig': 'webapp.yaml'}}
+        # Launch stack
+        sid = self.launch_stack(
+            template_name=self.template_name,
+            parameters=parameters,
+            files=files,
+            environment=env
+        )
+        stack = self.client.stacks.get(sid)
+        lb_url = self._stack_output(stack, 'lburl')
+        # Check the number of distinct responses; it must be 1
+        self.check_num_responses(lb_url, 1)
+
+        # Signal the scaling hook
+        self.client.resources.signal(sid, 'scale_up')
+
+        # Wait for AutoScalingGroup update to finish
+        asg = self.client.resources.get(sid, 'asg')
+        test.call_until_true(self.conf.build_timeout,
+                             self.conf.build_interval,
+                             self.check_autoscale_complete,
+                             asg.physical_resource_id, 2, sid, 'scale_up')
+
+        # Check the number of distinct responses; it must now be 2
+        self.check_num_responses(lb_url, 2)
diff --git a/heat_tempest_plugin/tests/scenario/test_autoscaling_lbv2.py b/heat_tempest_plugin/tests/scenario/test_autoscaling_lbv2.py
new file mode 100644
index 0000000..65fa91c
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_autoscaling_lbv2.py
@@ -0,0 +1,110 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import requests
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+
+class AutoscalingLoadBalancerv2Test(scenario_base.ScenarioTestsBase):
+    """The class is responsible for testing ASG + LBv2 scenario.
+
+    The very common use case tested is an autoscaling group
+    of some web application servers behind a loadbalancer.
+    """
+
+    def setUp(self):
+        super(AutoscalingLoadBalancerv2Test, self).setUp()
+        self.template_name = 'test_autoscaling_lbv2_neutron.yaml'
+        self.app_server_template_name = 'app_server_lbv2_neutron.yaml'
+        self.webapp_template_name = 'netcat-webapp.yaml'
+        if not self.is_network_extension_supported('lbaasv2'):
+            self.skipTest('LBaaS v2 extension not available, skipping')
+
+    def check_num_responses(self, url, expected_num, retries=20):
+        resp = set()
+        for count in range(retries):
+            time.sleep(2)
+            try:
+                r = requests.get(url, verify=self.verify_cert)
+            except requests.exceptions.ConnectionError:
+                # The LB may not be up yet, let's retry
+                continue
+            # skip unsuccessful requests
+            if r.status_code == 200:
+                resp.add(r.text)
+                if len(resp) == expected_num:
+                    break
+        self.assertEqual(expected_num, len(resp))
+
+    def test_autoscaling_loadbalancer_neutron(self):
+        """Check work of AutoScaing and Neutron LBaaS v2 resource in Heat.
+
+        The scenario is the following:
+            1. Launch a stack with a load balancer and autoscaling group
+               of one server, wait until stack create is complete.
+            2. Check that there is only one distinctive response from
+               loadbalanced IP.
+            3. Signal the scale_up policy, wait until all resources in
+               autoscaling group are complete.
+            4. Check that now there are two distinctive responses from
+               loadbalanced IP.
+        """
+
+        parameters = {
+            'flavor': self.conf.minimal_instance_type,
+            'image': self.conf.minimal_image_ref,
+            'net': self.conf.fixed_network_name,
+            'subnet': self.conf.fixed_subnet_name,
+            'public_net': self.conf.floating_network_name
+        }
+
+        app_server_template = self._load_template(
+            __file__, self.app_server_template_name, self.sub_dir
+        )
+        webapp_template = self._load_template(
+            __file__, self.webapp_template_name, self.sub_dir
+        )
+        files = {'appserver.yaml': app_server_template,
+                 'webapp.yaml': webapp_template}
+        env = {'resource_registry':
+               {'OS::Test::NeutronAppServer': 'appserver.yaml',
+                'OS::Test::WebAppConfig': 'webapp.yaml'}}
+
+        # Launch stack
+        sid = self.launch_stack(
+            template_name=self.template_name,
+            parameters=parameters,
+            files=files,
+            environment=env
+        )
+        stack = self.client.stacks.get(sid)
+        lb_url = self._stack_output(stack, 'lburl')
+        # Check the number of distinct responses; it must be 1
+        self.check_num_responses(lb_url, 1)
+
+        # Signal the scaling hook
+        self.client.resources.signal(sid, 'scale_up')
+
+        # Wait for AutoScalingGroup update to finish
+        asg = self.client.resources.get(sid, 'asg')
+        test.call_until_true(self.conf.build_timeout,
+                             self.conf.build_interval,
+                             self.check_autoscale_complete,
+                             asg.physical_resource_id, 2, sid, 'scale_up')
+
+        # Check the number of distinct responses; it must now be 2
+        self.check_num_responses(lb_url, 2)
diff --git a/heat_tempest_plugin/tests/scenario/test_base_resources.py b/heat_tempest_plugin/tests/scenario/test_base_resources.py
new file mode 100644
index 0000000..e63242e
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_base_resources.py
@@ -0,0 +1,73 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat_tempest_plugin.common import test
+from heat_tempest_plugin.tests.scenario import scenario_base
+from heatclient.common import template_utils
+
+
+class BasicResourcesTest(scenario_base.ScenarioTestsBase):
+
+    def setUp(self):
+        super(BasicResourcesTest, self).setUp()
+        if not self.conf.image_ref:
+            raise self.skipException("No image configured to test")
+        if not self.conf.instance_type:
+            raise self.skipException("No flavor configured to test")
+
+    def check_stack(self):
+        sid = self.stack_identifier
+        # Check that the stack was created
+        self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+        server_resource = self.client.resources.get(sid, 'server')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+        self.assertEqual(server.id, server_id)
+
+        stack = self.client.stacks.get(sid)
+
+        server_networks = self._stack_output(stack, 'server_networks')
+        self.assertIn(self.private_net_name, server_networks)
+
+    def test_base_resources_integration(self):
+        """Define test for base resources interation from core porjects
+
+        The alternative scenario is the following:
+            1. Create a stack with basic resources from core projects.
+            2. Check that all stack resources are created successfully.
+            3. Wait for deployment.
+            4. Check that stack was created.
+            5. Check stack outputs.
+        """
+
+        self.private_net_name = test.rand_name('heat-net')
+        parameters = {
+            'key_name': test.rand_name('heat-key'),
+            'flavor': self.conf.instance_type,
+            'image': self.conf.image_ref,
+            'vol_size': self.conf.volume_size,
+            'private_net_name': self.private_net_name
+        }
+
+        env_files, env = template_utils.process_environment_and_files(
+            self.conf.boot_config_env)
+
+        # Launch stack
+        self.stack_identifier = self.launch_stack(
+            template_name='test_base_resources.yaml',
+            parameters=parameters,
+            expected_status=None,
+            environment=env
+        )
+
+        # Check stack
+        self.check_stack()
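For context on the boot_config_env handling above: template_utils.process_environment_and_files from python-heatclient parses an environment file and returns a (files, env) pair, where files maps resolved paths or URLs to their contents and env is the parsed environment dict. A minimal sketch, assuming a local 'environment.yaml' exists:

    from heatclient.common import template_utils

    # Parse the environment; any template or config files it references are
    # collected into env_files so they can be uploaded with the stack.
    env_files, env = template_utils.process_environment_and_files(
        'environment.yaml')

    # launch_stack() above forwards env to the stack create call; the
    # software config test later in this change also merges env_files into
    # the files it sends.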
diff --git a/heat_tempest_plugin/tests/scenario/test_server_cfn_init.py b/heat_tempest_plugin/tests/scenario/test_server_cfn_init.py
new file mode 100644
index 0000000..5ea0fb9
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_server_cfn_init.py
@@ -0,0 +1,122 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from heat_tempest_plugin.common import exceptions
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+
+class CfnInitIntegrationTest(scenario_base.ScenarioTestsBase):
+    """Testing cfn-init and cfn-signal workability."""
+
+    def setUp(self):
+        super(CfnInitIntegrationTest, self).setUp()
+        if not self.conf.image_ref:
+            raise self.skipException("No image configured to test")
+        if not self.conf.instance_type:
+            raise self.skipException("No flavor configured to test")
+
+    def check_stack(self, sid):
+        # Check status of all resources
+        for res in ('WaitHandle', 'SmokeSecurityGroup', 'SmokeKeys',
+                    'CfnUser', 'SmokeServer', 'SmokeServerElasticIp'):
+            self._wait_for_resource_status(
+                sid, res, 'CREATE_COMPLETE')
+
+        server_resource = self.client.resources.get(sid, 'SmokeServer')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+
+        try:
+            self._wait_for_resource_status(
+                sid, 'WaitCondition', 'CREATE_COMPLETE')
+        finally:
+            # Attempt to log the server console regardless of whether the
+            # WaitCondition completes, so that successful and failed
+            # cloud-init logs can be compared.
+            self._log_console_output(servers=[server])
+
+        stack = self.client.stacks.get(sid)
+
+        # This is an assert of great significance, as it means the following
+        # has happened:
+        # - cfn-init read the provided metadata and wrote out a file
+        # - a user was created and credentials written to the server
+        # - a cfn-signal was built which was signed with provided credentials
+        # - the wait condition was fulfilled and the stack has changed state
+        wait_status = json.loads(
+            self._stack_output(stack, 'WaitConditionStatus'))
+        self.assertEqual('smoke test complete', wait_status['smoke_status'])
+
+        # Check EIP attributes.
+        server_floatingip_id = self._stack_output(stack,
+                                                  'ElasticIp_Id')
+        self.assertIsNotNone(server_floatingip_id)
+
+        # Fetch EIP details.
+        net_show = self.network_client.show_floatingip(
+            floatingip=server_floatingip_id)
+        floating_ip = net_show['floatingip']['floating_ip_address']
+        port_id = net_show['floatingip']['port_id']
+
+        # Ensure that EIP was assigned to server.
+        port_show = self.network_client.show_port(port=port_id)
+        self.assertEqual(server.id, port_show['port']['device_id'])
+        server_ip = self._stack_output(stack, 'SmokeServerElasticIp')
+        self.assertEqual(server_ip, floating_ip)
+
+        # Check that created server is reachable
+        if not self._ping_ip_address(server_ip):
+            self._log_console_output(servers=[server])
+            self.fail(
+                "Timed out waiting for %s to become reachable" % server_ip)
+
+        # Check that the user can authenticate with the generated keypair
+        if self.keypair:
+            try:
+                linux_client = self.get_remote_client(
+                    server_ip, username='ec2-user')
+                linux_client.validate_authentication()
+            except (exceptions.ServerUnreachable,
+                    exceptions.SSHTimeout):
+                self._log_console_output(servers=[server])
+                raise
+
+    def test_server_cfn_init(self):
+        """Check cfn-init and cfn-signal availability on the created server.
+
+        The alternative scenario is the following:
+            1. Create a stack with a server and configured security group.
+            2. Check that all stack resources were created.
+            3. Check that created server is reachable.
+            4. Check that stack was created successfully.
+            5. Check that it is possible to connect to the server
+               via the generated keypair.
+        """
+        parameters = {
+            'key_name': self.keypair_name,
+            'flavor': self.conf.instance_type,
+            'image': self.conf.image_ref,
+            'timeout': self.conf.build_timeout,
+            'subnet': self.net['subnets'][0],
+        }
+
+        # Launch stack
+        stack_id = self.launch_stack(
+            template_name="test_server_cfn_init.yaml",
+            parameters=parameters,
+            expected_status=None
+        )
+
+        # Check stack
+        self.check_stack(stack_id)
diff --git a/heat_tempest_plugin/tests/scenario/test_server_signal.py b/heat_tempest_plugin/tests/scenario/test_server_signal.py
new file mode 100644
index 0000000..e2730dd
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_server_signal.py
@@ -0,0 +1,85 @@
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from heat_tempest_plugin.common import exceptions
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+
+class ServerSignalIntegrationTest(scenario_base.ScenarioTestsBase):
+    """Test a server in a created network can signal to heat."""
+
+    def _test_server_signal(self, user_data_format='RAW',
+                            image=None):
+        """Check a server in a created network can signal to heat."""
+        parameters = {
+            'key_name': self.keypair_name,
+            'flavor': self.conf.minimal_instance_type,
+            'image': image,
+            'timeout': self.conf.build_timeout,
+            'user_data_format': user_data_format
+        }
+
+        # Launch stack
+        sid = self.launch_stack(
+            template_name="test_server_signal.yaml",
+            parameters=parameters,
+            expected_status=None
+        )
+
+        # Check status of all resources
+        for res in ('sg', 'floating_ip', 'network', 'router', 'subnet',
+                    'router_interface', 'wait_handle', 'server',
+                    'server_floating_ip_assoc'):
+            self._wait_for_resource_status(
+                sid, res, 'CREATE_COMPLETE')
+
+        server_resource = self.client.resources.get(sid, 'server')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+
+        try:
+            self._wait_for_resource_status(
+                sid, 'wait_condition', 'CREATE_COMPLETE')
+        except (exceptions.StackResourceBuildErrorException,
+                exceptions.TimeoutException):
+            raise
+        finally:
+            # Attempt to log the server console regardless of whether the
+            # WaitCondition completes, so that successful and failed
+            # cloud-init logs can be compared.
+            self._log_console_output(servers=[server])
+
+        stack = self.client.stacks.get(sid)
+
+        wc_data = json.loads(
+            self._stack_output(stack, 'wc_data'))
+        self.assertEqual({'1': 'test complete'}, wc_data)
+
+        server_ip = self._stack_output(stack, 'server_ip')
+
+        # Check that created server is reachable
+        if not self._ping_ip_address(server_ip):
+            self._log_console_output(servers=[server])
+            self.fail(
+                "Timed out waiting for %s to become reachable" % server_ip)
+
+    def test_server_signal_userdata_format_raw(self):
+        self._test_server_signal(image=self.conf.minimal_image_ref)
+
+    def test_server_signal_userdata_format_software_config(self):
+        if not self.conf.image_ref:
+            raise self.skipException("No image configured to test")
+        self._test_server_signal(user_data_format='SOFTWARE_CONFIG',
+                                 image=self.conf.image_ref)
diff --git a/heat_tempest_plugin/tests/scenario/test_server_software_config.py b/heat_tempest_plugin/tests/scenario/test_server_software_config.py
new file mode 100644
index 0000000..1546684
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_server_software_config.py
@@ -0,0 +1,171 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heatclient.common import template_utils
+import six
+
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+CFG1_SH = '''#!/bin/sh
+echo "Writing to /tmp/$bar"
+echo $foo > /tmp/$bar
+echo -n "The file /tmp/$bar contains `cat /tmp/$bar` for server \
+$deploy_server_id during $deploy_action" > $heat_outputs_path.result
+echo "Written to /tmp/$bar"
+echo "Output to stderr" 1>&2
+'''
+
+CFG3_PP = '''file {'barfile':
+  ensure  => file,
+  mode    => 0644,
+  path    => "/tmp/$::bar",
+  content => "$::foo",
+}
+file {'output_result':
+  ensure  => file,
+  path    => "$::heat_outputs_path.result",
+  mode    => 0644,
+  content => "The file /tmp/$::bar contains $::foo for server \
+$::deploy_server_id during $::deploy_action",
+}
+'''
+
+
+class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
+
+    def setUp(self):
+        super(SoftwareConfigIntegrationTest, self).setUp()
+        if not self.conf.image_ref:
+            raise self.skipException("No image configured to test")
+        if not self.conf.instance_type:
+            raise self.skipException("No flavor configured to test")
+
+    def check_stack(self):
+        sid = self.stack_identifier
+        # Check that all stack resources were created
+        for res in ('cfg2a', 'cfg2b', 'cfg1', 'cfg3', 'server'):
+            self._wait_for_resource_status(
+                sid, res, 'CREATE_COMPLETE')
+
+        server_resource = self.client.resources.get(sid, 'server')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+
+        # Wait for each deployment to contribute its config to the
+        # server's resource metadata
+        try:
+            for res in ('dep2b', 'dep1', 'dep3'):
+                self._wait_for_resource_status(
+                    sid, res, 'CREATE_IN_PROGRESS')
+
+            server_metadata = self.client.resources.metadata(
+                sid, 'server')
+            deployments = dict((d['name'], d) for d in
+                               server_metadata['deployments'])
+
+            for res in ('dep2a', 'dep2b', 'dep1', 'dep3'):
+                self._wait_for_resource_status(
+                    sid, res, 'CREATE_COMPLETE')
+        finally:
+            # Attempt to log the server console regardless of whether the
+            # deployments complete, so that successful and failed boot
+            # logs can be compared.
+            self._log_console_output(servers=[server])
+
+        complete_server_metadata = self.client.resources.metadata(
+            sid, 'server')
+
+        # Ensure any previously available deployments haven't changed so
+        # config isn't re-triggered
+        complete_deployments = dict((d['name'], d) for d in
+                                    complete_server_metadata['deployments'])
+        for k, v in six.iteritems(deployments):
+            self.assertEqual(v, complete_deployments[k])
+
+        stack = self.client.stacks.get(sid)
+
+        res1 = self._stack_output(stack, 'res1')
+        self.assertEqual(
+            'The file %s contains %s for server %s during %s' % (
+                '/tmp/baaaaa', 'fooooo', server_id, 'CREATE'),
+            res1['result'])
+        self.assertEqual(0, res1['status_code'])
+        self.assertEqual('Output to stderr\n', res1['stderr'])
+        self.assertGreater(len(res1['stdout']), 0)
+
+        res2 = self._stack_output(stack, 'res2')
+        self.assertEqual(
+            'The file %s contains %s for server %s during %s' % (
+                '/tmp/cfn-init-foo', 'barrr', server_id, 'CREATE'),
+            res2['result'])
+        self.assertEqual(0, res2['status_code'])
+        self.assertEqual('', res2['stderr'])
+        self.assertEqual('', res2['stdout'])
+
+        res3 = self._stack_output(stack, 'res3')
+        self.assertEqual(
+            'The file %s contains %s for server %s during %s' % (
+                '/tmp/ba', 'fo', server_id, 'CREATE'),
+            res3['result'])
+        self.assertEqual(0, res3['status_code'])
+        self.assertEqual('', res3['stderr'])
+        self.assertGreater(len(res3['stdout']), 0)
+
+        dep1_resource = self.client.resources.get(sid, 'dep1')
+        dep1_id = dep1_resource.physical_resource_id
+        dep1_dep = self.client.software_deployments.get(dep1_id)
+        if hasattr(dep1_dep, 'updated_time'):
+            # Only check updated_time if the attribute exists.
+            # This allows latest heat agent code to be tested with
+            # Juno heat (which doesn't expose updated_time)
+            self.assertIsNotNone(dep1_dep.updated_time)
+            self.assertNotEqual(
+                dep1_dep.updated_time,
+                dep1_dep.creation_time)
+
+    def test_server_software_config(self):
+        """Check that passed files with scripts are executed on created server.
+
+        The alternative scenario is the following:
+            1. Create a stack and pass files with scripts.
+            2. Check that all stack resources are created successfully.
+            3. Wait for all deployments.
+            4. Check that stack was created.
+            5. Check stack outputs.
+        """
+
+        parameters = {
+            'key_name': self.keypair_name,
+            'flavor': self.conf.instance_type,
+            'image': self.conf.image_ref,
+            'network': self.net['id']
+        }
+
+        files = {
+            'cfg1.sh': CFG1_SH,
+            'cfg3.pp': CFG3_PP
+        }
+
+        env_files, env = template_utils.process_environment_and_files(
+            self.conf.boot_config_env)
+
+        # Launch stack
+        self.stack_identifier = self.launch_stack(
+            template_name='test_server_software_config.yaml',
+            parameters=parameters,
+            files=dict(list(files.items()) + list(env_files.items())),
+            expected_status=None,
+            environment=env
+        )
+
+        # Check stack
+        self.check_stack()
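The res1/res2/res3 outputs asserted in check_stack() carry each deployment's captured streams: a dict with 'result' (whatever the script wrote to $heat_outputs_path.result), 'status_code', 'stdout' and 'stderr'. As an illustration only, assuming a python-heatclient stack object whose outputs attribute follows the stack-show API, the output lookup used above could be sketched as:

    def stack_output(stack, output_key):
        # Scan the stack's outputs list for the requested key and return its
        # value, e.g. the res1 dict checked in check_stack().
        for output in stack.outputs:
            if output['output_key'] == output_key:
                return output['output_value']
        return None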
diff --git a/heat_tempest_plugin/tests/scenario/test_volumes.py b/heat_tempest_plugin/tests/scenario/test_volumes.py
new file mode 100644
index 0000000..eea6e97
--- /dev/null
+++ b/heat_tempest_plugin/tests/scenario/test_volumes.py
@@ -0,0 +1,129 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from cinderclient import exceptions as cinder_exceptions
+from oslo_log import log as logging
+import six
+
+from heat_tempest_plugin.common import exceptions
+from heat_tempest_plugin.tests.scenario import scenario_base
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeBackupRestoreIntegrationTest(scenario_base.ScenarioTestsBase):
+    """Class is responsible for testing of volume backup."""
+
+    def setUp(self):
+        super(VolumeBackupRestoreIntegrationTest, self).setUp()
+        self.volume_description = 'A test volume description 123'
+        self.volume_size = self.conf.volume_size
+
+    def _cinder_verify(self, volume_id, expected_status='available'):
+        self.assertIsNotNone(volume_id)
+        volume = self.volume_client.volumes.get(volume_id)
+        self.assertIsNotNone(volume)
+        self.assertEqual(expected_status, volume.status)
+        self.assertEqual(self.volume_size, volume.size)
+        self.assertEqual(self.volume_description,
+                         volume.display_description)
+
+    def _outputs_verify(self, stack, expected_status='available'):
+        self.assertEqual(expected_status,
+                         self._stack_output(stack, 'status'))
+        self.assertEqual(six.text_type(self.volume_size),
+                         self._stack_output(stack, 'size'))
+        self.assertEqual(self.volume_description,
+                         self._stack_output(stack, 'display_description'))
+
+    def check_stack(self, stack_id, parameters):
+        stack = self.client.stacks.get(stack_id)
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self._stack_output(stack, 'volume_id')
+        self._cinder_verify(volume_id, expected_status='in-use')
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack, expected_status='in-use')
+
+        # Delete the stack and ensure a backup is created for volume_id
+        # but the volume itself is gone
+        self._stack_delete(stack_id)
+        self.assertRaises(cinder_exceptions.NotFound,
+                          self.volume_client.volumes.get,
+                          volume_id)
+
+        backups = self.volume_client.backups.list()
+        self.assertIsNotNone(backups)
+        backups_filtered = [b for b in backups if b.volume_id == volume_id]
+        self.assertEqual(1, len(backups_filtered))
+        backup = backups_filtered[0]
+        self.addCleanup(self.volume_client.backups.delete, backup.id)
+
+        # Now, we create another stack where the volume is created from the
+        # backup created by the previous stack
+        try:
+            stack_identifier2 = self.launch_stack(
+                template_name='test_volumes_create_from_backup.yaml',
+                parameters=parameters,
+                add_parameters={'backup_id': backup.id})
+            stack2 = self.client.stacks.get(stack_identifier2)
+        except exceptions.StackBuildErrorException:
+            LOG.exception("Halting test due to bug: #1382300")
+            return
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id2 = self._stack_output(stack2, 'volume_id')
+        self._cinder_verify(volume_id2, expected_status='in-use')
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack2, expected_status='in-use')
+        testfile_data = self._stack_output(stack2, 'testfile_data')
+        self.assertEqual('{"instance1": "Volume Data:ateststring"}',
+                         testfile_data)
+
+        # Delete the stack and ensure the volume is gone
+        self._stack_delete(stack_identifier2)
+        self.assertRaises(cinder_exceptions.NotFound,
+                          self.volume_client.volumes.get,
+                          volume_id2)
+
+    def test_cinder_volume_create_backup_restore(self):
+        """Ensure the 'Snapshot' deletion policy works.
+
+        This requires a more complex test, but it tests several aspects
+        of the heat cinder resources:
+           1. Create a volume, attach it to an instance, write some data to it
+           2. Delete the stack, with 'Snapshot' specified, creates a backup
+           3. Check the snapshot has created a volume backup
+           4. Create a new stack, where the volume is created from the backup
+           5. Verify the test data written in (1) is present in the new volume
+        """
+        parameters = {
+            'key_name': self.keypair_name,
+            'instance_type': self.conf.minimal_instance_type,
+            'image_id': self.conf.minimal_image_ref,
+            'volume_description': self.volume_description,
+            'timeout': self.conf.build_timeout,
+            'network': self.net['id']
+        }
+
+        # Launch stack
+        stack_id = self.launch_stack(
+            template_name='test_volumes_delete_snapshot.yaml',
+            parameters=parameters,
+            add_parameters={'volume_size': self.volume_size}
+        )
+
+        # Check stack
+        self.check_stack(stack_id, parameters)
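A note on the 'testfile_data' assertion in check_stack(): the AWS-style wait condition aggregates the Data field of every signal it receives under that signal's UniqueId, which is why the output of the restore template decodes to a dict keyed by 'instance1' (the UniqueId used in the template's curl command). A minimal decoding sketch, using the literal value asserted above:

    import json

    testfile_data = '{"instance1": "Volume Data:ateststring"}'
    decoded = json.loads(testfile_data)
    # 'ateststring' is the default test_string the delete-snapshot template
    # writes to the volume; the restore template reads it back.
    assert decoded['instance1'] == 'Volume Data:ateststring'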