Merge "test_aggregates_basic_ops picks a non compute node"
diff --git a/requirements.txt b/requirements.txt
index 4bf2bcf..f907e7d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,7 +17,6 @@
 python-saharaclient>=0.6.0
 python-swiftclient>=2.0.2
 testresources>=0.2.4
-keyring>=2.1
 testrepository>=0.0.18
 oslo.config>=1.2.0
 six>=1.6.0
diff --git a/setup.cfg b/setup.cfg
index 14e1913..339da12 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -20,6 +20,7 @@
 [entry_points]
 console_scripts =
     verify-tempest-config = tempest.cmd.verify_tempest_config:main
+    javelin2 = tempest.cmd.javelin:main
 
 [build_sphinx]
 all_files = 1
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 8d5e223..cc76880 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -38,6 +38,7 @@
         cls._node_group_templates = []
         cls._cluster_templates = []
         cls._data_sources = []
+        cls._job_binary_internals = []
 
     @classmethod
     def tearDownClass(cls):
@@ -47,6 +48,8 @@
                               cls.client.delete_node_group_template)
         cls.cleanup_resources(getattr(cls, '_data_sources', []),
                               cls.client.delete_data_source)
+        cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
+                              cls.client.delete_job_binary_internal)
         cls.clear_isolated_creds()
         super(BaseDataProcessingTest, cls).tearDownClass()
 
@@ -112,3 +115,16 @@
         cls._data_sources.append(body['id'])
 
         return resp, body
+
+    @classmethod
+    def create_job_binary_internal(cls, name, data):
+        """Creates watched job binary internal with specified params.
+
+        It returns the created object. All resources created in this method
+        will be automatically removed in the tearDownClass method.
+        """
+        resp, body = cls.client.create_job_binary_internal(name, data)
+        # store id of created job binary internal
+        cls._job_binary_internals.append(body['id'])
+
+        return resp, body
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
new file mode 100644
index 0000000..c72e828
--- /dev/null
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class DataSourceTest(dp_base.BaseDataProcessingTest):
+    @classmethod
+    def setUpClass(cls):
+        super(DataSourceTest, cls).setUpClass()
+        cls.swift_data_source_with_creds = {
+            'url': 'swift://sahara-container.sahara/input-source',
+            'description': 'Test data source',
+            'credentials': {
+                'user': CONF.identity.username,
+                'password': CONF.identity.password
+            },
+            'type': 'swift'
+        }
+        cls.swift_data_source = cls.swift_data_source_with_creds.copy()
+        del cls.swift_data_source['credentials']
+
+        cls.local_hdfs_data_source = {
+            'url': 'input-source',
+            'description': 'Test data source',
+            'type': 'hdfs'
+        }
+
+        cls.external_hdfs_data_source = {
+            'url': 'hdfs://172.18.168.2:8020/usr/hadoop/input-source',
+            'description': 'Test data source',
+            'type': 'hdfs'
+        }
+
+    def _create_data_source(self, source_body, source_name=None):
+        """Creates Data Source with optional name specified.
+
+        It creates a link to input-source file (it may not exist) and ensures
+        response status and source name. Returns id and name of created source.
+        """
+        if not source_name:
+            # generate random name if it's not specified
+            source_name = data_utils.rand_name('sahara-data-source')
+
+        # create data source
+        resp, body = self.create_data_source(source_name, **source_body)
+
+        # ensure that source created successfully
+        self.assertEqual(202, resp.status)
+        self.assertEqual(source_name, body['name'])
+        if source_body['type'] == 'swift':
+            source_body = self.swift_data_source
+        self.assertDictContainsSubset(source_body, body)
+
+        return body['id'], source_name
+
+    def _list_data_sources(self, source_info):
+        # check for data source in list
+        resp, sources = self.client.list_data_sources()
+        self.assertEqual(200, resp.status)
+        sources_info = [(source['id'], source['name']) for source in sources]
+        self.assertIn(source_info, sources_info)
+
+    def _get_data_source(self, source_id, source_name, source_body):
+        # check data source fetch by id
+        resp, source = self.client.get_data_source(source_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(source_name, source['name'])
+        self.assertDictContainsSubset(source_body, source)
+
+    def _delete_data_source(self, source_id):
+        # delete the data source by id
+        resp = self.client.delete_data_source(source_id)[0]
+        self.assertEqual(204, resp.status)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_create(self):
+        self._create_data_source(self.swift_data_source_with_creds)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_list(self):
+        source_info = self._create_data_source(
+            self.swift_data_source_with_creds)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.swift_data_source_with_creds)
+        self._get_data_source(source_id, source_name, self.swift_data_source)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_delete(self):
+        source_id = self._create_data_source(
+            self.swift_data_source_with_creds)[0]
+        self._delete_data_source(source_id)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_create(self):
+        self._create_data_source(self.local_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_list(self):
+        source_info = self._create_data_source(self.local_hdfs_data_source)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.local_hdfs_data_source)
+        self._get_data_source(
+            source_id, source_name, self.local_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_delete(self):
+        source_id = self._create_data_source(self.local_hdfs_data_source)[0]
+        self._delete_data_source(source_id)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_create(self):
+        self._create_data_source(self.external_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_list(self):
+        source_info = self._create_data_source(self.external_hdfs_data_source)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.external_hdfs_data_source)
+        self._get_data_source(
+            source_id, source_name, self.external_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_delete(self):
+        source_id = self._create_data_source(self.external_hdfs_data_source)[0]
+        self._delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index f3af4e8..04f98b4 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -46,7 +46,7 @@
         It creates template and ensures response status and template name.
         Returns id and name of created template.
         """
-        if template_name is None:
+        if not template_name:
             # generate random name if it's not specified
             template_name = data_utils.rand_name('sahara-ng-template')
 
@@ -57,19 +57,13 @@
         # ensure that template created successfully
         self.assertEqual(202, resp.status)
         self.assertEqual(template_name, body['name'])
+        self.assertDictContainsSubset(self.node_group_template, body)
 
         return body['id'], template_name
 
     @test.attr(type='smoke')
     def test_node_group_template_create(self):
-        template_name = data_utils.rand_name('sahara-ng-template')
-        resp, body = self.create_node_group_template(
-            template_name, **self.node_group_template)
-
-        # check that template created successfully
-        self.assertEqual(resp.status, 202)
-        self.assertEqual(template_name, body['name'])
-        self.assertDictContainsSubset(self.node_group_template, body)
+        self._create_node_group_template()
 
     @test.attr(type='smoke')
     def test_node_group_template_list(self):
@@ -77,7 +71,6 @@
 
         # check for node group template in list
         resp, templates = self.client.list_node_group_templates()
-
         self.assertEqual(200, resp.status)
         templates_info = [(template['id'], template['name'])
                           for template in templates]
@@ -89,7 +82,6 @@
 
         # check node group template fetch by id
         resp, template = self.client.get_node_group_template(template_id)
-
         self.assertEqual(200, resp.status)
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.node_group_template, template)
@@ -100,5 +92,4 @@
 
         # delete the node group template by id
         resp = self.client.delete_node_group_template(template_id)[0]
-
         self.assertEqual(204, resp.status)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index c6832a2..d643f23 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -23,10 +23,8 @@
         It ensures response status and main plugins availability.
         """
         resp, plugins = self.client.list_plugins()
-
         self.assertEqual(200, resp.status)
-
-        plugins_names = list([plugin['name'] for plugin in plugins])
+        plugins_names = [plugin['name'] for plugin in plugins]
         self.assertIn('vanilla', plugins_names)
         self.assertIn('hdp', plugins_names)
 
@@ -40,14 +38,12 @@
     def test_plugin_get(self):
         for plugin_name in self._list_all_plugin_names():
             resp, plugin = self.client.get_plugin(plugin_name)
-
             self.assertEqual(200, resp.status)
             self.assertEqual(plugin_name, plugin['name'])
 
             for plugin_version in plugin['versions']:
                 resp, detailed_plugin = self.client.get_plugin(plugin_name,
                                                                plugin_version)
-
                 self.assertEqual(200, resp.status)
                 self.assertEqual(plugin_name, detailed_plugin['name'])
 
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 2592409..9eda13e 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -35,11 +35,14 @@
         upload the image file, get image and get image file api's
         """
 
+        uuid = '00000000-1111-2222-3333-444455556666'
         image_name = data_utils.rand_name('image')
         resp, body = self.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
-                                       visibility='public')
+                                       visibility='public',
+                                       ramdisk_id=uuid)
+
         self.assertIn('id', body)
         image_id = body.get('id')
         self.assertIn('name', body)
@@ -60,6 +63,7 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(image_id, body['id'])
         self.assertEqual(image_name, body['name'])
+        self.assertEqual(uuid, body['ramdisk_id'])
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 3233db7..d8953d8 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -35,7 +35,7 @@
         # Delete non existing tag.
         resp, body = self.create_image(container_format='bare',
                                        disk_format='raw',
-                                       is_public=True,
+                                       visibility='public'
                                        )
         image_id = body['id']
         tag = data_utils.rand_name('non-exist-tag-')
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index f4050c5..3b05f42 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -28,8 +28,9 @@
         List L3 agents hosting the given router.
         Add and Remove Router to L3 agent
 
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network] section of etc/tempest.conf:
+    v2.0 of the Neutron API is assumed.
+
+    The l3_agent_scheduler extension is required for these tests.
     """
 
     @classmethod
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index fb39087..2002927 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -147,3 +147,14 @@
 server_actions_delete_password = {
     'status_code': [204]
 }
+
+get_console_output = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'output': {'type': 'string'}
+        },
+        'required': ['output']
+    }
+}
diff --git a/tempest/cli/simple_read_only/test_heat.py b/tempest/cli/simple_read_only/test_heat.py
index cf4580c..7a952fc 100644
--- a/tempest/cli/simple_read_only/test_heat.py
+++ b/tempest/cli/simple_read_only/test_heat.py
@@ -85,6 +85,9 @@
     def test_heat_help(self):
         self.heat('help')
 
+    def test_heat_bash_completion(self):
+        self.heat('bash-completion')
+
     def test_heat_help_cmd(self):
         # Check requesting help for a specific command works
         help_text = self.heat('help resource-template')
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
new file mode 100755
index 0000000..20ee63e
--- /dev/null
+++ b/tempest/cmd/javelin.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Javelin makes resources that should survive an upgrade.
+
+Javelin is a tool for creating, verifying, and deleting a small set of
+resources in a declarative way.
+
+"""
+
+import argparse
+import logging
+import os
+import sys
+import unittest
+
+import yaml
+
+import tempest.auth
+from tempest import exceptions
+from tempest.services.compute.json import flavors_client
+from tempest.services.compute.json import servers_client
+from tempest.services.identity.json import identity_client
+from tempest.services.image.v2.json import image_client
+from tempest.services.object_storage import container_client
+from tempest.services.object_storage import object_client
+
+OPTS = {}
+USERS = {}
+RES = {}
+
+LOG = None
+
+
+class OSClient(object):
+    _creds = None
+    identity = None
+    servers = None
+
+    def __init__(self, user, pw, tenant):
+        _creds = tempest.auth.KeystoneV2Credentials(
+            username=user,
+            password=pw,
+            tenant_name=tenant)
+        _auth = tempest.auth.KeystoneV2AuthProvider(_creds)
+        self.identity = identity_client.IdentityClientJSON(_auth)
+        self.servers = servers_client.ServersClientJSON(_auth)
+        self.objects = object_client.ObjectClient(_auth)
+        self.containers = container_client.ContainerClient(_auth)
+        self.images = image_client.ImageClientV2JSON(_auth)
+        self.flavors = flavors_client.FlavorsClientJSON(_auth)
+
+
+def load_resources(fname):
+    """Load the expected resources from a yaml flie."""
+    return yaml.load(open(fname, 'r'))
+
+
+def keystone_admin():
+    return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
+
+
+def client_for_user(name):
+    LOG.debug("Entering client_for_user")
+    if name in USERS:
+        user = USERS[name]
+        LOG.debug("Created client for user %s" % user)
+        return OSClient(user['name'], user['pass'], user['tenant'])
+    else:
+        LOG.error("%s not found in USERS: %s" % (name, USERS))
+
+###################
+#
+# TENANTS
+#
+###################
+
+
+def create_tenants(tenants):
+    """Create tenants from resource definition.
+
+    Don't create the tenants if they already exist.
+    """
+    admin = keystone_admin()
+    _, body = admin.identity.list_tenants()
+    existing = [x['name'] for x in body]
+    for tenant in tenants:
+        if tenant not in existing:
+            admin.identity.create_tenant(tenant)
+        else:
+            LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+
+##############
+#
+# USERS
+#
+##############
+
+
+def _users_for_tenant(users, tenant):
+    u_for_t = []
+    for user in users:
+        for n in user:
+            if user[n]['tenant'] == tenant:
+                u_for_t.append(user[n])
+    return u_for_t
+
+
+def _tenants_from_users(users):
+    tenants = set()
+    for user in users:
+        for n in user:
+            tenants.add(user[n]['tenant'])
+    return tenants
+
+
+def _assign_swift_role(user):
+    admin = keystone_admin()
+    resp, roles = admin.identity.list_roles()
+    role = next(r for r in roles if r['name'] == 'Member')
+    LOG.debug(USERS[user])
+    try:
+        admin.identity.assign_user_role(
+            USERS[user]['tenant_id'],
+            USERS[user]['id'],
+            role['id'])
+    except exceptions.Conflict:
+        # don't care if it's already assigned
+        pass
+
+
+def create_users(users):
+    """Create tenants from resource definition.
+
+    Don't create the tenants if they already exist.
+    """
+    global USERS
+    LOG.info("Creating users")
+    admin = keystone_admin()
+    for u in users:
+        try:
+            tenant = admin.identity.get_tenant_by_name(u['tenant'])
+        except exceptions.NotFound:
+            LOG.error("Tenant: %s - not found" % u['tenant'])
+            continue
+        try:
+            admin.identity.get_user_by_username(tenant['id'], u['name'])
+            LOG.warn("User '%s' already exists in this environment"
+                     % u['name'])
+        except exceptions.NotFound:
+            admin.identity.create_user(
+                u['name'], u['pass'], tenant['id'],
+                "%s@%s" % (u['name'], tenant['id']),
+                enabled=True)
+
+
+def collect_users(users):
+    global USERS
+    LOG.info("Creating users")
+    admin = keystone_admin()
+    for u in users:
+        tenant = admin.identity.get_tenant_by_name(u['tenant'])
+        u['tenant_id'] = tenant['id']
+        USERS[u['name']] = u
+        body = admin.identity.get_user_by_username(tenant['id'], u['name'])
+        USERS[u['name']]['id'] = body['id']
+
+
+class JavelinCheck(unittest.TestCase):
+    def __init__(self, users, resources):
+        super(JavelinCheck, self).__init__()
+        self.users = users
+        self.res = resources
+
+    def runTest(self, *args):
+        pass
+
+    def check(self):
+        self.check_users()
+        self.check_objects()
+        self.check_servers()
+
+    def check_users(self):
+        """Check that the users we expect to exist, do.
+
+        We don't use the resource list for this because we need to validate
+        that things like tenantId didn't drift across versions.
+        """
+        for name, user in self.users.iteritems():
+            client = keystone_admin()
+            _, found = client.identity.get_user(user['id'])
+            self.assertEqual(found['name'], user['name'])
+            self.assertEqual(found['tenantId'], user['tenant_id'])
+
+            # also ensure we can auth with that user, and do something
+            # on the cloud. We don't care about the results except that it
+            # remains authorized.
+            client = client_for_user(user['name'])
+            resp, body = client.servers.list_servers()
+            self.assertEqual(resp['status'], '200')
+
+    def check_objects(self):
+        """Check that the objects created are still there."""
+        for obj in self.res['objects']:
+            client = client_for_user(obj['owner'])
+            r, contents = client.objects.get_object(
+                obj['container'], obj['name'])
+            source = _file_contents(obj['file'])
+            self.assertEqual(contents, source)
+
+    def check_servers(self):
+        """Check that the servers are still up and running."""
+        for server in self.res['servers']:
+            client = client_for_user(server['owner'])
+            found = _get_server_by_name(client, server['name'])
+            self.assertIsNotNone(
+                found,
+                "Couldn't find expected server %s" % server['name'])
+
+            r, found = client.servers.get_server(found['id'])
+            # get the ipv4 address
+            addr = found['addresses']['private'][0]['addr']
+            self.assertEqual(os.system("ping -c 1 " + addr), 0,
+                             "Server %s is not pingable at %s" % (
+                                 server['name'], addr))
+
+
+#######################
+#
+# OBJECTS
+#
+#######################
+
+
+def _file_contents(fname):
+    with open(fname, 'r') as f:
+        return f.read()
+
+
+def create_objects(objects):
+    LOG.info("Creating objects")
+    for obj in objects:
+        LOG.debug("Object %s" % obj)
+        _assign_swift_role(obj['owner'])
+        client = client_for_user(obj['owner'])
+        client.containers.create_container(obj['container'])
+        client.objects.create_object(
+            obj['container'], obj['name'],
+            _file_contents(obj['file']))
+
+#######################
+#
+# IMAGES
+#
+#######################
+
+
+def create_images(images):
+    for image in images:
+        client = client_for_user(image['owner'])
+
+        # only upload a new image if the name isn't there
+        r, body = client.images.image_list()
+        names = [x['name'] for x in body]
+        if image['name'] in names:
+            continue
+
+        # special handling for 3 part image
+        extras = {}
+        if image['format'] == 'ami':
+            r, aki = client.images.create_image(
+                'javelin_' + image['aki'], 'aki', 'aki')
+            client.images.store_image(aki.get('id'), open(image['aki'], 'r'))
+            extras['kernel_id'] = aki.get('id')
+
+            r, ari = client.images.create_image(
+                'javelin_' + image['ari'], 'ari', 'ari')
+            client.images.store_image(ari.get('id'), open(image['ari'], 'r'))
+            extras['ramdisk_id'] = ari.get('id')
+
+        r, body = client.images.create_image(
+            image['name'], image['format'], image['format'], **extras)
+        image_id = body.get('id')
+        client.images.store_image(image_id, open(image['file'], 'r'))
+
+
+#######################
+#
+# SERVERS
+#
+#######################
+
+def _get_server_by_name(client, name):
+    r, body = client.servers.list_servers()
+    for server in body['servers']:
+        if name == server['name']:
+            return server
+    return None
+
+
+def _get_image_by_name(client, name):
+    r, body = client.images.image_list()
+    for image in body:
+        if name == image['name']:
+            return image
+    return None
+
+
+def _get_flavor_by_name(client, name):
+    r, body = client.flavors.list_flavors()
+    for flavor in body:
+        if name == flavor['name']:
+            return flavor
+    return None
+
+
+def create_servers(servers):
+    for server in servers:
+        client = client_for_user(server['owner'])
+
+        if _get_server_by_name(client, server['name']):
+            continue
+
+        image_id = _get_image_by_name(client, server['image'])['id']
+        flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
+        client.servers.create_server(server['name'], image_id, flavor_id)
+
+
+#######################
+#
+# MAIN LOGIC
+#
+#######################
+
+def create_resources():
+    LOG.info("Creating Resources")
+    # first create keystone level resources, and we need to be admin
+    # for those.
+    create_tenants(RES['tenants'])
+    create_users(RES['users'])
+    collect_users(RES['users'])
+
+    # next create resources in a well known order
+    create_objects(RES['objects'])
+    create_images(RES['images'])
+    create_servers(RES['servers'])
+
+
+def get_options():
+    global OPTS
+    parser = argparse.ArgumentParser(
+        description='Create and validate a fixed set of OpenStack resources')
+    parser.add_argument('-m', '--mode',
+                        metavar='<create|check|destroy>',
+                        required=True,
+                        help=('One of (create, check, destroy)'))
+    parser.add_argument('-r', '--resources',
+                        required=True,
+                        metavar='resourcefile.yaml',
+                        help='Resources definition yaml file')
+    # auth bits, letting us also just source the devstack openrc
+    parser.add_argument('--os-username',
+                        metavar='<auth-user-name>',
+                        default=os.environ.get('OS_USERNAME'),
+                        help=('Defaults to env[OS_USERNAME].'))
+    parser.add_argument('--os-password',
+                        metavar='<auth-password>',
+                        default=os.environ.get('OS_PASSWORD'),
+                        help=('Defaults to env[OS_PASSWORD].'))
+    parser.add_argument('--os-tenant-name',
+                        metavar='<auth-tenant-name>',
+                        default=os.environ.get('OS_TENANT_NAME'),
+                        help=('Defaults to env[OS_TENANT_NAME].'))
+
+    OPTS = parser.parse_args()
+    if OPTS.mode not in ('create', 'check', 'destroy'):
+        print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
+        parser.print_help()
+        sys.exit(1)
+
+
+def setup_logging(debug=True):
+    global LOG
+    LOG = logging.getLogger(__name__)
+    if debug:
+        LOG.setLevel(logging.DEBUG)
+    else:
+        LOG.setLevel(logging.INFO)
+
+    ch = logging.StreamHandler(sys.stdout)
+    ch.setLevel(logging.DEBUG)
+    formatter = logging.Formatter(
+        datefmt='%Y-%m-%d %H:%M:%S',
+        fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
+    ch.setFormatter(formatter)
+    LOG.addHandler(ch)
+
+
+def main():
+    global RES
+    get_options()
+    setup_logging()
+    RES = load_resources(OPTS.resources)
+
+    if OPTS.mode == 'create':
+        create_resources()
+    elif OPTS.mode == 'check':
+        collect_users(RES['users'])
+        checker = JavelinCheck(USERS, RES)
+        checker.check()
+    elif OPTS.mode == 'destroy':
+        LOG.warn("Destroy mode not yet implemented")
+    else:
+        LOG.error('Unknown mode %s' % OPTS.mode)
+        return 1
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
new file mode 100644
index 0000000..f7cb8a9
--- /dev/null
+++ b/tempest/cmd/resources.yaml
@@ -0,0 +1,51 @@
+# This is a yaml description for the most basic definitions
+# of what should exist across the resource boundary. Perhaps
+# one day this will grow into a Heat resource template, but as
+# Heat isn't a known working element in the upgrades, we do
+# this much simpler thing for now.
+
+tenants:
+  - javelin
+  - discuss
+
+users:
+  - name: javelin
+    pass: gungnir
+    tenant: javelin
+  - name: javelin2
+    pass: gungnir2
+    tenant: discuss
+
+secgroups:
+  - angon:
+    owner: javelin
+    rules:
+      - 'icmp -1 -1 0.0.0.0/0'
+      - 'tcp 22 22 0.0.0.0/0'
+
+# resources that we want to create
+images:
+  - name: javelin_cirros
+    owner: javelin
+    file: cirros-0.3.2-x86_64-blank.img
+    format: ami
+    aki: cirros-0.3.2-x86_64-vmlinuz
+    ari: cirros-0.3.2-x86_64-initrd
+volumes:
+  - assegai:
+    - owner: javelin
+    - gb: 1
+servers:
+  - name: peltast
+    owner: javelin
+    flavor: m1.small
+    image: javelin_cirros
+  - name: hoplite
+    owner: javelin
+    flavor: m1.medium
+    image: javelin_cirros
+objects:
+  - container: jc1
+    name: javelin1
+    owner: javelin
+    file: /etc/hosts
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 10223a0..3c527f5 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -238,6 +238,14 @@
                 return resp[i]
         return ""
 
+    def _log_request_start(self, method, req_url, req_headers={},
+                           req_body=None):
+        caller_name = misc_utils.find_test_caller()
+        trace_regex = CONF.debug.trace_requests
+        if trace_regex and re.search(trace_regex, caller_name):
+            self.LOG.debug('Starting Request (%s): %s %s' %
+                           (caller_name, method, req_url))
+
     def _log_request(self, method, req_url, resp,
                      secs="", req_headers={},
                      req_body=None, resp_body=None):
@@ -364,6 +372,7 @@
 
         # Do the actual request, and time it
         start = time.time()
+        self._log_request_start(method, req_url)
         resp, resp_body = self.http_obj.request(
             req_url, method, headers=req_headers, body=req_body)
         end = time.time()
diff --git a/tempest/common/utils/misc.py b/tempest/common/utils/misc.py
index b9f411b..0d78273 100644
--- a/tempest/common/utils/misc.py
+++ b/tempest/common/utils/misc.py
@@ -60,6 +60,9 @@
                 break
             elif re.search("^_run_cleanup", name):
                 is_cleanup = True
+            elif name == 'main':
+                caller_name = 'main'
+                break
             else:
                 cname = ""
                 if 'self' in frame.f_locals:
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 9c7eec6..77c73df 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -210,11 +210,13 @@
                                post_body)
         if response_key is not None:
             body = json.loads(body)
-            # Check for Schema as 'None' because if we donot have any server
+            # Check for Schema as 'None' because if we do not have any server
             # action schema implemented yet then they can pass 'None' to skip
             # the validation.Once all server action has their schema
             # implemented then, this check can be removed if every actions are
             # supposed to validate their response.
+            # TODO(GMann): Remove the below 'if' check once all server
+            # action schemas are implemented.
             if schema is not None:
                 self.validate_response(schema, resp, body)
             body = body[response_key]
@@ -427,7 +429,7 @@
 
     def get_console_output(self, server_id, length):
         return self.action(server_id, 'os-getConsoleOutput', 'output',
-                           None, length=length)
+                           common_schema.get_console_output, length=length)
 
     def list_virtual_interfaces(self, server_id):
         """
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index caa0a1d..2a83f88 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -209,7 +209,17 @@
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         if response_key is not None:
-            body = json.loads(body)[response_key]
+            body = json.loads(body)
+            # Check for schema being 'None' because if we do not have a
+            # schema implemented for a server action yet, 'None' can be
+            # passed to skip the validation. Once all server actions have
+            # their schemas implemented, this check can be removed if every
+            # action is supposed to validate its response.
+            # TODO(GMann): Remove the below 'if' check once all server
+            # action schemas are implemented.
+            if schema is not None:
+                self.validate_response(schema, resp, body)
+            body = body[response_key]
         else:
             self.validate_response(schema, resp, body)
         return resp, body
@@ -256,7 +266,7 @@
         if 'disk_config' in kwargs:
             kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
             del kwargs['disk_config']
-        return self.action(server_id, 'rebuild', 'server', **kwargs)
+        return self.action(server_id, 'rebuild', 'server', None, **kwargs)
 
     def resize(self, server_id, flavor_ref, **kwargs):
         """Changes the flavor of a server."""
@@ -421,11 +431,12 @@
 
     def get_console_output(self, server_id, length):
         return self.action(server_id, 'get_console_output', 'output',
-                           length=length)
+                           common_schema.get_console_output, length=length)
 
     def rescue_server(self, server_id, **kwargs):
         """Rescue the provided server."""
-        return self.action(server_id, 'rescue', 'admin_password', **kwargs)
+        return self.action(server_id, 'rescue', 'admin_password',
+                           None, **kwargs)
 
     def unrescue_server(self, server_id):
         """Unrescue the provided server."""
@@ -483,9 +494,9 @@
     def get_spice_console(self, server_id, console_type):
         """Get URL of Spice console."""
         return self.action(server_id, "get_spice_console"
-                           "console", type=console_type)
+                           "console", None, type=console_type)
 
     def get_rdp_console(self, server_id, console_type):
         """Get URL of RDP console."""
         return self.action(server_id, "get_rdp_console"
-                           "console", type=console_type)
+                           "console", None, type=console_type)
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index b07e663..73e67c3 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -161,3 +161,28 @@
 
         uri = 'data-sources/%s' % source_id
         return self.delete(uri)
+
+    def list_job_binary_internals(self):
+        """List all job binary internals for a user."""
+
+        uri = 'job-binary-internals'
+        return self._request_and_parse(self.get, uri, 'binaries')
+
+    def get_job_binary_internal(self, job_binary_id):
+        """Returns the details of a single job binary internal."""
+
+        uri = 'job-binary-internals/%s' % job_binary_id
+        return self._request_and_parse(self.get, uri, 'job_binary_internal')
+
+    def create_job_binary_internal(self, name, data):
+        """Creates job binary internal with specified params."""
+
+        uri = 'job-binary-internals/%s' % name
+        return self._request_and_parse(self.put, uri, 'job_binary_internal',
+                                       data)
+
+    def delete_job_binary_internal(self, job_binary_id):
+        """Deletes the specified job binary internal by id."""
+
+        uri = 'job-binary-internals/%s' % job_binary_id
+        return self.delete(uri)
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index b3014fc..201869e 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -70,13 +70,12 @@
             "disk_format": disk_format,
         }
 
-        for option in ['visibility']:
-            if option in kwargs:
-                value = kwargs.get(option)
-                if isinstance(value, dict) or isinstance(value, tuple):
-                    params.update(value)
-                else:
-                    params[option] = value
+        for option in kwargs:
+            value = kwargs.get(option)
+            if isinstance(value, dict) or isinstance(value, tuple):
+                params.update(value)
+            else:
+                params[option] = value
 
         data = json.dumps(params)
         self._validate_schema(data)
diff --git a/tempest/tests/base.py b/tempest/tests/base.py
index 15e4311..f4df3b9 100644
--- a/tempest/tests/base.py
+++ b/tempest/tests/base.py
@@ -12,28 +12,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import os
-
-import fixtures
 import mock
-import testtools
 
-from tempest.openstack.common.fixture import moxstubout
+from oslotest import base
+from oslotest import moxstubout
 
 
-class TestCase(testtools.TestCase):
+class TestCase(base.BaseTestCase):
 
     def setUp(self):
         super(TestCase, self).setUp()
-        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
-                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
-            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
-        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
-                os.environ.get('OS_STDERR_CAPTURE') == '1'):
-            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
-
         mox_fixture = self.useFixture(moxstubout.MoxStubout())
         self.mox = mox_fixture.mox
         self.stubs = mox_fixture.stubs
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 03333be..1dcddad 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -16,11 +16,12 @@
 import copy
 import datetime
 
+from oslotest import mockpatch
+
 from tempest import auth
 from tempest.common import http
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
 from tempest.tests import base
 from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 804204a..6b678f7 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -14,13 +14,12 @@
 
 
 import mock
-import testtools
-
 from oslo.config import cfg
+from oslotest import mockpatch
+import testtools
 
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
 from tempest import test
 from tempest.tests import base
 from tempest.tests import fake_config
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index 64ad3bc..d20520c 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -15,11 +15,12 @@
 import httplib2
 import json
 
+from oslotest import mockpatch
+
 from tempest.common import rest_client
 from tempest.common import xml_utils as xml
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
 from tempest.tests import base
 from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index a6eedc4..0da52dc 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -14,6 +14,7 @@
 
 import contextlib
 import socket
+import time
 
 import mock
 import testtools
@@ -43,25 +44,21 @@
             rsa_mock.assert_not_called()
             cs_mock.assert_not_called()
 
-    def test_get_ssh_connection(self):
-        c_mock = self.patch('paramiko.SSHClient')
-        aa_mock = self.patch('paramiko.AutoAddPolicy')
-        s_mock = self.patch('time.sleep')
-        t_mock = self.patch('time.time')
+    def _set_ssh_connection_mocks(self):
+        client_mock = mock.MagicMock()
+        client_mock.connect.return_value = True
+        return (self.patch('paramiko.SSHClient'),
+                self.patch('paramiko.AutoAddPolicy'),
+                client_mock)
 
+    def test_get_ssh_connection(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
+        s_mock = self.patch('time.sleep')
+
+        c_mock.return_value = client_mock
         aa_mock.return_value = mock.sentinel.aa
 
-        def reset_mocks():
-            aa_mock.reset_mock()
-            c_mock.reset_mock()
-            s_mock.reset_mock()
-            t_mock.reset_mock()
-
         # Test normal case for successful connection on first try
-        client_mock = mock.MagicMock()
-        c_mock.return_value = client_mock
-        client_mock.connect.return_value = True
-
         client = ssh.Client('localhost', 'root', timeout=2)
         client._get_ssh_connection(sleep=1)
 
@@ -79,50 +76,40 @@
         )]
         self.assertEqual(expected_connect, client_mock.connect.mock_calls)
         s_mock.assert_not_called()
-        t_mock.assert_called_once_with()
 
-        reset_mocks()
+    def test_get_ssh_connection_two_attempts(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
 
-        # Test case when connection fails on first two tries and
-        # succeeds on third try (this validates retry logic)
-        client_mock.connect.side_effect = [socket.error, socket.error, True]
-        t_mock.side_effect = [
-            1000,  # Start time
-            1000,  # LOG.warning() calls time.time() loop 1
-            1001,  # Sleep loop 1
-            1001,  # LOG.warning() calls time.time() loop 2
-            1002   # Sleep loop 2
+        c_mock.return_value = client_mock
+        client_mock.connect.side_effect = [
+            socket.error,
+            mock.MagicMock()
         ]
 
+        client = ssh.Client('localhost', 'root', timeout=1)
+        start_time = int(time.time())
         client._get_ssh_connection(sleep=1)
+        end_time = int(time.time())
+        self.assertTrue((end_time - start_time) < 3)
+        self.assertTrue((end_time - start_time) > 1)
 
-        expected_sleeps = [
-            mock.call(2),
-            mock.call(3)
-        ]
-        self.assertEqual(expected_sleeps, s_mock.mock_calls)
+    def test_get_ssh_connection_timeout(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
 
-        reset_mocks()
-
-        # Test case when connection fails on first three tries and
-        # exceeds the timeout, so expect to raise a Timeout exception
+        c_mock.return_value = client_mock
         client_mock.connect.side_effect = [
             socket.error,
             socket.error,
-            socket.error
-        ]
-        t_mock.side_effect = [
-            1000,  # Start time
-            1000,  # LOG.warning() calls time.time() loop 1
-            1001,  # Sleep loop 1
-            1001,  # LOG.warning() calls time.time() loop 2
-            1002,  # Sleep loop 2
-            1003,  # Sleep loop 3
-            1004  # LOG.error() calls time.time()
+            socket.error,
         ]
 
+        client = ssh.Client('localhost', 'root', timeout=2)
+        start_time = int(time.time())
         with testtools.ExpectedException(exceptions.SSHTimeout):
             client._get_ssh_connection()
+        end_time = int(time.time())
+        self.assertTrue((end_time - start_time) < 4)
+        self.assertTrue((end_time - start_time) >= 2)
 
     def test_exec_command(self):
         gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
diff --git a/test-requirements.txt b/test-requirements.txt
index 942a7c3..b9c75c8 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,3 +7,4 @@
 mox>=0.5.3
 mock>=1.0
 coverage>=3.6
+oslotest