Merge "Add configuration guide sections for more required options"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 1010ba5..2a72635 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -802,6 +802,10 @@
 # attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
 #ipv6_subnet_attributes = false
 
+# Does the test environment support changing port admin state (boolean
+# value)
+#port_admin_state_change = true
+
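Note: the option belongs to the [network-feature-enabled] group of tempest.conf. A minimal sketch of how a deployer whose L2 agent cannot toggle port admin state would override the default (values illustrative):

    [network-feature-enabled]
    # Skip tests that flip a port's admin_state_up
    port_admin_state_change = false
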
 
 [object-storage]
 
diff --git a/requirements.txt b/requirements.txt
index bf7471e..0d7fc0d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,6 @@
 python-glanceclient>=0.15.0
 python-cinderclient>=1.1.0
 python-heatclient>=0.3.0
-python-swiftclient>=2.2.0
 testrepository>=0.0.18
 oslo.concurrency>=1.8.0,<1.9.0         # Apache-2.0
 oslo.config>=1.9.3,<1.10.0  # Apache-2.0
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index d91fbaa..5a903b7 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -12,14 +12,216 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from collections import OrderedDict
+
+import six
 from tempest_lib import exceptions as lib_exc
 
 from tempest import config
+from tempest import exceptions
 import tempest.test
 
 
 CONF = config.CONF
 
+"""Default templates.
+There should always be at least a master1 and a worker1 node
+group template."""
+DEFAULT_TEMPLATES = {
+    'vanilla': OrderedDict([
+        ('2.6.0', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['namenode', 'resourcemanager',
+                                       'hiveserver']
+                },
+                'master2': {
+                    'count': 1,
+                    'node_processes': ['oozie', 'historyserver',
+                                       'secondarynamenode']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['datanode', 'nodemanager'],
+                    'node_configs': {
+                        'MapReduce': {
+                            'yarn.app.mapreduce.am.resource.mb': 256,
+                            'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
+                        },
+                        'YARN': {
+                            'yarn.scheduler.minimum-allocation-mb': 256,
+                            'yarn.scheduler.maximum-allocation-mb': 1024,
+                            'yarn.nodemanager.vmem-check-enabled': False
+                        }
+                    }
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs.replication': 1
+                }
+            }
+        }),
+        ('1.2.1', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['namenode', 'jobtracker']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['datanode', 'tasktracker'],
+                    'node_configs': {
+                        'HDFS': {
+                            'Data Node Heap Size': 1024
+                        },
+                        'MapReduce': {
+                            'Task Tracker Heap Size': 1024
+                        }
+                    }
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs.replication': 1
+                },
+                'MapReduce': {
+                    'mapred.map.tasks.speculative.execution': False,
+                    'mapred.child.java.opts': '-Xmx500m'
+                },
+                'general': {
+                    'Enable Swift': False
+                }
+            }
+        })
+    ]),
+    'hdp': OrderedDict([
+        ('2.0.6', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
+                                       'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
+                                       'HISTORYSERVER', 'RESOURCEMANAGER',
+                                       'GANGLIA_SERVER', 'NAGIOS_SERVER',
+                                       'OOZIE_SERVER']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['HDFS_CLIENT', 'DATANODE',
+                                       'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
+                                       'MAPREDUCE2_CLIENT', 'NODEMANAGER',
+                                       'PIG', 'OOZIE_CLIENT']
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs.replication': 1
+                }
+            }
+        })
+    ]),
+    'spark': OrderedDict([
+        ('1.0.0', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['namenode', 'master']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['datanode', 'slave']
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs.replication': 1
+                }
+            }
+        })
+    ]),
+    'cdh': OrderedDict([
+        ('5.3.0', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['CLOUDERA_MANAGER']
+                },
+                'master2': {
+                    'count': 1,
+                    'node_processes': ['HDFS_NAMENODE',
+                                       'YARN_RESOURCEMANAGER']
+                },
+                'master3': {
+                    'count': 1,
+                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
+                                       'HDFS_SECONDARYNAMENODE',
+                                       'HIVE_METASTORE', 'HIVE_SERVER2']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs_replication': 1
+                }
+            }
+        }),
+        ('5', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['CLOUDERA_MANAGER']
+                },
+                'master2': {
+                    'count': 1,
+                    'node_processes': ['HDFS_NAMENODE',
+                                       'YARN_RESOURCEMANAGER']
+                },
+                'master3': {
+                    'count': 1,
+                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
+                                       'HDFS_SECONDARYNAMENODE',
+                                       'HIVE_METASTORE', 'HIVE_SERVER2']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
+                }
+            },
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs_replication': 1
+                }
+            }
+        })
+    ]),
+    'mapr': OrderedDict([
+        ('4.0.1.mrv2', {
+            'NODES': {
+                'master1': {
+                    'count': 1,
+                    'node_processes': ['CLDB', 'FileServer', 'ZooKeeper',
+                                       'NodeManager', 'ResourceManager',
+                                       'HistoryServer', 'Oozie']
+                },
+                'worker1': {
+                    'count': 1,
+                    'node_processes': ['FileServer', 'NodeManager', 'Pig']
+                }
+            },
+            'cluster_configs': {
+                'Hive': {
+                    'Hive Version': '0.13',
+                }
+            }
+        })
+    ]),
+}
+
 
 class BaseDataProcessingTest(tempest.test.BaseTestCase):
 
@@ -28,6 +230,7 @@
         super(BaseDataProcessingTest, cls).skip_checks()
         if not CONF.service_available.sahara:
             raise cls.skipException('Sahara support is required')
+        cls.default_plugin = cls._get_default_plugin()
 
     @classmethod
     def setup_credentials(cls):
@@ -43,6 +246,10 @@
     def resource_setup(cls):
         super(BaseDataProcessingTest, cls).resource_setup()
 
+        cls.default_version = cls._get_default_version()
+        if cls.default_plugin is not None and cls.default_version is None:
+            raise exceptions.InvalidConfiguration(
+                message="No known Sahara plugin version was found")
         cls.flavor_ref = CONF.compute.flavor_ref
 
         # add lists for watched resources
@@ -172,3 +379,100 @@
         cls._jobs.append(resp_body['id'])
 
         return resp_body
+
+    @classmethod
+    def _get_default_plugin(cls):
+        """Returns the default plugin used for testing."""
+        if len(CONF.data_processing_feature_enabled.plugins) == 0:
+            return None
+
+        for plugin in CONF.data_processing_feature_enabled.plugins:
+            if plugin in DEFAULT_TEMPLATES.keys():
+                break
+        else:
+            plugin = ''
+        return plugin
+
+    @classmethod
+    def _get_default_version(cls):
+        """Returns the default plugin version used for testing.
+        This is gathered separately from the plugin to allow
+        the usage of plugin name in skip_checks. This method is
+        rather invoked into resource_setup, which allows API calls
+        and exceptions.
+        """
+        if not cls.default_plugin:
+            return None
+        plugin = cls.client.get_plugin(cls.default_plugin)
+
+        for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
+            if version in plugin['versions']:
+                break
+        else:
+            version = None
+
+        return version
+
+    @classmethod
+    def get_node_group_template(cls, nodegroup='worker1'):
+        """Returns a node group template for the default plugin."""
+        try:
+            plugin_data = (
+                DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
+            )
+            nodegroup_data = plugin_data['NODES'][nodegroup]
+            node_group_template = {
+                'description': 'Test node group template',
+                'plugin_name': cls.default_plugin,
+                'hadoop_version': cls.default_version,
+                'node_processes': nodegroup_data['node_processes'],
+                'flavor_id': cls.flavor_ref,
+                'node_configs': nodegroup_data.get('node_configs', {}),
+            }
+            return node_group_template
+        except (IndexError, KeyError):
+            return None
+
+    @classmethod
+    def get_cluster_template(cls, node_group_template_ids=None):
+        """Returns a cluster template for the default plugin.
+        node_group_template_ids maps node group names to the IDs of
+        pre-defined node group templates to use in the cluster template
+        (instead of dynamically defining them with 'node_processes').
+        """
+        if node_group_template_ids is None:
+            node_group_template_ids = {}
+        try:
+            plugin_data = (
+                DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
+            )
+
+            all_node_groups = []
+            for ng_name, ng_data in six.iteritems(plugin_data['NODES']):
+                node_group = {
+                    'name': '%s-node' % (ng_name),
+                    'flavor_id': cls.flavor_ref,
+                    'count': ng_data['count']
+                }
+                if ng_name in node_group_template_ids.keys():
+                    # node group already defined, use it
+                    node_group['node_group_template_id'] = (
+                        node_group_template_ids[ng_name]
+                    )
+                else:
+                    # node_processes list defined on-the-fly
+                    node_group['node_processes'] = ng_data['node_processes']
+                if 'node_configs' in ng_data:
+                    node_group['node_configs'] = ng_data['node_configs']
+                all_node_groups.append(node_group)
+
+            cluster_template = {
+                'description': 'Test cluster template',
+                'plugin_name': cls.default_plugin,
+                'hadoop_version': cls.default_version,
+                'cluster_configs': plugin_data.get('cluster_configs', {}),
+                'node_groups': all_node_groups,
+            }
+            return cluster_template
+        except (IndexError, KeyError):
+            return None
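Note: the new base-class helpers are meant to be called from resource_setup once skip_checks has confirmed a known plugin. A minimal sketch of the intended call pattern, mirroring the code above (class and test names are illustrative, not part of this change):

    # Hypothetical test built on the new helpers.
    from tempest.api.data_processing import base as dp_base
    from tempest import exceptions


    class ExampleSaharaTest(dp_base.BaseDataProcessingTest):

        @classmethod
        def skip_checks(cls):
            super(ExampleSaharaTest, cls).skip_checks()
            if cls.default_plugin is None:
                raise cls.skipException("No Sahara plugins configured")

        @classmethod
        def resource_setup(cls):
            super(ExampleSaharaTest, cls).resource_setup()
            # worker1 always exists in DEFAULT_TEMPLATES, per the module
            # docstring above
            template = cls.get_node_group_template('worker1')
            if template is None:
                raise exceptions.InvalidConfiguration(
                    message="No known Sahara plugin was found")
            # full cluster template, node groups defined on the fly
            cls.cluster_template = cls.get_cluster_template()
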
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index 8a63c3f..cebf493 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -15,6 +15,7 @@
 from tempest_lib.common.utils import data_utils
 
 from tempest.api.data_processing import base as dp_base
+from tempest import exceptions
 from tempest import test
 
 
@@ -23,55 +24,30 @@
     sahara/restapi/rest_api_v1.0.html#cluster-templates
     """
     @classmethod
+    def skip_checks(cls):
+        super(ClusterTemplateTest, cls).skip_checks()
+        if cls.default_plugin is None:
+            raise cls.skipException("No Sahara plugins configured")
+
+    @classmethod
     def resource_setup(cls):
         super(ClusterTemplateTest, cls).resource_setup()
-        # create node group template
-        node_group_template = {
-            'name': data_utils.rand_name('sahara-ng-template'),
-            'description': 'Test node group template',
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'node_processes': ['datanode'],
-            'flavor_id': cls.flavor_ref,
-            'node_configs': {
-                'HDFS': {
-                    'Data Node Heap Size': 1024
-                }
-            }
-        }
-        resp_body = cls.create_node_group_template(**node_group_template)
-        node_group_template_id = resp_body['id']
 
-        cls.full_cluster_template = {
-            'description': 'Test cluster template',
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs.replication': 2
-                },
-                'MapReduce': {
-                    'mapred.map.tasks.speculative.execution': False,
-                    'mapred.child.java.opts': '-Xmx500m'
-                },
-                'general': {
-                    'Enable Swift': False
-                }
-            },
-            'node_groups': [
-                {
-                    'name': 'master-node',
-                    'flavor_id': cls.flavor_ref,
-                    'node_processes': ['namenode'],
-                    'count': 1
-                },
-                {
-                    'name': 'worker-node',
-                    'node_group_template_id': node_group_template_id,
-                    'count': 3
-                }
-            ]
-        }
+        # pre-define a node group template
+        node_group_template_w = cls.get_node_group_template('worker1')
+        if node_group_template_w is None:
+            raise exceptions.InvalidConfiguration(
+                message="No known Sahara plugin was found")
+
+        node_group_template_w['name'] = data_utils.rand_name(
+            'sahara-ng-template')
+        resp_body = cls.create_node_group_template(**node_group_template_w)
+        node_group_template_id = resp_body['id']
+        configured_node_group_templates = {'worker1': node_group_template_id}
+
+        cls.full_cluster_template = cls.get_cluster_template(
+            configured_node_group_templates)
+
         # create cls.cluster_template variable to use for comparison to cluster
         # template response body. The 'node_groups' field in the response body
         # has some extra info that post body does not have. The 'node_groups'
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index d7381f4..4068027 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,27 +19,16 @@
 
 
 class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(NodeGroupTemplateTest, cls).skip_checks()
+        if cls.default_plugin is None:
+            raise cls.skipException("No Sahara plugins configured")
+
     @classmethod
     def resource_setup(cls):
         super(NodeGroupTemplateTest, cls).resource_setup()
-        cls.node_group_template = {
-            'description': 'Test node group template',
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'node_processes': [
-                'datanode',
-                'tasktracker'
-            ],
-            'flavor_id': cls.flavor_ref,
-            'node_configs': {
-                'HDFS': {
-                    'Data Node Heap Size': 1024
-                },
-                'MapReduce': {
-                    'Task Tracker Heap Size': 1024
-                }
-            }
-        }
 
     def _create_node_group_template(self, template_name=None):
         """Creates Node Group Template with optional name specified.
@@ -47,6 +36,10 @@
         It creates template, ensures template name and response body.
         Returns id and name of created template.
         """
+        self.node_group_template = self.get_node_group_template()
+        self.assertIsNotNone(self.node_group_template,
+                             "No known Sahara plugin was found")
+
         if not template_name:
             # generate random name if it's not specified
             template_name = data_utils.rand_name('sahara-ng-template')
diff --git a/tempest/api/messaging/base.py b/tempest/api/messaging/base.py
index b3ed941..c4214f2 100644
--- a/tempest/api/messaging/base.py
+++ b/tempest/api/messaging/base.py
@@ -71,7 +71,7 @@
     @classmethod
     def check_queue_exists(cls, queue_name):
         """Wrapper utility that checks the existence of a test queue."""
-        resp, body = cls.client.get_queue(queue_name)
+        resp, body = cls.client.show_queue(queue_name)
         return resp, body
 
     @classmethod
@@ -89,13 +89,13 @@
     @classmethod
     def get_queue_stats(cls, queue_name):
         """Wrapper utility that returns the queue stats."""
-        resp, body = cls.client.get_queue_stats(queue_name)
+        resp, body = cls.client.show_queue_stats(queue_name)
         return resp, body
 
     @classmethod
     def get_queue_metadata(cls, queue_name):
         """Wrapper utility that gets a queue metadata."""
-        resp, body = cls.client.get_queue_metadata(queue_name)
+        resp, body = cls.client.show_queue_metadata(queue_name)
         return resp, body
 
     @classmethod
@@ -121,14 +121,14 @@
     @classmethod
     def get_single_message(cls, message_uri):
         """Wrapper utility that gets a single message."""
-        resp, body = cls.client.get_single_message(message_uri)
+        resp, body = cls.client.show_single_message(message_uri)
 
         return resp, body
 
     @classmethod
     def get_multiple_messages(cls, message_uri):
         """Wrapper utility that gets multiple messages."""
-        resp, body = cls.client.get_multiple_messages(message_uri)
+        resp, body = cls.client.show_multiple_messages(message_uri)
 
         return resp, body
 
diff --git a/tempest/api/messaging/test_messages.py b/tempest/api/messaging/test_messages.py
index f982f59..c8640b3 100644
--- a/tempest/api/messaging/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -49,7 +49,7 @@
 
         # Get on the posted messages
         message_uri = resp['location']
-        resp, _ = self.client.get_multiple_messages(message_uri)
+        resp, _ = self.client.show_multiple_messages(message_uri)
         # The test has an assertion here, because the response cannot be 204
         # in this case (the client allows 200 or 204 for this API call).
         self.assertEqual('200', resp['status'])
@@ -74,7 +74,7 @@
         message_uri = body['resources'][0]
 
         # Get posted message
-        resp, _ = self.client.get_single_message(message_uri)
+        resp, _ = self.client.show_single_message(message_uri)
         # The test has an assertion here, because the response cannot be 204
         # in this case (the client allows 200 or 204 for this API call).
         self.assertEqual('200', resp['status'])
@@ -87,7 +87,7 @@
         message_uri = resp['location']
 
         # Get posted messages
-        resp, _ = self.client.get_multiple_messages(message_uri)
+        resp, _ = self.client.show_multiple_messages(message_uri)
         # The test has an assertion here, because the response cannot be 204
         # in this case (the client allows 200 or 204 for this API call).
         self.assertEqual('200', resp['status'])
@@ -103,7 +103,7 @@
         self.client.delete_messages(message_uri)
 
         message_uri = message_uri.replace('/messages/', '/messages?ids=')
-        resp, _ = self.client.get_multiple_messages(message_uri)
+        resp, _ = self.client.show_multiple_messages(message_uri)
         # The test has an assertion here, because the response has to be 204
         # in this case (the client allows 200 or 204 for this API call).
         self.assertEqual('204', resp['status'])
@@ -117,7 +117,7 @@
 
         # Delete multiple messages
         self.client.delete_messages(message_uri)
-        resp, _ = self.client.get_multiple_messages(message_uri)
+        resp, _ = self.client.show_multiple_messages(message_uri)
         # The test has an assertion here, because the response has to be 204
         # in this case (the client allows 200 or 204 for this API call).
         self.assertEqual('204', resp['status'])
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index c444e0b..2dac346 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -44,7 +44,7 @@
 
         self.delete_queue(queue_name)
         self.assertRaises(lib_exc.NotFound,
-                          self.client.get_queue,
+                          self.client.show_queue,
                           queue_name)
 
 
diff --git a/tempest/cli/simple_read_only/object_storage/__init__.py b/tempest/cli/simple_read_only/object_storage/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/cli/simple_read_only/object_storage/__init__.py
+++ /dev/null
diff --git a/tempest/cli/simple_read_only/object_storage/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
deleted file mode 100644
index 7201eab..0000000
--- a/tempest/cli/simple_read_only/object_storage/test_swift.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-
-from tempest_lib import exceptions
-
-from tempest import cli
-from tempest import config
-from tempest import test
-
-CONF = config.CONF
-
-
-class SimpleReadOnlySwiftClientTest(cli.ClientTestBase):
-    """Basic, read-only tests for Swift CLI client.
-
-    Checks return values and output of read-only commands.
-    These tests do not presume any content, nor do they create
-    their own. They only verify the structure of output if present.
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        if not CONF.service_available.swift:
-            msg = ("%s skipped as Swift is not available" % cls.__name__)
-            raise cls.skipException(msg)
-        super(SimpleReadOnlySwiftClientTest, cls).resource_setup()
-
-    def swift(self, *args, **kwargs):
-        return self.clients.swift(
-            *args, endpoint_type=CONF.object_storage.endpoint_type, **kwargs)
-
-    @test.idempotent_id('74360cdc-e7ec-493f-8a87-2b65f4d54aa3')
-    def test_swift_fake_action(self):
-        self.assertRaises(exceptions.CommandFailed,
-                          self.swift,
-                          'this-does-not-exist')
-
-    @test.idempotent_id('809ec373-828e-4279-8df6-9d4db81c7909')
-    def test_swift_list(self):
-        self.swift('list')
-
-    @test.idempotent_id('325d5fe4-e5ab-4f52-aec4-357533f24fa1')
-    def test_swift_stat(self):
-        output = self.swift('stat')
-        entries = ['Account', 'Containers', 'Objects', 'Bytes', 'Content-Type',
-                   'X-Timestamp', 'X-Trans-Id']
-        for entry in entries:
-            self.assertTrue(entry in output)
-
-    @test.idempotent_id('af1483e1-dafd-4552-a39b-b9d337df808b')
-    def test_swift_capabilities(self):
-        output = self.swift('capabilities')
-        entries = ['account_listing_limit', 'container_listing_limit',
-                   'max_file_size', 'Additional middleware']
-        for entry in entries:
-            self.assertTrue(entry in output)
-
-    @test.idempotent_id('29c83a64-8eb7-418c-a39b-c70cefa5b695')
-    def test_swift_help(self):
-        help_text = self.swift('', flags='--help')
-        lines = help_text.split('\n')
-        self.assertFirstLineStartsWith(lines, 'Usage: swift')
-
-        commands = []
-        cmds_start = lines.index('Positional arguments:')
-        cmds_end = lines.index('Examples:')
-        command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)')
-        for line in lines[cmds_start:cmds_end]:
-            match = command_pattern.match(line)
-            if match:
-                commands.append(match.group(1))
-        commands = set(commands)
-        wanted_commands = set(('stat', 'list', 'delete',
-                               'download', 'post', 'upload'))
-        self.assertFalse(wanted_commands - commands)
-
-    # Optional arguments:
-
-    @test.idempotent_id('2026be82-4e53-4414-a828-f1c894b8cf0f')
-    def test_swift_version(self):
-        self.swift('', flags='--version')
-
-    @test.idempotent_id('0ae6172e-3df7-42b8-a987-d42609ada6ed')
-    def test_swift_debug_list(self):
-        self.swift('list', flags='--debug')
-
-    @test.idempotent_id('1bdf5dd0-7df5-446c-a124-2b0703a5d199')
-    def test_swift_retries_list(self):
-        self.swift('list', flags='--retries 3')
-
-    @test.idempotent_id('64eae749-8fbd-4d85-bc7f-f706d3581c6f')
-    def test_swift_region_list(self):
-        region = CONF.object_storage.region
-        if not region:
-            region = CONF.identity.region
-        self.swift('list', flags='--os-region-name ' + region)
diff --git a/tempest/config.py b/tempest/config.py
index 6b8113e..bcbe41f 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -506,6 +506,10 @@
                      "the extended IPv6 attributes ipv6_ra_mode "
                      "and ipv6_address_mode"
                 ),
+    cfg.BoolOpt('port_admin_state_change',
+                default=True,
+                help="Does the test environment support changing"
+                     " port admin state"),
 ]
 
 messaging_group = cfg.OptGroup(name='messaging',
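Note: once registered, the flag is read through CONF like any other oslo.config boolean. A minimal sketch of consuming it (the helper name is hypothetical; the decorator form appears in test_network_basic_ops.py below):

    from tempest import config

    CONF = config.CONF

    def port_admin_state_supported():
        # False means the deployment cannot toggle a port's admin state
        return CONF.network_feature_enabled.port_admin_state_change
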
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 0d17048..8f37d74 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -185,7 +185,7 @@
             # Start netcat
             start_server = ('while true; do '
                             'sudo nc -ll -p %(port)s -e sh /tmp/%(script)s; '
-                            'done &')
+                            'done > /dev/null &')
             cmd = start_server % {'port': self.port1,
                                   'script': 'script1'}
             ssh_client.exec_command(cmd)
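Note: the redirect matters because the backgrounded loop otherwise inherits the remote shell's stdout, which can keep the SSH channel open and leave exec_command() waiting for EOF. A minimal sketch of the fixed pattern (port and script values are placeholders):

    # Backgrounded server loop; discarding stdout lets the SSH channel
    # close as soon as the shell backgrounds the loop.
    start_server = ('while true; do '
                    'sudo nc -ll -p %(port)s -e sh /tmp/%(script)s; '
                    'done > /dev/null &')
    ssh_client.exec_command(start_server % {'port': 80, 'script': 'script1'})
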
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 2c9e845..b97ad0b 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -590,6 +590,9 @@
     @testtools.skipIf(CONF.baremetal.driver_enabled,
                       'admin_state of instance ports cannot be altered '
                       'for baremetal nodes')
+    @testtools.skipUnless(CONF.network_feature_enabled.port_admin_state_change,
+                          "Changing a port's admin state is not supported "
+                          "by the test environment")
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_update_instance_port_admin_state(self):
@@ -653,22 +656,3 @@
         self.assertEqual(self.network['id'], port['network_id'])
         self.assertEqual('', port['device_id'])
         self.assertEqual('', port['device_owner'])
-
-    @test.idempotent_id('51641c7d-119a-44cd-aac6-b5b9f86dd808')
-    @test.services('compute', 'network')
-    def test_creation_of_server_attached_to_user_created_port(self):
-        self.security_group = (
-            self._create_security_group(tenant_id=self.tenant_id))
-        network, subnet, router = self.create_networks()
-        kwargs = {
-            'security_groups': [self.security_group['id']],
-        }
-
-        port = self._create_port(network.id, **kwargs)
-        name = data_utils.rand_name('server-smoke')
-        server = self._create_server(name, network, port.id)
-        self._check_tenant_network_connectivity()
-        floating_ip = self.create_floating_ip(server)
-        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
-        self.check_public_network_connectivity(
-            should_connect=True)
diff --git a/tempest/services/messaging/json/messaging_client.py b/tempest/services/messaging/json/messaging_client.py
index 36444a9..483ba93 100644
--- a/tempest/services/messaging/json/messaging_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -58,7 +58,7 @@
         self.expected_success(201, resp.status)
         return resp, body
 
-    def get_queue(self, queue_name):
+    def show_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
         self.expected_success(204, resp.status)
@@ -76,14 +76,14 @@
         self.expected_success(204, resp.status)
         return resp, body
 
-    def get_queue_stats(self, queue_name):
+    def show_queue_stats(self, queue_name):
         uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
         body = json.loads(body)
         self.validate_response(queues_schema.queue_stats, resp, body)
         return resp, body
 
-    def get_queue_metadata(self, queue_name):
+    def show_queue_metadata(self, queue_name):
         uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
         self.expected_success(200, resp.status)
@@ -117,7 +117,7 @@
 
         return resp, body
 
-    def get_single_message(self, message_uri):
+    def show_single_message(self, message_uri):
         resp, body = self.get(message_uri, extra_headers=True,
                               headers=self.headers)
         if resp['status'] != '204':
@@ -126,7 +126,7 @@
                                    body)
         return resp, body
 
-    def get_multiple_messages(self, message_uri):
+    def show_multiple_messages(self, message_uri):
         resp, body = self.get(message_uri, extra_headers=True,
                               headers=self.headers)
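Note: all read-style client methods now follow the show_* naming convention; call sites change mechanically. A short sketch of an updated caller (here `client` stands for a MessagingClientJSON instance and 'fake-queue' is a placeholder):

    resp, body = client.show_queue('fake-queue')        # was get_queue
    resp, body = client.show_queue_stats('fake-queue')  # was get_queue_stats
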
 
diff --git a/tempest/tests/common/test_accounts.py b/tempest/tests/common/test_accounts.py
index b176675..b4048ba 100644
--- a/tempest/tests/common/test_accounts.py
+++ b/tempest/tests/common/test_accounts.py
@@ -68,13 +68,8 @@
              'password': 'p', 'roles': [cfg.CONF.identity.admin_role]},
             {'username': 'test_user12', 'tenant_name': 'test_tenant12',
              'password': 'p', 'roles': [cfg.CONF.identity.admin_role]},
-            {'username': 'test_user13', 'tenant_name': 'test_tenant13',
-             'password': 'p', 'resources': {'network': 'network-1'}},
-            {'username': 'test_user14', 'tenant_name': 'test_tenant14',
-             'password': 'p', 'roles': ['role-7', 'role-11'],
-             'resources': {'network': 'network-2'}},
         ]
-        self.useFixture(mockpatch.Patch(
+        self.accounts_mock = self.useFixture(mockpatch.Patch(
             'tempest.common.accounts.read_accounts_yaml',
             return_value=self.test_accounts))
         cfg.CONF.set_default('test_accounts_file', 'fake_path', group='auth')
@@ -281,14 +276,22 @@
         calls = get_free_hash_mock.mock.mock_calls
         self.assertEqual(len(calls), 1)
         args = calls[0][1][0]
-        self.assertEqual(len(args), 12)
+        self.assertEqual(len(args), 10)
         for i in admin_hashes:
             self.assertNotIn(i, args)
 
     def test_networks_returned_with_creds(self):
+        test_accounts = [
+            {'username': 'test_user13', 'tenant_name': 'test_tenant13',
+             'password': 'p', 'resources': {'network': 'network-1'}},
+            {'username': 'test_user14', 'tenant_name': 'test_tenant14',
+             'password': 'p', 'roles': ['role-7', 'role-11'],
+             'resources': {'network': 'network-2'}}]
+        # Clear the setUp-time mock that returned self.test_accounts
+        self.accounts_mock.cleanUp()
         self.useFixture(mockpatch.Patch(
             'tempest.common.accounts.read_accounts_yaml',
-            return_value=self.test_accounts))
+            return_value=test_accounts))
         test_accounts_class = accounts.Accounts('v2', 'test_name')
         with mock.patch('tempest.services.compute.json.networks_client.'
                         'NetworksClientJSON.list_networks',
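Note: moving the resource-bearing accounts out of the shared setUp list is what changes the expected hash count from 12 to 10; the network test then swaps the patched YAML reader for its own data. A minimal sketch of that swap, using the fixture handle kept in setUp (names as in the diff above):

    # Undo the setUp-time patch, then patch again with test-local accounts.
    self.accounts_mock.cleanUp()
    self.useFixture(mockpatch.Patch(
        'tempest.common.accounts.read_accounts_yaml',
        return_value=test_accounts))
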