Merge "create_server: pass arguments as part of kwargs"
diff --git a/setup.cfg b/setup.cfg
index 183c1fb..cc3a365 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,6 +33,7 @@
     tempest-account-generator = tempest.cmd.account_generator:main
     tempest = tempest.cmd.main:main
 tempest.cm =
+    account-generator = tempest.cmd.account_generator:TempestAccountGenerator
     init = tempest.cmd.init:TempestInit
     cleanup = tempest.cmd.cleanup:TempestCleanup
     run-stress = tempest.cmd.run_stress:TempestRunStress
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
new file mode 100644
index 0000000..814a876
--- /dev/null
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -0,0 +1,68 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ServersOnMultiNodesTest, cls).skip_checks()
+
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping multi-node tests.")
+
+    def _get_host(self, server_id):
+        return self.os_adm.servers_client.show_server(
+            server_id)['server']['OS-EXT-SRV-ATTR:host']
+
+    @test.idempotent_id('26a9d5df-6890-45f2-abc4-a659290cb130')
+    def test_create_servers_on_same_host(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        hints = {'same_host': server01}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertEqual(host01, host02)
+
+    @test.idempotent_id('cc7ca884-6e3e-42a3-a92f-c522fcf25e8e')
+    def test_create_servers_on_different_hosts(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        hints = {'different_host': server01}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertNotEqual(host01, host02)
+
+    @test.idempotent_id('7869cc84-d661-4e14-9f00-c18cdc89cf57')
+    def test_create_servers_on_different_hosts_with_list_of_servers(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        # This scheduler hint also accepts a list of servers.
+        hints = {'different_host': [server01]}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertNotEqual(host01, host02)
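
Note: the same_host/different_host entries used in these tests are standard Nova scheduler hints. When create_test_server forwards scheduler_hints, they end up beside the server document in the create-server request rather than inside it. A minimal sketch of what the resulting request body looks like, assuming the standard "os:scheduler_hints" key; the names and IDs below are placeholders:

    import json

    # Placeholder values for illustration only.
    image_ref = 'IMAGE_UUID'
    flavor_ref = 'FLAVOR_ID'
    server01_id = 'FIRST_SERVER_UUID'

    request_body = {
        'server': {
            'name': 'tempest-server-02',
            'imageRef': image_ref,
            'flavorRef': flavor_ref,
        },
        # Scheduler hints sit next to 'server', not inside it; a single id
        # or a list of ids is accepted for 'different_host'.
        'os:scheduler_hints': {'different_host': [server01_id]},
    }

    print(json.dumps(request_body, indent=2))
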
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 4e88f65..b6d0c48 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from collections import OrderedDict
+import copy
 
 import six
 from tempest_lib import exceptions as lib_exc
@@ -27,42 +28,93 @@
 """Default templates.
 There should always be at least a master1 and a worker1 node
 group template."""
-DEFAULT_TEMPLATES = {
-    'vanilla': OrderedDict([
-        ('2.6.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['namenode', 'resourcemanager',
-                                       'hiveserver']
+BASE_VANILLA_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['namenode', 'resourcemanager',
+                               'hiveserver']
+        },
+        'master2': {
+            'count': 1,
+            'node_processes': ['oozie', 'historyserver',
+                               'secondarynamenode']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['datanode', 'nodemanager'],
+            'node_configs': {
+                'MapReduce': {
+                    'yarn.app.mapreduce.am.resource.mb': 256,
+                    'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
                 },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['oozie', 'historyserver',
-                                       'secondarynamenode']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['datanode', 'nodemanager'],
-                    'node_configs': {
-                        'MapReduce': {
-                            'yarn.app.mapreduce.am.resource.mb': 256,
-                            'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
-                        },
-                        'YARN': {
-                            'yarn.scheduler.minimum-allocation-mb': 256,
-                            'yarn.scheduler.maximum-allocation-mb': 1024,
-                            'yarn.nodemanager.vmem-check-enabled': False
-                        }
-                    }
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs.replication': 1
+                'YARN': {
+                    'yarn.scheduler.minimum-allocation-mb': 256,
+                    'yarn.scheduler.maximum-allocation-mb': 1024,
+                    'yarn.nodemanager.vmem-check-enabled': False
                 }
             }
-        }),
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs.replication': 1
+        }
+    }
+}
+
+BASE_SPARK_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['namenode', 'master']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['datanode', 'slave']
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs.replication': 1
+        }
+    }
+}
+
+BASE_CDH_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['CLOUDERA_MANAGER']
+        },
+        'master2': {
+            'count': 1,
+            'node_processes': ['HDFS_NAMENODE',
+                               'YARN_RESOURCEMANAGER']
+        },
+        'master3': {
+            'count': 1,
+            'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
+                               'HDFS_SECONDARYNAMENODE',
+                               'HIVE_METASTORE', 'HIVE_SERVER2']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs_replication': 1
+        }
+    }
+}
+
+
+DEFAULT_TEMPLATES = {
+    'vanilla': OrderedDict([
+        ('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
+        ('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
         ('1.2.1', {
             'NODES': {
                 'master1': {
@@ -123,81 +175,13 @@
         })
     ]),
     'spark': OrderedDict([
-        ('1.0.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['namenode', 'master']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['datanode', 'slave']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs.replication': 1
-                }
-            }
-        })
+        ('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
+        ('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
     ]),
     'cdh': OrderedDict([
-        ('5.3.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['CLOUDERA_MANAGER']
-                },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['HDFS_NAMENODE',
-                                       'YARN_RESOURCEMANAGER']
-                },
-                'master3': {
-                    'count': 1,
-                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
-                                       'HDFS_SECONDARYNAMENODE',
-                                       'HIVE_METASTORE', 'HIVE_SERVER2']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs_replication': 1
-                }
-            }
-        }),
-        ('5', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['CLOUDERA_MANAGER']
-                },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['HDFS_NAMENODE',
-                                       'YARN_RESOURCEMANAGER']
-                },
-                'master3': {
-                    'count': 1,
-                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
-                                       'HDFS_SECONDARYNAMENODE',
-                                       'HIVE_METASTORE', 'HIVE_SERVER2']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs_replication': 1
-                }
-            }
-        })
+        ('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
+        ('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
+        ('5', copy.deepcopy(BASE_CDH_DESC))
     ]),
     'mapr': OrderedDict([
         ('4.0.1.mrv2', {
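
Note: the vanilla, spark and cdh entries are collapsed onto shared BASE_*_DESC constants, and each plugin version takes its own copy.deepcopy() of them. The deep copy matters: if the versions aliased the same nested dicts, an in-place change made through one version's template would also show up in the others. A small self-contained sketch of the difference:

    import copy

    BASE = {'NODES': {'worker1': {'count': 1}}}

    # Aliasing: both versions point at the very same nested dicts, so a
    # change made through one entry is visible through the other.
    shared = {'1.0.0': BASE, '1.3.1': BASE}
    shared['1.0.0']['NODES']['worker1']['count'] = 3
    assert shared['1.3.1']['NODES']['worker1']['count'] == 3

    # Deep copies: every version owns an independent structure, which is
    # what DEFAULT_TEMPLATES relies on here.
    BASE = {'NODES': {'worker1': {'count': 1}}}
    independent = {'1.0.0': copy.deepcopy(BASE), '1.3.1': copy.deepcopy(BASE)}
    independent['1.0.0']['NODES']['worker1']['count'] = 3
    assert independent['1.3.1']['NODES']['worker1']['count'] == 1
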
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 64f3174..1a84d06 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -18,11 +18,27 @@
 from tempest.api.image import base
 from tempest.common.utils import data_utils
 from tempest import config
+from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
 
 
+def get_container_and_disk_format():
+    a_formats = ['ami', 'ari', 'aki']
+
+    container_format = CONF.image.container_formats[0]
+    disk_format = CONF.image.disk_formats[0]
+
+    if container_format in a_formats and container_format != disk_format:
+        msg = ("The container format and the disk format don't match. "
+               "Container format: %(container)s, Disk format: %(disk)s." %
+               {'container': container_format, 'disk': disk_format})
+        raise exceptions.InvalidConfiguration(message=msg)
+
+    return container_format, disk_format
+
+
 class CreateRegisterImagesTest(base.BaseV1ImageTest):
     """Here we test the registration and creation of images."""
 
@@ -30,9 +46,10 @@
     def test_register_then_upload(self):
         # Register, then upload an image
         properties = {'prop1': 'val1'}
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Name',
-                                 container_format='bare',
-                                 disk_format='raw',
+                                 container_format=container_format,
+                                 disk_format=disk_format,
                                  is_public=False,
                                  properties=properties)
         self.assertIn('id', body)
@@ -52,9 +69,10 @@
     @test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
     def test_register_remote_image(self):
         # Register a new remote image
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Remote Image',
-                                 container_format='bare',
-                                 disk_format='raw', is_public=False,
+                                 container_format=container_format,
+                                 disk_format=disk_format, is_public=False,
                                  location=CONF.image.http_image,
                                  properties={'key1': 'value1',
                                              'key2': 'value2'})
@@ -68,9 +86,10 @@
 
     @test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
     def test_register_http_image(self):
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Http Image',
-                                 container_format='bare',
-                                 disk_format='raw', is_public=False,
+                                 container_format=container_format,
+                                 disk_format=disk_format, is_public=False,
                                  copy_from=CONF.image.http_image)
         self.assertIn('id', body)
         image_id = body.get('id')
@@ -82,10 +101,11 @@
     @test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
     def test_register_image_with_min_ram(self):
         # Register an image with min ram
+        container_format, disk_format = get_container_and_disk_format()
         properties = {'prop1': 'val1'}
         body = self.create_image(name='New_image_with_min_ram',
-                                 container_format='bare',
-                                 disk_format='raw',
+                                 container_format=container_format,
+                                 disk_format=disk_format,
                                  is_public=False,
                                  min_ram=40,
                                  properties=properties)
@@ -103,22 +123,51 @@
     """Here we test the listing of image information"""
 
     @classmethod
+    def skip_checks(cls):
+        super(ListImagesTest, cls).skip_checks()
+        if (len(CONF.image.container_formats) < 2
+           or len(CONF.image.disk_formats) < 2):
+            skip_msg = ("%s skipped as multiple container formats "
+                        "or disk formats are not available." % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
     def resource_setup(cls):
         super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
-        img1 = cls._create_remote_image('one', 'bare', 'raw')
-        img2 = cls._create_remote_image('two', 'ami', 'ami')
-        img3 = cls._create_remote_image('dup', 'bare', 'raw')
-        img4 = cls._create_remote_image('dup', 'bare', 'raw')
-        img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
-        img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
-        img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
-        img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
+        a_formats = ['ami', 'ari', 'aki']
+
+        (cls.container_format,
+         cls.container_format_alt) = CONF.image.container_formats[:2]
+        cls.disk_format, cls.disk_format_alt = CONF.image.disk_formats[:2]
+        if cls.container_format in a_formats:
+            cls.disk_format = cls.container_format
+        if cls.container_format_alt in a_formats:
+            cls.disk_format_alt = cls.container_format_alt
+
+        img1 = cls._create_remote_image('one', cls.container_format,
+                                        cls.disk_format)
+        img2 = cls._create_remote_image('two', cls.container_format_alt,
+                                        cls.disk_format_alt)
+        img3 = cls._create_remote_image('dup', cls.container_format,
+                                        cls.disk_format)
+        img4 = cls._create_remote_image('dup', cls.container_format,
+                                        cls.disk_format)
+        img5 = cls._create_standard_image('1', cls.container_format_alt,
+                                          cls.disk_format_alt, 42)
+        img6 = cls._create_standard_image('2', cls.container_format_alt,
+                                          cls.disk_format_alt, 142)
+        img7 = cls._create_standard_image('33', cls.container_format,
+                                          cls.disk_format, 142)
+        img8 = cls._create_standard_image('33', cls.container_format,
+                                          cls.disk_format, 142)
         cls.created_set = set(cls.created_images)
-        # 5x bare, 3x ami
-        cls.bare_set = set((img1, img3, img4, img7, img8))
-        cls.ami_set = set((img2, img5, img6))
+        # same container format
+        cls.same_container_format_set = set((img1, img3, img4, img7, img8))
+        # same disk format
+        cls.same_disk_format_set = set((img2, img5, img6))
+
         # 1x with size 42
         cls.size42_set = set((img5,))
         # 3x with size 142
@@ -167,22 +216,25 @@
 
     @test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
     def test_index_disk_format(self):
-        images_list = self.client.list_images(disk_format='ami')['images']
+        images_list = self.client.list_images(
+            disk_format=self.disk_format_alt)['images']
         for image in images_list:
-            self.assertEqual(image['disk_format'], 'ami')
+            self.assertEqual(image['disk_format'], self.disk_format_alt)
         result_set = set(map(lambda x: x['id'], images_list))
-        self.assertTrue(self.ami_set <= result_set)
-        self.assertFalse(self.created_set - self.ami_set <= result_set)
+        self.assertTrue(self.same_disk_format_set <= result_set)
+        self.assertFalse(self.created_set - self.same_disk_format_set
+                         <= result_set)
 
     @test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
     def test_index_container_format(self):
-        images_list = (self.client.list_images(container_format='bare')
-                       ['images'])
+        images_list = self.client.list_images(
+            container_format=self.container_format)['images']
         for image in images_list:
-            self.assertEqual(image['container_format'], 'bare')
+            self.assertEqual(image['container_format'], self.container_format)
         result_set = set(map(lambda x: x['id'], images_list))
-        self.assertTrue(self.bare_set <= result_set)
-        self.assertFalse(self.created_set - self.bare_set <= result_set)
+        self.assertTrue(self.same_container_format_set <= result_set)
+        self.assertFalse(self.created_set - self.same_container_format_set
+                         <= result_set)
 
     @test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
     def test_index_max_size(self):
@@ -231,7 +283,9 @@
     @classmethod
     def resource_setup(cls):
         super(UpdateImageMetaTest, cls).resource_setup()
-        cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
+        container_format, disk_format = get_container_and_disk_format()
+        cls.image_id = cls._create_standard_image('1', container_format,
+                                                  disk_format, 42)
 
     @classmethod
     def _create_standard_image(cls, name, container_format,
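
Note: get_container_and_disk_format() picks the first configured container and disk formats and refuses the 'ami'/'ari'/'aki' container formats unless the disk format matches, since those amazon-style formats are expected to come in matching pairs. A standalone sketch of the same rule, with plain arguments standing in for tempest's CONF object (pick_formats is a hypothetical name used only here):

    def pick_formats(container_formats, disk_formats):
        # Standalone rewrite of get_container_and_disk_format() for
        # illustration; CONF lookups are replaced by plain arguments.
        a_formats = ['ami', 'ari', 'aki']
        container_format = container_formats[0]
        disk_format = disk_formats[0]
        if container_format in a_formats and container_format != disk_format:
            raise ValueError(
                "Container format %s needs a matching disk format, got %s"
                % (container_format, disk_format))
        return container_format, disk_format

    print(pick_formats(['bare', 'ovf'], ['raw', 'qcow2']))  # ('bare', 'raw')
    print(pick_formats(['ami', 'bare'], ['ami', 'raw']))     # ('ami', 'ami')
    # pick_formats(['ami', 'bare'], ['raw', 'qcow2']) raises ValueError.
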
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 769c680..32d6ef1 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -85,7 +85,9 @@
 import argparse
 import netaddr
 import os
+import traceback
 
+from cliff import command
 from oslo_log import log as logging
 import tempest_lib.auth
 from tempest_lib.common.utils import data_utils
@@ -105,6 +107,11 @@
 
 LOG = None
 CONF = config.CONF
+DESCRIPTION = ('Create accounts.yaml file for concurrent test runs.%s'
+               'One primary user, one alt user, '
+               'one swift admin, one stack owner '
+               'and one admin (optionally) will be created '
+               'for each concurrent thread.' % os.linesep)
 
 
 def setup_logging():
@@ -397,20 +404,7 @@
     LOG.info('%s generated successfully!' % opts.accounts)
 
 
-def get_options():
-    usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
-                    'To see help on specific argument, do:\n'
-                    'tempest-account-generator <ARG> -h')
-    parser = argparse.ArgumentParser(
-        description='Create accounts.yaml file for concurrent test runs. '
-                    'One primary user, one alt user, '
-                    'one swift admin, one stack owner '
-                    'and one admin (optionally) will be created '
-                    'for each concurrent thread.',
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-        usage=usage_string
-    )
-
+def _parser_add_args(parser):
     parser.add_argument('-c', '--config-file',
                         metavar='/etc/tempest.conf',
                         help='path to tempest config file')
@@ -447,16 +441,50 @@
                         metavar='accounts_file.yaml',
                         help='Output accounts yaml file')
 
+
+def get_options():
+    usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
+                    'To see help on specific argument, do:\n'
+                    'tempest-account-generator <ARG> -h')
+    parser = argparse.ArgumentParser(
+        description=DESCRIPTION,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        usage=usage_string
+    )
+
+    _parser_add_args(parser)
     opts = parser.parse_args()
-    if opts.config_file:
-        config.CONF.set_config_path(opts.config_file)
     return opts
 
 
+class TempestAccountGenerator(command.Command):
+
+    def get_parser(self, prog_name):
+        parser = super(TempestAccountGenerator, self).get_parser(prog_name)
+        _parser_add_args(parser)
+        return parser
+
+    def take_action(self, parsed_args):
+        try:
+            return main(parsed_args)
+        except Exception:
+            LOG.exception("Failure generating test accounts.")
+            traceback.print_exc()
+            raise
+        return 0
+
+    def get_description(self):
+        return DESCRIPTION
+
+
 def main(opts=None):
-    if not opts:
-        opts = get_options()
     setup_logging()
+    if not opts:
+        LOG.warn("Use of: 'tempest-account-generator' is deprecated, "
+                 "please use: 'tempest account-generator'")
+        opts = get_options()
+    if opts.config_file:
+        config.CONF.set_config_path(opts.config_file)
     resources = generate_resources(opts)
     create_resources(opts, resources)
     dump_accounts(opts, resources)
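
Note: with the account-generator entry added under tempest.cm in setup.cfg, cliff discovers TempestAccountGenerator by entry point name, so the generator is reachable both through the old console script and as `tempest account-generator`. A rough, hand-rolled equivalent of that entry point lookup using stevedore, assuming tempest is installed so the entry point is registered:

    from stevedore import driver

    # Resolve the 'account-generator' command from the 'tempest.cm'
    # namespace declared in setup.cfg.
    mgr = driver.DriverManager(namespace='tempest.cm',
                               name='account-generator',
                               invoke_on_load=False)
    print(mgr.driver)  # -> the TempestAccountGenerator class
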
diff --git a/tempest/config.py b/tempest/config.py
index 92123b9..6942172 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1046,7 +1046,8 @@
     cfg.StrOpt('img_dir',
                default='/opt/stack/new/devstack/files/images/'
                'cirros-0.3.1-x86_64-uec',
-               help='Directory containing image files'),
+               help='Directory containing image files',
+               deprecated_for_removal=True),
     cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
                default='cirros-0.3.1-x86_64-disk.img',
                help='Image file name'),
@@ -1060,13 +1061,16 @@
                 'Use for custom images which require them'),
     cfg.StrOpt('ami_img_file',
                default='cirros-0.3.1-x86_64-blank.img',
-               help='AMI image file name'),
+               help='AMI image file name',
+               deprecated_for_removal=True),
     cfg.StrOpt('ari_img_file',
                default='cirros-0.3.1-x86_64-initrd',
-               help='ARI image file name'),
+               help='ARI image file name',
+               deprecated_for_removal=True),
     cfg.StrOpt('aki_img_file',
                default='cirros-0.3.1-x86_64-vmlinuz',
-               help='AKI image file name'),
+               help='AKI image file name',
+               deprecated_for_removal=True),
     cfg.IntOpt(
         'large_ops_number',
         default=0,
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index ff9dabf..d9c6924 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -47,13 +47,6 @@
 
     """
 
-    def _wait_for_server_status(self, server, status):
-        server_id = server['id']
-        # Raise on error defaults to True, which is consistent with the
-        # original function from scenario tests here
-        waiters.wait_for_server_status(self.servers_client,
-                                       server_id, status)
-
     def nova_list(self):
         servers = self.servers_client.list_servers()
         # The list servers in the compute client is inconsistent...
@@ -81,7 +74,8 @@
 
     def nova_reboot(self, server):
         self.servers_client.reboot_server(server['id'], type='SOFT')
-        self._wait_for_server_status(server, 'ACTIVE')
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
 
     def check_partitions(self):
         # NOTE(andreaf) The device name may be different on different guest OS