Merge "Configure quantum basic ops tests as smoke."
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index bc9d9bd..9e71f3d 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -52,6 +52,7 @@
min_count: Count of minimum number of instances to launch.
max_count: Count of maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
+ return_reservation_id: Enable/Disable the return of reservation id
"""
post_body = {
'name': name,
@@ -63,7 +64,8 @@
'security_groups', 'networks', 'user_data',
'availability_zone', 'accessIPv4', 'accessIPv6',
'min_count', 'max_count', ('metadata', 'meta'),
- ('OS-DCF:diskConfig', 'disk_config')]:
+ ('OS-DCF:diskConfig', 'disk_config'),
+ 'return_reservation_id']:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
@@ -77,6 +79,10 @@
resp, body = self.post('servers', post_body, self.headers)
body = json.loads(body)
+ # NOTE(maurosr): this deals with the case of multiple server create
+ # with return reservation id set True
+ if 'reservation_id' in body:
+ return resp, body
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index e6c2a6c..f5fd4a6 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -235,7 +235,8 @@
name=name)
for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name",
- "user_data", "availability_zone"]:
+ "user_data", "availability_zone", "min_count",
+ "max_count", "return_reservation_id"]:
if attr in kwargs:
server.add_attr(attr, kwargs[attr])
diff --git a/tempest/tests/compute/servers/test_multiple_create.py b/tempest/tests/compute/servers/test_multiple_create.py
new file mode 100644
index 0000000..ad5d604
--- /dev/null
+++ b/tempest/tests/compute/servers/test_multiple_create.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests.compute import base
+
+
+class MultipleCreateTestJSON(base.BaseComputeTest):
+ _interface = 'json'
+ _name = 'multiple-create-test'
+
+ def _get_created_servers(self, name):
+ """Get servers created which name match with name param."""
+ resp, body = self.servers_client.list_servers()
+ servers = body['servers']
+ servers_created = []
+ for server in servers:
+ if server['name'].startswith(name):
+ servers_created.append(server)
+ return servers_created
+
+ def _generate_name(self):
+ return rand_name(self._name)
+
+ def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
+ """
+ This is the right way to create_multiple servers and manage to get the
+ created servers into the servers list to be cleaned up after all.
+ """
+ kwargs['name'] = kwargs.get('name', self._generate_name())
+ resp, body = self.create_server(**kwargs)
+ created_servers = self._get_created_servers(kwargs['name'])
+        # NOTE(maurosr): add each created server to the cls.servers list from
+        # base.BaseCompute class so they are cleaned up individually.
+        self.servers.extend(created_servers)
+ # NOTE(maurosr): get a server list, check status of the ones with names
+ # that match and wait for them become active. At a first look, since
+ # they are building in parallel, wait inside the for doesn't seem be
+ # harmful to the performance
+ if wait_until is not None:
+ for server in created_servers:
+ self.servers_client.wait_for_server_status(server['id'],
+ wait_until)
+
+ return resp, body
+
+ @attr(type='positive')
+ def test_multiple_create(self):
+ resp, body = self._create_multiple_servers(wait_until='ACTIVE',
+ min_count=1,
+ max_count=2)
+ # NOTE(maurosr): do status response check and also make sure that
+ # reservation_id is not in the response body when the request send
+ # contains return_reservation_id=False
+ self.assertEqual('202', resp['status'])
+ self.assertFalse('reservation_id' in body)
+
+ @attr(type='negative')
+ def test_min_count_less_than_one(self):
+ invalid_min_count = 0
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=invalid_min_count)
+
+ @attr(type='negative')
+ def test_min_count_non_integer(self):
+ invalid_min_count = 2.5
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=invalid_min_count)
+
+ @attr(type='negative')
+ def test_max_count_less_than_one(self):
+ invalid_max_count = 0
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ max_count=invalid_max_count)
+
+ @attr(type='negative')
+ def test_max_count_non_integer(self):
+ invalid_max_count = 2.5
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ max_count=invalid_max_count)
+
+ @attr(type='negative')
+ def test_max_count_less_than_min_count(self):
+ min_count = 3
+ max_count = 2
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=min_count,
+ max_count=max_count)
+
+ @attr(type='positive')
+ def test_multiple_create_with_reservation_return(self):
+ resp, body = self._create_multiple_servers(wait_until='ACTIVE',
+ min_count=1,
+ max_count=2,
+ return_reservation_id=True)
+        self.assertEqual('202', resp['status'])
+ self.assertIn('reservation_id', body)
+
+
+class MultipleCreateTestXML(MultipleCreateTestJSON):
+ _interface = 'xml'
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
index e6c1990..3129484 100755
--- a/tools/find_stack_traces.py
+++ b/tools/find_stack_traces.py
@@ -22,6 +22,46 @@
import sys
import urllib2
+import pprint
+pp = pprint.PrettyPrinter()
+
+NOVA_TIMESTAMP = r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d"
+
+NOVA_REGEX = r"(?P<timestamp>%s) (?P<pid>\d+ )?(?P<level>(ERROR|TRACE)) " \
+             r"(?P<module>[\w\.]+) (?P<msg>.*)" % (NOVA_TIMESTAMP)
+
+
+class StackTrace(object):
+ timestamp = None
+ pid = None
+ level = ""
+ module = ""
+ msg = ""
+
+ def __init__(self, timestamp=None, pid=None, level="", module="",
+ msg=""):
+ self.timestamp = timestamp
+ self.pid = pid
+ self.level = level
+ self.module = module
+ self.msg = msg
+
+ def append(self, msg):
+ self.msg = self.msg + msg
+
+ def is_same(self, data):
+ return (data['timestamp'] == self.timestamp and
+ data['level'] == self.level)
+
+ def not_none(self):
+ return self.timestamp is not None
+
+ def __str__(self):
+ buff = "<%s %s %s>\n" % (self.timestamp, self.level, self.module)
+ for line in self.msg.splitlines():
+ buff = buff + line + "\n"
+ return buff
+
def hunt_for_stacktrace(url):
"""Return TRACE or ERROR lines out of logs."""
@@ -29,11 +69,33 @@
buf = StringIO.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
content = f.read()
- traces = re.findall('^(.*? (TRACE|ERROR) .*?)$', content, re.MULTILINE)
- tracelist = map(lambda x: x[0], traces)
- # filter out log definitions as false possitives
- return filter(lambda x: not re.search('logging_exception_prefix', x),
- tracelist)
+
+ traces = []
+ trace = StackTrace()
+ for line in content.splitlines():
+ m = re.match(NOVA_REGEX, line)
+ if m:
+ data = m.groupdict()
+ if trace.not_none() and trace.is_same(data):
+ trace.append(data['msg'] + "\n")
+ else:
+ trace = StackTrace(
+ timestamp=data.get('timestamp'),
+ pid=data.get('pid'),
+ level=data.get('level'),
+ module=data.get('module'),
+ msg=data.get('msg'))
+
+ else:
+ if trace.not_none():
+ traces.append(trace)
+ trace = StackTrace()
+
+ # once more at the end to pick up any stragglers
+ if trace.not_none():
+ traces.append(trace)
+
+ return traces
def log_url(url, log):
@@ -60,6 +122,18 @@
sys.exit(0)
+def print_stats(items, fname, verbose=False):
+ errors = len(filter(lambda x: x.level == "ERROR", items))
+ traces = len(filter(lambda x: x.level == "TRACE", items))
+ print "%d ERRORS found in %s" % (errors, fname)
+ print "%d TRACES found in %s" % (traces, fname)
+
+ if verbose:
+ for item in items:
+ print item
+ print "\n\n"
+
+
def main():
if len(sys.argv) == 2:
url = sys.argv[1]
@@ -72,10 +146,10 @@
for log in loglist:
logurl = log_url(url, log)
traces = hunt_for_stacktrace(logurl)
+
if traces:
- print "\n\nTRACES found in %s\n" % log
- for line in traces:
- print line
+ print_stats(traces, log, verbose=True)
+
else:
usage()