MOL 305: added a persistent journal that makes it possible to remove
virtual machines left over from previous test runs that crashed.
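
In outline, the journal works as follows (a minimal sketch of the flow
implemented in the patch below; remove_all_vms stands in for the
nova-backed clear_nodes()):

    import os
    import pickle

    JOURNAL = 'vm_journal.log'

    def store_nodes(nodes):
        # Record the started VMs so a later run can find them after a crash.
        with open(JOURNAL, 'wb') as f:
            pickle.dump(nodes, f)

    def clear_environment(remove_all_vms):
        # A surviving journal means a previous run crashed before cleanup:
        # remove the leftover VMs, then drop the journal.
        if os.path.exists(JOURNAL):
            remove_all_vms()
            os.remove(JOURNAL)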
diff --git a/config.yaml b/config.yaml
index 040c49e..1715dfa 100644
--- a/config.yaml
+++ b/config.yaml
@@ -51,22 +51,38 @@
# test-vm: system-cpu
# # tests to run
-# tests: #$include(tests.yaml)
-# # - with_test_nodes:
-# # openstack:
-# # vm_params:
-# # count: x1
-# # img_name: disk_io_perf
-# # flavor_name: disk_io_perf.256
-# # keypair_name: disk_io_perf
-# # network_zone_name: novanetwork
-# # flt_ip_pool: nova
-# # creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
-# # tests:
-# # - pgbench:
-# # opts:
-# # num_clients: [4, 8, 12]
-# # transactions: [1, 2, 3]
+tests: # $include(tests.yaml)
+ start_test_nodes:
+ openstack:
+ vm_params:
+ count: x1
+ img_name: disk_io_perf
+ flavor_name: disk_io_perf.256
+ keypair_name: disk_io_perf
+ network_zone_name: novanetwork
+ flt_ip_pool: nova
+ creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
+ internal_tests:
+ - pgbench:
+ opts:
+ num_clients: [4, 8, 12]
+ transactions: [1, 2, 3]
+
+ with_test_nodes:
+ openstack:
+ vm_params:
+ count: x1
+ img_name: disk_io_perf
+ flavor_name: disk_io_perf.256
+ keypair_name: disk_io_perf
+ network_zone_name: novanetwork
+ flt_ip_pool: nova
+ creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
+ internal_tests:
+ - pgbench:
+ opts:
+ num_clients: [4, 8, 12]
+ transactions: [1, 2, 3]
# - io: tests/io_task_test.cfg
# # - vm_count:
diff --git a/run_test.py b/run_test.py
index afd8bca..89b793f 100755
--- a/run_test.py
+++ b/run_test.py
@@ -1,3 +1,5 @@
+import os
+import pickle
import sys
import json
import Queue
@@ -16,7 +18,6 @@
from nodes.node import Node
from config import cfg_dict, parse_config
from tests.itest import IOPerfTest, PgBenchTest
-
from sensors.api import start_monitoring
@@ -97,29 +98,30 @@
res_q = Queue.Queue()
for test in config['tests']:
- for name, params in test.items():
- logger.info("Starting {0} tests".format(name))
+ for internal_test in config['tests'][test]['internal_tests']:
+ for name, params in internal_test.items():
+ logger.info("Starting {0} tests".format(name))
- threads = []
- barrier = utils.Barrier(len(test_nodes))
- for node in test_nodes:
- msg = "Starting {0} test on {1} node"
- logger.debug(msg.format(name, node.conn_url))
- test = tool_type_mapper[name](params, res_q.put)
- th = threading.Thread(None, test_thread, None,
- (test, node, barrier))
- threads.append(th)
- th.daemon = True
- th.start()
+ threads = []
+ barrier = utils.Barrier(len(test_nodes))
+ for node in test_nodes:
+ msg = "Starting {0} test on {1} node"
+ logger.debug(msg.format(name, node.conn_url))
+ test = tool_type_mapper[name](params, res_q.put)
+ th = threading.Thread(None, test_thread, None,
+ (test, node, barrier))
+ threads.append(th)
+ th.daemon = True
+ th.start()
- for th in threads:
- th.join()
+ for th in threads:
+ th.join()
- results = []
- while not res_q.empty():
- results.append(res_q.get())
- # logger.info("Get test result {0!r}".format(results[-1]))
- yield name, results
+ results = []
+ while not res_q.empty():
+ results.append(res_q.get())
+ # logger.info("Get test result {0!r}".format(results[-1]))
+ yield name, results
def parse_args(argv):
@@ -207,17 +209,51 @@
ctx.sensor_data = ctx.sensors_control_queue.get()
-def run_tests_stage(cfg, ctx):
+def run_all_test(cfg, ctx, store_nodes):
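+ """Launch test VMs if 'start_test_nodes' is configured, journal all nodes via store_nodes, then run the tests."""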
ctx.results = []
+ if 'start_test_nodes' in cfg['tests']:
+ params = cfg['tests']['start_test_nodes']['openstack']
+ for new_node in start_vms.launch_vms(params):
+ new_node.roles.append('testnode')
+ ctx.nodes.append(new_node)
+
if 'tests' in cfg:
+ store_nodes(ctx.nodes)
ctx.results.extend(run_tests(cfg_dict, ctx.nodes))
- # if 'start_test_nodes' in opts.stages:
- # params = cfg_dict['start_test_nodes']['openstack']
- # for new_node in start_vms.launch_vms(params):
- # new_node.roles.append('testnode')
- # nodes.append(new_node)
+
+def shut_down_vms(cfg, ctx):
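+ """Load and log the journalled nodes, then remove test VMs via start_vms.clear_nodes()."""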
+ with open('vm_journal.log', 'rb') as f:
+ nodes = pickle.load(f)
+
+ for node in nodes:
+ logger.info("Node " + str(node) + " has been loaded")
+
+ logger.info("Removing nodes")
+ start_vms.clear_nodes()
+ logger.info("Nodes has been removed")
+
+
+def store_nodes(nodes):
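+ """Persist the started nodes to the journal so a crashed run can be cleaned up later."""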
+ with open('vm_journal.log', 'wb') as f:
+ pickle.dump(nodes, f)
+ for node in nodes:
+ logger.info("Node " + str(node) + " has been stored")
+
+
+def clear_environment(cfg, ctx):
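+ """If a journal from a previous (crashed) run exists, remove its VMs and the journal file."""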
+ if os.path.exists('vm_journal.log'):
+ shut_down_vms(cfg, ctx)
+ os.remove('vm_journal.log')
+
+
+def run_tests_stage(cfg, ctx):
+ # remove nodes that may have been left over from a previous crashed run
+ clear_environment(cfg, ctx)
+ ctx.clear_calls_stack.append(shut_down_vms)
+ run_all_test(cfg, ctx, store_nodes)
def disconnect_stage(cfg, ctx):
@@ -279,10 +315,12 @@
ctx.build_meta['build_descrption'] = opts.build_description
ctx.build_meta['build_type'] = opts.build_type
ctx.build_meta['username'] = opts.username
-
+ logger.setLevel(logging.INFO)
+ logger.addHandler(logging.FileHandler('log.txt'))
try:
for stage in stages:
logger.info("Start {0.__name__} stage".format(stage))
+ print "Start {0.__name__} stage".format(stage)
stage(cfg_dict, ctx)
finally:
exc, cls, tb = sys.exc_info()
diff --git a/start_vms.py b/start_vms.py
index 81f3e81..f7cb09f 100644
--- a/start_vms.py
+++ b/start_vms.py
@@ -10,6 +10,7 @@
from nodes.node import Node
from nodes.openstack import get_floating_ip
+from utils import parse_creds
logger = logging.getLogger("io-perf-tool")
@@ -20,11 +21,13 @@
passwd = env('OS_PASSWORD')
tenant = env('OS_TENANT_NAME')
auth_url = env('OS_AUTH_URL')
+
return name, passwd, tenant, auth_url
def nova_connect():
return n_client('1.1', *ostack_get_creds())
def create_keypair(nova, name, key_path):
@@ -84,9 +87,9 @@
def launch_vms(config):
- creds = config['creds']
- if creds != 'ENV':
- raise ValueError("Only 'ENV' creds are supported")
+ creds = config['vm_params']['creds']
+ # if creds != 'ENV':
+ # raise ValueError("Only 'ENV' creds are supported")
logger.debug("Starting new nodes on openstack")
conn = nova_connect()
@@ -197,6 +200,11 @@
return flt_ip.ip, nova.servers.get(srv.id)
+def clear_nodes():
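+ """Connect to nova and remove all test VMs via clear_all()."""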
+ nova = nova_connect()
+ clear_all(nova)
+
+
def clear_all(nova, name_templ="ceph-test-{0}"):
deleted_srvs = set()
for srv in nova.servers.list():