Reworked SPT tests to run against MOS inside the Rally pod
These SPT tests are taken from CVP-SPT and reworked
to test MOS from inside the Rally pod.
Here is the list of changes since CVP-SPT:
* Switched to Python3
* Removed all Salt related code
* Removed HW2HW test
* Default global_config.yaml is suitable for MOS (see the usage sketch below)
* Switched to iperf3
* Added smart waiters for VMs to become ACTIVE and to be reachable via FIPs
* Extended pytest.ini file with logging settings
* Added lots of INFO-level log messages to show what happens during the test run
* Extended and fixed the README with up-to-date instructions
* Added the ability to use iperf3 even when the VMs have no Internet access
* Fixed the coding style according to PEP8
* Various small fixes, enhancements
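
A minimal usage sketch (hypothetical host names; assumes CMP_HOSTS is
declared in global_config.yaml and the snippet is run from the repo root).
Every option from global_config.yaml can be overridden by an environment
variable of the same name; comma-separated values become lists:

    import os
    import utils

    os.environ["CMP_HOSTS"] = "cmp001,cmp002"  # hypothetical compute hosts
    config = utils.get_configuration()  # YAML defaults + env overrides
    pairs = utils.get_pairs()  # {"cmp001<>cmp002": ("cmp001", "cmp002")}
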
Change-Id: I31a1b8c8c827133d144377031c6f546d8c82a47d
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..8e858b3
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1,73 @@
+import os
+import yaml
+import logging
+
+from utils import os_client
+
+logger = logging.getLogger(__name__)
+
+
+def compile_pairs(nodes):
+ result = {}
+    # If the number of nodes is odd, drop one so the rest can be paired
+    if len(nodes) % 2 != 0:
+        nodes.pop(1)
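+    # zip(*[iter(nodes)] * 2) groups the flat node list into consecutive pairs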
+ pairs = list(zip(*[iter(nodes)] * 2))
+ for pair in pairs:
+        result[pair[0] + '<>' + pair[1]] = pair
+ return result
+
+
+def get_pairs():
+ config = get_configuration()
+ cmp_hosts = config.get('CMP_HOSTS') or []
+ skipped_nodes = config.get('skipped_nodes') or []
+ if skipped_nodes:
+        print("\nNotice: {} nodes will be skipped for vm2vm test".format(
+            ",".join(skipped_nodes)))
+ logger.info("Skipping nodes {}".format(",".join(skipped_nodes)))
+ if not cmp_hosts:
+ openstack_clients = os_client.OfficialClientManager(
+ username=os.environ['OS_USERNAME'],
+ password=os.environ['OS_PASSWORD'],
+ tenant_name=os.environ['OS_PROJECT_NAME'],
+ auth_url=os.environ['OS_AUTH_URL'],
+ cert=False,
+ domain=os.environ['OS_PROJECT_DOMAIN_NAME']
+ )
+ os_actions = os_client.OSCliActions(openstack_clients)
+ nova_computes = os_actions.list_nova_computes()
+ if len(nova_computes) < 2:
+ raise BaseException(
+ "At least 2 compute hosts are needed for VM2VM test, "
+ "now: {}.".format(len(nova_computes)))
+ cmp_hosts = [n.host_name for n in nova_computes
+ if n.host_name not in skipped_nodes]
+ if len(cmp_hosts) < 2:
+ raise BaseException(
+ "At least 2 compute hosts are needed for VM2VM test. "
+ "Cannot create a pair from {}. Please check skip list, at "
+ "least 2 computes should be tested.".format(cmp_hosts))
+ logger.info("CMP_HOSTS option is not set, using host pair from "
+ "Nova compute list. Pair generated: {}".format(cmp_hosts))
+
+ return compile_pairs(cmp_hosts)
+
+
+def get_configuration():
+ """function returns configuration for environment
+ and for test if it's specified"""
+
+ global_config_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../global_config.yaml")
+ with open(global_config_file, 'r') as file:
+ global_config = yaml.load(file, Loader=yaml.SafeLoader)
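+    # Environment variables with the same name override the YAML values;
+    # comma-separated values are converted to lists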
+    for param in global_config:
+        if param in os.environ:
+            if ',' in os.environ[param]:
+                global_config[param] = os.environ[param].split(',')
+            else:
+                global_config[param] = os.environ[param]
+
+ return global_config
diff --git a/utils/helpers.py b/utils/helpers.py
new file mode 100644
index 0000000..800a9fe
--- /dev/null
+++ b/utils/helpers.py
@@ -0,0 +1,24 @@
+import texttable as tt
+
+
+class helpers(object):
+
+ def __init__(self):
+ pass
+
+ def draw_table_with_results(self, global_results):
+ tab = tt.Texttable()
+ header = [
+ 'node name 1',
+ 'node name 2',
+ 'network',
+ 'bandwidth >',
+ 'bandwidth <',
+ ]
+ tab.set_cols_align(['l', 'l', 'l', 'l', 'l'])
+        tab.set_cols_width([27, 27, 15, 20, 20])
+ tab.header(header)
+ for row in global_results:
+ tab.add_row(row)
+ s = tab.draw()
+ print(s)
diff --git a/utils/os_client.py b/utils/os_client.py
new file mode 100644
index 0000000..12aad1b
--- /dev/null
+++ b/utils/os_client.py
@@ -0,0 +1,429 @@
+from cinderclient import client as cinder_client
+from glanceclient import client as glance_client
+from keystoneauth1 import identity as keystone_identity
+from keystoneauth1 import session as keystone_session
+from keystoneclient.v3 import client as keystone_client
+from neutronclient.v2_0 import client as neutron_client
+from novaclient import client as novaclient
+
+import logging
+import os
+import random
+import time
+
+import utils
+
+logger = logging.getLogger(__name__)
+
+
+class OfficialClientManager(object):
+ """Manager that provides access to the official python clients for
+ calling various OpenStack APIs.
+ """
+
+ CINDERCLIENT_VERSION = 3
+ GLANCECLIENT_VERSION = 2
+ KEYSTONECLIENT_VERSION = 3
+ NEUTRONCLIENT_VERSION = 2
+ NOVACLIENT_VERSION = 2
+ INTERFACE = 'admin'
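+    # The endpoint interface can be overridden via the OS_ENDPOINT_TYPE
+    # environment variable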
+ if "OS_ENDPOINT_TYPE" in list(os.environ.keys()):
+ INTERFACE = os.environ["OS_ENDPOINT_TYPE"]
+
+ def __init__(self, username=None, password=None,
+ tenant_name=None, auth_url=None, endpoint_type="internalURL",
+ cert=False, domain="Default", **kwargs):
+ self.traceback = ""
+
+ self.client_attr_names = [
+ "auth",
+ "compute",
+ "network",
+ "volume",
+ "image",
+ ]
+ self.username = username
+ self.password = password
+ self.tenant_name = tenant_name
+ self.project_name = tenant_name
+ self.auth_url = auth_url
+ self.endpoint_type = endpoint_type
+ self.cert = cert
+ self.domain = domain
+ self.kwargs = kwargs
+
+ # Lazy clients
+ self._auth = None
+ self._compute = None
+ self._network = None
+ self._volume = None
+ self._image = None
+
+ @classmethod
+ def _get_auth_session(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default'):
+ if None in (username, password, tenant_name):
+ print((username, password, tenant_name))
+ msg = ("Missing required credentials for identity client. "
+ "username: {username}, password: {password}, "
+ "tenant_name: {tenant_name}").format(
+ username=username,
+ password=password,
+ tenant_name=tenant_name
+ )
+            raise BaseException(msg)
+
+ if cert and "https" not in auth_url:
+ auth_url = auth_url.replace("http", "https")
+
+ if "v2" in auth_url:
+ raise BaseException("Keystone v2 is deprecated since OpenStack"
+ "Queens release. So current OS_AUTH_URL {} "
+ "is not valid. Please use Keystone v3."
+ "".format(auth_url))
+ else:
+ auth_url = auth_url if ("v3" in auth_url) else "{}{}".format(
+ auth_url, "/v3")
+ auth = keystone_identity.v3.Password(
+ auth_url=auth_url,
+ user_domain_name=domain,
+ username=username,
+ password=password,
+ project_domain_name=domain,
+ project_name=tenant_name)
+
+ auth_session = keystone_session.Session(auth=auth, verify=cert)
+ # auth_session.get_auth_headers()
+ return auth_session
+
+ @classmethod
+ def get_auth_client(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default', **kwargs):
+ session = cls._get_auth_session(
+ username=username,
+ password=password,
+ tenant_name=tenant_name,
+ auth_url=auth_url,
+ cert=cert,
+ domain=domain)
+ keystone = keystone_client.Client(version=cls.KEYSTONECLIENT_VERSION,
+ session=session, **kwargs)
+ keystone.management_url = auth_url
+ return keystone
+
+ @classmethod
+ def get_compute_client(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default', **kwargs):
+ session = cls._get_auth_session(
+ username=username, password=password, tenant_name=tenant_name,
+ auth_url=auth_url, cert=cert, domain=domain)
+ service_type = 'compute'
+ compute_client = novaclient.Client(
+ version=cls.NOVACLIENT_VERSION, session=session,
+ service_type=service_type, os_cache=False, **kwargs)
+ return compute_client
+
+ @classmethod
+ def get_network_client(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default', **kwargs):
+ session = cls._get_auth_session(
+ username=username, password=password, tenant_name=tenant_name,
+ auth_url=auth_url, cert=cert, domain=domain)
+ service_type = 'network'
+ return neutron_client.Client(
+ service_type=service_type, session=session,
+ interface=cls.INTERFACE, **kwargs)
+
+ @classmethod
+ def get_volume_client(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default', **kwargs):
+ session = cls._get_auth_session(
+ username=username, password=password, tenant_name=tenant_name,
+ auth_url=auth_url, cert=cert, domain=domain)
+ service_type = 'volume'
+ return cinder_client.Client(
+ version=cls.CINDERCLIENT_VERSION,
+ service_type=service_type,
+ interface=cls.INTERFACE,
+ session=session, **kwargs)
+
+ @classmethod
+ def get_image_client(cls, username=None, password=None,
+ tenant_name=None, auth_url=None, cert=None,
+ domain='Default', **kwargs):
+ session = cls._get_auth_session(
+ username=username, password=password, tenant_name=tenant_name,
+ auth_url=auth_url, cert=cert, domain=domain)
+ service_type = 'image'
+ return glance_client.Client(
+ version=cls.GLANCECLIENT_VERSION,
+ service_type=service_type,
+ session=session, interface=cls.INTERFACE,
+ **kwargs)
+
+ @property
+ def auth(self):
+ if self._auth is None:
+ self._auth = self.get_auth_client(
+ self.username, self.password, self.tenant_name, self.auth_url,
+ self.cert, self.domain, endpoint_type=self.endpoint_type
+ )
+ return self._auth
+
+ @property
+ def compute(self):
+ if self._compute is None:
+ self._compute = self.get_compute_client(
+ self.username, self.password, self.tenant_name, self.auth_url,
+ self.cert, self.domain, endpoint_type=self.endpoint_type
+ )
+ return self._compute
+
+ @property
+ def network(self):
+ if self._network is None:
+ self._network = self.get_network_client(
+ self.username, self.password, self.tenant_name, self.auth_url,
+ self.cert, self.domain, endpoint_type=self.endpoint_type
+ )
+ return self._network
+
+ @property
+ def volume(self):
+ if self._volume is None:
+ self._volume = self.get_volume_client(
+ self.username, self.password, self.tenant_name, self.auth_url,
+ self.cert, self.domain, endpoint_type=self.endpoint_type
+ )
+ return self._volume
+
+ @property
+ def image(self):
+ if self._image is None:
+ self._image = self.get_image_client(
+ self.username, self.password, self.tenant_name, self.auth_url,
+ self.cert, self.domain
+ )
+ return self._image
+
+
+class OSCliActions(object):
+ def __init__(self, os_clients):
+ self.os_clients = os_clients
+ self.create_fake_ext_net = False
+
+ def get_admin_tenant(self):
+        # TODO: Keystone v3 doesn't have the tenants attribute,
+        # so projects is used here instead
+ return self.os_clients.auth.projects.find(name="admin")
+
+ def get_internal_network(self):
+ networks = [
+ net for net in self.os_clients.network.list_networks()["networks"]
+ if net["admin_state_up"] and not net["router:external"] and
+ len(net["subnets"])
+ ]
+ if networks:
+ net = networks[0]
+ else:
+ net = self.create_network_resources()
+ return net
+
+ def create_fake_external_network(self):
+ logger.info(
+ "Could not find any external network, creating a fake one...")
+ net_name = "spt-ext-net-{}".format(random.randrange(100, 999))
+ net_body = {"network": {"name": net_name,
+ "router:external": True,
+ "provider:network_type": "local"}}
+ try:
+ ext_net = \
+ self.os_clients.network.create_network(net_body)['network']
+ logger.info("Created a fake external net {}".format(net_name))
+        except Exception:
+            # If the 'local' net type is not available, use the default type
+ net_body["network"].pop('provider:network_type', None)
+ ext_net = \
+ self.os_clients.network.create_network(net_body)['network']
+ subnet_name = "spt-ext-subnet-{}".format(random.randrange(100, 999))
+ subnet_body = {
+ "subnet": {
+ "name": subnet_name,
+ "network_id": ext_net["id"],
+ "ip_version": 4,
+ "cidr": "10.255.255.0/24",
+ "allocation_pools": [{"start": "10.255.255.100",
+ "end": "10.255.255.200"}]
+ }
+ }
+ self.os_clients.network.create_subnet(subnet_body)
+ self.create_fake_ext_net = True
+ return ext_net
+
+ def get_external_network(self):
+ config = utils.get_configuration()
+ ext_net = config.get('external_network') or ''
+ if not ext_net:
+ networks = [
+ net for net in
+ self.os_clients.network.list_networks()["networks"]
+ if net["admin_state_up"] and net["router:external"] and
+ len(net["subnets"])
+ ]
+ else:
+ networks = [net for net in
+ self.os_clients.network.list_networks()["networks"]
+ if net["name"] == ext_net]
+
+ if networks:
+ ext_net = networks[0]
+ logger.info("Using external net '{}'.".format(ext_net["name"]))
+ else:
+ ext_net = self.create_fake_external_network()
+ return ext_net
+
+ def create_flavor(self, name, ram=256, vcpus=1, disk=2):
+ logger.info("Creating a flavor {}".format(name))
+ return self.os_clients.compute.flavors.create(name, ram, vcpus, disk)
+
+ def create_sec_group(self, rulesets=None):
+ if rulesets is None:
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # iperf3
+ 'ip_protocol': 'tcp',
+ 'from_port': 5201,
+ 'to_port': 5201,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ }
+ ]
+ sg_name = "spt-test-secgroup-{}".format(random.randrange(100, 999))
+ sg_desc = sg_name + " SPT"
+ secgroup = self.os_clients.compute.security_groups.create(
+ sg_name, sg_desc)
+ for ruleset in rulesets:
+ self.os_clients.compute.security_group_rules.create(
+ secgroup.id, **ruleset)
+ logger.info("Created a security group {}".format(sg_name))
+ return secgroup
+
+ def create_basic_server(self, image=None, flavor=None, net=None,
+ availability_zone=None, sec_groups=(),
+ keypair=None):
+ os_conn = self.os_clients
+ net = net or self.get_internal_network()
+ kwargs = {}
+ if sec_groups:
+ kwargs['security_groups'] = sec_groups
+ server = os_conn.compute.servers.create(
+ "spt-test-server-{}".format(random.randrange(100, 999)),
+ image, flavor, nics=[{"net-id": net["id"]}],
+ availability_zone=availability_zone, key_name=keypair, **kwargs)
+
+ return server
+
+ def get_vm(self, vm_id):
+ os_conn = self.os_clients
+ try:
+ vm = os_conn.compute.servers.find(id=vm_id)
+        except Exception as e:
+            raise Exception(
+                "Could not get the VM \"{}\": {}".format(vm_id, e))
+ return vm
+
+ def check_vm_is_active(self, vm_uuid, retry_delay=5, timeout=500):
+ vm = None
+ timeout_reached = False
+ start_time = time.time()
+ expected_state = 'ACTIVE'
+ while not timeout_reached:
+ vm = self.get_vm(vm_uuid)
+ if vm.status == expected_state:
+ logger.info(
+ "VM {} is in {} status.".format(vm_uuid, vm.status))
+ break
+ if vm.status == 'ERROR':
+ break
+ time.sleep(retry_delay)
+ timeout_reached = (time.time() - start_time) > timeout
+ if vm.status != expected_state:
+ logger.info("VM {} is in {} status.".format(vm_uuid, vm.status))
+ raise TimeoutError(
+ "VM {vm_uuid} on is expected to be in '{expected_state}' "
+ "state, but is in '{actual}' state instead.".format(
+ vm_uuid=vm_uuid, expected_state=expected_state,
+ actual=vm.status))
+
+ def create_network(self, tenant_id):
+ net_name = "spt-test-net-{}".format(random.randrange(100, 999))
+ net_body = {
+ 'network': {
+ 'name': net_name,
+ 'tenant_id': tenant_id
+ }
+ }
+ net = self.os_clients.network.create_network(net_body)['network']
+ logger.info("Created internal network {}".format(net_name))
+ return net
+
+ def create_subnet(self, net, tenant_id, cidr=None):
+ subnet_name = "spt-test-subnet-{}".format(random.randrange(100, 999))
+ subnet_body = {
+ 'subnet': {
+ "name": subnet_name,
+ 'network_id': net['id'],
+ 'ip_version': 4,
+ 'cidr': cidr if cidr else '10.1.7.0/24',
+ 'tenant_id': tenant_id
+ }
+ }
+ subnet = self.os_clients.network.create_subnet(subnet_body)['subnet']
+ logger.info("Created subnet {}".format(subnet_name))
+ return subnet
+
+ def create_router(self, ext_net, tenant_id):
+ name = 'spt-test-router-{}'.format(random.randrange(100, 999))
+ router_body = {
+ 'router': {
+ 'name': name,
+ 'external_gateway_info': {
+ 'network_id': ext_net['id']
+ },
+ 'tenant_id': tenant_id
+ }
+ }
+ logger.info("Created a router {}".format(name))
+ router = self.os_clients.network.create_router(router_body)['router']
+ return router
+
+ def create_network_resources(self):
+ tenant_id = self.get_admin_tenant().id
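+        # Make sure an external network exists (a fake one is created if
+        # none is found)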
+ self.get_external_network()
+ net = self.create_network(tenant_id)
+ self.create_subnet(net, tenant_id)
+ return net
+
+ def list_nova_computes(self):
+ nova_services = self.os_clients.compute.hosts.list()
+ computes_list = [h for h in nova_services if h.service == "compute"]
+ return computes_list
diff --git a/utils/ssh.py b/utils/ssh.py
new file mode 100644
index 0000000..29a56f0
--- /dev/null
+++ b/utils/ssh.py
@@ -0,0 +1,210 @@
+from io import StringIO
+import logging
+import os
+import select
+import time
+
+import paramiko
+
+import utils
+
+logger = logging.getLogger(__name__)
+
+# Suppress paramiko logging
+logging.getLogger("paramiko").setLevel(logging.WARNING)
+
+
+class SSHTransport(object):
+ def __init__(self, address, username, password=None,
+ private_key=None, look_for_keys=False, *args, **kwargs):
+
+ self.address = address
+ self.username = username
+ self.password = password
+ if private_key is not None:
+ self.private_key = paramiko.RSAKey.from_private_key(
+ StringIO(private_key))
+ else:
+ self.private_key = None
+
+ self.look_for_keys = look_for_keys
+ self.buf_size = 1024
+ self.channel_timeout = 10.0
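+        # channel_timeout is used as the SSH connect timeout in seconds;
+        # note that select.poll() in exec_sync() treats its timeout value
+        # as milliseconds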
+
+ def _get_ssh_connection(self):
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(
+ paramiko.AutoAddPolicy())
+ ssh.connect(self.address, username=self.username,
+ password=self.password, pkey=self.private_key,
+ timeout=self.channel_timeout)
+ logger.debug("Successfully connected to: {0}".format(self.address))
+ return ssh
+
+ def _get_sftp_connection(self):
+ transport = paramiko.Transport((self.address, 22))
+ transport.connect(username=self.username,
+ password=self.password,
+ pkey=self.private_key)
+
+ return paramiko.SFTPClient.from_transport(transport)
+
+ def exec_sync(self, cmd):
+ logger.debug("Executing {0} on host {1}".format(cmd, self.address))
+ ssh = self._get_ssh_connection()
+ transport = ssh.get_transport()
+ channel = transport.open_session()
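+        # fileno() makes paramiko create the pipe that allows the channel
+        # to be registered with select/poll below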
+ channel.fileno()
+ channel.exec_command(cmd)
+ channel.shutdown_write()
+ out_data = []
+ err_data = []
+ poll = select.poll()
+ poll.register(channel, select.POLLIN)
+
+ while True:
+ ready = poll.poll(self.channel_timeout)
+ if not any(ready):
+ continue
+ if not ready[0]:
+ continue
+ out_chunk = err_chunk = None
+ if channel.recv_ready():
+ out_chunk = channel.recv(self.buf_size)
+ out_data += out_chunk,
+ if channel.recv_stderr_ready():
+ err_chunk = channel.recv_stderr(self.buf_size)
+ err_data += err_chunk,
+ if channel.closed and not err_chunk and not out_chunk:
+ break
+ exit_status = channel.recv_exit_status()
+ logger.debug("Command {0} executed with status: {1}"
+ .format(cmd, exit_status))
+ return (exit_status, b" ".join(out_data).strip(),
+ b" ".join(err_data).strip())
+
+ def exec_command(self, cmd):
+ exit_status, stdout, stderr = self.exec_sync(cmd)
+ return stdout
+
+ def check_call(self, command, error_info=None, expected=None,
+ raise_on_err=True):
+ """Execute command and check for return code
+ :type command: str
+ :type error_info: str
+ :type expected: list
+ :type raise_on_err: bool
+ :rtype: ExecResult
+ :raises: DevopsCalledProcessError
+ """
+ if expected is None:
+ expected = [0]
+ ret = self.exec_sync(command)
+ exit_code, stdout_str, stderr_str = ret
+ if exit_code not in expected:
+ message = (
+ "{append}Command '{cmd}' returned exit code {code} while "
+ "expected {expected}\n"
+ "\tSTDOUT:\n"
+ "{stdout}"
+ "\n\tSTDERR:\n"
+ "{stderr}".format(
+ append=error_info + '\n' if error_info else '',
+ cmd=command,
+ code=exit_code,
+ expected=expected,
+ stdout=stdout_str,
+ stderr=stderr_str
+ ))
+ logger.error(message)
+ if raise_on_err:
+ exit()
+ return ret
+
+ def put_file(self, source_path, destination_path):
+ sftp = self._get_sftp_connection()
+ sftp.put(source_path, destination_path)
+ sftp.close()
+
+ def put_iperf3_deb_packages_at_vms(self, source_directory,
+ destination_directory):
+        iperf_deb_files = [f for f in os.listdir(source_directory)
+                           if f.endswith(".deb")]
+ if not iperf_deb_files:
+ raise BaseException(
+ "iperf3 *.deb packages are not found locally at path {}. "
+ "Please recheck 'iperf_deb_package_dir_path' variable in "
+ "global_config.yaml and check *.deb packages are manually "
+ "copied there.".format(source_directory))
+ for f in iperf_deb_files:
+ source_abs_path = "{}/{}".format(source_directory, f)
+ dest_abs_path = "{}/{}".format(destination_directory, f)
+ self.put_file(source_abs_path, dest_abs_path)
+
+ def get_file(self, source_path, destination_path):
+ sftp = self._get_sftp_connection()
+ sftp.get(source_path, destination_path)
+ sftp.close()
+
+ def _is_timed_out(self, start_time, timeout):
+        return (time.time() - start_time) > timeout
+
+ def check_vm_is_reachable_ssh(self, floating_ip, timeout=500, sleep=5):
+ bsleep = sleep
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ _start_time = time.time()
+ attempts = 0
+ while True:
+ try:
+ ssh.connect(floating_ip, username=self.username,
+ password=self.password, pkey=self.private_key,
+ timeout=self.channel_timeout)
+ logger.info("VM with FIP {} is reachable via SSH. Success!"
+ "".format(floating_ip))
+ return True
+ except Exception as e:
+ ssh.close()
+ if self._is_timed_out(_start_time, timeout):
+ logger.info("VM with FIP {} is not reachable via SSH. "
+ "See details: {}".format(floating_ip, e))
+ raise TimeoutError(
+ "\nFailed to establish authenticated ssh connection "
+ "to {} after {} attempts during {} seconds.\n{}"
+ "".format(floating_ip, attempts, timeout, e))
+ attempts += 1
+ logger.info("Failed to establish authenticated ssh connection "
+ "to {}. Number attempts: {}. Retry after {} "
+ "seconds.".format(floating_ip, attempts, bsleep))
+ time.sleep(bsleep)
+
+
+class prepare_iperf(object):
+
+ def __init__(self, fip, user='ubuntu', password='password',
+ private_key=None):
+
+ transport = SSHTransport(fip, user, password, private_key)
+ config = utils.get_configuration()
+
+        # Install iperf3 either from the offline deb packages or via apt
+        internet_at_vms = str(config.get("internet_at_vms")).lower()
+        if internet_at_vms == 'false':
+ logger.info("Copying offline iperf3 deb packages, installing...")
+ path_to_iperf_deb = (config.get('iperf_deb_package_dir_path') or
+ "/artifacts/mos-spt/")
+ home_ubuntu = "/home/ubuntu/"
+ transport.put_iperf3_deb_packages_at_vms(path_to_iperf_deb,
+ home_ubuntu)
+ transport.exec_command('sudo dpkg -i {}*.deb'.format(home_ubuntu))
+ else:
+ logger.info("Installing iperf3 using apt")
+            preparation_cmd = config.get('iperf_prep_string') or ''
+ transport.exec_command(preparation_cmd)
+ transport.exec_command('sudo apt-get update;'
+ 'sudo apt-get install -y iperf3')
+
+        # Log the installed iperf3 package and its version
+ check = transport.exec_command('dpkg -l | grep iperf3')
+ logger.debug(check.decode('utf-8'))
+
+        # Start the iperf3 server in the background
+ transport.exec_command('nohup iperf3 -s > file 2>&1 &')