Added VM2VM test between different routers in 2 projects
Added the test:
VM to VM test in different projects, different networks,
different routers, measured via Floating IPs (common floating net):
* a separate project is created, admin user is added to it
* VMs are in separate projects (admin and newly created),
separate networks, each project has its own router.
VMs have access to each other only by the common floating net.
* VMs are created on the same and different compute nodes.
* Verification is done via Floating IPs in 1 and multiple threads.
Other enhancements:
* renamed "tenant" variables and values to "project"
* more detailed logging (e.g. in which project a resource is created)
* some refactoring
* fixed picking an existing spt flavor, if any
* check the project is empty before deleting it
* added stderr/stdout logging for the case when the VM has no Internet
access and iperf could not be installed
Related-PROD: PROD-36943
Change-Id: I165ed41259336e586ad16ed9eb27ea59619db4c8
diff --git a/utils/__init__.py b/utils/__init__.py
index 8cfe897..190dbd7 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -30,7 +30,7 @@
openstack_clients = os_client.OfficialClientManager(
username=os.environ['OS_USERNAME'],
password=os.environ['OS_PASSWORD'],
- tenant_name=os.environ['OS_PROJECT_NAME'],
+ project_name=os.environ['OS_PROJECT_NAME'],
auth_url=os.environ['OS_AUTH_URL'],
cert=False,
domain=os.environ['OS_PROJECT_DOMAIN_NAME']
diff --git a/utils/os_client.py b/utils/os_client.py
index 9b128c5..aa51732 100644
--- a/utils/os_client.py
+++ b/utils/os_client.py
@@ -33,7 +33,7 @@
INTERFACE = os.environ["OS_ENDPOINT_TYPE"]
def __init__(self, username=None, password=None,
- tenant_name=None, auth_url=None, endpoint_type="internalURL",
+ project_name=None, auth_url=None, endpoint_type="internalURL",
cert=False, domain="Default", **kwargs):
self.traceback = ""
@@ -46,8 +46,7 @@
]
self.username = username
self.password = password
- self.tenant_name = tenant_name
- self.project_name = tenant_name
+ self.project_name = project_name
self.auth_url = auth_url
self.endpoint_type = endpoint_type
self.cert = cert
@@ -63,16 +62,16 @@
@classmethod
def _get_auth_session(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default'):
- if None in (username, password, tenant_name):
- sys.stdout.write((username, password, tenant_name))
+ if None in (username, password, project_name):
+ sys.stdout.write((username, password, project_name))
msg = ("Missing required credentials for identity client. "
"username: {username}, password: {password}, "
- "tenant_name: {tenant_name}").format(
+ "project_name: {project_name}").format(
username=username,
password=password,
- tenant_name=tenant_name
+ project_name=project_name
)
raise msg
@@ -93,7 +92,7 @@
username=username,
password=password,
project_domain_name=domain,
- project_name=tenant_name)
+ project_name=project_name)
auth_session = keystone_session.Session(auth=auth, verify=cert)
# auth_session.get_auth_headers()
@@ -101,12 +100,12 @@
@classmethod
def get_auth_client(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default', **kwargs):
session = cls._get_auth_session(
username=username,
password=password,
- tenant_name=tenant_name,
+ project_name=project_name,
auth_url=auth_url,
cert=cert,
domain=domain)
@@ -117,10 +116,10 @@
@classmethod
def get_compute_client(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default', **kwargs):
session = cls._get_auth_session(
- username=username, password=password, tenant_name=tenant_name,
+ username=username, password=password, project_name=project_name,
auth_url=auth_url, cert=cert, domain=domain)
service_type = 'compute'
compute_client = novaclient.Client(
@@ -130,10 +129,10 @@
@classmethod
def get_network_client(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default', **kwargs):
session = cls._get_auth_session(
- username=username, password=password, tenant_name=tenant_name,
+ username=username, password=password, project_name=project_name,
auth_url=auth_url, cert=cert, domain=domain)
service_type = 'network'
return neutron_client.Client(
@@ -142,10 +141,10 @@
@classmethod
def get_volume_client(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default', **kwargs):
session = cls._get_auth_session(
- username=username, password=password, tenant_name=tenant_name,
+ username=username, password=password, project_name=project_name,
auth_url=auth_url, cert=cert, domain=domain)
service_type = 'volume'
return cinder_client.Client(
@@ -156,10 +155,10 @@
@classmethod
def get_image_client(cls, username=None, password=None,
- tenant_name=None, auth_url=None, cert=None,
+ project_name=None, auth_url=None, cert=None,
domain='Default', **kwargs):
session = cls._get_auth_session(
- username=username, password=password, tenant_name=tenant_name,
+ username=username, password=password, project_name=project_name,
auth_url=auth_url, cert=cert, domain=domain)
service_type = 'image'
return glance_client.Client(
@@ -172,7 +171,7 @@
def auth(self):
if self._auth is None:
self._auth = self.get_auth_client(
- self.username, self.password, self.tenant_name, self.auth_url,
+ self.username, self.password, self.project_name, self.auth_url,
self.cert, self.domain, endpoint_type=self.endpoint_type
)
return self._auth
@@ -181,7 +180,7 @@
def compute(self):
if self._compute is None:
self._compute = self.get_compute_client(
- self.username, self.password, self.tenant_name, self.auth_url,
+ self.username, self.password, self.project_name, self.auth_url,
self.cert, self.domain, endpoint_type=self.endpoint_type
)
return self._compute
@@ -190,7 +189,7 @@
def network(self):
if self._network is None:
self._network = self.get_network_client(
- self.username, self.password, self.tenant_name, self.auth_url,
+ self.username, self.password, self.project_name, self.auth_url,
self.cert, self.domain, endpoint_type=self.endpoint_type
)
return self._network
@@ -199,7 +198,7 @@
def volume(self):
if self._volume is None:
self._volume = self.get_volume_client(
- self.username, self.password, self.tenant_name, self.auth_url,
+ self.username, self.password, self.project_name, self.auth_url,
self.cert, self.domain, endpoint_type=self.endpoint_type
)
return self._volume
@@ -209,7 +208,7 @@
if self._image is None:
self._image = self.get_image_client(
- self.username, self.password, self.tenant_name, self.auth_url,
+ self.username, self.password, self.project_name, self.auth_url,
self.cert, self.domain
)
return self._image
@@ -220,16 +219,15 @@
self.os_clients = os_clients
self.create_fake_ext_net = False
- def get_admin_tenant(self):
- # TODO Keystone v3 doesnt have tenants attribute
- return self.os_clients.auth.projects.find(name="admin")
+ def get_project_by_name(self, name):
+ return self.os_clients.auth.projects.find(name=name)
def get_internal_network(self):
networks = [
net for net in self.os_clients.network.list_networks()["networks"]
if net["admin_state_up"] and not net["router:external"] and
len(net["subnets"])
- ]
+ ]
if networks:
net = networks[0]
else:
@@ -276,7 +274,7 @@
self.os_clients.network.list_networks()["networks"]
if net["admin_state_up"] and net["router:external"] and
len(net["subnets"])
- ]
+ ]
else:
networks = [net for net in
self.os_clients.network.list_networks()["networks"]
@@ -284,7 +282,7 @@
if networks:
ext_net = networks[0]
- logger.info("Using external net '{}'.".format(ext_net["name"]))
+ logger.info("Using external net '{}'".format(ext_net["name"]))
else:
ext_net = self.create_fake_external_network()
return ext_net
@@ -390,14 +388,14 @@
vm_uuid=vm_uuid, expected_state=expected_state,
actual=vm.status))
- def create_network(self, tenant_id):
+ def create_network(self, project_id):
net_name = "spt-test-net-{}".format(random.randrange(100, 999))
config = utils.get_configuration()
mtu = config.get('custom_mtu') or 'default'
net_body = {
'network': {
'name': net_name,
- 'tenant_id': tenant_id
+ 'project_id': project_id
}
}
if mtu != 'default':
@@ -412,10 +410,11 @@
# default, so this blocked running tests at TF envs with default MTU
if 'mtu' not in net:
net['mtu'] = None
- logger.info("Created internal network {}".format(net_name))
+ logger.info("Created internal network {} in {} project".format(
+ net_name, project_id))
return net
- def create_subnet(self, net, tenant_id, cidr=None):
+ def create_subnet(self, net, project_id, cidr=None):
subnet_name = "spt-test-subnet-{}".format(random.randrange(100, 999))
subnet_body = {
'subnet': {
@@ -423,14 +422,15 @@
'network_id': net['id'],
'ip_version': 4,
'cidr': cidr if cidr else '10.1.7.0/24',
- 'tenant_id': tenant_id
+ 'project_id': project_id
}
}
subnet = self.os_clients.network.create_subnet(subnet_body)['subnet']
- logger.info("Created subnet {}".format(subnet_name))
+ logger.info("Created subnet {} in {} project".format(
+ subnet_name, project_id))
return subnet
- def create_router(self, ext_net, tenant_id):
+ def create_router(self, ext_net, project_id):
name = 'spt-test-router-{}'.format(random.randrange(100, 999))
router_body = {
'router': {
@@ -438,18 +438,19 @@
'external_gateway_info': {
'network_id': ext_net['id']
},
- 'tenant_id': tenant_id
+ 'project_id': project_id
}
}
- logger.info("Created a router {}".format(name))
+ logger.info("Created a router {} in {} project".format(
+ name, project_id))
router = self.os_clients.network.create_router(router_body)['router']
return router
- def create_network_resources(self):
- tenant_id = self.get_admin_tenant().id
+ def create_network_resources(self, project="admin", cidr=None):
+ project_id = self.get_project_by_name(project).id
self.get_external_network()
- net = self.create_network(tenant_id)
- self.create_subnet(net, tenant_id)
+ net = self.create_network(project_id)
+ self.create_subnet(net, project_id, cidr)
return net
def list_nova_computes(self):
@@ -470,6 +471,47 @@
"".format(floatingip_id, e)
logger.info(msg)
+ def create_project(self):
+ project_name = "spt-test-project-{}".format(random.randrange(100, 999))
+ project = self.os_clients.auth.projects.create(
+ name=project_name, domain=self.os_clients.domain,
+ description="Mirantis SPT test project")
+ logger.info("Created a project {}, uuid: {}".format(
+ project.name, project.id))
+ return project
+
+ def add_roles_to_user_in_project(self, project_id, username='admin',
+ domain='default', roles=None):
+ user_id = [
+ user.id for user in self.os_clients.auth.users.list()
+ if (user.name == username) and (user.domain_id == domain)][0]
+ if roles is None:
+ roles = ["admin", "member", "creator"]
+ for role in roles:
+ try:
+ role_id = self.os_clients.auth.roles.list(name=role)[0].id
+ self.os_clients.auth.roles.grant(
+ role=role_id, user=user_id, project=project_id)
+ except Exception as e:
+ continue
+ logger.info("Added admin user to {} project".format(project_id))
+
+ def is_project_empty(self, project_id):
+ sec_groups = [i for i in self.os_clients.network.list_security_groups(
+ tenant_id=project_id)['security_groups'] if i['name'] != 'default']
+ servers = self.os_clients.compute.servers.list(
+ search_opts={'project_id': project_id})
+ nets = self.os_clients.network.list_networks(
+ project_id=project_id)["networks"]
+ subnets = self.os_clients.network.list_subnets(
+ project_id=project_id)["subnets"]
+ ports = self.os_clients.network.list_ports(
+ project_id=project_id)["ports"]
+ routers = self.os_clients.network.list_routers(
+ project_id=project_id)["routers"]
+ resources = [*sec_groups, *servers, *nets, *subnets, *ports, *routers]
+ return not bool(resources)
+
def is_cloud_tf(self):
# Detect the TF cloud by assuming it does not have any neutron
# agents (404 in response)
@@ -492,3 +534,10 @@
"See detailed error: {}".format(e))
logger.info("The port {} is updated with custom MTU {}."
"".format(port_uuid, custom_mtu))
+
+ def get_flavor_id_by_name(self, name):
+ flavors = [flavor for flavor in self.os_clients.compute.flavors.list()]
+ flavor_id = [f.id for f in flavors if f.name == name]
+ if not flavor_id:
+ return None
+ return str(flavor_id[0])
diff --git a/utils/ssh.py b/utils/ssh.py
index abbd89f..e9e5f3a 100644
--- a/utils/ssh.py
+++ b/utils/ssh.py
@@ -204,13 +204,14 @@
home_ubuntu = "/home/ubuntu/"
transport.put_iperf3_deb_packages_at_vms(path_to_iperf_deb,
home_ubuntu)
- transport.exec_command('sudo dpkg -i {}*.deb'.format(home_ubuntu))
+ exit_status, stdout, stderr = transport.exec_sync(
+ 'sudo dpkg -i {}*.deb'.format(home_ubuntu))
else:
logger.info("Installing iperf, iperf3 using apt")
preparation_cmd = config.get('iperf_prep_string') or ['']
transport.exec_command(preparation_cmd)
- transport.exec_command('sudo apt-get update;'
- 'sudo apt-get install -y iperf3 iperf')
+ exit_status, stdout, stderr = transport.exec_sync(
+ 'sudo apt-get update && sudo apt-get install -y iperf3 iperf')
# Log whether iperf is installed with version
check = transport.exec_command('dpkg -l | grep ii | grep iperf3')
@@ -221,8 +222,9 @@
else:
info = "Could not put offline iperf packages from {} to the " \
"VM.".format(path_to_iperf_deb)
- raise BaseException("iperf3 is not installed at VM with FIP {}. {}"
- "".format(fip, info))
+ raise BaseException("iperf3 is not installed at VM with FIP {}. "
+ "{}.\nStdout, stderr at VM:\n{}\n{}"
+ "".format(fip, info, stdout, stderr))
# Staring iperf server
transport.exec_command('nohup iperf3 -s > file 2>&1 &')
transport.exec_command('nohup iperf -s > file 2>&1 &')