Merge "Unhardcode hardware nodes for BM-deployments"
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index 79b248c..a31bbc8 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -4,6 +4,7 @@
import jenkins
import json
+import yaml
import requests
from devops.helpers import helpers
@@ -99,16 +100,47 @@
job_params = job_params['parameterDefinitions']
return job_params
- def make_defults_params(self, name):
+ def make_defaults_params(self, name):
job_params = self.job_params(name)
def_params = dict(
[(j['name'], j['defaultParameterValue']['value'])
for j in job_params])
return def_params
+ def _correct_yaml_params(self, job_name, params):
+ """
+ Params can be defined as a nested dict.
+ In that case 2nd-layer dict will be translated to YAML text and
+ added to default parameter value
+
+ :param job_name: Job name
+ :param params: dict of JenkinsJobs parameters
+ :return: nothing
+ """
+ defaults = self.make_defaults_params(job_name)
+ for param_name, param_value in params.items():
+ if not isinstance(param_value, dict):
+ continue
+ default_param = defaults.get(param_name)
+ if default_param is None:
+ print("{param} param of {job} job doesn't exist. "
+ "Ignoring enriching it with {value}".format(
+ param=param_name,
+ job=job_name,
+ value=param_value
+ ))
+ continue
+ yaml_param = yaml.safe_load(default_param) or {}
+ yaml_param.update(param_value)
+ params[param_name] = yaml.dump(yaml_param,
+ default_flow_style=False)
+ return params
+
@retry()
def run_build(self, name, params=None, timeout=600, verbose=False):
- params = params or self.make_defults_params(name)
+ params = params or self.make_defaults_params(name)
+ params = self._correct_yaml_params(job_name=name,
+ params=params)
+
num = self.__client.build_job(name, params)
time.sleep(2) # wait while job is started
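
For context, the merge that _correct_yaml_params performs can be reproduced
with plain PyYAML. A minimal sketch, assuming a job whose default parameter
value is stored as YAML text (the parameter names and values below are
illustrative, not taken from a real job):

    import yaml

    # YAML text that Jenkins stores as the parameter's default value
    default_param = "OS_DIST_UPGRADE: false\nBATCH_SIZE: 1\n"
    # nested dict supplied by a test via job parameters
    override = {"OS_DIST_UPGRADE": True, "BATCH_SIZE": 10}

    merged = yaml.safe_load(default_param) or {}
    merged.update(override)
    print(yaml.dump(merged, default_flow_style=False))
    # BATCH_SIZE: 10
    # OS_DIST_UPGRADE: true
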
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index e9c2917..462fa77 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,4 +1,5 @@
# git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre] # Use this requirement for PostgreSQL
+mock>=1.2,<4.0.0 # pin first to avoid dependency conflicts with the packages below
libvirt-python>=3.5.0,<4.1.0 # LGPLv2+
git+git://github.com/openstack/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
git+git://github.com/dis-xcom/fuel-devops-driver-ironic
@@ -19,7 +20,6 @@
salt-pepper<=0.5.3
setuptools<=36.2.0
netaddr
-mock>=1.2
python-jenkins
cmd2<0.9
PyYAML!=5.1
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index f48bcb3..b66aec1 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -1,6 +1,7 @@
default_context:
opencontrail_api_ssl_enabled: True
jenkins_cfg_admin_password: r00tme
+ jenkins_slave_type: 'ssh'
backup_private_key: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index fbd6da5..f382627 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -1,6 +1,7 @@
#Ceph Nautilus multiple osd
default_context:
jenkins_cfg_admin_password: r00tme
+ jenkins_slave_type: 'ssh'
auditd_enabled: 'False'
backend_network_netmask: 255.255.255.0
backend_network_subnet: 10.167.4.0/24
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index bc5dc0c..02a6888 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -4,12 +4,39 @@
LOG = logger.logger
+xtra_network_interface = """
+parameters:
+ _param:
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:single_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+"""
+
add_osd_ceph_init_yml = """
parameters:
_param:
ceph_osd_node04_hostname: xtra
ceph_osd_node04_address: 10.6.0.205
- ceph_mon_node04_ceph_public_address: #10.166.49.205
ceph_osd_system_codename: xenial
linux:
network:
@@ -40,7 +67,8 @@
@pytest.fixture(scope='module')
-def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
+def add_xtra_node_to_salt(salt_actions, underlay_actions,
+ config, reclass_actions):
"""
:return:
@@ -61,6 +89,11 @@
"systemctl restart salt-minion",
node_name=xtra_node,
raise_on_err=False)
+ salt_actions.enforce_state("I@salt:master", "reclass")
+
+ reclass_actions.merge_context(yaml_context=xtra_network_interface,
+ short_path="../nodes/_generated/xtra.*.yml")
+
yield
# LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
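
The xtra_network_interface context above is plain YAML; a quick self-check
that it parses and wires br_ctl over ens4 (a sketch, assuming the module
path of this test file is importable):

    import yaml
    from tcp_tests.tests.system.test_ceph_operations import (
        xtra_network_interface)

    ifaces = yaml.safe_load(xtra_network_interface)[
        "parameters"]["_param"]["linux_network_interfaces"]
    assert ifaces["br_ctl"]["use_interfaces"] == ["ens4"]
    assert ifaces["ens3"]["proto"] == "dhcp"
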
@@ -87,6 +120,21 @@
# ------- cluster/infra/config/init.yml -----------
reclass.merge_context(yaml_context=add_osd_config_init_yml,
short_path="cluster/*/infra/config/init.yml")
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+ @pytest.fixture
+ def remove_node_from_reclass(self,
+ reclass_actions):
+ reclass = reclass_actions
+
+ reclass.delete_key(
+ key="parameters.reclass.storage.node.ceph_osd_node04",
+ short_path="cluster/*/infra/config/init.yml"
+ )
+ reclass.delete_key(
+ key="parameters.linux.network.host.xtra",
+ short_path="cluster/*/ceph/init.yml"
+ )
def test_add_node_process(self, describe_node_in_reclass,
drivetrain_actions):
@@ -132,7 +180,9 @@
# 11 hdd 0.01549 osd.11 up 1.00000 1.00000
pass
- def test_delete_node_process(self, drivetrain_actions):
+ def test_delete_node_process(self,
+ remove_node_from_reclass,
+ drivetrain_actions):
dt = drivetrain_actions
job_name = "ceph-remove-node"
@@ -147,12 +197,147 @@
assert job_result == 'SUCCESS', job_description
-class TestCephMon(object):
- def test_add_node(self):
- pass
+add_mon_ceph_init_yml = """
+parameters:
+ _param:
+ ceph_mon_node04_hostname: xtra
+ ceph_mon_node04_address: 10.6.0.205
+ ceph_mon_node04_ceph_public_address: 10.166.49.209
+ ceph_mon_node04_ceph_backup_hour: 4
+ ceph_mon_node04_ceph_backup_minute: 0
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_mon_node04_address}
+ names:
+ - ${_param:ceph_mon_node04_hostname}
+ - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+"""
- def test_delete_node(self):
- pass
+add_mon_ceph_common_yml = """
+parameters:
+ ceph:
+ common:
+ members:
+ - name: ${_param:ceph_mon_node04_hostname}
+ host: ${_param:ceph_mon_node04_address}
+"""
+
+add_mon_config_node_yml = """
+parameters:
+ reclass:
+ storage:
+ node:
+ ceph_mon_node04:
+ name: ${_param:ceph_mon_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.mon
+ params:
+ ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+ ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+ ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_mon_system_codename}
+ single_address: ${_param:ceph_mon_node04_address}
+ keepalived_vip_priority: 104
+""" # noqa: E501
+
+add_mon_infra_kvm_yml = """
+parameters:
+ salt:
+ control:
+ size:
+ ceph.mon:
+ cpu: 8
+ ram: 16384
+ disk_profile: small
+ net_profile: default
+ cluster:
+ internal:
+ node:
+ cmn04:
+ name: ${_param:ceph_mon_node04_hostname}
+ provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+ image: ${_param:salt_control_xenial_image}
+ size: ceph.mon
+""" # noqa: E501
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephMon(object):
+ @pytest.fixture
+ def describe_node_in_reclass(self,
+ reclass_actions, salt_actions):
+ LOG.info("Executing pytest SETUP "
+ "from describe_node_in_reclass fixture")
+ reclass = reclass_actions
+ # ---- cluster/*/ceph/init.yml --------------
+ reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+ short_path="cluster/*/ceph/init.yml")
+
+ # ------- cluster/infra/config/init.yml -----------
+ reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+ short_path="cluster/*/ceph/common.yml")
+ reclass.merge_context(yaml_context=add_mon_config_node_yml,
+ short_path="cluster/*/infra/config/nodes.yml")
+
+ # ------- define settings for new mon node in KVM cluster -----------
+ reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+ short_path="cluster/*/infra/kvm.yml")
+
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+ @pytest.fixture
+ def remove_node_from_reclass(self,
+ reclass_actions, salt_actions):
+ LOG.info("Executing pytest SETUP "
+ "from remove_node_from_reclass fixture")
+ reclass = reclass_actions
+ reclass.delete_key(
+ key="parameters.reclass.storage.node.ceph_mon_node04",
+ short_path="cluster/*/infra/config/init.yml")
+ reclass.delete_key(
+ key="parameters.salt.control.cluster.internal.node.cmn04",
+ short_path="cluster/*/infra/kvm.yml"
+ )
+ reclass.delete_key(
+ key="parameters.linux.network.host.xtra",
+ short_path="cluster/*/ceph/init.yml"
+ )
+
+ def test_add_node_process(self,
+ drivetrain_actions,
+ describe_node_in_reclass):
+ dt = drivetrain_actions
+
+ job_name = "ceph-add-node"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'USE_UPMAP': True
+ }
+ job_result, job_description = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert job_result == 'SUCCESS', job_description
+
+ def test_delete_node_process(self,
+ remove_node_from_reclass,
+ drivetrain_actions):
+ dt = drivetrain_actions
+
+ job_name = "ceph-remove-node"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'USE_UPMAP': True
+ }
+ job_result, job_description = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert job_result == 'SUCCESS', job_description
class TestCephMgr(object):
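
The add/remove tests above all follow the same Drivetrain pattern; sketched
generically below (dt stands in for the drivetrain_actions fixture, and the
helper name is hypothetical):

    def run_ceph_job(dt, job_name, host_glob="xtra*"):
        # submit a ceph add/remove job and wait for its verdict
        result, description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters={"HOST": host_glob, "USE_UPMAP": True},
            verbose=True)
        assert result == "SUCCESS", description
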
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 0c74d20..f3de4eb 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -331,7 +331,7 @@
host='http://{}:8081'.format(config.salt.salt_master_host),
username='admin',
password='r00tme')
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
params['STACK_INSTALL'] = 'core,cicd'
@@ -432,7 +432,7 @@
host='http://{}:8081'.format(config.salt.salt_master_host),
username='admin',
password='r00tme')
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
params['STACK_INSTALL'] = 'core,cicd'
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index c6d61dd..e265af5 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -490,7 +490,7 @@
username=jenkins_info['username'],
password=jenkins_info['password'])
- params = jenkins.make_defults_params('deploy-k8s-upgrade')
+ params = jenkins.make_defaults_params('deploy-k8s-upgrade')
params['SALT_MASTER_URL'] = salt_api
params['SALT_MASTER_CREDENTIALS'] = 'salt'
params['CONFORMANCE_RUN_AFTER'] = True
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 240b481..c1456be 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -234,7 +234,12 @@
job_parameters = {
'GIT_REFSPEC': 'release/proposed/2019.2.0',
'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
- 'TARGET_MCP_VERSION': '2019.2.0'
+ 'TARGET_MCP_VERSION': '2019.2.0',
+ "DRIVE_TRAIN_PARAMS": {
+ "OS_DIST_UPGRADE": True,
+ "OS_UPGRADE": True,
+ "BATCH_SIZE": 10
+ }
}
job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
@@ -346,7 +351,9 @@
job_name = 'deploy-upgrade-galera'
job_parameters = {
- 'INTERACTIVE': 'false'
+ 'INTERACTIVE': 'false',
+ 'OS_DIST_UPGRADE': 'true',
+ 'OS_UPGRADE': 'true'
}
job_result, job_description = dt.start_job_on_jenkins(
@@ -480,7 +487,9 @@
# ########### Start Deploy - upgrade RabbitMQ pipeline ############
show_step(3)
job_parameters = {
- 'INTERACTIVE': 'false'
+ 'INTERACTIVE': 'false',
+ 'OS_DIST_UPGRADE': 'true',
+ 'OS_UPGRADE': 'true'
}
job_result, job_description = dt.start_job_on_jenkins(
@@ -542,7 +551,9 @@
job_parameters = {
"STAGE_UPGRADE_DOCKER_COMPONENTS": True,
"STAGE_UPGRADE_ES_KIBANA": True,
- "STAGE_UPGRADE_SYSTEM_PART": True
+ "STAGE_UPGRADE_SYSTEM_PART": True,
+ 'OS_DIST_UPGRADE': 'true',
+ 'OS_UPGRADE': 'true'
}
job_result, job_description = drivetrain.start_job_on_jenkins(
job_name="stacklight-upgrade",
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index d19316b..7ea9c11 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -150,7 +150,7 @@
host=day1_cfg_config.config.salt.salt_master_host),
username='admin',
password='r00tme')
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = result
params['STACK_INSTALL'] = "core,openstack,ovs"
build = jenkins.run_build('deploy_openstack', params)
@@ -212,7 +212,7 @@
host='http://172.16.44.33:8081',
username='admin',
password='r00tme')
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
if settings.STACK_INSTALL:
params['STACK_INSTALL'] = settings.STACK_INSTALL
@@ -271,7 +271,7 @@
except LookupError:
ntp_skipped_nodes = ''
- params = jenkins.make_defults_params('cvp-sanity')
+ params = jenkins.make_defaults_params('cvp-sanity')
params['TESTS_SETTINGS'] = (
'drivetrain_version={0};{1}'
.format(settings.MCP_VERSION, ntp_skipped_nodes))
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
index 9852f5f..bfc7d8c 100644
--- a/tcp_tests/tests/system/test_pipeline_deploy.py
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -49,7 +49,7 @@
password='r00tme')
# Creating param list for openstack deploy
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
params['STACK_INSTALL'] = 'core,kvm,openstack,ovs'
show_step(4)
@@ -99,7 +99,7 @@
host='http://172.16.49.2:8081',
username='admin',
password='r00tme')
- params = jenkins.make_defults_params('deploy_openstack')
+ params = jenkins.make_defaults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
params['STACK_INSTALL'] = 'core,kvm,cicd'
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index e0e7f69..bf3bb01 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -123,7 +123,7 @@
username=username,
password=password)
- job_params = jenkins.make_defults_params(job_name)
+ job_params = jenkins.make_defaults_params(job_name)
if job_parameters is not None: # job_parameters = {}
job_params.update(job_parameters)
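
Taken together, the client can now submit nested parameters end-to-end. A
hypothetical invocation, assuming the JenkinsClient class exported by
tcp_tests.managers.jenkins.client and placeholder host, credentials, and
job name:

    from tcp_tests.managers.jenkins.client import JenkinsClient

    jenkins = JenkinsClient(host="http://jenkins.local:8081",
                            username="admin", password="r00tme")
    params = jenkins.make_defaults_params("deploy_openstack")
    # a nested dict is merged into the job's YAML default by run_build
    params["DRIVE_TRAIN_PARAMS"] = {"OS_DIST_UPGRADE": True,
                                    "BATCH_SIZE": 10}
    build = jenkins.run_build("deploy_openstack", params)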