Merge "Change cirros image for mitaka/ocata cookied models"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
new file mode 100644
index 0000000..d44ce62
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -0,0 +1,56 @@
+@Library('tcp-qa')_
+
+def common = new com.mirantis.mk.Common()
+def shared = new com.mirantis.system_qa.SharedPipeline()
+def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+
+node ("${NODE_NAME}") {
+ try {
+
+ stage("Clean the environment and clone tcp-qa") {
+ shared.prepare_working_dir()
+ }
+
+ stage("Create environment, generate model, bootstrap the salt-cluster") {
+ // steps: "hardware,create_model,salt"
+ shared.swarm_bootstrap_salt_cluster_devops()
+ }
+
+ stage("Install core infrastructure and deploy CICD nodes") {
+ // steps: env.DRIVETRAIN_STACK_INSTALL
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+ }
+
+    stage("Deploy the target platform components") {
+ // steps: env.PLATFORM_STACK_INSTALL
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+ }
+
+ stage("Run tests") {
+ shared.swarm_run_pytest(steps)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} test_failed || true
+ """)
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ dos.py time-sync ${ENV_NAME} || true
+ """)
+ } else {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ shared.report_deploy_result(steps)
+ shared.report_test_result()
+ }
+}
diff --git a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
deleted file mode 100644
index baa9853..0000000
--- a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
+++ /dev/null
@@ -1,87 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-
-node ("${NODE_NAME}") {
- try {
-
- stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
- }
-
- stage("Create environment, generate model, bootstrap the salt-cluster") {
- shared.swarm_bootstrap_salt_cluster_devops()
- }
-
- stage("Install core infrastructure and deploy CICD nodes") {
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
- }
-
- stage("Install core infrastructure and deploy CICD nodes") {
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
- }
-
- stage("Run tests") {
- shared.run_cmd("""\
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt
- . ./tcp_tests/utils/env_k8s
-
- # Prepare snapshots that may be used in tests if MANAGER=devops
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_k8s_deployed.ini
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
- #dos.py suspend ${ENV_NAME}
- #dos.py snapshot ${ENV_NAME} k8s_deployed
- #dos.py snapshot ${ENV_NAME} stacklight_deployed
- #dos.py resume ${ENV_NAME}
- #dos.py time-sync ${ENV_NAME}
-
- # Initialize variables used in tcp-qa tests
- export CURRENT_SNAPSHOT=stacklight_deployed # provide the snapshot name required by the test
- export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled separatelly
-
- #export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
- export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
- export MAKE_SNAPSHOT_STAGES=false # skip 'hardware' fixture, disable snapshot/revert features
- # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
- export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
- export salt_master_port=6969
- export SALT_USER=\$SALTAPI_USER
- export SALT_PASSWORD=\$SALTAPI_PASS
- export CORE_INSTALLED=true # skip core_deployed fixture
- export K8S_INSTALLED=true # skip k8s_deployed fixture
- export sl_installed=true # skip stacklight_deployed fixture
-
- py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} test_completed
- """)
- }
-
- } catch (e) {
- common.printMsg("Job failed", "red")
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} test_failed || true
- """)
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
- } else {
- shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
- dos.py time-sync ${ENV_NAME} || true
- """)
- }
- shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
- shared.report_test_result()
- }
-}
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
deleted file mode 100644
index 585ad83..0000000
--- a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
+++ /dev/null
@@ -1,89 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-
-node ("${NODE_NAME}") {
- try {
-
- stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
- }
-
- stage("Create environment, generate model, bootstrap the salt-cluster") {
- shared.swarm_bootstrap_salt_cluster_devops()
- }
-
- stage("Install core infrastructure and deploy CICD nodes") {
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
- }
-
- stage("Install core infrastructure and deploy CICD nodes") {
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
- }
-
- stage("Run tests") {
- shared.run_cmd("""\
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt
- # TODO: . ./tcp_tests/utils/env_keystonercv3
-
- # Prepare snapshots that may be used in tests if MANAGER=devops
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_openstack_deployed.ini
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_os_deployed.ini
- #dos.py suspend ${ENV_NAME}
- #dos.py snapshot ${ENV_NAME} openstack_deployed
- #dos.py snapshot ${ENV_NAME} stacklight_deployed
- #dos.py snapshot ${ENV_NAME} sl_os_deployed
- #dos.py resume ${ENV_NAME}
- #dos.py time-sync ${ENV_NAME}
-
- # Initialize variables used in tcp-qa tests
- export CURRENT_SNAPSHOT=stacklight_deployed # provide the snapshot name required by the test
- export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled separatelly
-
- #export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
- export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
- export MAKE_SNAPSHOT_STAGES=false # skip 'hardware' fixture, disable snapshot/revert features
- # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
- export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
- export salt_master_port=6969
- export SALT_USER=\$SALTAPI_USER
- export SALT_PASSWORD=\$SALTAPI_PASS
- export CORE_INSTALLED=true # skip core_deployed fixture
- export OPENSTACK_INSTALLED=true # skip k8s_deployed fixture
- export sl_installed=true # skip stacklight_deployed fixture
-
- py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} test_completed
- """)
- }
-
- } catch (e) {
- common.printMsg("Job failed", "red")
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} test_failed || true
- """)
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
- } else {
- shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
- dos.py time-sync ${ENV_NAME} || true
- """)
- }
- shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
- shared.report_test_result()
- }
-}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
new file mode 100644
index 0000000..d0422fd
--- /dev/null
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -0,0 +1,80 @@
+/**
+ *
+ * Deploy the product cluster using Jenkins master on CICD cluster
+ *
+ * Expected parameters:
+
+ * ENV_NAME Fuel-devops environment name
+ * PASSED_STEPS Steps passed to install components using Jenkins on CICD cluster: "salt,core,cicd,openstack:3200,stacklight:2400",
+ where 3200 and 2400 might be timeouts (not used in the testing pipeline)
+ * RUN_TEST_OPTS Pytest option -k or -m, with expression to select necessary tests. Additional pytest options are allowed.
+ * PARENT_NODE_NAME Name of the jenkins slave to create the environment
+ * PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ * SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
+ * LAB_CONFIG_NAME Not used (backward compatibility, for manual deployment steps only)
+ * REPOSITORY_SUITE Not used (backward compatibility, for manual deployment steps only)
+ * MCP_IMAGE_PATH1604 Not used (backward compatibility, for manual deployment steps only)
+ * IMAGE_PATH_CFG01_DAY01 Not used (backward compatibility, for manual deployment steps only)
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+ error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
+ try {
+
+ stage("Run tests") {
+ def steps = shared.get_steps_list(PASSED_STEPS)
+ def sources = """\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt"""
+ if (steps.contains('k8s')) {
+ sources += """
+ . ./tcp_tests/utils/env_k8s\n"""
+ }
+ if (steps.contains('openstack')) {
+ sources += """
+ # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
+ }
+ def installed = steps.collect {"""\
+ export ${it}_installed=true"""}.join("\n")
+
+            shared.run_cmd(sources + "\n" + installed + """
+ export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+
+ py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} test_completed
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
+ }
+ }
+}
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index af2b2dc..a64b950 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -101,6 +101,28 @@
parameters: parameters
}
+def swarm_run_pytest(String passed_steps) {
+ // Run pytest tests
+ def common = new com.mirantis.mk.Common()
+ def parameters = [
+ string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+ string(name: 'PASSED_STEPS', value: passed_steps),
+ string(name: 'RUN_TEST_OPTS', value: "${RUN_TEST_OPTS}"),
+ string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_WORKSPACE', value: pwd()),
+ string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+ booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+ string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
+ string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
+ string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
+ string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+ ]
+ common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
+ common.prettyPrint(parameters)
+ build job: 'swarm-run-pytest',
+ parameters: parameters
+}
+
def generate_cookied_model() {
def common = new com.mirantis.mk.Common()
// do not fail if environment doesn't exists
@@ -219,13 +241,18 @@
dos.py suspend ${ENV_NAME}
dos.py snapshot ${ENV_NAME} ${stack}_deployed
dos.py resume ${ENV_NAME}
- dos.py time-sync ${ENV_NAME}
+ dos.py time-sync ${ENV_NAME} || dos.py time-sync ${ENV_NAME} # sometimes, timesync may fail. Need to update it in fuel-devops.
if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
fi
""")
}
+def get_steps_list(steps) {
+ // Make a list from comma separated string
+ return steps.split(',').collect { it.split(':')[0] }
+}
+
def report_deploy_result(deploy_expected_stacks) {
}
diff --git a/tcp_tests/fixtures/stacklight_fixtures.py b/tcp_tests/fixtures/stacklight_fixtures.py
index 2c30530..df0d516 100644
--- a/tcp_tests/fixtures/stacklight_fixtures.py
+++ b/tcp_tests/fixtures/stacklight_fixtures.py
@@ -52,7 +52,7 @@
:rtype: SLManager
"""
# Deploy SL services
- if not config.stack_light.sl_installed:
+ if not config.stack_light.stacklight_installed:
steps_path = config.sl_deploy.sl_steps_path
commands = underlay.read_template(steps_path)
sl_actions.install(commands)
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 778691f..5411881 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -129,16 +129,16 @@
return self.salt_api.local('cfg01*', 'state.sls', 'salt.minion')
def create_networks(self):
- return self.salt_api.enforce_state(self.master_tgt, 'neutron.client')
+ return self.salt_api.local('cfg01*', 'state.sls', 'neutron.client')
def create_flavors(self):
- return self.salt_api.enforce_state(self.master_tgt, 'nova.client')
+ return self.salt_api.local('cfg01*', 'state.sls', 'nova.client')
def create_cirros(self):
- return self.salt_api.enforce_state(self.master_tgt, 'glance.client')
+ return self.salt_api.local('cfg01*', 'state.sls', 'glance.client')
def generate_config(self):
- return self.salt_api.enforce_state(self.master_tgt, 'runtest')
+ return self.salt_api.local('cfg01*', 'state.sls', 'runtest')
def fetch_arficats(self, username=None, file_format='xml'):
target_name = next(node_name for node_name
@@ -206,19 +206,23 @@
res = self.run_salt_minion_state()
LOG.info(json.dumps(res, indent=4))
- time.sleep(10)
+ time.sleep(20)
res = self.create_networks()
LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
res = self.create_flavors()
LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
res = self.create_cirros()
LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
res = self.generate_config()
LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
def run_tempest(self, timeout=600):
tgt = "{}*".format(self.target)
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index ba45e16..0f81596 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -39,7 +39,7 @@
def install(self, commands, label='Install SL services'):
self.execute_commands(commands, label=label)
- self.__config.stack_light.sl_installed = True
+ self.__config.stack_light.stacklight_installed = True
self.__config.stack_light.sl_vip_host = self.get_sl_vip()
def get_sl_vip(self):
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 495e51d..ee23654 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -490,7 +490,8 @@
# dump files
LOG.info("Archive artifacts on all nodes")
master.check_call('salt "*" cmd.run "{0}"'.format(dump_commands),
- raise_on_err=False)
+ raise_on_err=False,
+ timeout=600)
# create target dir for archives
master.check_call("mkdir /root/dump/")
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 8da0fae..ba3fb64 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -270,7 +270,7 @@
]
sl_opts = [
- ct.Cfg('sl_installed', ct.Boolean(),
+ ct.Cfg('stacklight_installed', ct.Boolean(),
help="", default=False),
ct.Cfg('sl_version', ct.String(),
help="StackLight version. Could be 1 or 2", default=''),
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
deleted file mode 100644
index d0f69b7..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
+++ /dev/null
@@ -1,72 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-node ("${NODE_NAME}") {
- try {
-
- stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
- }
-
- stage("Create environment, generate mode, bootstrap the salt-cluster") {
- shared.swarm_bootstrap_salt_cluster_devops()
- }
-
- // Install core and cicd
- stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
- shared.run_job_on_day01_node("core")
- }
-
- stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
- shared.run_job_on_day01_node("cicd")
- }
-
- // Install the cluster
- for (stack in "${PLATFORM_STACK_INSTALL}".split(",")) {
- stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
- shared.run_job_on_cicd_nodes(stack)
- }
- }
-
- stage("Run tests") {
- shared.run_cmd("""\
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt
- . ./tcp_tests/utils/env_k8s
-
- # Initialize variables used in tcp-qa tests
- export CURRENT_SNAPSHOT=stacklight_deployed # provide the snapshot name required by the test
- export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled separatelly
-
- export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
- # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
- export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
- export salt_master_port=6969
- export SALT_USER=\$SALTAPI_USER
- export SALT_PASSWORD=\$SALTAPI_PASS
- export CORE_INSTALLED=true # skip core_deployed fixture
- export K8S_INSTALLED=true # skip k8s_deployed fixture
- export sl_installed=true # skip stacklight_deployed fixture
-
- py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl
-
- #dos.py suspend ${ENV_NAME}
- #dos.py snapshot ${ENV_NAME} test_completed
- #dos.py resume ${ENV_NAME}
- #dos.py time-sync ${ENV_NAME}
- """)
- }
-
- } catch (e) {
- common.printMsg("Job failed", "red")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
- }
-}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
deleted file mode 100644
index 83335a9..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
+++ /dev/null
@@ -1,67 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-node ("${NODE_NAME}") {
- try {
-
- stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
- }
-
- stage("Create environment, generate mode, bootstrap the salt-cluster") {
- shared.swarm_bootstrap_salt_cluster_devops()
- }
-
- // Install core and cicd
- stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
- shared.run_job_on_day01_node("core")
- }
-
- stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
- shared.run_job_on_day01_node("cicd")
- }
-
- // Install the cluster
- for (stack in "${PLATFORM_STACK_INSTALL}".split(",")) {
- stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
- shared.run_job_on_cicd_nodes(stack)
- }
- }
-
- stage("Run tests") {
- shared.run_cmd("""\
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt
- . ./tcp_tests/utils/env_k8s
-
- # Initialize variables used in tcp-qa tests
- export CURRENT_SNAPSHOT=k8s_deployed # provide the snapshot name required by the test
- export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled separatelly
-
- export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
- # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
- export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
- export salt_master_port=6969
- export SALT_USER=\$SALTAPI_USER
- export SALT_PASSWORD=\$SALTAPI_PASS
- export CORE_INSTALLED=true # skip core_deployed fixture
- export K8S_INSTALLED=true # skip k8s_deployed fixture
-
- py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico
- """)
- }
-
- } catch (e) {
- common.printMsg("Job failed", "red")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
- }
-
-}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index 98016f7..743e65f 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index be11365..7ee113a 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index 89f141d..4f322cb 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index aa859b0..af2258e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -16,7 +16,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/tests/environment/test_bootstrap_salt.py b/tcp_tests/tests/environment/test_bootstrap_underlay.py
similarity index 96%
rename from tcp_tests/tests/environment/test_bootstrap_salt.py
rename to tcp_tests/tests/environment/test_bootstrap_underlay.py
index f852ffa..9458c5b 100644
--- a/tcp_tests/tests/environment/test_bootstrap_salt.py
+++ b/tcp_tests/tests/environment/test_bootstrap_underlay.py
@@ -19,7 +19,7 @@
LOG = logger.logger
-class TestBootstrapCore(object):
+class TestBootstrapUnderlay(object):
"""Test class for deploy local dns_vm"""
def test_create_environment(self, config, hardware):
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
index 97b19da..9852f5f 100644
--- a/tcp_tests/tests/system/test_pipeline_deploy.py
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -81,10 +81,16 @@
Scenario:
1. Prepare salt on hosts.
- .........................
+ 2. Connect to jenkins on cfg01 node
+ 3. Run deploy on cfg01 node
+ 4. Connect to jenkins on cid node
+ 5. Run deploy on cid node
"""
+ show_step(1)
nodes = underlay.node_names()
LOG.info("Nodes - {}".format(nodes))
+ show_step(2)
+
cfg_node = 'cfg01.cookied-bm-dpdk-pipeline.local'
salt_api = salt_deployed.get_pillar(
cfg_node, '_param:jenkins_salt_api_url')
@@ -93,12 +99,11 @@
host='http://172.16.49.2:8081',
username='admin',
password='r00tme')
-
- # Creating param list for openstack deploy
params = jenkins.make_defults_params('deploy_openstack')
params['SALT_MASTER_URL'] = salt_api
params['STACK_INSTALL'] = 'core,kvm,cicd'
- show_step(4)
+
+ show_step(3)
build = jenkins.run_build('deploy_openstack', params)
jenkins.wait_end_of_build(
name=build[0],
@@ -107,11 +112,27 @@
result = jenkins.build_info(name=build[0],
build_id=build[1])['result']
assert result == 'SUCCESS', "Deploy openstack was failed"
- # cicd_passwd = ""
- # jenkins_cicd = JenkinsClient(
- # host='http://10.167.11.90:8081',
- # username='admin',
- # password='r00tme')
+
+ show_step(4)
+ cid_node = 'cid01.cookied-bm-dpdk-pipeline.local'
+ salt_output = salt_deployed.get_pillar(
+ cid_node, 'jenkins:client:master:password')
+ cid_passwd = salt_output[0].get(cid_node)
+ jenkins = JenkinsClient(
+ host='http://10.167.11.90:8081',
+ username='admin',
+ password=cid_passwd)
+ params['STACK_INSTALL'] = 'ovs,openstack'
+ show_step(5)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS', "Deploy openstack was failed"
+
if settings.RUN_TEMPEST:
tempest_actions.prepare_and_run_tempest()
LOG.info("*************** DONE **************")