Add cookied-cicd models for openstack configs: ovs/dvr/dpdk
Change-Id: If9e72928b90bb13fd51199c93f9255ec52576bc4
diff --git a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
new file mode 100644
index 0000000..3b4d49e
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
@@ -0,0 +1,87 @@
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+
+node ("${NODE_NAME}") {
+ try {
+
+ stage("Clean the environment and clone tcp-qa") {
+ shared.prepare_working_dir()
+ }
+
+ stage("Create environment, generate model, bootstrap the salt-cluster") {
+ shared.swarm_bootstrap_salt_cluster_devops()
+ }
+
+ stage("Install core infrastructure and deploy CICD nodes") {
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+ }
+
+ stage("Install core infrastructure and deploy CICD nodes") {
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+ }
+
+ stage("Run tests") {
+ shared.run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ . ./tcp_tests/utils/env_k8s
+
+ # Prepare snapshots that may be used in tests if MANAGER=devops
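+ # (the copies reuse the salt_deployed settings under the snapshot names the tests expect; the real dos.py snapshot calls below stay disabled)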
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_k8s_deployed.ini
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_deployed.ini
+ #dos.py suspend ${ENV_NAME}
+ #dos.py snapshot ${ENV_NAME} k8s_deployed
+ #dos.py snapshot ${ENV_NAME} sl_deployed
+ #dos.py resume ${ENV_NAME}
+ #dos.py time-sync ${ENV_NAME}
+
+ # Initialize variables used in tcp-qa tests
+ export CURRENT_SNAPSHOT=sl_deployed # provide the snapshot name required by the test
+ export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled in separately
+
+ #export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
+ export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
+ export MAKE_SNAPSHOT_STAGES=false # disable the snapshot/revert stages of the 'hardware' fixture
+ # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+ export COMMON_SERVICES_INSTALLED=true # skip common_services_deployed fixture
+ export K8S_INSTALLED=true # skip k8s_deployed fixture
+ export sl_installed=true # skip sl_deployed fixture
+
+ py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} test_completed
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} test_failed || true
+ """)
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ } else {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ dos.py time-sync ${ENV_NAME} || true
+ """)
+ }
+ shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
+ shared.report_test_result()
+ }
+}
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
new file mode 100644
index 0000000..e22c7c2
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
@@ -0,0 +1,89 @@
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+
+node ("${NODE_NAME}") {
+ try {
+
+ stage("Clean the environment and clone tcp-qa") {
+ shared.prepare_working_dir()
+ }
+
+ stage("Create environment, generate model, bootstrap the salt-cluster") {
+ shared.swarm_bootstrap_salt_cluster_devops()
+ }
+
+ stage("Install core infrastructure and deploy CICD nodes") {
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+ }
+
+ stage("Install core infrastructure and deploy CICD nodes") {
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+ }
+
+ stage("Run tests") {
+ shared.run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ # TODO: . ./tcp_tests/utils/env_keystonercv3
+
+ # Prepare snapshots that may be used in tests if MANAGER=devops
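+ # (the copies reuse the salt_deployed settings under the snapshot names the tests expect; the real dos.py snapshot calls below stay disabled)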
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_openstack_deployed.ini
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_deployed.ini
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_os_deployed.ini
+ #dos.py suspend ${ENV_NAME}
+ #dos.py snapshot ${ENV_NAME} openstack_deployed
+ #dos.py snapshot ${ENV_NAME} sl_deployed
+ #dos.py snapshot ${ENV_NAME} sl_os_deployed
+ #dos.py resume ${ENV_NAME}
+ #dos.py time-sync ${ENV_NAME}
+
+ # Initialize variables used in tcp-qa tests
+ export CURRENT_SNAPSHOT=sl_deployed # provide the snapshot name required by the test
+ export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled in separately
+
+ #export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
+ export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
+ export MAKE_SNAPSHOT_STAGES=false # disable the snapshot/revert stages of the 'hardware' fixture
+ # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+ export COMMON_SERVICES_INSTALLED=true # skip common_services_deployed fixture
+ export OPENSTACK_INSTALLED=true # skip openstack_deployed fixture
+ export sl_installed=true # skip sl_deployed fixture
+
+ py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} test_completed
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} test_failed || true
+ """)
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ } else {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ dos.py time-sync ${ENV_NAME} || true
+ """)
+ }
+ shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
+ shared.report_test_result()
+ }
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
new file mode 100644
index 0000000..024bd83
--- /dev/null
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -0,0 +1,109 @@
+/**
+ *
+ * Create fuel-devops environment, generate a model for it
+ * and bootstrap a salt cluster on the environment nodes
+ *
+ * Expected parameters:
+ *
+ * PARENT_NODE_NAME Name of the jenkins slave to create the environment
+ * PARENT_WORKSPACE Path to the parent job's workspace containing the tcp-qa repo
+ * LAB_CONFIG_NAME Name of the tcp-qa deployment template
+ * ENV_NAME Fuel-devops environment name
+ * MCP_VERSION MCP version, like 2018.4 or proposed
+ * MCP_IMAGE_PATH1604 Local path to the image http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ * IMAGE_PATH_CFG01_DAY01 Local path to the image http://ci.mcp.mirantis.net:8085/images/cfg01-day01-proposed.qcow2
+ * CFG01_CONFIG_IMAGE_NAME Name of the config drive image to create, like cfg01.${LAB_CONFIG_NAME}-config-drive.iso
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ * PIPELINE_LIBRARY_REF Reference to the pipeline-library change
+ * MK_PIPELINES_REF Reference to the mk-pipelines change
+ * COOKIECUTTER_TEMPLATE_COMMIT Commit/tag/branch for the cookiecutter-templates repository. If empty, defaults to ${MCP_VERSION}
+ * SALT_MODELS_SYSTEM_COMMIT Commit/tag/branch for the reclass-system repository. If empty, defaults to ${MCP_VERSION}
+ * SHUTDOWN_ENV_ON_TEARDOWN optional; shut down the fuel-devops environment at the end of the job
+ *
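+ * Example (illustrative values only, assuming a cookied-cicd lab template):
+ *   LAB_CONFIG_NAME=cookied-cicd-pike-dvr-sl
+ *   MCP_VERSION=proposed
+ *   CFG01_CONFIG_IMAGE_NAME=cfg01.cookied-cicd-pike-dvr-sl-config-drive.iso
+ *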
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+ error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
+ try {
+ stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+ println "Remove environment ${ENV_NAME}"
+ shared.run_cmd("""\
+ dos.py erase ${ENV_NAME} || true
+ """)
+ println "Remove config drive ISO"
+ shared.run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
+
+ stage("Create an environment ${ENV_NAME} in disabled state") {
+ // deploy_hardware.xml
+ shared.run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export MANAGER=devops
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export TEST_GROUP=test_create_environment
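+ # '-k' selects only the tests matching TEST_GROUP; '-p no:django' and '-p no:ipdb' disable those pytest plugins for a non-interactive run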
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+ """)
+ }
+
+ stage("Generate the model") {
+ shared.generate_cookied_model()
+ }
+
+ stage("Generate config drive ISO") {
+ shared.generate_configdrive_iso()
+ }
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ shared.run_cmd("""\
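+ # the volume name below is assumed to follow the fuel-devops '<env>_<node>_<volume>' naming convention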
+ virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh pool-refresh --pool default
+ """)
+ }
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ // deploy_salt.xml
+ shared.run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export MANAGER=devops
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export BOOTSTRAP_TIMEOUT=900
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export TEST_GROUP=test_bootstrap_salt
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_salt.xml -k \${TEST_GROUP}
+ sleep 60 # wait for Jenkins to start and for the IO load to calm down
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for salt cluster
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
new file mode 100644
index 0000000..591467f
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -0,0 +1,74 @@
+/**
+ *
+ * Deploy CICD cluster using Jenkins master on cfg01 node
+ *
+ * Expected parameters:
+ *
+ * PARENT_NODE_NAME Name of the jenkins slave to create the environment
+ * PARENT_WORKSPACE Path to the parent job's workspace containing the tcp-qa repo
+ * ENV_NAME Fuel-devops environment name
+ * STACK_INSTALL Stacks to install using Jenkins on the cfg01 node, like "core:1800,cicd:1800", where 1800 is the per-stack timeout
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ * SHUTDOWN_ENV_ON_TEARDOWN optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+ error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
+ try {
+
+ if (! env.STACK_INSTALL) {
+ error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+ }
+
+ // Install core and cicd
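+ // STACK_INSTALL holds comma-separated "<stack>[:<timeout>]" entries, e.g.
+ // "core:1800,cicd:1800" runs the [deploy_openstack] job for 'core' and then
+ // for 'cicd'; an entry without ':' falls back to the 1800 default below.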
+ def stack
+ def timeout
+
+ for (element in "${env.STACK_INSTALL}".split(",")) {
+ if (element.contains(':')) {
+ (stack, timeout) = element.split(':')
+ } else {
+ stack = element
+ timeout = '1800'
+ }
+ stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
+ shared.run_job_on_day01_node(stack, timeout)
+ }
+
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+ }
+
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for cicd cluster
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ }
+ }
+}
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
new file mode 100644
index 0000000..e144372
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -0,0 +1,74 @@
+/**
+ *
+ * Deploy the product cluster using Jenkins master on CICD cluster
+ *
+ * Expected parameters:
+ *
+ * PARENT_NODE_NAME Name of the jenkins slave to create the environment
+ * PARENT_WORKSPACE Path to the parent job's workspace containing the tcp-qa repo
+ * ENV_NAME Fuel-devops environment name
+ * STACK_INSTALL Stacks to install using Jenkins on the CICD cluster, like "openstack:3200,stacklight:2400", where 3200 and 2400 are the per-stack timeouts
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ * SHUTDOWN_ENV_ON_TEARDOWN optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+ error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
+ try {
+
+ if (! env.STACK_INSTALL) {
+ error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+ }
+
+ // Install the cluster
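+ // STACK_INSTALL uses the same "<stack>[:<timeout>]" format as swarm-deploy-cicd,
+ // e.g. "openstack:3200,stacklight:2400"; an entry without ':' defaults to 1800.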
+ def stack
+ def timeout
+
+ for (element in "${env.STACK_INSTALL}".split(",")) {
+ if (element.contains(':')) {
+ (stack, timeout) = element.split(':')
+ } else {
+ stack = element
+ timeout = '1800'
+ }
+ stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+ shared.run_job_on_cicd_nodes(stack, timeout)
+ }
+
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+ }
+
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for the installed stacks
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ }
+ }
+}