// Shared pipeline helpers from the Mirantis mk-pipelines library (printMsg, ...).
// Deliberately assigned WITHOUT 'def': a binding variable is visible inside
// the functions below (run_cmd), while a 'def' local would not be.
common = new com.mirantis.mk.Common()
| 2 | |
// Run a (possibly multi-line) shell snippet inside the fuel-devops python
// virtualenv on the Jenkins agent.
//
// cmd          - shell snippet; common leading indentation is removed via
//                stripIndent() so triple-quoted Groovy strings work as-is.
// returnStdout - when true, return the command's stdout as a String instead
//                of streaming it to the build log (mirrors the 'sh' step).
//
// Returns whatever the Jenkins 'sh' step returns for the given options.
def run_cmd(cmd, returnStdout=false) {
    common.printMsg("Run shell command:\n" + cmd, "blue")
    def VENV_PATH = '/home/jenkins/fuel-devops30'
    // 'set +x' keeps the virtualenv activation quiet; the payload itself runs
    // in a child bash with 'set -ex' (trace + fail-fast).
    // Fixes vs. previous revision: the 'set -ex;' prefix was duplicated, and
    // 'script' was an undeclared binding variable — now a proper local.
    def script = "set +x; echo 'activate python virtualenv ${VENV_PATH}';. ${VENV_PATH}/bin/activate; bash -c 'set -ex;${cmd.stripIndent()}'"
    return sh(script: script, returnStdout: returnStdout)
}
| 9 | |
// Convenience wrapper around run_cmd() that captures the command's stdout
// and returns it to the caller instead of printing it to the build log.
def run_cmd_stdout(cmd) {
    def captured_output = run_cmd(cmd, true)
    return captured_output
}
| 13 | |
// Main pipeline body: on the selected agent, (re)create a fuel-devops lab
// environment, bootstrap a Salt cluster with tcp-qa fixtures, drive the MCP
// deployment through day01/CICD Jenkins jobs, and finally run the test suite.
// The environment is always destroyed in the 'finally' step.
node ("${NODE_NAME}") {
    try {

        stage("Clean the environment") {
            println "Clean the working directory ${env.WORKSPACE}"
            deleteDir()
            // do not fail if the environment doesn't exist
            println "Remove environment ${ENV_NAME}"
            run_cmd("""\
            dos.py erase ${ENV_NAME} || true
            """)
            println "Remove config drive ISO"
            run_cmd("""\
            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
            """)
        }

        stage("Clone tcp-qa project and install requirements") {
            // Clone into the workspace; optionally check out a Gerrit ref
            // (TCP_QA_REFS) on top, then install the python requirements
            // into the virtualenv activated by run_cmd().
            run_cmd("""\
            git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
            #cd tcp-qa
            if [ -n "$TCP_QA_REFS" ]; then
                set -e
                git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
            fi
            pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
            """)
        }

        // load shared methods from the cloned tcp-qa repository.
        // DO NOT MOVE this code before cloning the repo
        def rootDir = pwd()
        def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"

        stage("Create an environment ${ENV_NAME} in disabled state") {
            // NOTE(review): an earlier comment here claimed "do not fail if
            // environment doesn't exist", but there is no '|| true' guard —
            // a failure of this script fails the build. Confirm intent.
            run_cmd("""\
            python ./tcp_tests/utils/create_devops_env.py
            """)
        }

        stage("Generate the model") {
            shared.generate_cookied_model()
        }

        stage("Generate config drive ISO") {
            shared.generate_configdrive_iso()
        }

        stage("Upload generated config drive ISO into volume on cfg01 node") {
            run_cmd("""\
            virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
            virsh pool-refresh --pool default
            """)
        }

        stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
            // Bootstrap the Salt master via the tcp-qa pytest fixture; the
            // env vars below configure the fixtures (devops manager, keep the
            // env alive on teardown, 15-minute bootstrap timeout).
            run_cmd("""\
            export MANAGER=devops
            export SHUTDOWN_ENV_ON_TEARDOWN=false
            export BOOTSTRAP_TIMEOUT=900
            export PYTHONIOENCODING=UTF-8
            export REPOSITORY_SUITE=${MCP_VERSION}
            #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
            export TEST_GROUP=test_install_local_salt
            py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
            sleep 60 # wait for jenkins to start and IO calm down

            """)
        }

        // Install core and cicd
        stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
            shared.run_job_on_day01_node("core")
        }

        stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
            shared.run_job_on_day01_node("cicd")
        }

        // Install the cluster
        // STACK_INSTALL is a comma-separated list of deploy_openstack stacks;
        // each one runs as its own stage on the CICD Jenkins.
        for (stack in "${STACK_INSTALL}".split(",")) {
            stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
                shared.run_job_on_cicd_nodes(stack)
            }
        }

        stage("Run tests") {
            // Run the k8s_calico_sl-marked tests against the already-deployed
            // cluster: the exported *_INSTALLED / sl_installed flags make the
            // tcp-qa fixtures skip their deployment steps.
            run_cmd("""\
            export ENV_NAME=${ENV_NAME}
            . ./tcp_tests/utils/env_salt
            . ./tcp_tests/utils/env_k8s

            # Initialize variables used in tcp-qa tests
            export CURRENT_SNAPSHOT=sl_deployed  # provide the snapshot name required by the test
            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separatelly

            export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
            export salt_master_port=6969
            export SALT_USER=\$SALTAPI_USER
            export SALT_PASSWORD=\$SALTAPI_PASS
            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
            export K8S_INSTALLED=true              # skip k8s_deployed fixture
            export sl_installed=true              # skip sl_deployed fixture

            py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl

            #dos.py suspend ${ENV_NAME}
            #dos.py snapshot ${ENV_NAME} test_completed
            #dos.py resume ${ENV_NAME}
            #dos.py time-sync ${ENV_NAME}
            """)
        }

    } catch (e) {
        common.printMsg("Job failed", "red")
        // Re-raise so Jenkins marks the build as failed after cleanup below.
        throw e
    } finally {
        // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
        // and report appropriate data to TestRail
        // Always tear the environment down, regardless of the build result.
        run_cmd("""\
        dos.py destroy ${ENV_NAME}
        """)
    }
}