Add cookied-cicd-k8s-calico and cookied-cicd-k8s-calico-sl

- Add new templates for k8s deployments using CICD

- In the new templates, there is a deploy-and-test.groovy script for
  integration CI, which performs the necessary steps to:

  - prepare the environment
  - get the inventory details (networks) from the environment
  - generate a cookied model for the environment (using the inventory
    details)
  - create the ISO config-drive (using the job created by D.Tyzhnenko)
  - bootstrap the Salt cluster on all nodes (using salt.yaml)
  - use Jenkins on the salt-master to deploy the 'core' and 'cicd'
    stacks
  - use Jenkins on the CICD nodes to deploy 'k8s', 'calico' and
    'stacklight'
  - run system tests from the tcp-qa test directories; for this there
    are two new pytest marks: 'k8s_calico' and 'k8s_calico_sl'

  The deploy-and-test.groovy script can use the shared library
  tcp_tests/templates/SharedPipeline.groovy at certain steps for
  common methods.
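
  For reference, the shared methods are loaded at runtime from the
  cloned repository and called from the pipeline stages (excerpted
  from the script in the diff below):

      def rootDir = pwd()
      def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
      shared.generate_cookied_model()        // render the cookied model
      shared.run_job_on_day01_node("core")   // trigger a Jenkins job on cfg01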

- Fix network replacement in shared-salt.yaml for cookied models

Change-Id: I958d183d8951b869877f0c36f4bd1000a5b7e6a9
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
new file mode 100644
index 0000000..71b3e8d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
@@ -0,0 +1,140 @@
+common = new com.mirantis.mk.Common()
+
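+// Run a shell command on the Jenkins node inside the fuel-devops python
+// virtualenv, printing the command first for easier log reading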
+def run_cmd(cmd, returnStdout=false) {
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}'; . ${VENV_PATH}/bin/activate; bash -c 'set -ex; ${cmd.stripIndent()}'"
+    return sh(script: script, returnStdout: returnStdout)
+}
+
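+// Same as run_cmd(), but return the captured stdout of the command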
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+node ("${NODE_NAME}") {
+  try {
+
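+    // Overall flow: clean up -> clone tcp-qa -> create the environment ->
+    // generate the model and config-drive -> bootstrap salt ->
+    // deploy the stacks via Jenkins -> run the system tests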
+    stage("Clean the environment") {
+        println "Clean the working directory ${env.WORKSPACE}"
+        deleteDir()
+        // do not fail if the environment doesn't exist
+        println "Remove environment ${ENV_NAME}"
+        run_cmd("""\
+            dos.py erase ${ENV_NAME} || true
+        """)
+        println "Remove config drive ISO"
+        run_cmd("""\
+            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+        """)
+    }
+
+    stage("Clone tcp-qa project and install requirements") {
+        run_cmd("""\
+        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+        #cd tcp-qa
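+        # Optionally fetch and check out a change under review,
+        # e.g. TCP_QA_REFS='refs/changes/NN/NNNNNN/N' (a Gerrit refspec)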
+        if [ -n "$TCP_QA_REFS" ]; then
+            set -e
+            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+        fi
+        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+        """)
+    }
+
+    // load shared methods from the cloned tcp-qa repository.
+    // DO NOT MOVE this code before the repository is cloned
+    def rootDir = pwd()
+    def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
+
+    stage("Create an environment ${ENV_NAME} in disabled state") {
+        // create the environment in the disabled state; the nodes are started later
+        run_cmd("""\
+        python ./tcp_tests/utils/create_devops_env.py
+        """)
+    }
+
+    stage("Generate the model") {
+        shared.generate_cookied_model()
+    }
+
+    stage("Generate config drive ISO") {
+        shared.generate_configdrive_iso()
+    }
+
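+    // Upload the ISO into the cfg01 config volume in the 'default' libvirt
+    // pool, so the config drive is in place before the nodes are started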
+    stage("Upload generated config drive ISO into volume on cfg01 node") {
+        run_cmd("""\
+        virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+        virsh pool-refresh --pool default
+        """)
+    }
+
+    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+        run_cmd("""\
+        export MANAGER=devops
+        export SHUTDOWN_ENV_ON_TEARDOWN=false
+        export BOOTSTRAP_TIMEOUT=900
+        export PYTHONIOENCODING=UTF-8
+        export REPOSITORY_SUITE=${MCP_VERSION}
+        #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
+        export TEST_GROUP=test_install_local_salt
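+        # run the single pytest case that bootstraps the salt cluster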
+        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
+        sleep 60  # wait for Jenkins to start and for IO to calm down
+
+        """)
+    }
+
+    // Install core and cicd
+    stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
+        shared.run_job_on_day01_node("core")
+    }
+
+    stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
+        shared.run_job_on_day01_node("cicd")
+    }
+
+    // Install the cluster
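+    // STACK_INSTALL is a comma-separated list of stacks; for these templates
+    // it is expected to be "k8s,calico" or "k8s,calico,stacklight"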
+    for (stack in "${STACK_INSTALL}".split(",")) {
+        stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+            shared.run_job_on_cicd_nodes(stack)
+        }
+    }
+
+    stage("Run tests") {
+        run_cmd("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            . ./tcp_tests/utils/env_k8s
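+            # env_salt and env_k8s export the SALT_MASTER_IP and SALTAPI_*
+            # variables that are reused below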
+
+            # Initialize variables used in tcp-qa tests
+            export CURRENT_SNAPSHOT=sl_deployed  # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled in separately
+
+            export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
+            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+            export salt_master_port=6969
+            export SALT_USER=\$SALTAPI_USER
+            export SALT_PASSWORD=\$SALTAPI_PASS
+            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
+            export K8S_INSTALLED=true              # skip k8s_deployed fixture
+            export sl_installed=true              # skip sl_deployed fixture
+
+            py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl
+
+            #dos.py suspend ${ENV_NAME}
+            #dos.py snapshot ${ENV_NAME} test_completed
+            #dos.py resume ${ENV_NAME}
+            #dos.py time-sync ${ENV_NAME}
+            """)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
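+    // always tear down the devops environment, even if the job failed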
+    run_cmd("""\
+        dos.py destroy ${ENV_NAME}
+    """)
+  }
+}
\ No newline at end of file