Add cookied-cicd-k8s-calico and cookied-cicd-k8s-calico-sl

- Add new templates for k8s deployments using CICD

- In the new templates, there is a deploy-and-test.groovy script for
  integration CI, which performs the necessary steps to:

  - prepare the environment
  - get the inventory details (networks) from the environment
  - generate a cookied model for the environment (using the inventory
    details)
  - create a config-drive ISO (using the job created by D.Tyzhnenko)
  - bootstrap the Salt cluster on all nodes (using salt.yaml)
  - use Jenkins on the salt-master node to deploy the 'core' and
    'cicd' stacks
  - use Jenkins on the CICD nodes to deploy 'k8s', 'calico' and
    'stacklight'
  - run system tests from the tcp-qa test directories; two new pytest
    marks are added for this: 'k8s_calico' and 'k8s_calico_sl'

  The deploy-and-test.groovy script can use the shared library
  tcp_tests/templates/SharedPipeline.groovy at certain steps for
  common methods, as sketched below.
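
  A rough illustration of how such a pipeline can load and call the
  shared library (the node block, stage names and the exact test
  invocation are assumptions for illustration, not copied from the
  actual deploy-and-test.groovy):

    // Illustrative sketch only; the real deploy-and-test.groovy may differ.
    // Assumes the tcp-qa repository is already checked out and the job
    // provides parameters such as NODE_NAME, ENV_NAME and LAB_CONFIG_NAME.
    node ("${NODE_NAME}") {
        def shared = load "tcp_tests/templates/SharedPipeline.groovy"

        stage("Generate the cookied model") {
            shared.generate_cookied_model()
        }
        stage("Generate config drive ISO") {
            shared.generate_configdrive_iso()
        }
        stage("Deploy 'core' and 'cicd'") {
            shared.run_job_on_day01_node("core,cicd")
        }
        stage("Deploy 'k8s', 'calico' and 'stacklight'") {
            shared.run_job_on_cicd_nodes("k8s,calico,stacklight")
        }
        stage("Run tests") {
            // Hypothetical test step: select tests by the new pytest mark
            shared.run_cmd("""\
                export ENV_NAME=${ENV_NAME}
                . ./tcp_tests/utils/env_salt
                py.test -vvv -s -m k8s_calico
                """)
        }
    }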

- Fix the replacement of networks in shared-salt.yaml for cookied models

Change-Id: I958d183d8951b869877f0c36f4bd1000a5b7e6a9
diff --git a/tcp_tests/templates/SharedPipeline.groovy b/tcp_tests/templates/SharedPipeline.groovy
new file mode 100644
index 0000000..a34be31
--- /dev/null
+++ b/tcp_tests/templates/SharedPipeline.groovy
@@ -0,0 +1,114 @@
+common = new com.mirantis.mk.Common()
+
+def run_cmd(cmd, returnStdout=false) {
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = """\
+        set +x;
+        echo 'activate python virtualenv ${VENV_PATH}';
+        . ${VENV_PATH}/bin/activate;
+        bash -c 'set -ex; ${cmd.stripIndent()}'
+    """
+    return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+def generate_cookied_model() {
+        // do not fail if the environment doesn't exist
+        def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+        def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+        def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+        def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+        println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
+        println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
+        println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
+        println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
+
+        def parameters = [
+                string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
+                string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
+                string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
+                string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
+                string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
+                string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
+            ]
+        common.printMsg("Start building job 'swarm-cookied-model-generator' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-cookied-model-generator',
+            parameters: parameters
+}
+
+def generate_configdrive_iso() {
+        def SALT_MASTER_IP=run_cmd_stdout("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            echo \$SALT_MASTER_IP
+            """).trim().split().last()
+        println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+        def parameters = [
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
+                string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
+                booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
+                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+                string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+                string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
+                string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
+                booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
+                string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
+                string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
+                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+            ]
+        common.printMsg("Start building job 'create-cfg-config-drive' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'create-cfg-config-drive',
+            parameters: parameters
+}
+
+def run_job_on_day01_node(stack_to_install) {
+    // stack_to_install="core,cicd"
+    def stack = "${stack_to_install}"
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_day01
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+    """)
+}
+
+def run_job_on_cicd_nodes(stack_to_install) {
+    // stack_to_install="k8s,calico,stacklight"
+    def stack = "${stack_to_install}"
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_cicd
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+        sleep 60  # Wait for IO to calm down on the cluster nodes
+    """)
+}
+
+
+// pretend to be a groovy class, DO NOT REMOVE
+return this
\ No newline at end of file