Merge "Increase number of concurrent jobs"
diff --git a/build-debian-packages-influxdb-relay.groovy b/build-debian-packages-influxdb-relay.groovy
new file mode 100644
index 0000000..6040849
--- /dev/null
+++ b/build-debian-packages-influxdb-relay.groovy
@@ -0,0 +1,112 @@
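+// Build influxdb-relay Debian packages inside a throwaway Docker image and optionally publish them to Aptly.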
+def common = new com.mirantis.mk.Common()
+def git = new com.mirantis.mk.Git()
+def artifactory = new com.mirantis.mk.Artifactory()
+def aptly = new com.mirantis.mk.Aptly()
+
+def timestamp = common.getDatetime()
+def version = "1.0~${timestamp}"
+
+node('docker') {
+    try {
+
+        stage("cleanup") {
+            sh("rm -rf * || true")
+        }
+
+        def workingDir = "src/github.com/influxdata"
+        stage("checkout") {
+            git.checkoutGitRepository(
+                "${workingDir}/influxdb-relay",
+                "${SOURCE_URL}",
+                SOURCE_BRANCH,
+                SOURCE_CREDENTIALS,
+                true,
+                30,
+                1
+            )
+        }
+
+        try {
+
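+            // Pass the Jenkins user's UID into the image build so files created inside the container stay owned by the workspace owner.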
+            def jenkinsUID = sh (
+                script: 'id -u',
+                returnStdout: true
+            ).trim()
+            def imgName = "${OS}-${DIST}-${ARCH}"
+            def img
+
+            stage("build image") {
+                img = docker.build(
+                    "${imgName}:${timestamp}",
+                    [
+                        "--build-arg uid=${jenkinsUID}",
+                        "--build-arg timestamp=${timestamp}",
+                        "-f ${workingDir}/influxdb-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                        "."
+                    ].join(' ')
+                )
+            }
+            stage("build package") {
+                img.inside {
+                    sh("""wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz &&
+                        tar xf go1.9.linux-amd64.tar.gz &&
+                        export GOROOT=\$PWD/go &&
+                        export PATH=\$PATH:\$GOROOT/bin &&
+                        export GOPATH=\$PWD &&
+                        cd src/github.com/influxdata/influxdb-relay &&
+                        ./build.py --package --version=\"${version}\" --platform=linux --arch=amd64""")
+                }
+                archiveArtifacts artifacts: "${workingDir}/influxdb-relay/build/*.deb"
+            }
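+            // The Aptly API is shared between jobs, so take a lock before uploading and publishing.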
+            if (UPLOAD_APTLY.toBoolean()) {
+                lock("aptly-api") {
+                    stage("upload") {
+                        def buildSteps = [:]
+                        def debFiles = sh script: "ls ${workingDir}/influxdb-relay/build/*.deb", returnStdout: true
+                        def debFilesArray = debFiles.trim().tokenize()
+                        def workspace = common.getWorkspace()
+                        for (int i = 0; i < debFilesArray.size(); i++) {
+
+                            def debFile = debFilesArray[i]
+                            buildSteps[debFile] = aptly.uploadPackageStep(
+                                "${workspace}/"+debFile,
+                                APTLY_URL,
+                                APTLY_REPO,
+                                true
+                            )
+                        }
+                        parallel buildSteps
+                    }
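+                    // Snapshot the repo at this build's timestamp and republish it.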
+                    stage("publish") {
+                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                        aptly.publish(APTLY_URL)
+                    }
+                }
+            }
+
+        } catch (Exception e) {
+            currentBuild.result = 'FAILURE'
+            println "Cleaning up docker images"
+            sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
+            throw e
+        }
+
+    } catch (Throwable e) {
+        // If there was an exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+        throw e
+    } finally {
+        common.sendNotification(currentBuild.result, "", ["slack"])
+
+        if (currentBuild.result != 'FAILURE') {
+            sh("rm -rf *")
+        }
+    }
+}
diff --git a/build-debian-packages-libvirt-exporter.groovy b/build-debian-packages-libvirt-exporter.groovy
index ab83db6..d373961 100644
--- a/build-debian-packages-libvirt-exporter.groovy
+++ b/build-debian-packages-libvirt-exporter.groovy
@@ -31,7 +31,7 @@
             }
         }
 
-        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-xenial")
+        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
         stage("build package") {
             img.inside("-u root:root") {
                 sh("apt-get update && apt-get install ruby ruby-dev && gem install fpm")
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 9c7698b..291f799 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -65,6 +65,9 @@
 def saltMaster
 def venv
 
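+// Loose IPv4 pattern used to sanity-check addresses returned in stack outputs.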
+def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
+
 if (STACK_TYPE == 'aws') {
     def aws_env_vars
 }
@@ -140,17 +143,17 @@
                         common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
                     }
 
-                    def legacy_env = false;
-                    //FIXME:
-                    if (false && STACK_TEMPLATE.startsWith('virtual_') && !STACK_TEMPLATE.contains('aio')) {
-                        legacy_env = true;
-                    }
-
-                    openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, legacy_env)
+                    openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
                 }
 
                 // get SALT_MASTER_URL
                 saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+                // check that saltMasterHost is valid
+                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                    throw new Exception("saltMasterHost is not a valid ip")
+                }
+
                 currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
 
                 SALT_MASTER_URL = "http://${saltMasterHost}:6969"
@@ -202,6 +205,12 @@
 
                 // get outputs
                 saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
+                // check that saltMasterHost is valid
+                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                    throw new Exception("saltMasterHost is not a valid ip")
+                }
+
                 currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
                 SALT_MASTER_URL = "http://${saltMasterHost}:6969"
 
@@ -247,6 +256,10 @@
             stage('Install Ceph OSDs') {
                 orchestrate.installCephOsd(saltMaster)
             }
+
+            stage('Install Ceph clients') {
+                orchestrate.installCephClient(saltMaster)
+            }
         }
 
         // install k8s
@@ -271,7 +284,6 @@
                 stage('Install Contrail for Kubernetes') {
                     orchestrate.installContrailNetwork(saltMaster)
                     orchestrate.installContrailCompute(saltMaster)
-                    orchestrate.installKubernetesContrailCompute(saltMaster)
                 }
             }
 
@@ -400,7 +412,7 @@
         if (common.checkContains('STACK_TEST', 'ceph')) {
             stage('Run infra tests') {
                 def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
-                salt.cmdRun(saltMaster, 'I@salt:master', cmd)
+                salt.cmdRun(saltMaster, 'I@salt:master', cmd, false)
                 writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, 'I@salt:master', '/root/report.xml'))
                 junit(keepLongStdio: true, testResults: 'report.xml')
             }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 1d8845d..87b2c8e 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -153,8 +153,8 @@
             smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
             smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
             smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-            smc['CICD_CONTROL_ADDRESS'] = templateContext['default_context']['cicd_control_address']
-            smc['INFRA_CONFIG_ADDRESS'] = templateContext['default_context']['infra_config_address']
+            smc['CICD_CONTROL_ADDRESS'] = templateContext['default_context']['cicd_control_vip_address']
+            smc['INFRA_CONFIG_ADDRESS'] = templateContext['default_context']['salt_master_address']
 
             for (i in common.entries(smc)) {
                 sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 4da0842..e59f0ce 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -123,6 +123,11 @@
             python.setupCookiecutterVirtualenv(cutterEnv)
         }
 
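+        // Run the workflow definition self-test before processing the context files.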
+        stage("Check workflow_definition") {
+            sh "python ${env.WORKSPACE}/workflow_definition_test.py"
+        }
+
         def contextFiles
         dir("${templateEnv}/contexts") {
             contextFiles = findFiles(glob: "*.yml")