import java.text.SimpleDateFormat

def dateFormat = new SimpleDateFormat("yyyyMMddHHmm")
def date = new Date()
def common_scripts_commit = "${COMMON_SCRIPTS_COMMIT}"
// Fall back to a generated ISO name when CONFIG_DRIVE_ISO_NAME is empty
def iso_name = "${CONFIG_DRIVE_ISO_NAME}" ?: "cfg01.${CLUSTER_NAME}-config-${dateFormat.format(date)}.iso"
def node_name = "${NODE_NAME}"
// Values substituted into the "export <KEY>=..." lines of user_data (master_config.yaml)
def smc = [:]
smc['SALT_MASTER_MINION_ID'] = "cfg01.${CLUSTER_NAME}.local"
smc['SALT_MASTER_DEPLOY_IP'] = "${SALT_MASTER_DEPLOY_IP}"
smc['DEPLOY_NETWORK_GW'] = "${DEPLOY_NETWORK_GW}"
smc['DEPLOY_NETWORK_NETMASK'] = "${DEPLOY_NETWORK_NETMASK}"
smc['DNS_SERVERS'] = "${DNS_SERVERS}"
// NOTE: single quotes prevent Groovy interpolation here; the literal ${...} placeholders
// are expanded by the shell from the build environment in the "Set data" stage
smc['PIPELINES_FROM_ISO'] = '${PIPELINES_FROM_ISO}'
smc['PIPELINE_REPO_URL'] = '${PIPELINE_REPO_URL}'
smc['MCP_VERSION'] = "${MCP_VERSION}"
// smc['LOCAL_REPOS'] = 'true'
smc['MCP_SALT_REPO_KEY'] = "${MCP_SALT_REPO_KEY}"
smc['MCP_SALT_REPO_URL'] = "${MCP_SALT_REPO_URL}"

def entries(m) {
    m.collect {k, v -> [k, v]}
}
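// Example: entries(['A': '1', 'B': '2']) returns [['A', '1'], ['B', '2']].
// Iterating over a Map directly with each{} is not CPS-safe in Jenkins pipelines,
// so the map is converted to a serializable list of [key, value] pairs first.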

node (node_name) {

    timestamps() {

        stage("Clean Workspace") {
            step([$class: 'WsCleanup'])
        }
| 34 | stage("Get scripts") { |
| 35 | // apt package genisoimage is required for this stage |
| 36 | // download create-config-drive |
| 37 | |
| 38 | def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/create_config_drive.sh" |
| 39 | sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive" |
| 40 | |
| 41 | def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/master_config.yaml" |
| 42 | sh "wget -O user_data ${user_data_script_url}" |
| 43 | } |

        stage("Clone mk-pipelines and pipeline-library") {
            sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git -b ${MCP_VERSION} mk-pipelines"
            sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git -b ${MCP_VERSION} pipeline-library"
            if (PIPELINE_LIBRARY_REF != '') {
                sh "cd pipeline-library && git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF} && git tag -f ${MCP_VERSION} FETCH_HEAD"
            }
            if (MK_PIPELINES_REF != '') {
                sh "cd mk-pipelines && git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF} && git tag -f ${MCP_VERSION} FETCH_HEAD"
            }
            //args = "--user-data user_data --vendor-data user_data2 --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
            args = "--user-data user_data2 --vendor-data user_data --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
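            // Illustrative only (parameter values are hypothetical): with
            // CLUSTER_NAME=virtual-mcp-ovs and the default ISO name, the command
            // assembled here and executed in the "Create config-drive" stage would be:
            //   ./create-config-drive --user-data user_data2 --vendor-data user_data \
            //       --hostname cfg01 --model model --mk-pipelines mk-pipelines/ \
            //       --pipeline-library pipeline-library/ cfg01.virtual-mcp-ovs-config-201810101519.iso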
        }

        stage("Get cluster model") {
            def model_url = "${MODEL_URL}"
            sh "rm -rf model"
            if (MODEL_URL_OBJECT_TYPE == 'tar.gz') {
                sh "wget -O model.tar.gz '${model_url}'"
                sh "mkdir model && cd model && tar zxfv ../model.tar.gz"
            } else {
                sh "git clone --recursive $model_url -b ${MCP_VERSION} model"
                // remove the submodule's .git file, which contains a hardcoded absolute path
                sh "rm model/classes/system/.git"
            }
        }

| 71 | stage("Set data"){ |
            for (i in entries(smc)) {
                sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data"
            }
        }

        stage("Create user_data2") {
            // cloud-config merging: http://jen20.com/2015/10/04/cloudconfig-merging.html
            // TODO(ddmitriev): allow reading this file from the
            //   ./tcp_tests/templates/${LAB_CONFIG_NAME}/ directory for each lab
            def user_data2 = """\
#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html

#write_files:  # write_files does not work as expected here: it overwrites this key from the mcp-common-scripts YAML, losing its data
#  - path: /etc/default/grub.d/97-enable-grub-menu.cfg
#    content: |
#      GRUB_RECORDFAIL_TIMEOUT=30
#      GRUB_TIMEOUT=10
#      GRUB_TIMEOUT_STYLE=menu
#
#  - path: /root/interfaces
#    content: |
#      auto lo
#      iface lo inet loopback
#
#      auto ens3
#      iface ens3 inet dhcp
#
#  - path: /root/.ssh/config
#    owner: root:root
#    permissions: '0600'
#    content: |
#      Host *
#        ServerAliveInterval 60
#        ServerAliveCountMax 0
#        StrictHostKeyChecking no
#        UserKnownHostsFile /dev/null
#
#  - path: /etc/cloud/master_environment_override
#    owner: root:root
#    permissions: '0600'
#    content: |
#      export SALT_MASTER_MINION_ID="cfg01.${CLUSTER_NAME}.local"
#      export SALT_MASTER_DEPLOY_IP="${SALT_MASTER_DEPLOY_IP}"
#      export DEPLOY_NETWORK_GW="${DEPLOY_NETWORK_GW}"
#      export DEPLOY_NETWORK_NETMASK="${DEPLOY_NETWORK_NETMASK}"
#      export DNS_SERVERS="${DNS_SERVERS}"
#      export PIPELINES_FROM_ISO="${PIPELINES_FROM_ISO}"
#      export PIPELINE_REPO_URL="${PIPELINE_REPO_URL}"
#      export MCP_VERSION="${MCP_VERSION}"
#      export LOCAL_REPOS="true"
#      export MCP_SALT_REPO_KEY="${MCP_SALT_REPO_KEY}"
#      export MCP_SALT_REPO_URL="${MCP_SALT_REPO_URL}"

output:
  all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'

ssh_pwauth: True
users:
  - name: root
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash

disable_root: false
chpasswd:
  list: |
    root:r00tme
  expire: False

bootcmd:
  # Block access to SSH while the node is being prepared
  - cloud-init-per once sudo touch /is_cloud_init_started
  # Enable root access
  - sed -i -e '/^PermitRootLogin/s/.*/PermitRootLogin yes/' /etc/ssh/sshd_config
  - service sshd restart

merge_how: "dict(recurse_array)+list(append)"
"""
            writeFile(file: "user_data2", text: user_data2, encoding: "UTF-8")
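            // Note on the merge: "merge_how: dict(recurse_array)+list(append)" asks
            // cloud-init to recursively merge dictionaries and append lists when this
            // user-data is combined with the vendor-data (master_config.yaml), rather
            // than letting one document simply replace the other's keys.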
        }

        stage("Create config-drive") {
            // create cfg config-drive
            //sh "sed -i 's,config_dir/vendor-data,config_dir/user-data1,g' ./create-config-drive"
            sh "./create-config-drive ${args}"
        }

        stage("Save artifacts") {
            archiveArtifacts allowEmptyArchive: false,
                artifacts: "${iso_name}"
        }

        stage("Download config drive to slave") {
            if (DOWNLOAD_CONFIG_DRIVE == 'true') {
                def b_res = build job: 'download-config-drive',
                    parameters: [
                        string(name: 'IMAGE_URL', value: "${BUILD_URL}/artifact/${iso_name}"),
                        string(name: 'NODE_NAME', value: "${NODE_NAME}")
                    ]
            } else {
                echo "The config drive ISO was generated but not downloaded to the slave"
            }
        }
    }
}