@Library('tcp-qa')_

import groovy.xml.XmlUtil

common = new com.mirantis.mk.Common()
shared = new com.mirantis.system_qa.SharedPipeline()

NODE_LABEL = "sre-team-infra"
| ENV_NAME = "bm-mcc-mosk" |
| ENV_NAME = "vkhlyunev-bm-mosk" |
MAINTENANCE_TEAM_SSH_ID = 'maintenance-team-ssh'
IPMI_CREDS = 'lab_engineer' // base bm lab
def seed_ext_ip = '172.16.180.2'
def kubectl_openstack_cmd = ''
ssh_params = "-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"

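// End-to-end flow: wipe the previous BM lab, boot a seed node via Heat,
// bootstrap the MCC management and child clusters from the seed node over SSH,
// deploy MOSK on the child cluster and finally wire up DNS for the OpenStack endpoints.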
timeout(time: 3, unit: 'HOURS') {
    timestamps {
        node ("${NODE_LABEL}") {
            checkout scm
            shared.update_working_dir()
            withCredentials(
                [[$class : 'UsernamePasswordMultiBinding',
                  credentialsId : env.OS_CREDENTIALS,
                  passwordVariable: 'OS_PASSWORD',
                  usernameVariable: 'OS_USERNAME'
                ]]) {
                env.OS_IDENTITY_API_VERSION = 3
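                // Drop the previous Heat stack (waiting until it is actually gone)
                // and power-cycle the bare-metal nodes over IPMI so the lab starts clean.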
| stage("Pre-cleanup: erase BM labs") { |
| // TODO: wipe all bm labs, including MCP1 |
| println "Remove heat stack '${ENV_NAME}'" |
| shared.run_cmd("""\ |
| openstack stack delete -y ${ENV_NAME} || true |
| timeout 20m /bin/bash -c "while openstack stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done" |
| """) |
| withCredentials([ |
| [$class : 'UsernamePasswordMultiBinding', |
| credentialsId : "${IPMI_CREDS}", |
| passwordVariable: 'IPMI_PASS', |
| usernameVariable: 'IPMI_USER'] |
| ]) { |
| env.IPMI_PASS = IPMI_PASS |
| env.IPMI_USER = IPMI_USER |
| shared.reboot_hardware_nodes("${IPMI_CREDS}") |
| } //withCredentials IPMI |
| } //stage |
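                // Spin up the seed node from the Heat template and pick its external IP
                // from the stack outputs; everything below runs on that node over SSH.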
| stage("Boot seed node's heat stack"){ |
| println "Create stack '${ENV_NAME}'" |
| shared.run_cmd("pushd bm_mcc_mosk && openstack stack create --wait -t mcc_seed_2401.yaml --parameter env_name=${ENV_NAME} ${ENV_NAME}") |
| seed_ext_ip = shared.run_cmd_stdout("openstack stack output show ${ENV_NAME} seed_ext_ip -f value -c output_value").trim().split().last() |
| println "SEED IP: '${seed_ext_ip}'" |
| } //stage |
| } //withCredentials OS cloud |
            sshagent(credentials: [MAINTENANCE_TEAM_SSH_ID]) {
                withCredentials(
                    [[$class: 'SSHUserPrivateKeyBinding',
                      keyFileVariable: "MAINTENANCE_TEAM_SSH_KEY",
                      credentialsId: MAINTENANCE_TEAM_SSH_ID,
                      usernameVariable: "MAINTENANCE_TEAM_SSH_USERNAME"]]) {
| stage("Bootstrap MCC+child"){ |
| sh "scp ${ssh_params} ${MAINTENANCE_TEAM_SSH_KEY} root@${seed_ext_ip}:/root/.ssh/id_rsa " |
| sh "scp -r ${ssh_params} bm_mcc_mosk root@${seed_ext_ip}: " |
| sh "ssh ${ssh_params} root@${seed_ext_ip} bash do_deploy_mcc_mgmt.sh" |
| sh "ssh ${ssh_params} root@${seed_ext_ip} bash do_deploy_child.sh" |
                        // TODO: unhardcode cluster names
                        check_child_cmd = "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/kubeconfig -n mosk get cluster mosk -o jsonpath='{.status.providerStatus.ready}' 2>/dev/null || echo 'ssh error, ignoring'"
                        check_kcc_cmd = "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/kubeconfig -n mosk get kcc mosk-ceph -o jsonpath='{.status.shortClusterInfo.state}' 2>/dev/null || echo 'ssh error, ignoring'"
                        child_ready = false
                        kcc_ready = false
                        println "Waiting for the child cluster to become ready..."
                        while (!child_ready) {
                            sleep 60
                            status = sh(returnStdout: true, script: "${check_child_cmd}").trim()
                            if (status == "true") {
                                child_ready = true
                            } //if
                        } //while
| println "Waiting child Ceph cluster to become ready..." |
| while(!kcc_ready){ |
| sleep 60 |
| status = sh(returnStdout: true, script: "${check_kcc_cmd}").trim() |
| if (status == "Ready"){ |
| kcc_ready = true |
| } //if |
| } //while |
| } //stage MCC |
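                    // Fetch the child cluster kubeconfig, generate the SSL secrets and apply the
                    // OpenStackDeployment manifests, then wait for the rollout to reach APPLIED.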
| stage("Prepare and deploy MOSK"){ |
| sh "ssh ${ssh_params} root@${seed_ext_ip} '/bin/bash /root/get_child_kubeconfig.sh'" |
| sh "ssh ${ssh_params} root@${seed_ext_ip} '/bin/bash /root/bm_mcc_mosk/utils/tsl_gen.sh'" |
| kubectl_openstack_cmd = "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/child.kubeconfig -n openstack " |
| sh "${kubectl_openstack_cmd} apply -f /root/bm_mcc_mosk/child/kaas_workloads/osdpl-ssl-secrets.yaml" |
| sh "${kubectl_openstack_cmd} apply -f /root/bm_mcc_mosk/child/kaas_workloads/osdpl.yaml" |
                        mosk_check_cmd = "${kubectl_openstack_cmd} get osdplst osh-dev -o jsonpath='{.status.osdpl.state}'"
                        mosk_ready = false
                        while (!mosk_ready) {
                            sleep 60
                            status = sh(returnStdout: true, script: "${mosk_check_cmd}").trim()
                            if (status == "APPLIED") {
                                mosk_ready = true
                            } //if
                        } //while
                    } //stage MOSK
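                    // Publish the child cluster's ingress IP through the dedicated coredns deployment,
                    // then point the kube-system CoreDNS at it so OpenStack endpoint names resolve in-cluster.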
| stage("Configure DNS server") { |
| get_ingress_ip_cmd = "${kubectl_openstack_cmd} get service ingress -o jsonpath='{.status.loadBalancer.ingress[0].ip}'" |
| get_dns_internal_ip_cmd = "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/child.kubeconfig -n coredns get service coredns-coredns -o jsonpath='{.spec.clusterIP}'" |
| ingress_ip = sh(returnStdout: true, script: get_ingress_ip_cmd).trim() |
| sh "ssh ${ssh_params} root@${seed_ext_ip} sed -i 's/!!!EXT_DNS_IP/${ingress_ip}/g' /root/bm_mcc_mosk/child/kaas_workloads/coredns.yaml" |
| sh "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/child.kubeconfig apply -f /root/bm_mcc_mosk/child/kaas_workloads/coredns.yaml" |
                        sleep 120
                        dns_internal_ip = sh(returnStdout: true, script: get_dns_internal_ip_cmd).trim()
                        sh "ssh ${ssh_params} root@${seed_ext_ip} \"/root/kaas-bootstrap/bin/kubectl --kubeconfig /root/child.kubeconfig -n kube-system get configmap coredns -oyaml > coredns.conf\""
                        sh "ssh ${ssh_params} root@${seed_ext_ip} /usr/bin/python3 /root/bm_mcc_mosk/utils/update_kube_dns_conf.py ${dns_internal_ip}"
                        sh "ssh ${ssh_params} root@${seed_ext_ip} /root/kaas-bootstrap/bin/kubectl --kubeconfig /root/child.kubeconfig apply -f coredns.patched.conf"
                    } // stage DNS
                } //withCredentials
            } //sshagent
        } //node
    } //timestamps
} //timeout