# Global configuration: component versions and source repositories.
# Every value can be overridden from the environment before running the script.
export KUBEVIRT_RELEASE=${KUBEVIRT_RELEASE:-"v0.59.0"}
export RELEASE_OPENSTACK_K8S_REPO=${RELEASE_OPENSTACK_K8S_REPO:-https://github.com/Mirantis/release-openstack-k8s}

export EXTERNAL_SNAPSHOTTER_REPO=${EXTERNAL_SNAPSHOTTER_REPO:-"https://github.com/kubernetes-csi/external-snapshotter"}
export EXTERNAL_SNAPSHOTTER_TAG=${EXTERNAL_SNAPSHOTTER_TAG:-"v6.2.2"}

export ROOK_REPO=${ROOK_REPO:-"https://github.com/rook/rook"}
export ROOK_TAG=${ROOK_TAG:-"v1.11.4"}

export KUBE_OVN_REPO=${KUBE_OVN_REPO:-"https://github.com/kubeovn/kube-ovn"}
export KUBE_OVN_TAG=${KUBE_OVN_TAG:-"v1.11.8"}

export CDI_TAG=${CDI_TAG:-"v1.56.0"}

# Directory layout: everything is resolved relative to this script's location.
ROOT_DIR=$(cd "$(dirname "$0")" && pwd)
RESOURCES_DIR=${ROOT_DIR}/resources
# Reuse ROOT_DIR instead of re-running the dirname/pwd dance a second time.
WORKDIR=${ROOT_DIR}/.workdir
mkdir -p "$WORKDIR"
| 19 | |
# Clone a git repository into a destination directory and optionally check
# out a tag.
# Arguments:
#   $1 - repository URL
#   $2 - tag to check out (empty string: stay on the default branch)
#   $3 - destination directory
# The clone is skipped when the destination already exists, making the
# function idempotent. The caller's working directory is never changed
# (the previous 'pushd' had no matching 'popd', so every tagged clone
# silently left the caller inside $dst).
function clone_repo {
    local repo=$1
    local tag=$2
    local dst=$3

    if [[ ! -d "$dst" ]]; then
        git clone "$repo" "$dst"
    fi

    if [[ -n "$tag" ]]; then
        # git -C runs the checkout inside $dst without touching our cwd.
        git -C "$dst" checkout "tags/$tag"
    fi
}
| 34 | |
# Block until the given CRD is registered in the cluster.
# Arguments:
#   $1 - fully qualified CRD name (e.g. miracephs.lcm.mirantis.com)
function wait_for_crd {
    local crd="$1"
    # Poll every 5 seconds until kubectl can see the CRD.
    until kubectl get crd "$crd"; do
        echo "Waiting crd $crd"
        sleep 5
    done
}
| 43 | |
# Deploy the KubeVirt operator and its KubeVirt custom resource for the
# release pinned in KUBEVIRT_RELEASE.
function install_kubevirt_operator {
    local base_url="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}"
    local manifest
    for manifest in kubevirt-operator.yaml kubevirt-cr.yaml; do
        kubectl apply -f "${base_url}/${manifest}"
    done
}
| 48 | |
# Install the virtctl CLI matching KUBEVIRT_RELEASE into /usr/sbin.
# No-op when /usr/sbin/virtctl already exists (the installed version is not
# compared against KUBEVIRT_RELEASE).
# Returns non-zero when the download fails instead of attempting to install
# a missing file (the old version ran mv/chmod unconditionally).
function install_virtctl {
    local binary="virtctl-${KUBEVIRT_RELEASE}-linux-amd64"
    if [[ ! -f /usr/sbin/virtctl ]]; then
        if ! wget "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/${binary}"; then
            echo "Failed to download ${binary}" >&2
            return 1
        fi
        mv "$binary" /usr/sbin/virtctl
        chmod +x /usr/sbin/virtctl
    fi
}
| 56 | |
# Install the complete KubeVirt stack: operator + CR, the virtctl CLI and
# the local KubeVirt configuration overlay.
function install_kubevirt {
    install_kubevirt_operator
    install_virtctl
    kubectl apply -f "${RESOURCES_DIR}/kubevirt.yaml"
    #kubectl apply -f ${RESOURCES_DIR}/libvirt.yaml
}
| 63 | |
# Deploy the Containerized Data Importer (operator + CR) at CDI_TAG plus
# the local NodePort service exposing the CDI upload proxy.
function install_cdi {
    local base_url="https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_TAG}"
    local manifest
    for manifest in cdi-operator.yaml cdi-cr.yaml; do
        kubectl apply -f "${base_url}/${manifest}"
    done
    kubectl apply -f "${RESOURCES_DIR}/cdi-uploadproxy-nodeport.yaml"
}
| 69 | |
# Deploy the Mirantis ceph-controller HelmBundle.
# Clones release-openstack-k8s, patches the ceph-operator HelmBundle so the
# rook CSI driver uses the k0s kubelet path, applies the base release
# manifests, then applies the kustomized ceph-controller bundle.
function install_ceph_controller {
    pushd $WORKDIR
    # Empty tag: clone_repo stays on the repository's default branch.
    clone_repo $RELEASE_OPENSTACK_K8S_REPO "" ${WORKDIR}/release-openstack-k8s
    mkdir -p customizations/ceph-controller
    # JSON patch pointing rook's CSI driver at the k0s kubelet directory
    # (k0s does not use the default /var/lib/kubelet -- presumably why this
    # override exists; verify against the ceph-controller chart).
    # NB: the "contoller" spelling is a typo, but it is used consistently
    # here and in the kustomization below, so it is harmless.
    cat <<EOF > customizations/ceph-controller/ceph-contoller_kubelet_path.json
[
  {"op": "add",
   "path": "/spec/releases/0/values/rookExtraConfig",
   "value": {"csiKubeletPath": "/var/lib/k0s/kubelet"}
  }
]
EOF
    cp release-openstack-k8s/release/50-ceph-controller.yaml customizations/ceph-controller/
    cat <<EOF >customizations/ceph-controller/kustomization.yaml
resources:
  - 50-ceph-controller.yaml
patches:
  - path: ceph-contoller_kubelet_path.json
    target:
      group: lcm.mirantis.com
      version: v1alpha1
      kind: HelmBundle
      name: ceph-operator
      namespace: osh-system
EOF

    # Base release manifests: namespaces, HelmBundle CRD, helm controller,
    # local volume provisioner and node-maintenance controller.
    local release_files="01-namespaces.yaml 02-helmbundle-crd.yaml 30-helm-controller.yaml 40-local-volume-provisioner.yaml ci/50-nodemaintenance.yaml"
    for file in $release_files; do
        kubectl apply -f release-openstack-k8s/release/$file
    done
    kubectl apply -k customizations/ceph-controller/
    popd
}
| 103 | |
# Check whether a node currently carries the given label.
# Arguments:
#   $1 - node name
#   $2 - label in key=value form
# Returns 0 when the label is present, 1 otherwise.
function is_label_assigned {
    local node=$1
    local label=$2

    # grep -w: match the label as a whole word within the label listing.
    if kubectl get node --show-labels "$node" | grep -qw "$label"; then
        return 0
    else
        return 1
    fi
}
| 113 | |
# Generate and apply the MiraCeph cluster resource.
# Walks every node in the cluster and builds a JSON-patch list that appends
# per-node entries to /spec/nodes of the base miraceph.yaml:
#   - the first (up to) three nodes labelled openstack-control-plane=enabled
#     get the "mon"/"mgr" roles;
#   - nodes labelled role=ceph-osd-node contribute device "vdb" as an
#     hdd-class OSD (assumes every OSD node has a vdb disk -- TODO confirm).
# NOTE: num_ctls/num_osds/seen_nodes are deliberately not 'local'.
function deploy_ceph {
    num_ctls=0
    num_osds=0
    seen_nodes=""
    mkdir -p $WORKDIR/customizations/ceph-deployment
    cp ${RESOURCES_DIR}/miraceph.yaml $WORKDIR/customizations/ceph-deployment/
    pushd $WORKDIR
    # Open the JSON-patch array; one entry is appended per eligible node.
    echo '[' > customizations/ceph-deployment/nodes_path.json
    # First column of 'kubectl get nodes' is the node name; skip the header.
    for node in $(kubectl get nodes | awk '{print $1}' |grep -v NAME); do
        node_roles="[]"
        devices="[]"
        # Skip nodes that were already handled.
        if echo $seen_nodes |grep -q $node; then
            continue
        fi
        if is_label_assigned $node openstack-control-plane=enabled; then
            num_ctls=$(( num_ctls + 1 ))
            # Cap mon/mgr placement at three control-plane nodes.
            if [[ $num_ctls -le 3 ]]; then
                node_roles='["mon", "mgr"]'
            fi
        fi
        if is_label_assigned $node role=ceph-osd-node; then
            num_osds=$(( num_osds + 1 ))
            devices='[{"name": "vdb", "config": {"deviceClass": "hdd"}}]'
        fi
        # Only emit a patch entry for nodes that got a role or a device.
        if [[ "$node_roles" != "[]" || "$devices" != "[]" ]]; then
            cat <<EOF >> customizations/ceph-deployment/nodes_path.json
{"op": "add",
 "path": "/spec/nodes/-",
 "value": {"name": "$node", "devices": $devices, "roles": $node_roles}
},
EOF
        fi
        seen_nodes="$seen_nodes,$node"
    done
    # Each entry ends with "},". Strip the comma on the file's last line so
    # the array is valid JSON, then close it.
    last_line=$(wc -l customizations/ceph-deployment/nodes_path.json| awk '{print $1}')
    sed -i "${last_line}s/},/}/g" customizations/ceph-deployment/nodes_path.json
    echo ']' >> customizations/ceph-deployment/nodes_path.json
    cat <<EOF >customizations/ceph-deployment/kustomization.yaml
resources:
  - miraceph.yaml
patches:
  - path: nodes_path.json
    target:
      group: lcm.mirantis.com
      version: v1alpha1
      kind: MiraCeph
      name: cephcluster
      namespace: ceph-lcm-mirantis
EOF
    kubectl apply -k customizations/ceph-deployment/
    popd
}
| 166 | |
# Install the full Ceph stack. Order matters: the controller registers the
# MiraCeph CRD, which must exist before deploy_ceph can apply its resource.
function install_ceph {
    install_ceph_controller
    wait_for_crd miracephs.lcm.mirantis.com
    deploy_ceph
}
| 172 | |
# Install the CSI external snapshot controller and its CRDs, add rook's RBD
# snapshot class, and label the openstack control-plane nodes with
# app=snapshot-controller.
function install_external_snapshotter {
    clone_repo "$EXTERNAL_SNAPSHOTTER_REPO" "$EXTERNAL_SNAPSHOTTER_TAG" "$WORKDIR/external-snapshotter"
    kubectl apply -f "$WORKDIR/external-snapshotter/deploy/kubernetes/snapshot-controller/"
    kubectl apply -f "$WORKDIR/external-snapshotter/client/config/crd"

    clone_repo "$ROOK_REPO" "$ROOK_TAG" "$WORKDIR/rook"
    kubectl apply -f "$WORKDIR/rook/deploy/examples/csi/rbd/snapshotclass.yaml"

    local node
    for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
        kubectl label node --overwrite "$node" app=snapshot-controller
    done
}
| 185 | |
# Deploy the Multus CNI meta-plugin (thick daemonset variant) straight from
# the upstream master branch.
function install_multus {
    local manifest_url="https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml"
    kubectl apply -f "$manifest_url"
}
| 189 | |
# Install the helm v3 binary as /usr/local/bin/helm3 if not already present.
# The version is overridable via HELM_VERSION; the default keeps the
# previously hard-coded v3.11.2 for backward compatibility.
# Returns non-zero when the download fails instead of installing a missing
# file (the old version ran mv/chmod unconditionally).
function install_helm {
    local version=${HELM_VERSION:-"v3.11.2"}
    local binary="helm-${version}-linux-amd64"
    if [[ ! -f /usr/local/bin/helm3 ]]; then
        if ! wget "https://binary-mirantis-com.s3.amazonaws.com/openstack/bin/utils/helm/${binary}"; then
            echo "Failed to download ${binary}" >&2
            return 1
        fi
        mv "$binary" /usr/local/bin/helm3
        chmod +x /usr/local/bin/helm3
    fi
}
| 197 | |
# Install Kube-OVN via its bundled helm chart.
# Exactly one openstack-control-plane node is selected as the OVN master
# (the loop short-circuits once master_node_count reaches 1), labelled
# kube-ovn/role=master, and its InternalIP is rendered into a chart values
# overlay together with replicaCount.
function install_kube_ovn {
    clone_repo $KUBE_OVN_REPO $KUBE_OVN_TAG $WORKDIR/kube-ovn
    local master_node_count=0
    local master_node_ips=""
    local node_ip=""
    for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
        # Only the first matching node becomes a master.
        if [[ $master_node_count -ge 1 ]]; then
            continue
        fi
        node_ip=$(kubectl get nodes $node -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
        master_node_ips="$master_node_ips $node_ip"
        # Normalize the accumulated list to comma-separated form.
        master_node_ips=$(echo $master_node_ips |sed 's/ /,/g')
        kubectl label node --overwrite $node kube-ovn/role=master
        master_node_count=$(( master_node_count + 1 ))
    done
    echo "$master_node_ips"
    # Default every node without an explicit userspace datapath label to the
    # kernel OVS datapath.
    kubectl label no -lovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel --overwrite
    pushd $WORKDIR/kube-ovn
    # Values overlay for the chart. NOTE(review): the trailing comma after
    # $master_node_ips looks like a chart-side parsing requirement -- confirm
    # against the kubeovn-helm chart templates before changing.
    cat <<EOF > $WORKDIR/kube-ovn-master-nodes.yaml
MASTER_NODES: "$master_node_ips,"
replicaCount: $master_node_count
EOF
    helm3 upgrade --install kube-ovn ./kubeovn-helm -f $RESOURCES_DIR/kube_ovn.yaml -f $WORKDIR/kube-ovn-master-nodes.yaml
    popd
}
| 223 | |
# Top-level installation sequence. Order matters at least for:
#   - install_helm before install_kube_ovn (kube-ovn is installed via helm3);
#   - install_ceph before install_external_snapshotter (the snapshot class
#     applied there targets the rook/ceph CSI driver).
install_helm
install_kube_ovn
install_kubevirt
install_cdi
install_ceph
install_external_snapshotter
install_multus