#!/usr/bin/env bash
# Component versions and source repositories for the installers below.
# Every value can be overridden from the environment.
export KUBEVIRT_RELEASE=${KUBEVIRT_RELEASE:-"v0.59.0"}
export RELEASE_OPENSTACK_K8S_REPO=${RELEASE_OPENSTACK_K8S_REPO:-"https://github.com/Mirantis/release-openstack-k8s"}

export EXTERNAL_SNAPSHOTTER_REPO=${EXTERNAL_SNAPSHOTTER_REPO:-"https://github.com/kubernetes-csi/external-snapshotter"}
export EXTERNAL_SNAPSHOTTER_TAG=${EXTERNAL_SNAPSHOTTER_TAG:-"v6.2.2"}

export ROOK_REPO=${ROOK_REPO:-"https://github.com/rook/rook"}
export ROOK_TAG=${ROOK_TAG:-"v1.11.4"}

export KUBE_OVN_REPO=${KUBE_OVN_REPO:-"https://github.com/kubeovn/kube-ovn"}
export KUBE_OVN_TAG=${KUBE_OVN_TAG:-"v1.11.8"}

export CDI_TAG=${CDI_TAG:-"v1.56.0"}

# Directory containing this script, and its static manifests.
ROOT_DIR=$(cd "$(dirname "$0")" && pwd)
RESOURCES_DIR="${ROOT_DIR}/resources"
# Fix: reuse ROOT_DIR instead of re-resolving the script location a second time.
WORKDIR="${ROOT_DIR}/.workdir"
mkdir -p "$WORKDIR"
| |
# Clone a git repository into dst (skipped when dst already exists) and
# optionally check out a tag.
# Arguments:
#   $1 - repository URL
#   $2 - tag to check out ("" to stay on the default branch)
#   $3 - destination directory
function clone_repo {
    local repo=$1
    local tag=$2
    local dst=$3

    if [[ ! -d "$dst" ]]; then
        git clone "$repo" "$dst"
    fi

    if [[ -n "$tag" ]]; then
        # Fix: the original pushd was never popped, leaking the directory
        # change to every caller that passed a tag.
        pushd "$dst"
        git checkout "tags/$tag"
        popd
    fi
}
| |
# Block until the given CRD is registered with the API server.
# Arguments:
#   $1 - fully qualified CRD name (e.g. miracephs.lcm.mirantis.com)
function wait_for_crd {
    local crd="$1"
    # Poll every 5 seconds; kubectl exits non-zero while the CRD is absent.
    until kubectl get crd "$crd"; do
        echo "Waiting crd $crd"
        sleep 5
    done
}
| |
# Apply the KubeVirt operator deployment and its custom resource for the
# pinned KUBEVIRT_RELEASE.
function install_kubevirt_operator {
    local base="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}"
    kubectl apply -f "${base}/kubevirt-operator.yaml"
    kubectl apply -f "${base}/kubevirt-cr.yaml"
}
| |
# Install the virtctl CLI matching KUBEVIRT_RELEASE into /usr/sbin.
# No-op when the binary is already present.
function install_virtctl {
    local url="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/virtctl-${KUBEVIRT_RELEASE}-linux-amd64"
    if [[ ! -f /usr/sbin/virtctl ]]; then
        # Fix: abort on a failed download instead of moving a partial or
        # missing file into /usr/sbin as a broken binary.
        wget "$url" || return 1
        chmod +x "virtctl-${KUBEVIRT_RELEASE}-linux-amd64"
        mv "virtctl-${KUBEVIRT_RELEASE}-linux-amd64" /usr/sbin/virtctl
    fi
}
| |
# Deploy KubeVirt: operator + CR, the virtctl client binary, and the local
# KubeVirt configuration manifest.
function install_kubevirt {
    install_kubevirt_operator
    install_virtctl
    kubectl apply -f "${RESOURCES_DIR}/kubevirt.yaml"
    #kubectl apply -f ${RESOURCES_DIR}/libvirt.yaml
}
| |
# Deploy the Containerized Data Importer (operator + CR) and expose its
# upload proxy via a NodePort service.
function install_cdi {
    local base="https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_TAG}"
    kubectl apply -f "${base}/cdi-operator.yaml"
    kubectl apply -f "${base}/cdi-cr.yaml"
    kubectl apply -f "${RESOURCES_DIR}/cdi-uploadproxy-nodeport.yaml"
}
| |
# Deploy the Mirantis ceph-controller HelmBundle.
# Clones RELEASE_OPENSTACK_K8S_REPO, patches the bundle so the rook CSI
# plugin uses the k0s kubelet path, applies the base release manifests, then
# applies the kustomized ceph-controller bundle.
# NOTE(review): "ceph-contoller" is a typo, but it is used consistently for
# both the file name and the kustomization reference, so it is harmless.
function install_ceph_controller {
    pushd $WORKDIR
    # Empty tag: stay on the repo's default branch.
    clone_repo $RELEASE_OPENSTACK_K8S_REPO "" ${WORKDIR}/release-openstack-k8s
    mkdir -p customizations/ceph-controller
    # JSON patch: point the rook CSI kubelet path at k0s's non-default location.
    cat <<EOF > customizations/ceph-controller/ceph-contoller_kubelet_path.json
[
{"op": "add",
"path": "/spec/releases/0/values/rookExtraConfig",
"value": {"csiKubeletPath": "/var/lib/k0s/kubelet"}
}
]
EOF
    cp release-openstack-k8s/release/50-ceph-controller.yaml customizations/ceph-controller/
    # Kustomization applying the patch above to the ceph-operator HelmBundle.
    cat <<EOF >customizations/ceph-controller/kustomization.yaml
resources:
- 50-ceph-controller.yaml
patches:
- path: ceph-contoller_kubelet_path.json
  target:
    group: lcm.mirantis.com
    version: v1alpha1
    kind: HelmBundle
    name: ceph-operator
    namespace: osh-system
EOF

    # Base manifests that must exist before the patched bundle is applied.
    local release_files="01-namespaces.yaml 02-helmbundle-crd.yaml 30-helm-controller.yaml 40-local-volume-provisioner.yaml ci/50-nodemaintenance.yaml"
    for file in $release_files; do
        kubectl apply -f release-openstack-k8s/release/$file
    done
    kubectl apply -k customizations/ceph-controller/
    popd
}
| |
# Check whether a node carries the given label (whole-word match).
# Arguments:
#   $1 - node name
#   $2 - label in key=value form
# Returns: 0 when the label is present, 1 otherwise.
function is_label_assigned {
    local node=$1
    local label=$2

    if kubectl get node --show-labels "$node" | grep -qw "$label"; then
        return 0
    else
        return 1
    fi
}
| |
# Generate and apply the MiraCeph cluster spec.
# Walks all cluster nodes and builds a JSON patch appending one entry per
# matching node:
#   - the first 3 nodes labelled openstack-control-plane=enabled get the
#     "mon" and "mgr" roles
#   - nodes labelled role=ceph-osd-node get one OSD on device vdb (hdd class)
# The patch is applied on top of resources/miraceph.yaml via kustomize.
function deploy_ceph {
    num_ctls=0
    num_osds=0
    seen_nodes=""
    mkdir -p $WORKDIR/customizations/ceph-deployment
    cp ${RESOURCES_DIR}/miraceph.yaml $WORKDIR/customizations/ceph-deployment/
    pushd $WORKDIR
    # The JSON array is built incrementally; every entry gets a trailing
    # comma which is stripped from the last line before closing the array.
    echo '[' > customizations/ceph-deployment/nodes_path.json
    for node in $(kubectl get nodes | awk '{print $1}' |grep -v NAME); do
        node_roles="[]"
        devices="[]"
        # Defensive de-duplication in case a node name appears twice.
        if echo $seen_nodes |grep -q $node; then
            continue
        fi
        # Cap mon/mgr placement at three control-plane nodes.
        if is_label_assigned $node openstack-control-plane=enabled; then
            num_ctls=$(( num_ctls + 1 ))
            if [[ $num_ctls -le 3 ]]; then
                node_roles='["mon", "mgr"]'
            fi
        fi
        if is_label_assigned $node role=ceph-osd-node; then
            num_osds=$(( num_osds + 1 ))
            devices='[{"name": "vdb", "config": {"deviceClass": "hdd"}}]'
        fi
        # Only nodes that actually received roles or devices are emitted.
        if [[ "$node_roles" != "[]" || "$devices" != "[]" ]]; then
            cat <<EOF >> customizations/ceph-deployment/nodes_path.json
{"op": "add",
"path": "/spec/nodes/-",
"value": {"name": "$node", "devices": $devices, "roles": $node_roles}
},
EOF
        fi
        seen_nodes="$seen_nodes,$node"
    done
    # Turn the final "}," into "}" so the generated JSON stays valid.
    last_line=$(wc -l customizations/ceph-deployment/nodes_path.json| awk '{print $1}')
    sed -i "${last_line}s/},/}/g" customizations/ceph-deployment/nodes_path.json
    echo ']' >> customizations/ceph-deployment/nodes_path.json
    # Kustomization applying the node patch to the MiraCeph resource.
    cat <<EOF >customizations/ceph-deployment/kustomization.yaml
resources:
- miraceph.yaml
patches:
- path: nodes_path.json
  target:
    group: lcm.mirantis.com
    version: v1alpha1
    kind: MiraCeph
    name: cephcluster
    namespace: ceph-lcm-mirantis
EOF
    kubectl apply -k customizations/ceph-deployment/
    popd
}
| |
# Full ceph rollout: bring up the controller, wait until it has registered
# the MiraCeph CRD, then submit the cluster specification.
function install_ceph {
    install_ceph_controller
    wait_for_crd miracephs.lcm.mirantis.com
    deploy_ceph
}
| |
# Deploy the external snapshot controller and its CRDs, add a rook RBD
# snapshot class, and label the control-plane nodes so the controller is
# scheduled onto them.
function install_external_snapshotter {
    local snap_dir="$WORKDIR/external-snapshotter"
    local rook_dir="$WORKDIR/rook"

    clone_repo "$EXTERNAL_SNAPSHOTTER_REPO" "$EXTERNAL_SNAPSHOTTER_TAG" "$snap_dir"
    kubectl apply -f "$snap_dir/deploy/kubernetes/snapshot-controller/"
    kubectl apply -f "$snap_dir/client/config/crd"

    clone_repo "$ROOK_REPO" "$ROOK_TAG" "$rook_dir"
    kubectl apply -f "$rook_dir/deploy/examples/csi/rbd/snapshotclass.yaml"

    local node
    for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
        kubectl label node --overwrite "$node" app=snapshot-controller
    done
}
| |
# Install the Multus CNI meta-plugin (thick daemonset deployment).
function install_multus {
    local manifest="https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml"
    kubectl apply -f "$manifest"
}
| |
# Install helm v3 as /usr/local/bin/helm3 (no-op when already installed).
# Generalized: the version can be overridden via HELM_VERSION; the default
# preserves the previously hard-coded v3.11.2.
function install_helm {
    local version=${HELM_VERSION:-"v3.11.2"}
    local artifact="helm-${version}-linux-amd64"
    if [[ ! -f /usr/local/bin/helm3 ]]; then
        # Fix: abort on a failed download instead of moving a missing or
        # partial file into /usr/local/bin.
        wget "https://binary-mirantis-com.s3.amazonaws.com/openstack/bin/utils/helm/${artifact}" || return 1
        mv "$artifact" /usr/local/bin/helm3
        chmod +x /usr/local/bin/helm3
    fi
}
| |
# Install kube-ovn from its helm chart.
# Exactly one openstack-control-plane node is labelled kube-ovn/role=master
# (the loop short-circuits once master_node_count reaches 1) and its
# InternalIP is passed to the chart as MASTER_NODES.
function install_kube_ovn {
    clone_repo $KUBE_OVN_REPO $KUBE_OVN_TAG $WORKDIR/kube-ovn
    local master_node_count=0
    local master_node_ips=""
    local node_ip=""
    for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
        # Only the first matching node becomes the OVN master.
        if [[ $master_node_count -ge 1 ]]; then
            continue
        fi
        node_ip=$(kubectl get nodes $node -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
        master_node_ips="$master_node_ips $node_ip"
        # NOTE(review): the unquoted echo is load-bearing - word splitting
        # drops the leading space added above before spaces become commas.
        master_node_ips=$(echo $master_node_ips |sed 's/ /,/g')
        kubectl label node --overwrite $node kube-ovn/role=master
        master_node_count=$(( master_node_count + 1 ))
    done
    # Informational: print the selected master IP list.
    echo "$master_node_ips"
    # Default every node to the kernel OVS datapath unless it explicitly
    # opted into the userspace datapath.
    kubectl label no -lovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel --overwrite
    pushd $WORKDIR/kube-ovn
    # Chart value overrides; MASTER_NODES keeps a trailing comma by design.
    cat <<EOF > $WORKDIR/kube-ovn-master-nodes.yaml
MASTER_NODES: "$master_node_ips,"
replicaCount: $master_node_count
EOF
    helm3 upgrade --install kube-ovn ./kubeovn-helm -f $RESOURCES_DIR/kube_ovn.yaml -f $WORKDIR/kube-ovn-master-nodes.yaml
    popd
}
| |
# Top-level installation sequence: helm first (kube-ovn's chart needs it),
# then networking, virtualization, storage and auxiliary components.
main() {
    install_helm
    install_kube_ovn
    install_kubevirt
    install_cdi
    install_ceph
    install_external_snapshotter
    install_multus
}

main "$@"