Add examples with kubevirt
* Add helpers to install kubevirt and its dependencies
* Add examples of how to use kubevirt
Related-Prod: PRODX-3456
Change-Id: I3ade65df5f8ddff39f35605104851833d74690a1
diff --git a/.gitignore b/.gitignore
index 175928f..d3f80e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
.idea/
*.iml
*.tox/
+*.swp
+kubevirt/.workdir/
diff --git a/kubevirt/examples/00-create-boot-image/Dockerfile b/kubevirt/examples/00-create-boot-image/Dockerfile
new file mode 100644
index 0000000..8f60a06
--- /dev/null
+++ b/kubevirt/examples/00-create-boot-image/Dockerfile
@@ -0,0 +1,3 @@
+FROM scratch
+ARG IMAGE_NAME=cirros-0.6.2-x86_64-disk.img
+ADD --chown=107:107 $IMAGE_NAME /disk/
diff --git a/kubevirt/examples/00-create-boot-image/build.sh b/kubevirt/examples/00-create-boot-image/build.sh
new file mode 100755
index 0000000..cddeaf4
--- /dev/null
+++ b/kubevirt/examples/00-create-boot-image/build.sh
@@ -0,0 +1,7 @@
+IMAGE_URL=https://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img
+if [[ ! -f cirros-0.6.2-x86_64-disk.img ]]; then
+ echo "Downloading image..."
+ wget $IMAGE_URL
+fi
+echo "Building image..."
+docker build -t jumpojoy/kubevirt-cirros:0.6.2 .
diff --git a/kubevirt/examples/01-boot-vm/cirros-boot-vm.yaml b/kubevirt/examples/01-boot-vm/cirros-boot-vm.yaml
new file mode 100644
index 0000000..bf5eabd
--- /dev/null
+++ b/kubevirt/examples/01-boot-vm/cirros-boot-vm.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-boot-vm
+ name: cirros-boot-vm
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-boot-vm
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/01-boot-vm/run.sh b/kubevirt/examples/01-boot-vm/run.sh
new file mode 100755
index 0000000..96be42c
--- /dev/null
+++ b/kubevirt/examples/01-boot-vm/run.sh
@@ -0,0 +1,24 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating VM"
+ kubectl apply -f cirros-boot-vm.yaml
+
+ echo "Starting VM"
+ virtctl start cirros-boot-vm
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-boot-vm Running
+}
+
+function cleanup {
+ kubectl delete -f cirros-boot-vm.yaml
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/02-boot-vm-cdi/cirros.yaml b/kubevirt/examples/02-boot-vm-cdi/cirros.yaml
new file mode 100644
index 0000000..1c9ec30
--- /dev/null
+++ b/kubevirt/examples/02-boot-vm-cdi/cirros.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-cdi-pvc
+ name: cirros-cdi-pvc
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-cdi-pvc
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: cdi-pvc
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - name: cdi-pvc
+ persistentVolumeClaim:
+ claimName: cirros-cdi-pvc
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/02-boot-vm-cdi/run.sh b/kubevirt/examples/02-boot-vm-cdi/run.sh
new file mode 100755
index 0000000..6078a57
--- /dev/null
+++ b/kubevirt/examples/02-boot-vm-cdi/run.sh
@@ -0,0 +1,40 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+
+TMP_DIR=$(cd $(dirname "$0")/../../.workdir/ && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Downloading cirros"
+ if [[ ! -f ${TMP_DIR}/cirros-0.6.2-x86_64-disk.img ]]; then
+ wget https://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img -O ${TMP_DIR}/cirros-0.6.2-x86_64-disk.img
+ fi
+
+ echo "Uploading cirros to PVC."
+ UPLOADPROXY_URL=https://$(kubectl get nodes -o wide| tail -1 | awk '{print $6}'):31119
+ virtctl image-upload pvc cirros-cdi-pvc --size 5G --image-path=${TMP_DIR}/cirros-0.6.2-x86_64-disk.img --insecure --uploadproxy-url ${UPLOADPROXY_URL}
+
+ kubectl get pvc
+ kubectl get pods
+
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Starting VM"
+ virtctl start cirros-cdi-pvc
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-cdi-pvc Running
+}
+
+function cleanup {
+ kubectl delete -f cirros.yaml
+ kubectl delete datavolume cirros-cdi-pvc
+ kubectl delete pvc cirros-cdi-pvc
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/03-boot-vm-volumes/cirros.yaml b/kubevirt/examples/03-boot-vm-volumes/cirros.yaml
new file mode 100644
index 0000000..1f4e236
--- /dev/null
+++ b/kubevirt/examples/03-boot-vm-volumes/cirros.yaml
@@ -0,0 +1,55 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-volumes
+ name: cirros-volumes
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-volumes
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ - disk:
+ bus: virtio
+ name: host-disk
+ - disk:
+ bus: virtio
+ name: metrics
+ - disk:
+ bus: virtio
+ name: mypvcdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - name: mypvcdisk
+ persistentVolumeClaim:
+ claimName: cirros-volumes
+ - hostDisk:
+ capacity: 1Gi
+ path: /data/disk.img
+ type: DiskOrCreate
+ name: host-disk
+ - name: metrics
+ downwardMetrics: {}
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/03-boot-vm-volumes/pvc.yaml b/kubevirt/examples/03-boot-vm-volumes/pvc.yaml
new file mode 100644
index 0000000..80d8fa0
--- /dev/null
+++ b/kubevirt/examples/03-boot-vm-volumes/pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cirros-volumes
+ namespace: default
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2G
+ volumeMode: Filesystem
diff --git a/kubevirt/examples/03-boot-vm-volumes/run.sh b/kubevirt/examples/03-boot-vm-volumes/run.sh
new file mode 100755
index 0000000..783cba8
--- /dev/null
+++ b/kubevirt/examples/03-boot-vm-volumes/run.sh
@@ -0,0 +1,28 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating PVC"
+ kubectl apply -f pvc.yaml
+
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Starting VM"
+ virtctl start cirros-volumes
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-volumes Running
+}
+
+function cleanup {
+ kubectl delete -f cirros.yaml
+ kubectl delete -f pvc.yaml
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/04-memdump-vm/cirros.yaml b/kubevirt/examples/04-memdump-vm/cirros.yaml
new file mode 100644
index 0000000..0be94da
--- /dev/null
+++ b/kubevirt/examples/04-memdump-vm/cirros.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-memdump
+ name: cirros-memdump
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-memdump
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/04-memdump-vm/run.sh b/kubevirt/examples/04-memdump-vm/run.sh
new file mode 100755
index 0000000..0f84c21
--- /dev/null
+++ b/kubevirt/examples/04-memdump-vm/run.sh
@@ -0,0 +1,36 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Starting VM"
+ virtctl start cirros-memdump
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-memdump Running
+
+ echo "Creating memorydump"
+ virtctl memory-dump get cirros-memdump --claim-name=cirros-memdump --create-claim
+
+ sleep 5
+
+ echo "Creating ubuntu pod"
+ kubectl apply -f ubuntu.yaml
+
+}
+
+function cleanup {
+ kubectl delete pod ubuntu-memdump --force --grace-period=0
+ kubectl delete -f cirros.yaml
+ kubectl delete -f ubuntu.yaml
+ kubectl delete pvc cirros-memdump
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/04-memdump-vm/ubuntu.yaml b/kubevirt/examples/04-memdump-vm/ubuntu.yaml
new file mode 100644
index 0000000..67a0a4c
--- /dev/null
+++ b/kubevirt/examples/04-memdump-vm/ubuntu.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: ubuntu-memdump
+spec:
+ containers:
+ - name: ubuntu
+ image: ubuntu
+ command:
+ - sleep
+ - infinity
+ volumeMounts:
+ - mountPath: "/data"
+ name: memdump
+ volumes:
+ - name: memdump
+ persistentVolumeClaim:
+ claimName: cirros-memdump
diff --git a/kubevirt/examples/05-migrate-vm-volumes/cirros.yaml b/kubevirt/examples/05-migrate-vm-volumes/cirros.yaml
new file mode 100644
index 0000000..b203985
--- /dev/null
+++ b/kubevirt/examples/05-migrate-vm-volumes/cirros.yaml
@@ -0,0 +1,61 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-migrate
+ name: cirros-migrate
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-migrate
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ # - disk:
+ # bus: virtio
+ # name: host-disk
+ - disk:
+ bus: virtio
+ name: metrics
+ - disk:
+ bus: virtio
+ name: mypvcdisk
+ interfaces:
+ - name: default
+ masquerade: {}
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ networks:
+ - name: default
+ pod: {} # Stock pod network
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - name: mypvcdisk
+ persistentVolumeClaim:
+ claimName: cirros-migrate
+ # - hostDisk:
+ # capacity: 1Gi
+ # path: /data/disk.img
+ # type: DiskOrCreate
+ # name: host-disk
+ - name: metrics
+ downwardMetrics: {}
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/05-migrate-vm-volumes/pvc.yaml b/kubevirt/examples/05-migrate-vm-volumes/pvc.yaml
new file mode 100644
index 0000000..4015580
--- /dev/null
+++ b/kubevirt/examples/05-migrate-vm-volumes/pvc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cirros-migrate
+ namespace: default
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2G
+ # ReadWriteMany is supported only in Block mode
+ volumeMode: Block
diff --git a/kubevirt/examples/05-migrate-vm-volumes/run.sh b/kubevirt/examples/05-migrate-vm-volumes/run.sh
new file mode 100755
index 0000000..1971190
--- /dev/null
+++ b/kubevirt/examples/05-migrate-vm-volumes/run.sh
@@ -0,0 +1,39 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating PVC"
+ kubectl apply -f pvc.yaml
+
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Starting VM"
+ virtctl start cirros-migrate
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-migrate Running
+
+ kubectl get pods -o wide
+
+ read -p "Press any key to start live migration" -n 1 -r
+ virtctl migrate cirros-migrate
+
+ for i in {1..5}; do
+ kubectl get pods -o wide
+ kubectl get vmim
+ sleep 5
+ done
+}
+
+function cleanup {
+ kubectl delete -f cirros.yaml
+ kubectl delete -f pvc.yaml
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/06-vm-autoscale/cirros-pool.yaml b/kubevirt/examples/06-vm-autoscale/cirros-pool.yaml
new file mode 100644
index 0000000..40d0292
--- /dev/null
+++ b/kubevirt/examples/06-vm-autoscale/cirros-pool.yaml
@@ -0,0 +1,47 @@
+apiVersion: pool.kubevirt.io/v1alpha1
+kind: VirtualMachinePool
+metadata:
+ name: cirros-pool
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ kubevirt.io/vmpool: cirros-pool
+ virtualMachineTemplate:
+ metadata:
+ labels:
+ kubevirt.io/vmpool: cirros-pool
+ spec:
+ running: true
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-pool
+ kubevirt.io/vmpool: cirros-pool
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ cpu: 1000m
+ limits:
+ memory: 256Mi
+ cpu: 2000m
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/06-vm-autoscale/hpa.yaml b/kubevirt/examples/06-vm-autoscale/hpa.yaml
new file mode 100644
index 0000000..78301b6
--- /dev/null
+++ b/kubevirt/examples/06-vm-autoscale/hpa.yaml
@@ -0,0 +1,18 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: cirros-pool
+spec:
+ maxReplicas: 10
+ minReplicas: 3
+ scaleTargetRef:
+ apiVersion: pool.kubevirt.io/v1alpha1
+ kind: VirtualMachinePool
+ name: cirros-pool
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 50
diff --git a/kubevirt/examples/06-vm-autoscale/run.sh b/kubevirt/examples/06-vm-autoscale/run.sh
new file mode 100755
index 0000000..dbd736c
--- /dev/null
+++ b/kubevirt/examples/06-vm-autoscale/run.sh
@@ -0,0 +1,37 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating VM"
+ kubectl apply -f cirros-pool.yaml
+
+ echo "Waiting VM is Running."
+ for i in {0..2}; do
+ wait_vm_state cirros-pool-$i Running
+ done
+
+ kubectl get vmpools
+ kubectl get pods
+
+ echo "Give some time to populate POD metris"
+ sleep 15
+
+ echo "Creating HorizontalPodAutoscaler"
+ kubectl apply -f hpa.yaml
+
+ echo "To create some load login to VM and run:"
+ echo "sudo su"
+ echo "dd if=/dev/zero of=/dev/null"
+}
+
+function cleanup {
+ kubectl delete -f cirros-pool.yaml
+ kubectl delete -f hpa.yaml
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/10-add-volume/cirros-add-volume.yaml b/kubevirt/examples/10-add-volume/cirros-add-volume.yaml
new file mode 100644
index 0000000..0e9ddb2
--- /dev/null
+++ b/kubevirt/examples/10-add-volume/cirros-add-volume.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-add-volume
+ name: cirros-add-volume
+spec:
+ running: true
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-add-volume
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/10-add-volume/run.sh b/kubevirt/examples/10-add-volume/run.sh
new file mode 100755
index 0000000..d4d8a57
--- /dev/null
+++ b/kubevirt/examples/10-add-volume/run.sh
@@ -0,0 +1,46 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+TMP_DIR=$(cd $(dirname "$0")/../../.workdir/ && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating PVC with CDI"
+ echo "Downloading cirros"
+ if [[ ! -f ${TMP_DIR}/cirros-0.6.2-x86_64-disk.img ]]; then
+ wget https://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img -O ${TMP_DIR}/cirros-0.6.2-x86_64-disk.img
+ cp ${TMP_DIR}/cirros-0.6.2-x86_64-disk.img ${TMP_DIR}/disk.img
+ fi
+
+ echo "Uploading cirros to PVC."
+ UPLOADPROXY_URL=https://$(kubectl get nodes -o wide| tail -1 | awk '{print $6}'):31119
+ virtctl image-upload pvc cirros-add-volume --size 5G --image-path=${TMP_DIR}/disk.img --insecure --uploadproxy-url ${UPLOADPROXY_URL}
+
+ kubectl get pvc
+ kubectl get pods
+
+ echo "Creating VM"
+ kubectl apply -f cirros-add-volume.yaml
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-add-volume Running
+
+ read -p "Press any key to add volume" -n 1 -r
+ echo "Adding volume"
+ virtctl addvolume cirros-add-volume --volume-name=cirros-add-volume
+
+ read -p "Press any key to remove volume" -n 1 -r
+ echo "Removing volume"
+ virtctl removevolume cirros-add-volume --volume-name=cirros-add-volume
+}
+
+function cleanup {
+ kubectl delete -f cirros-add-volume.yaml
+ kubectl delete datavolume cirros-add-volume
+ kubectl delete pvc cirros-add-volume
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/11-clone-vm/cirros.yaml b/kubevirt/examples/11-clone-vm/cirros.yaml
new file mode 100644
index 0000000..f9f67e8
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/cirros.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-clone
+ name: cirros-clone
+spec:
+ running: true
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-clone
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - name: mypvcdisk
+ persistentVolumeClaim:
+ claimName: cirros-clone
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/11-clone-vm/clone.yaml b/kubevirt/examples/11-clone-vm/clone.yaml
new file mode 100644
index 0000000..b75273c
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/clone.yaml
@@ -0,0 +1,27 @@
+kind: VirtualMachineClone
+apiVersion: "clone.kubevirt.io/v1alpha1"
+metadata:
+ name: cirros-clone
+
+spec:
+ # source & target definitions
+ source:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-clone
+ target:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-clone-target
+
+ # labels & annotations definitions
+ labelFilters:
+ - "*"
+ - "!someKey/*"
+ annotationFilters:
+ - "anotherKey/*"
+
+ # other identity stripping specs:
+ #newMacAddresses:
+ # interfaceName: "00-11-22"
+ #newSMBiosSerial: "new-serial"
diff --git a/kubevirt/examples/11-clone-vm/pvc.yaml b/kubevirt/examples/11-clone-vm/pvc.yaml
new file mode 100644
index 0000000..746908e
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cirros-clone
+ namespace: default
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2G
+ volumeMode: Filesystem
diff --git a/kubevirt/examples/11-clone-vm/restore.yaml b/kubevirt/examples/11-clone-vm/restore.yaml
new file mode 100644
index 0000000..8cfede4
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/restore.yaml
@@ -0,0 +1,10 @@
+apiVersion: snapshot.kubevirt.io/v1alpha1
+kind: VirtualMachineRestore
+metadata:
+ name: cirros-snapshot
+spec:
+ target:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-snapshot
+ virtualMachineSnapshotName: cirros-snapshot
diff --git a/kubevirt/examples/11-clone-vm/run.sh b/kubevirt/examples/11-clone-vm/run.sh
new file mode 100755
index 0000000..47e491c
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/run.sh
@@ -0,0 +1,48 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating PVC"
+ kubectl apply -f pvc.yaml
+
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-clone Running
+
+ echo "Login to VM and create some data on the volume. For example run:"
+ echo "sudo su"
+ echo "mkfs.ext4 /dev/sda"
+ echo "mkdir /mnt/vol01"
+ echo "mount -t ext4 /dev/sda /mnt/vol01"
+ echo "date > /mnt/vol01/here"
+ echo "sync"
+
+ read -p "Press any key to create clone" -n 1 -r
+ echo "Creating clone"
+
+ kubectl apply -f clone.yaml
+
+ echo "Waiting clone is created"
+ kubectl wait vmclone cirros-clone --for condition=Ready
+
+ kubectl get pvc
+ kubectl get vm
+}
+
+function cleanup {
+ kubectl delete -f cirros.yaml
+ kubectl delete -f pvc.yaml
+ kubectl delete -f clone.yaml
+
+ kubectl delete vm cirros-clone-target
+ kubectl delete vmsnapshot cirros-snapshot
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/11-clone-vm/snapshot.yaml b/kubevirt/examples/11-clone-vm/snapshot.yaml
new file mode 100644
index 0000000..f7b9a98
--- /dev/null
+++ b/kubevirt/examples/11-clone-vm/snapshot.yaml
@@ -0,0 +1,9 @@
+apiVersion: snapshot.kubevirt.io/v1alpha1
+kind: VirtualMachineSnapshot
+metadata:
+ name: cirros-snapshot
+spec:
+ source:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-snapshot
diff --git a/kubevirt/examples/12-snapshot-vm/cirros.yaml b/kubevirt/examples/12-snapshot-vm/cirros.yaml
new file mode 100644
index 0000000..13cdc51
--- /dev/null
+++ b/kubevirt/examples/12-snapshot-vm/cirros.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-snapshot
+ name: cirros-snapshot
+spec:
+ running: true
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-snapshot
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - name: mypvcdisk
+ persistentVolumeClaim:
+ claimName: cirros-snapshot
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/12-snapshot-vm/pvc.yaml b/kubevirt/examples/12-snapshot-vm/pvc.yaml
new file mode 100644
index 0000000..ac300bf
--- /dev/null
+++ b/kubevirt/examples/12-snapshot-vm/pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cirros-snapshot
+ namespace: default
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2G
+ volumeMode: Filesystem
diff --git a/kubevirt/examples/12-snapshot-vm/restore.yaml b/kubevirt/examples/12-snapshot-vm/restore.yaml
new file mode 100644
index 0000000..8cfede4
--- /dev/null
+++ b/kubevirt/examples/12-snapshot-vm/restore.yaml
@@ -0,0 +1,10 @@
+apiVersion: snapshot.kubevirt.io/v1alpha1
+kind: VirtualMachineRestore
+metadata:
+ name: cirros-snapshot
+spec:
+ target:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-snapshot
+ virtualMachineSnapshotName: cirros-snapshot
diff --git a/kubevirt/examples/12-snapshot-vm/run.sh b/kubevirt/examples/12-snapshot-vm/run.sh
new file mode 100755
index 0000000..0742a56
--- /dev/null
+++ b/kubevirt/examples/12-snapshot-vm/run.sh
@@ -0,0 +1,72 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating PVC"
+ kubectl apply -f pvc.yaml
+
+ echo "Creating VM"
+ kubectl apply -f cirros.yaml
+
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-snapshot Running
+
+ echo "Login to VM and create some data on the volume. For example run:"
+ echo "sudo su"
+ echo "mkfs.ext4 /dev/sda"
+ echo "mkdir /mnt/vol01"
+ echo "mount -t ext4 /dev/sda /mnt/vol01"
+ echo "date > /mnt/vol01/here"
+ echo "sync"
+
+ read -p "Press any key to create snapshot" -n 1 -r
+ echo "Creating snapshot"
+
+ kubectl apply -f snapshot.yaml
+ wait_vmsnapshot_state cirros-snapshot Succeeded
+
+ kubectl get vmsnapshot
+ kubectl get volumesnapshotcontents
+
+ echo "Login to VM and create some more data on the volume. For example run:"
+ echo "sudo su"
+ echo "mkdir /mnt/vol01"
+ echo "mount -t ext4 /dev/sda /mnt/vol01"
+ echo "date > /mnt/vol01/there"
+ echo "sync"
+
+ read -p "Press any key to stop VM" -n 1 -r
+ virtctl stop cirros-snapshot
+ wait_vm_state cirros-snapshot Stopped
+
+ read -p "Press any key to restore VM snapshot" -n 1 -r
+ kubectl apply -f restore.yaml
+
+ wait_vmrestore_completed cirros-snapshot
+
+
+ echo "Starting VM"
+ virtctl start cirros-snapshot
+ wait_vm_state cirros-snapshot Running
+
+ echo "Login to VM and check data on the volume. For example run:"
+ echo "sudo su"
+ echo "mkdir /mnt/vol01"
+ echo "mount -t ext4 /dev/sda /mnt/vol01"
+ echo "ls -lah /mnt/vol01/"
+}
+
+
+function cleanup {
+ kubectl delete -f snapshot.yaml
+ kubectl delete -f restore.yaml
+ kubectl delete -f pvc.yaml
+ kubectl delete -f cirros.yaml
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/12-snapshot-vm/snapshot.yaml b/kubevirt/examples/12-snapshot-vm/snapshot.yaml
new file mode 100644
index 0000000..f7b9a98
--- /dev/null
+++ b/kubevirt/examples/12-snapshot-vm/snapshot.yaml
@@ -0,0 +1,9 @@
+apiVersion: snapshot.kubevirt.io/v1alpha1
+kind: VirtualMachineSnapshot
+metadata:
+ name: cirros-snapshot
+spec:
+ source:
+ apiGroup: kubevirt.io
+ kind: VirtualMachine
+ name: cirros-snapshot
diff --git a/kubevirt/examples/20-boot-vm-multus/cirros-net1.yaml b/kubevirt/examples/20-boot-vm-multus/cirros-net1.yaml
new file mode 100644
index 0000000..f9710fb
--- /dev/null
+++ b/kubevirt/examples/20-boot-vm-multus/cirros-net1.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-net1
+ name: cirros-net1
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-net1
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - name: default
+ masquerade: {}
+ - name: ovn-net1
+ bridge: {}
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ networks:
+ - name: default
+ pod: {} # Stock pod network
+ - name: ovn-net1
+ multus: # Secondary multus network
+ networkName: net1
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/20-boot-vm-multus/cirros-net2.yaml b/kubevirt/examples/20-boot-vm-multus/cirros-net2.yaml
new file mode 100644
index 0000000..8fc08fb
--- /dev/null
+++ b/kubevirt/examples/20-boot-vm-multus/cirros-net2.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: cirros-net2
+ name: cirros-net2
+spec:
+ running: false
+ template:
+ metadata:
+ labels:
+ kubevirt.io/vm: cirros-net2
+ spec:
+ domain:
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: containerdisk
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - name: default
+ masquerade: {}
+ - name: ovn-net2
+ bridge: {}
+ resources:
+ requests:
+ memory: 128Mi
+ terminationGracePeriodSeconds: 0
+ networks:
+ - name: default
+ pod: {} # Stock pod network
+ - name: ovn-net2
+ multus: # Secondary multus network
+ networkName: net2
+ volumes:
+ - containerDisk:
+ image: docker.io/jumpojoy/kubevirt-cirros:0.6.2
+ name: containerdisk
+ - cloudInitNoCloud:
+ userData: |
+ #!/bin/sh
+ echo 'printed from cloud-init userdata'
+ name: cloudinitdisk
diff --git a/kubevirt/examples/20-boot-vm-multus/network-attachment.yaml b/kubevirt/examples/20-boot-vm-multus/network-attachment.yaml
new file mode 100644
index 0000000..0681674
--- /dev/null
+++ b/kubevirt/examples/20-boot-vm-multus/network-attachment.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: net1
+ namespace: default
+spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "type": "kube-ovn",
+ "server_socket": "/run/openvswitch/kube-ovn-daemon.sock",
+ "provider": "net1.default.ovn"
+ }'
+
+---
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: net2
+ namespace: default
+spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "type": "kube-ovn",
+ "server_socket": "/run/openvswitch/kube-ovn-daemon.sock",
+ "provider": "net2.default.ovn"
+ }'
+---
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+ name: net1
+spec:
+ protocol: IPv4
+ provider: net1.default.ovn
+ cidrBlock: 192.168.1.0/24
+ gateway: 192.168.1.1
+---
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+ name: net2
+spec:
+ protocol: IPv4
+ provider: net2.default.ovn
+ cidrBlock: 192.168.2.0/24
+ gateway: 192.168.2.1
diff --git a/kubevirt/examples/20-boot-vm-multus/run.sh b/kubevirt/examples/20-boot-vm-multus/run.sh
new file mode 100755
index 0000000..8d55316
--- /dev/null
+++ b/kubevirt/examples/20-boot-vm-multus/run.sh
@@ -0,0 +1,31 @@
+EXAMPLES_DIR=$(cd $(dirname "$0")/.. && pwd)
+WORKDIR=$(cd $(dirname "$0") && pwd)
+source ${EXAMPLES_DIR}/lib.sh
+
+command=$1
+
+function run {
+ echo "Creating network attachments"
+ kubectl apply -f network-attachment.yaml
+
+ echo "Creating VMs"
+ for vm in cirros-net1 cirros-net2; do
+ kubectl apply -f $vm.yaml
+
+ echo "Starting VM $vm"
+ virtctl start $vm
+ done
+ echo "Waiting VM is Running."
+ wait_vm_state cirros-net1 Running
+ wait_vm_state cirros-net2 Running
+}
+
+function cleanup {
+ for f in $(ls *.yaml); do
+ kubectl delete -f $f
+ done
+}
+
+pushd $WORKDIR
+$command
+popd
diff --git a/kubevirt/examples/lib.sh b/kubevirt/examples/lib.sh
new file mode 100644
index 0000000..0a72d48
--- /dev/null
+++ b/kubevirt/examples/lib.sh
@@ -0,0 +1,31 @@
+function wait_vm_state {
+ local vm_name=$1
+ local state=$2
+ i=1
+ while [[ $(kubectl get vm ${vm_name} -o jsonpath="{.status.printableStatus}") != ${state} ]]; do
+ echo "Waiting VM ${vm_name} status ${state}. Attempt ${i}"
+ i=$(( i + 1 ))
+ sleep 5
+ done
+}
+
+function wait_vmsnapshot_state {
+ local snapshot_name=$1
+ local state=$2
+ i=1
+ while [[ $(kubectl get vmsnapshot ${snapshot_name} -o jsonpath="{.status.phase}") != ${state} ]]; do
+ echo "Waiting vmsnapshot ${snapshot_name} phase is ${state}. Attempt ${i}"
+ i=$(( i + 1 ))
+ sleep 5
+ done
+}
+
+function wait_vmrestore_completed {
+ local restore_name=$1
+ i=1
+ while [[ $(kubectl get vmrestore ${restore_name} -o jsonpath="{.status.complete}") != "true" ]]; do
+ echo "Waiting vmrestore ${restore_name} is completed. Attempt ${i}"
+ i=$(( i + 1 ))
+ sleep 5
+ done
+}
diff --git a/kubevirt/examples/ubuntu.yaml b/kubevirt/examples/ubuntu.yaml
new file mode 100644
index 0000000..676a047
--- /dev/null
+++ b/kubevirt/examples/ubuntu.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: ubuntu
+spec:
+ containers:
+ - name: ubuntu
+ image: ubuntu
+ command:
+ - sleep
+ - infinity
diff --git a/kubevirt/install.sh b/kubevirt/install.sh
new file mode 100644
index 0000000..047b44b
--- /dev/null
+++ b/kubevirt/install.sh
@@ -0,0 +1,220 @@
+export KUBEVIRT_RELEASE=${KUBEVIRT_RELEASE:-"v0.59.0"}
+export RELEASE_OPENSTACK_K8S_REPO=${RELEASE_OPENSTACK_K8S_REPO:-https://github.com/Mirantis/release-openstack-k8s}
+
+export EXTERNAL_SNAPSHOTTER_REPO=${EXTERNAL_SNAPSHOTTER_REPO:-"https://github.com/kubernetes-csi/external-snapshotter"}
+export EXTERNAL_SNAPSHOTTER_TAG=${EXTERNAL_SNAPSHOTTER_TAG:-"v6.2.2"}
+
+export ROOK_REPO=${ROOK_REPO:-"https://github.com/rook/rook"}
+export ROOK_TAG=${ROOK_TAG:-"v1.11.4"}
+
+export KUBE_OVN_REPO=${KUBE_OVN_REPO:-"https://github.com/kubeovn/kube-ovn"}
+export KUBE_OVN_TAG=${KUBE_OVN_TAG:-"v1.11.8"}
+
+export CDI_TAG=${CDI_TAG:-"v1.56.0"}
+
+ROOT_DIR=$(cd $(dirname "$0") && pwd)
+RESOURCES_DIR=${ROOT_DIR}/resources
+WORKDIR="$(cd $(dirname "$0") && pwd)/.workdir"
+mkdir -p $WORKDIR
+
+function clone_repo {
+ local repo=$1
+ local tag=$2
+ local dst=$3
+
+ if [[ ! -d $dst ]]; then
+ git clone $repo $dst
+ fi
+
+ if [[ -n $tag ]]; then
+ pushd $dst
+ git checkout tags/$tag
+ fi
+}
+
+function install_kubevirt_operator {
+ kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-operator.yaml
+ kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-cr.yaml
+}
+
+function install_virtctl {
+ if [[ ! -f /usr/sbin/virtctl ]]; then
+ wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/virtctl-${KUBEVIRT_RELEASE}-linux-amd64
+ mv virtctl-${KUBEVIRT_RELEASE}-linux-amd64 /usr/sbin/virtctl
+ chmod +x /usr/sbin/virtctl
+ fi
+}
+
+function install_kubevirt {
+ install_kubevirt_operator
+ install_virtctl
+ kubectl apply -f ${RESOURCES_DIR}/kubevirt.yaml
+ #kubectl apply -f ${RESOURCES_DIR}/libvirt.yaml
+}
+
+function install_cdi {
+ kubectl apply -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_TAG}/cdi-operator.yaml
+ kubectl apply -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_TAG}/cdi-cr.yaml
+ kubectl apply -f ${RESOURCES_DIR}/cdi-uploadproxy-nodeport.yaml
+}
+
+function install_ceph_controller {
+ pushd $WORKDIR
+ clone_repo $RELEASE_OPENSTACK_K8S_REPO "" ${WORKDIR}/release-openstack-k8s
+ mkdir -p customizations/ceph-controller
+ cat <<EOF > customizations/ceph-controller/ceph-controller_kubelet_path.json
+[
+ {"op": "add",
+ "path": "/spec/releases/0/values/rookExtraConfig",
+ "value": {"csiKubeletPath": "/var/lib/k0s/kubelet"}
+ }
+]
+EOF
+ cp release-openstack-k8s/release/50-ceph-controller.yaml customizations/ceph-controller/
+cat <<EOF >customizations/ceph-controller/kustomization.yaml
+resources:
+ - 50-ceph-controller.yaml
+patches:
+- path: ceph-controller_kubelet_path.json
+ target:
+ group: lcm.mirantis.com
+ version: v1alpha1
+ kind: HelmBundle
+ name: ceph-operator
+ namespace: osh-system
+EOF
+
+ local release_files="01-namespaces.yaml 02-helmbundle-crd.yaml 30-helm-controller.yaml 40-local-volume-provisioner.yaml ci/50-nodemaintenance.yaml"
+ for file in $release_files; do
+ kubectl apply -f release-openstack-k8s/release/$file
+ done
+ kubectl apply -k customizations/ceph-controller/
+ popd
+}
+
+function is_label_assigned {
+ local node=$1
+ local label=$2
+
+ if kubectl get node --show-labels $node |grep -q -w $label; then
+ return 0
+ fi
+ return 1
+}
+
+function deploy_ceph {
+ num_ctls=0
+ num_osds=0
+ seen_nodes=""
+ mkdir -p $WORKDIR/customizations/ceph-deployment
+ cp ${RESOURCES_DIR}/miraceph.yaml $WORKDIR/customizations/ceph-deployment/
+ pushd $WORKDIR
+ echo '[' > customizations/ceph-deployment/nodes_path.json
+ for node in $(kubectl get nodes | awk '{print $1}' |grep -v NAME); do
+ node_roles="[]"
+ devices="[]"
+ if echo $seen_nodes |grep -q $node; then
+ continue
+ fi
+ if is_label_assigned $node openstack-control-plane=enabled; then
+ num_ctls=$(( num_ctls + 1 ))
+ if [[ $num_ctls -le 3 ]]; then
+ node_roles='["mon", "mgr"]'
+ fi
+ fi
+ if is_label_assigned $node role=ceph-osd-node; then
+ num_osds=$(( num_osds + 1 ))
+ devices='[{"name": "vdb", "config": {"deviceClass": "hdd"}}]'
+ fi
+ if [[ "$node_roles" != "[]" || "$devices" != "[]" ]]; then
+ cat <<EOF >> customizations/ceph-deployment/nodes_path.json
+{"op": "add",
+ "path": "/spec/nodes/-",
+ "value": {"name": "$node", "devices": $devices, "roles": $node_roles}
+},
+EOF
+ fi
+ seen_nodes="$seen_nodes,$node"
+ done
+ last_line=$(wc -l customizations/ceph-deployment/nodes_path.json| awk '{print $1}')
+ sed -i "${last_line}s/},/}/g" customizations/ceph-deployment/nodes_path.json
+ echo ']' >> customizations/ceph-deployment/nodes_path.json
+ cat <<EOF >customizations/ceph-deployment/kustomization.yaml
+resources:
+ - miraceph.yaml
+patches:
+- path: nodes_path.json
+ target:
+ group: lcm.mirantis.com
+ version: v1alpha1
+ kind: MiraCeph
+ name: cephcluster
+ namespace: ceph-lcm-mirantis
+EOF
+ kubectl apply -k customizations/ceph-deployment/
+ popd
+}
+
+function install_ceph {
+ install_ceph_controller
+ deploy_ceph
+}
+
+function install_external_snapshotter {
+ clone_repo $EXTERNAL_SNAPSHOTTER_REPO $EXTERNAL_SNAPSHOTTER_TAG $WORKDIR/external-snapshotter
+ kubectl apply -f $WORKDIR/external-snapshotter/deploy/kubernetes/snapshot-controller/
+ kubectl apply -f $WORKDIR/external-snapshotter/client/config/crd
+
+ clone_repo $ROOK_REPO $ROOK_TAG $WORKDIR/rook
+ kubectl apply -f $WORKDIR/rook/deploy/examples/csi/rbd/snapshotclass.yaml
+
+ for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
+ kubectl label node --overwrite $node app=snapshot-controller
+ done
+}
+
+function install_multus {
+ kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml
+}
+
+function install_helm {
+ if [[ ! -f /usr/local/bin/helm3 ]]; then
+ wget https://binary-mirantis-com.s3.amazonaws.com/openstack/bin/utils/helm/helm-v3.11.2-linux-amd64
+ mv helm-v3.11.2-linux-amd64 /usr/local/bin/helm3
+ chmod +x /usr/local/bin/helm3
+ fi
+}
+
+function install_kube_ovn {
+ clone_repo $KUBE_OVN_REPO $KUBE_OVN_TAG $WORKDIR/kube-ovn
+ local master_node_count=0
+ local master_node_ips=""
+ local node_ip=""
+ for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' -l openstack-control-plane=enabled); do
+ if [[ $master_node_count -ge 1 ]]; then
+ continue
+ fi
+ node_ip=$(kubectl get nodes $node -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
+ master_node_ips="$master_node_ips $node_ip"
+ master_node_ips=$(echo $master_node_ips |sed 's/ /,/g')
+ kubectl label node --overwrite $node kube-ovn/role=master
+ master_node_count=$(( master_node_count + 1 ))
+ done
+ echo "$master_node_ips"
+ kubectl label no -lovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel --overwrite
+ pushd $WORKDIR/kube-ovn
+ cat <<EOF > $WORKDIR/kube-ovn-master-nodes.yaml
+MASTER_NODES: "$master_node_ips,"
+replicaCount: $master_node_count
+EOF
+ helm3 upgrade --install kube-ovn ./kubeovn-helm -f $RESOURCES_DIR/kube_ovn.yaml -f $WORKDIR/kube-ovn-master-nodes.yaml
+ popd
+}
+
+install_helm
+install_kube_ovn
+install_kubevirt
+install_cdi
+install_ceph
+install_external_snapshotter
+install_multus
diff --git a/kubevirt/resources/cdi-uploadproxy-nodeport.yaml b/kubevirt/resources/cdi-uploadproxy-nodeport.yaml
new file mode 100644
index 0000000..3ceab52
--- /dev/null
+++ b/kubevirt/resources/cdi-uploadproxy-nodeport.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: cdi-uploadproxy-nodeport
+ namespace: cdi
+spec:
+ internalTrafficPolicy: Cluster
+ ipFamilies:
+ - IPv4
+ ipFamilyPolicy: SingleStack
+ ports:
+ - port: 443
+ protocol: TCP
+ targetPort: 8443
+ nodePort: 31119
+ selector:
+ cdi.kubevirt.io: cdi-uploadproxy
+ sessionAffinity: None
+ type: NodePort
diff --git a/kubevirt/resources/kube_ovn.yaml b/kubevirt/resources/kube_ovn.yaml
new file mode 100644
index 0000000..a6b8fd8
--- /dev/null
+++ b/kubevirt/resources/kube_ovn.yaml
@@ -0,0 +1,4 @@
+#networking:
+# IFACE: "ens8"
+cni_conf:
+ CNI_CONFIG_PRIORITY: "20"
diff --git a/kubevirt/resources/kubevirt.yaml b/kubevirt/resources/kubevirt.yaml
new file mode 100644
index 0000000..cdc79fb
--- /dev/null
+++ b/kubevirt/resources/kubevirt.yaml
@@ -0,0 +1,22 @@
+apiVersion: kubevirt.io/v1
+kind: KubeVirt
+metadata:
+ name: kubevirt
+ namespace: kubevirt
+spec:
+ certificateRotateStrategy: {}
+ configuration:
+ developerConfiguration:
+ featureGates:
+ - HostDisk
+ - DownwardMetrics
+ - HotplugVolumes
+ - Snapshot
+ - VMExport
+ customizeComponents: {}
+ imagePullPolicy: IfNotPresent
+ workloadUpdateStrategy: {}
+ workloads:
+ nodePlacement:
+ nodeSelector:
+ openstack-compute-node: enabled
diff --git a/kubevirt/resources/libvirt.yaml b/kubevirt/resources/libvirt.yaml
new file mode 100644
index 0000000..670477a
--- /dev/null
+++ b/kubevirt/resources/libvirt.yaml
@@ -0,0 +1,36 @@
+apiVersion: lcm.mirantis.com/v1alpha1
+kind: HelmBundle
+metadata:
+ name: kubevirt-libvirt
+ namespace: osh-system
+spec:
+ repositories:
+ - name: osh-infra
+ url: https://binary.mirantis.com/openstack/helm/openstack-helm-infra
+ releases:
+ - chart: osh-infra/libvirt
+ name: vbmc-libvirt
+ namespace: kubevirt-libvirt
+ values:
+ network:
+ backend: []
+ labels:
+ agent:
+ libvirt:
+ node_selector_key: openstack-compute-node
+ node_selector_value: enabled
+ conf:
+ ceph:
+ enabled: false
+ libvirt:
+ listen_addr: 0.0.0.0
+ images:
+ tags:
+ dep_check: mirantis.azurecr.io/openstack/extra/kubernetes-entrypoint:v1.0.0-20200311160233
+ image_repo_sync: mirantis.azurecr.io/openstack/extra/docker:17.07.0
+ libvirt: mirantis.azurecr.io/general/libvirt:6.0.0-focal-20221028120749
+ manifests:
+ network_policy: false
+ dependencies: {}
+ version: 0.1.0-mcp-2953
+ helmV3: true
diff --git a/kubevirt/resources/miraceph.yaml b/kubevirt/resources/miraceph.yaml
new file mode 100644
index 0000000..1bd78ec
--- /dev/null
+++ b/kubevirt/resources/miraceph.yaml
@@ -0,0 +1,21 @@
+apiVersion: lcm.mirantis.com/v1alpha1
+kind: MiraCeph
+metadata:
+ name: cephcluster
+ namespace: ceph-lcm-mirantis
+spec:
+ rookConfig:
+ # IMPORTANT: for production change to 3 or remove.
+ osd_pool_default_size: "2"
+ network:
+ clusterNet: 10.12.0.0/24
+ publicNet: 10.12.1.0/24
+ dashboard: false
+ pools:
+ - deviceClass: hdd
+ role: kubernetes
+ name: mirablock-k8s-block
+ default: true
+ replicated:
+ size: 2
+ nodes: []