Use different cloud images for the cfg node and the other nodes
diff --git a/README.md b/README.md
index 1f5ffe5..0d5a6b0 100644
--- a/README.md
+++ b/README.md
@@ -19,21 +19,24 @@
dos-manage.py migrate
-Get cloudinit image
--------------------
+Get cloudinit images
+--------------------
wget https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -O ./trusty-server-cloudimg-amd64.qcow2
+wget https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img -O ./xenial-server-cloudimg-amd64.qcow2
+
Export variables
----------------
-export ENV_NAME=tcpcloud-mk20 # Optional
+export ENV_NAME=tcpcloud-mk22 # Optional
-export IMAGE_PATH=./trusty-server-cloudimg-amd64.qcow2
+export IMAGE_PATH1404=./trusty-server-cloudimg-amd64.qcow2
+
+export IMAGE_PATH1604=./xenial-server-cloudimg-amd64.qcow2
Run deploy test
------------------------------------------
-export WORKSPACE=$(pwd)
+---------------
export SHUTDOWN_ENV_ON_TEARDOWN=false # Optional
@@ -43,11 +46,9 @@
Create and start the env for manual tests
-----------------------------------------
-dos.py create-env ./tcpcloud-wk20.yaml
+dos.py create-env ./tcp_tests/templates/tcpcloud-default.yaml
dos.py start "${ENV_NAME}"
-Then, wait until cloud-init is finished and port 22 is open (~3-4 minutes), and login to the cfg01 node:
-
-ssh root@172.16.10.100 # Use password 'r00tme'
+Then, wait until cloud-init has finished and port 22 is open (~3-4 minutes), and log in as root with the password 'r00tme'.
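
Note: the IMAGE_PATH1404/IMAGE_PATH1604 variables exported above are consumed by the `!os_env` tags in the template changed below. The sketch here shows one way such a tag could be resolved with PyYAML; it is an illustration only, not the actual fuel-devops constructor, and the default handling ("!os_env VAR, default") is inferred from how the tag is used in the template.

    # Sketch: resolving '!os_env' tags against exported environment variables.
    import os
    import yaml

    def os_env_constructor(loader, node):
        # Supports both "!os_env VAR" and "!os_env VAR, default".
        raw = loader.construct_scalar(node)
        parts = [p.strip() for p in raw.split(',', 1)]
        name = parts[0]
        default = parts[1] if len(parts) > 1 else None
        return os.environ.get(name, default)

    yaml.add_constructor('!os_env', os_env_constructor, Loader=yaml.SafeLoader)

    print(yaml.safe_load("source_image: !os_env IMAGE_PATH1604\n"
                         "capacity: !os_env NODE_VOLUME_SIZE, 150\n"))
    # source_image resolves to the exported path; capacity falls back to '150'
    # when NODE_VOLUME_SIZE is not set.
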
diff --git a/tcp_tests/templates/tcpcloud-default.yaml b/tcp_tests/templates/tcpcloud-default.yaml
index 2523790..32b5d8a 100644
--- a/tcp_tests/templates/tcpcloud-default.yaml
+++ b/tcp_tests/templates/tcpcloud-default.yaml
@@ -55,14 +55,16 @@
private:
address_pool: private-pool01
dhcp: true
- forward:
- mode: nat
group_volumes:
- - name: cloudimage # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
+ - name: cloudimage1404 # This name is used for the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
# http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
format: qcow2
+ - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
nodes:
- name: cfg01.mk22-lab-advanced.local
@@ -77,7 +79,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -115,7 +117,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -153,7 +155,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -191,7 +193,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -229,7 +231,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -267,7 +269,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -305,7 +307,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -343,7 +345,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage
+ backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
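
With two group volumes now in play (cloudimage1604 for cfg01, cloudimage1404 for the remaining nodes), a quick cross-check that every 'backing_store' still names a defined group volume can catch typos in the template. The helper below is hypothetical and not part of tcp_tests; run it on the template parsed with an '!os_env' constructor such as the sketch above. It walks the YAML generically, so it makes no assumption about the exact nesting fuel-devops uses.

    # Hypothetical helper: verify backing_store references in a parsed template.
    def iter_dicts(obj):
        # Yield every dict nested anywhere inside the parsed YAML structure.
        if isinstance(obj, dict):
            yield obj
            children = obj.values()
        elif isinstance(obj, list):
            children = obj
        else:
            return
        for child in children:
            for d in iter_dicts(child):
                yield d

    def check_backing_stores(template):
        defined, referenced = set(), set()
        for d in iter_dicts(template):
            for vol in d.get('group_volumes') or []:
                defined.add(vol['name'])
            if 'backing_store' in d:
                referenced.add(d['backing_store'])
        missing = referenced - defined
        assert not missing, "Undefined backing_store names: %s" % sorted(missing)

For tcpcloud-default.yaml after this change, check_backing_stores(template) should pass with both cloudimage1404 and cloudimage1604 defined.
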
diff --git a/tcp_tests/tests/system/test_tcp_install.py b/tcp_tests/tests/system/test_tcp_install.py
index c813f56..cf933cc 100644
--- a/tcp_tests/tests/system/test_tcp_install.py
+++ b/tcp_tests/tests/system/test_tcp_install.py
@@ -69,6 +69,13 @@
'skip_fail': False,
},
{
+ 'description': "Accept salt keys from all the nodes",
+ 'cmd': "salt-key -A -y",
+ 'node_name': 'cfg01.mk22-lab-advanced.local', # hardcoded for now
+ 'retry': {'count': 1, 'delay': 5},
+ 'skip_fail': False,
+ },
+ {
'description': ("Generate inventory for all the nodes to the"
" /srv/salt/reclass/nodes/_generated"),
'cmd': salt_cmd + "'cfg01*' state.sls reclass.storage",
@@ -87,7 +94,7 @@
'description': "Configure ntp on controllers",
'cmd': salt_cmd + "'ctl*' state.sls ntp",
'node_name': 'cfg01.mk22-lab-advanced.local', # hardcoded for now
- 'retry': {'count': 3, 'delay': 5},
+ 'retry': {'count': 5, 'delay': 10},
'skip_fail': False,
},
{
@@ -400,8 +407,6 @@
with underlay.remote(node_name=step['node_name']) as remote:
for x in range(step['retry']['count'], 0, -1):
- time.sleep(5)
-
result = remote.execute(step['cmd'], verbose=True)
# Workaround of exit code 0 from salt in case of failures
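
For context on the changes above: each step dict carries its own 'retry' count and delay, and the unconditional time.sleep(5) inside the loop is dropped. The sketch below shows one way a step like the 'Accept salt keys' entry could be driven after this change; the delay placement, exit-code check, and failure handling are assumptions, since the real loop in test_tcp_install.py is only partially visible in this diff (it also works around salt returning exit code 0 on failures, which is omitted here).

    # Sketch of a retry-driven step runner for the step dicts shown above.
    import time

    def run_step(underlay, step):
        with underlay.remote(node_name=step['node_name']) as remote:
            for attempts_left in range(step['retry']['count'], 0, -1):
                result = remote.execute(step['cmd'], verbose=True)
                if result.exit_code == 0:
                    return result
                if attempts_left > 1:
                    # Wait using the step's own delay instead of a hardcoded sleep(5).
                    time.sleep(step['retry']['delay'])
        if not step['skip_fail']:
            raise RuntimeError("Step failed: {0}".format(step['description']))
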