Deploy Ocata OVS with Ceph
Change-Id: Id5aa57ef76b9fe9121c81c824ff99cc1f3a30b1f
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
index 723515d..eb0a042 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
@@ -17,14 +17,14 @@
- description: Sync grains on ceph mon nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon' state.sls saltutil.sync_grains
+ -C 'I@ceph:mon' saltutil.sync_grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine on ceph mons
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls mine.update
+ -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
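The hunk above fixes the call form: saltutil.sync_grains and mine.update are execution module functions, not state files, so routing them through state.sls looks for an sls named saltutil.sync_grains (or mine.update) that does not exist in these formulas. A minimal sketch of the two call forms, assuming a minion matched by 'I@ceph:mon':

    # Broken: state.sls renders .sls files; there is no saltutil/sync_grains.sls
    salt -C 'I@ceph:mon' state.sls saltutil.sync_grains
    # Fixed: invoke the execution module functions directly
    salt -C 'I@ceph:mon' saltutil.sync_grains
    salt -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update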
@@ -36,9 +36,9 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install ceph mgr if defined
+- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
- if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgt' ; then
+ if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
salt -C 'I@ceph:mgr' state.sls ceph.mgr
fi
node_name: {{ HOSTNAME_CFG01 }}
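match.pillar returns True only when the named pillar path matches, so the old target 'ceph:mgt' was a typo that never matched and ceph.mgr was silently skipped. A quick hedged check to confirm the pillar key actually exists before relying on the guard:

    # pillar.get prints the value (or nothing) for the given pillar path
    salt -C 'I@ceph:mgr' pillar.get ceph:mgr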
@@ -54,7 +54,7 @@
- description: Sync grains
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls saltutil.sync_grains
+ -C 'I@ceph:osd' saltutil.sync_grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -64,18 +64,18 @@
-C 'I@ceph:osd' state.sls ceph.osd.custom
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
+ skip_fail: true
- description: Sync grains
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls saltutil.sync_grains
+ -C 'I@ceph:osd' saltutil.sync_grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Update mine on ceph osd
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@ceph:osd' state.sls mine.update
+ -C 'I@ceph:osd' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
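Setting skip_fail: true on the ceph.osd.custom step above reflects that the state is optional: models that do not define it will fail the step, and the run should continue rather than abort. One hedged way to check whether a model defines it at all:

    # state.show_sls renders the sls if it exists; an error here means the model
    # has no ceph.osd.custom state, which is why the step now tolerates failure
    salt -C 'I@ceph:osd' state.show_sls ceph.osd.custom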
@@ -97,7 +97,7 @@
- description: Install radosgw if exists
cmd: |
if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt -C 'I@ceph:radosgw' state.sls saltutil.sync_grains;
+ salt -C 'I@ceph:radosgw' saltutil.sync_grains;
salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
salt -C 'I@keystone:client' state.sls keystone.client;
fi
@@ -112,32 +112,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Connect ceph to glance if glance is using it
+- description: Connect ceph to glance
cmd: |
- if salt -C 'I@ceph:common and I@glance:server' match.pillar 'ceph:common and glance:server' ; then
- salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-api,glance-glare,glance-registry
- fi
+ salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+ salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+ salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+ salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
-- description: Connect ceph to cinder if cinder is using it
+- description: Connect ceph to cinder and nova
cmd: |
- if salt -C 'I@ceph:common and I@cinder:controller' match.pillar 'ceph:common and cinder:controller' ; then
- salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Connect ceph to nova
- cmd: |
- if salt -C 'I@ceph:common and I@nova:compute' match.pillar 'ceph:common and nova:compute' ; then
- salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt -C 'I@ceph:common and I@nova:compute' state.sls saltutil.sync_grains;
- salt -C 'I@ceph:common and I@nova:compute' state.sls nova
- fi
+ salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+ salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+ salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
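Two things change in the hunk above. First, the match.pillar guards are dropped because this template always deploys glance, cinder, and nova alongside ceph:common, so the conditions were always true. Second, the combined glance restart is split into one call per service: unlike state.sls, which accepts a comma-separated list of sls files, service.restart expects a single service name, so passing "glance-api,glance-glare,glance-registry" would be treated as one (nonexistent) unit. A sketch of the distinction:

    # state.sls takes a comma-separated list of sls files in one call
    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance
    # service.restart restarts exactly one named service per call
    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api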
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index 678103c..4dd76c5 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -57,14 +57,14 @@
- description: Check keystone service-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
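These checks (and the ones in the following hunks) switch from /root/keystonerc to /root/keystonercv3, i.e. credentials scoped to the Keystone v3 API, which Ocata defaults to; the v2.0 rc file can fail for domain-scoped operations. A hypothetical sketch of what such an rc file typically contains (names and values are illustrative, not taken from this repo):

    # Hypothetical /root/keystonercv3 contents
    export OS_AUTH_URL=http://<keystone-vip>:5000/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_NAME=admin
    export OS_USERNAME=admin
    export OS_PASSWORD=<password>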
@@ -79,7 +79,7 @@
- description: Check nova service-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
@@ -89,12 +89,12 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:controller' state.sls cinder -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check cinder list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -116,7 +116,7 @@
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -131,7 +131,7 @@
- description: Check heat service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack orchestration resource type list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 10}
skip_fail: false
@@ -260,79 +260,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
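The whole LVM cinder-volume bootstrap on the ctl nodes (fdisk, pvcreate, vgcreate, the crudini enabled_backends workaround) is removed because this template now backs cinder with Ceph RBD, so no local cinder-volumes volume group is needed. A hedged way to verify the backend after deployment (the pillar path is illustrative; models may structure it differently):

    # With the Ceph backend, cinder pillar data should reference rbd, not LVM
    salt -C 'I@cinder:controller' pillar.get cinder:controller:backend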
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index 5dee9e4..480beef 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -399,6 +399,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -425,6 +428,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -451,9 +457,6 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1