{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}

# Install ceph mons
- description: Update grains
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:common' state.sls salt.minion.grains
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 10
  skip_fail: false

# Run ceph.mon on the keyring-holding nodes first so mon/admin keyrings exist.
- description: Generate keyrings
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 10
  skip_fail: false

- description: Sync grains on ceph mon nodes
  # NOTE: saltutil.sync_grains is an execution-module function, not an SLS
  # file — calling it via state.sls would look for a nonexistent
  # saltutil/sync_grains.sls, so it must be invoked directly.
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:mon' saltutil.sync_grains
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: Update mine on ceph mons
  # NOTE: mine.update is an execution-module function, not an SLS file —
  # calling it via state.sls would try to apply a nonexistent mine/update.sls,
  # so it must be invoked directly.
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

# Deploy the monitor daemons on all mon nodes.
- description: Install ceph mon
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:mon' state.sls ceph.mon
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 5
  skip_fail: false

# NOTE: the pillar match previously checked 'ceph:mgt' (typo), so the
# condition could never match and ceph-mgr was silently skipped; fixed
# to 'ceph:mgr' to agree with the -C 'I@ceph:mgr' target.
- description: Install ceph mgr if defined
  cmd: |
    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
      salt -C 'I@ceph:mgr' state.sls ceph.mgr
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 5}
  skip_fail: false

# Deploy the OSD daemons on all osd nodes.
- description: Install ceph osd
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:osd' state.sls ceph.osd
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 5
  skip_fail: false

- description: Sync grains
  # NOTE: saltutil.sync_grains is an execution-module function, not an SLS
  # file; invoke it directly instead of via state.sls (which would look for
  # a nonexistent saltutil/sync_grains.sls).
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:osd' saltutil.sync_grains
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

# Apply the custom OSD configuration state on the osd nodes.
- description: Install ceph osd nodes
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:osd' state.sls ceph.osd.custom
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 5
  skip_fail: false

- description: Sync grains
  # NOTE: saltutil.sync_grains is an execution-module function, not an SLS
  # file; invoke it directly instead of via state.sls. (This repeats the
  # earlier osd grain sync after ceph.osd.custom has run.)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:osd' saltutil.sync_grains
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Update mine on ceph osd
  # NOTE: mine.update is an execution-module function, not an SLS file;
  # invoke it directly rather than via state.sls.
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:osd' mine.update
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

# Apply ceph.setup on the setup node(s) to create pools and keyrings.
- description: Setup pools, keyrings and maybe crush
  cmd: >-
    salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:setup' state.sls ceph.setup
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 1
    delay: 10
  skip_fail: false

# NOTE(review): this step is an exact duplicate of the 'Setup pools,
# keyrings and maybe crush' step above (same target and same state); it
# presumably should target client nodes — confirm against the model.
- description: Install ceph client
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:setup' state.sls ceph.setup
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: Install radosgw if exists
  # NOTE: saltutil.sync_grains is an execution-module function, not an SLS
  # file, so it is invoked directly here instead of via state.sls.
  cmd: |
    if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
      salt -C 'I@ceph:radosgw' saltutil.sync_grains;
      salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
      salt -C 'I@keystone:client' state.sls keystone.client;
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 5}
  skip_fail: false

# NOTE(review): third invocation of ceph.setup on I@ceph:setup in this
# file — presumably intended for client nodes; confirm against the model
# and deduplicate if redundant.
- description: Install ceph client
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@ceph:setup' state.sls ceph.setup
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

# NOTE(review): service.restart is passed a comma-separated list; Salt's
# service.restart documents a single service name argument — confirm this
# restarts all three glance services on the target minions.
- description: Connect ceph to glance if glance is using it
  cmd: |
    if salt -C 'I@ceph:common and I@glance:server' match.pillar 'ceph:common and glance:server' ; then
      salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
      salt -C 'I@ceph:common and I@glance:server' service.restart glance-api,glance-glare,glance-registry
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 2
    delay: 5
  skip_fail: false

# Re-apply ceph.common + keyrings and the cinder state on controllers that
# have both ceph and cinder pillar data.
- description: Connect ceph to cinder if cinder is using it
  cmd: |
    if salt -C 'I@ceph:common and I@cinder:controller' match.pillar 'ceph:common and cinder:controller' ; then
      salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry:
    count: 2
    delay: 5
  skip_fail: false

- description: Connect ceph to nova
  # NOTE: saltutil.sync_grains is an execution-module function, not an SLS
  # file, so it is invoked directly here instead of via state.sls.
  cmd: |
    if salt -C 'I@ceph:common and I@nova:compute' match.pillar 'ceph:common and nova:compute' ; then
      salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
      salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
      salt -C 'I@ceph:common and I@nova:compute' state.sls nova
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 5}
  skip_fail: false