blob: c62114d7562e2acda0f3a65c7b87311e5b4443c5 [file] [log] [blame]
Dennis Dmitriev010f4cd2016-11-01 20:43:51 +02001# Copyright 2016 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
Dennis Dmitriev2d643bc2017-12-04 12:23:47 +020015from tcp_tests.helpers import exceptions
Dmitry Tyzhnenko2b730a02017-04-07 19:31:32 +030016from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
Dennis Dmitriev2d643bc2017-12-04 12:23:47 +020017from tcp_tests import logger
18
LOG = logger.logger  # module-level logger shared by this manager
Dmitry Tyzhnenko2b730a02017-04-07 19:31:32 +030020
21
class CommonServicesManager(ExecuteCommandsMixin):
    """Manager for the 'common services' installation step.

    Wraps the Salt API to install the common services on the environment
    and to inspect the runtime state of keepalived VIPs and haproxy
    backends on the deployed nodes.
    """

    # Private references to the environment settings and underlay managers,
    # assigned in __init__.
    __config = None
    __underlay = None

    def __init__(self, config, underlay, salt=None):
        """
        :param config: environment settings object; the flag
            ``config.common_services.common_services_installed`` is set
            by :meth:`install`
        :param underlay: underlay manager providing SSH access to nodes
        :param salt: SaltManager instance used for remote execution,
            grains and pillar collection
        """
        self.__config = config
        self.__underlay = underlay
        self._salt = salt
        super(CommonServicesManager, self).__init__(
            config=config, underlay=underlay)

    def install(self, commands):
        """Execute the install commands and mark the step as completed.

        :param commands: commands/states in the format understood by
            ``ExecuteCommandsMixin.execute_commands``
        """
        self.execute_commands(commands,
                              label='Install common services')
        self.__config.common_services.common_services_installed = True

    def get_keepalived_vip_minion_id(self, vip):
        """Get minion ID where keepalived VIP is at the moment

        :param vip: str, IP address of the keepalived VIP
        :raises Exception: if the VIP is absent or found on several nodes
        :return: str, minion id of the single node holding the VIP
        """
        tgt = 'I@keepalived:cluster:enabled:True'
        grains = 'ip_interfaces'
        # Refresh grains first, so 'ip_interfaces' reflects the current
        # VIP placement (the VIP may have migrated between nodes)
        self._salt.run_state(tgt, 'saltutil.refresh_grains')
        # Collect all interface IPs from keepalived-enabled minions and
        # keep the minion ids whose interfaces carry the VIP
        result = self._salt.get_grains(tgt=tgt, grains=grains)[0]
        minion_ids = [
            minion_id for minion_id, interfaces in result.items()
            for interface, ips in interfaces.items()
            for ip in ips
            if ip == vip
        ]
        LOG.debug("VIP '{0}' found on minions {1}".format(vip, minion_ids))
        if len(minion_ids) != 1:
            raise Exception("VIP {0} is expected on a single node. Actual "
                            "nodes with VIP: {1}".format(vip, minion_ids))
        return minion_ids[0]

    def get_keepalived_vips(self):
        """Fetch the keepalived instance pillars from all cluster nodes.

        :return: dict {<minion_id>: {<instance_name>: {'address': ...,
            'password': ..., 'virtual_router_id': ..., 'priority': ...},
            ...}, ...}
        """
        tgt = 'I@keepalived:cluster:enabled:True'
        pillar = 'keepalived:cluster:instance'
        return self._salt.get_pillar(tgt=tgt, pillar=pillar)[0]

    def check_keepalived_pillar(self):
        """Check the keepalived pillars for VIPs

        The pillar data is fetched with :meth:`get_keepalived_vips` in
        the format:
            {
                <minion_id>: {
                    <keepalived instance>: {
                        <address>: str,
                        <password>: str,
                        <virtual_router_id>: int,
                        <priority>: int
                    },
                    ...
                },
            }

        Check for:
        - the same VIP is used for the same 'virtual_router_id'
        - the same password is used for the same 'virtual_router_id'
        - no 'virtual_router_id' or VIP doubles in different
          keepalived instances on the same node
        - no 'priority' doubles inside the same 'virtual_router_id'
          on different nodes

        :raises exceptions.SaltPillarError: on any inconsistency found
        :return dict: {
                <str:vip1> : {
                    'instance_name': <str>
                    'virtual_router_id': <int>,
                    'password': <str>,
                    'nodes' : {<str:node1>: <int:priority>,
                               <str:node2>: <int:priority>,
                               ...},
                },
                <str:vip2> : { ...
                },
            }
        """

        def check_single_address(vips, minion_id, instance, data):
            # A virtual_router_id must always map to the same
            # (address, instance) pair across the whole cluster
            for vip in vips:
                if vips[vip]['virtual_router_id'] == data['virtual_router_id']\
                        and (vip != data['address'] or
                             vips[vip]['instance_name'] != instance):
                    message = (
                        "'virtual_router_id': {0} for keepalived instance "
                        "{1}: {2} is already used for {3}: {4} on nodes {5}"
                        .format(data['virtual_router_id'],
                                instance, data['address'],
                                vips[vip]['instance_name'],
                                vip,
                                vips[vip]['nodes'].keys())
                    )
                    raise exceptions.SaltPillarError(
                        minion_id,
                        'keepalived:cluster:instance',
                        message)

        def check_single_router_id(vips, minion_id, instance, data):
            # An address must always be served by the same virtual_router_id
            for vip in vips:
                if vips[vip]['virtual_router_id'] != data['virtual_router_id']\
                        and vip == data['address']:
                    message = (
                        "'virtual_router_id': {0} for keepalived instance "
                        "{1}: {2} is not the same as for {3}: {4} on nodes {5}"
                        .format(data['virtual_router_id'],
                                instance, data['address'],
                                vips[vip]['instance_name'],
                                vip,
                                vips[vip]['nodes'].keys())
                    )
                    raise exceptions.SaltPillarError(
                        minion_id,
                        'keepalived:cluster:instance',
                        message)

        pillar_vips = self.get_keepalived_vips()
        vips = {}
        for minion_id in pillar_vips:
            for instance, data in pillar_vips[minion_id].items():
                address = data['address']
                password = data['password']
                virtual_router_id = data['virtual_router_id']
                priority = data['priority']

                if address not in vips:
                    # Check that there is the same VIP
                    # for the same virtual_router_id
                    check_single_address(vips, minion_id, instance, data)

                    # Add new VIP
                    vips[address] = {
                        'instance_name': instance,
                        'virtual_router_id': virtual_router_id,
                        'password': password,
                        'nodes': {
                            minion_id: priority,
                        }
                    }
                else:
                    # Check that there is the same virtual_router_id
                    # for the same VIP
                    check_single_router_id(vips, minion_id, instance, data)
                    if vips[address]['password'] != password:
                        message = (
                            "'password': {0} for keepalived instance "
                            "{1}: {2} is not the same as for {3}: {4} on "
                            "nodes {5}".format(data['password'],
                                               instance, data['address'],
                                               vips[address]['instance_name'],
                                               address,
                                               vips[address]['nodes'].keys())
                        )
                        raise exceptions.SaltPillarError(
                            minion_id,
                            'keepalived:cluster:instance',
                            message)

                    # Priorities must be unique inside one VIP / router id
                    if priority in vips[address]['nodes'].values():
                        message = (
                            "'priority': {0} for keepalived instance "
                            "{1}: {2} is the same as for {3}: {4} on "
                            "nodes {5}".format(data['priority'],
                                               instance, data['address'],
                                               vips[address]['instance_name'],
                                               address,
                                               vips[address]['nodes'].keys())
                        )
                        raise exceptions.SaltPillarError(
                            minion_id,
                            'keepalived:cluster:instance',
                            message)

                    # Add data to the vips
                    vips[address]['nodes'][minion_id] = priority

        LOG.debug("keepalived pillars check passed: {0}".format(vips))
        return vips

    def get_haproxy_status(self, tgt):
        """Get haproxy status for all backends on a specified minion

        :param tgt: str, Salt target matching exactly one minion
        :raises AssertionError: if the target matched more than one minion
        :return: dict {<proxy name>: {<service name>: {'scur': ...,
            'smax': ..., 'status': ..., 'rate': ...}, ...}, ...}
        """
        cmd = ("echo 'show stat' | "
               "socat 'UNIX-CONNECT:/run/haproxy/admin.sock' STDIO")
        # Dump the haproxy statistics in CSV format over the admin socket
        res = self._salt.run_state(tgt, 'cmd.run', cmd)
        output = res[0]['return'][0]
        assert len(output.keys()) == 1, "Please specify a single minion in tgt"
        # NOTE: dict.keys() is not subscriptable on Python 3, so take the
        # single key via the iterator protocol (works on Python 2 and 3)
        minion_id = next(iter(output))

        haproxy_status = {}
        for line in output[minion_id].splitlines():
            # Skip the CSV header line and any blank lines
            if not line or line.startswith("#"):
                continue
            status = line.split(",")
            pxname = status[0]
            svname = status[1]
            if pxname not in haproxy_status:
                haproxy_status[pxname] = {}
            # Field indexes follow the haproxy 'show stat' CSV layout
            haproxy_status[pxname][svname] = {
                'scur': status[4],    # sessions current
                'smax': status[5],    # sessions max
                'status': status[17],  # status: UP or DOWN
                'rate': status[33],   # sessions rate
            }
        LOG.debug("Haproxy status: \n{0}".format(haproxy_status))
        return haproxy_status