Test cases for services failover
- keepalived restart # 4756965
- keepalived stop # 3385682
Changes:
- RallyManager refactored to use the updated rally container with tempest
- Added 'rally.create_rally_task' and 'rally.run_task' methods to
  generate load on the OpenStack cluster using the specified task
  config (see the usage sketch after this list)
- a new mark for test cases that configure the 'rally' fixture:
@pytest.mark.with_rally(rally_node=<str>,
prepare_openstack=<bool>,
prepare_tempest=<bool>)
- a new method common_services_deployed.check_keepalived_pillar()
  to check the consistency of the keepalived pillar settings
- a new fixture 'func_name' that returns the current test function name
- a new method 'underlay.get_target_node_names(target='ctl')' to get
  a list of all nodes whose names start with the specified target string
- a new method underlay.delayed_call() that schedules the specified
  shell command to run in the background on the specified node
  several minutes later
- the 'grab_versions' fixture now also works for failed tests
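
A hypothetical usage sketch for the new mark and helpers (the task
config path, the restart command, and the exact 'delayed_call'
arguments are illustrative assumptions, not part of this change):

    @pytest.mark.with_rally(rally_node='gtw01',
                            prepare_openstack=True,
                            prepare_tempest=False)
    def test_keepalived_restart_under_load(self, rally, underlay):
        # Generate background load with an assumed task config file
        rally.create_rally_task('/root/rally/rally_load_task.json')
        rally.run_task('/root/rally/rally_load_task.json')

        # Restart keepalived on each 'ctl' node a few minutes from
        # now, in the background (argument names are assumed)
        for node_name in underlay.get_target_node_names(target='ctl'):
            underlay.delayed_call('service keepalived restart',
                                  node_name=node_name)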
Change-Id: Icede63163ae0b3569e8463563cb548e2d314899d
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index 658657a..e29cdd6 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -12,7 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tcp_tests.helpers import exceptions
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+from tcp_tests import logger
+
+LOG = logger.logger
class CommonServicesManager(ExecuteCommandsMixin):
@@ -32,3 +36,162 @@
self.execute_commands(commands,
label='Install common services')
self.__config.common_services.common_services_installed = True
+
+ def get_keepalived_vip_minion_id(self, vip):
+ """Get minion ID where keepalived VIP is at the moment"""
+ tgt = 'I@keepalived:cluster:enabled:True'
+ grains = 'ip_interfaces'
+ result = self._salt.get_grains(tgt=tgt, grains=grains)[0]
+ minion_ids = [
+ minion_id for minion_id, interfaces in result.items()
+ for interface, ips in interfaces.items()
+ for ip in ips
+ if ip == vip
+ ]
+ LOG.debug("VIP '{0}' found on minions {1}".format(vip, minion_ids))
+ if len(minion_ids) != 1:
+ raise Exception("VIP {0} is expected on a single node. Actual "
+ "nodes with VIP: {1}".format(vip, minion_ids))
+ return minion_ids[0]
+
+ def get_keepalived_vips(self):
+ tgt = 'I@keepalived:cluster:enabled:True'
+ pillar = 'keepalived:cluster:instance'
+ return self._salt.get_pillar(tgt=tgt, pillar=pillar)[0]
+
+ def check_keepalived_pillar(self):
+ """Check the keepalived pillars for VIPs
+
+ Check for:
+ - the same VIP is used for the same 'virtual_router_id'
+ - the same password is used for the same 'virtual_router_id'
+ - no 'virtual_router_id' or VIP doubles in different
+ keepalived instances on the same node
+ - no 'priority' doubles inside the same 'virtual_router_id'
+ on different nodes
+
+        Pillar data, as returned by get_keepalived_vips(): {
+ <minion_id>: {
+ <keepalived instance>: {
+ <address>: str,
+ <password>: str,
+ <virtual_router_id>: int,
+ <priority>: int
+ },
+ ...
+ },
+ }
+ :return dict: {
+ <str:vip1> : {
+ 'instance_name': <str>
+ 'virtual_router_id': <int>,
+ 'password': <str>,
+ 'nodes' : {<str:node1>: <int:priority>,
+ <str:node2>: <int:priority>,
+ ...},
+ },
+ <str:vip2> : { ...
+ },
+ }
+ """
+
+        def check_single_address(vips, minion_id, instance, data):
+            """Fail if this 'virtual_router_id' is already used elsewhere"""
+ for vip in vips:
+ if vips[vip]['virtual_router_id'] == data['virtual_router_id']\
+ and (vip != data['address'] or
+ vips[vip]['instance_name'] != instance):
+ message = (
+ "'virtual_router_id': {0} for keepalived instance "
+ "{1}: {2} is already used for {3}: {4} on nodes {5}"
+ .format(data['virtual_router_id'],
+ instance, data['address'],
+ vips[vip]['instance_name'],
+ vip,
+ vips[vip]['nodes'].keys())
+ )
+ raise exceptions.SaltPillarError(
+ minion_id,
+ 'keepalived:cluster:instance',
+ message)
+
+        def check_single_router_id(vips, minion_id, instance, data):
+            """Fail if the VIP reappears with another 'virtual_router_id'"""
+ for vip in vips:
+ if vips[vip]['virtual_router_id'] != data['virtual_router_id']\
+ and vip == data['address']:
+ message = (
+ "'virtual_router_id': {0} for keepalived instance "
+ "{1}: {2} is not the same as for {3}: {4} on nodes {5}"
+ .format(data['virtual_router_id'],
+ instance, data['address'],
+ vips[vip]['instance_name'],
+ vip,
+ vips[vip]['nodes'].keys())
+ )
+ raise exceptions.SaltPillarError(
+ minion_id,
+ 'keepalived:cluster:instance',
+ message)
+
+ pillar_vips = self.get_keepalived_vips()
+ vips = {}
+ for minion_id in pillar_vips:
+ for instance, data in pillar_vips[minion_id].items():
+ address = data['address']
+ password = data['password']
+ virtual_router_id = data['virtual_router_id']
+ priority = data['priority']
+
+ if address not in vips:
+                    # New VIP: ensure its 'virtual_router_id' is not
+                    # already used by a different VIP or instance
+ check_single_address(vips, minion_id, instance, data)
+
+ # Add new VIP
+ vips[address] = {
+ 'instance_name': instance,
+ 'virtual_router_id': virtual_router_id,
+ 'password': password,
+ 'nodes': {
+ minion_id: priority,
+ }
+ }
+ else:
+                    # Existing VIP: ensure it keeps the same
+                    # 'virtual_router_id' on every node
+ check_single_router_id(vips, minion_id, instance, data)
+ if vips[address]['password'] != password:
+ message = (
+ "'password': {0} for keepalived instance "
+ "{1}: {2} is not the same as for {3}: {4} on "
+ "nodes {5}".format(data['password'],
+ instance, data['address'],
+ vips[address]['instance_name'],
+ address,
+ vips[address]['nodes'].keys())
+ )
+ raise exceptions.SaltPillarError(
+ minion_id,
+ 'keepalived:cluster:instance',
+ message)
+
+                    if priority in vips[address]['nodes'].values():
+ message = (
+ "'priority': {0} for keepalived instance "
+ "{1}: {2} is the same as for {3}: {4} on "
+ "nodes {5}".format(data['priority'],
+ instance, data['address'],
+ vips[address]['instance_name'],
+ address,
+ vips[address]['nodes'].keys())
+ )
+ raise exceptions.SaltPillarError(
+ minion_id,
+ 'keepalived:cluster:instance',
+ message)
+
+                    # Register this node's priority for the existing VIP
+ vips[address]['nodes'][minion_id] = priority
+
+ LOG.debug("keepalived pillars check passed: {0}".format(vips))
+ return vips
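
A minimal sketch of how a failover test case might combine the new
manager methods (the fixture wiring and assertion are illustrative):

    def test_keepalived_vips(self, common_services_deployed):
        # Validate pillar consistency and collect VIPs with their nodes
        vips = common_services_deployed.check_keepalived_pillar()
        for vip, data in vips.items():
            # Each VIP must currently reside on exactly one node
            minion_id = \
                common_services_deployed.get_keepalived_vip_minion_id(vip)
            assert minion_id in data['nodes']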