Migrating to Python v3
- support for Python v3.8.x
- support for Python v3.5.x
- new tag, 2019.2.8
- updates class generation and iterators
- unittests updated with coverage >75%
- new coverage routines
- unittests profiling
- full fake data for unittests
- unittest testrun is ~1.5 seconds long
Bugfixes
- 34834, proper use of 'sudo' option
- multiple proper iterator use
- 37919, show warning when installed and candidate versions
are newer compared to the release version
Change-Id: Idd6b889f7ce94ae0c832e2f0a0346e4fdc3264a3
Related-PROD: PROD-34834 PROD-34664 PROD-37919
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..f4a5869
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[report]
+
+exclude_lines =
+ if __name__ == .__main__.:
diff --git a/cfg_checker/cfg_check.py b/cfg_checker/cfg_check.py
index 1a2d79f..5c986a2 100644
--- a/cfg_checker/cfg_check.py
+++ b/cfg_checker/cfg_check.py
@@ -47,7 +47,7 @@
args, unknown = parser.parse_known_args()
except TypeError:
logger_cli.info("\n# Please, check arguments")
- sys.exit(0)
+ sys.exit(1)
if unknown:
logger_cli.error(
diff --git a/cfg_checker/cli/command.py b/cfg_checker/cli/command.py
index 9ac05f3..0a892d8 100644
--- a/cfg_checker/cli/command.py
+++ b/cfg_checker/cli/command.py
@@ -35,11 +35,11 @@
# check commands
if not hasattr(args, 'type') or not args.type:
logger_cli.info("\n# Please, type a command listed above")
- return 0
+ return 1
_type = args.type.replace("-", "_") if "-" in args.type else args.type
if command not in commands:
logger_cli.info("\n# Please, type a command listed above")
- return 0
+ return 1
elif _type not in commands[command]:
# check type
logger_cli.info(
@@ -47,7 +47,7 @@
command
)
)
- return 0
+ return 1
else:
# form function name to call
_method_name = "do_" + _type
@@ -86,7 +86,7 @@
args, unknown = my_parser.parse_known_args()
except TypeError:
logger_cli.info("\n# Please, check arguments")
- sys.exit(0)
+ sys.exit(1)
if unknown:
logger_cli.error(
diff --git a/cfg_checker/cli/network.py b/cfg_checker/cli/network.py
index 5c5a4e2..6d3059e 100644
--- a/cfg_checker/cli/network.py
+++ b/cfg_checker/cli/network.py
@@ -1,4 +1,4 @@
-from command import cli_command
+from .command import cli_command
def entrypoint():
diff --git a/cfg_checker/cli/packages.py b/cfg_checker/cli/packages.py
index c44e5bc..b1319a1 100644
--- a/cfg_checker/cli/packages.py
+++ b/cfg_checker/cli/packages.py
@@ -1,4 +1,4 @@
-from command import cli_command
+from .command import cli_command
def entrypoint():
diff --git a/cfg_checker/cli/reclass.py b/cfg_checker/cli/reclass.py
index f479a72..652f00a 100644
--- a/cfg_checker/cli/reclass.py
+++ b/cfg_checker/cli/reclass.py
@@ -1,4 +1,4 @@
-from command import cli_command
+from .command import cli_command
def entrypoint():
diff --git a/cfg_checker/clients/__init__.py b/cfg_checker/clients/__init__.py
index c827a38..86d731f 100644
--- a/cfg_checker/clients/__init__.py
+++ b/cfg_checker/clients/__init__.py
@@ -21,9 +21,5 @@
# create it once
if salt is None:
salt = SaltRemote()
- # do most expensive operation with no strict timeout possible
- # all nodes that answer ping
- # salt.nodes_active = salt.get_active_nodes()
-
# return once required
return salt
diff --git a/cfg_checker/common/__init__.py b/cfg_checker/common/__init__.py
index 8316e39..752373f 100644
--- a/cfg_checker/common/__init__.py
+++ b/cfg_checker/common/__init__.py
@@ -1,10 +1,8 @@
-import const
+from cfg_checker.common.log import logger, logger_cli
-from log import logger, logger_cli
+from cfg_checker.common.other import Utils
-from other import Utils
-
-from settings import config
+from cfg_checker.common.settings import config
def nested_set(_d, _keys, _value):
@@ -18,7 +16,6 @@
utils = Utils()
-const = const
logger = logger
logger_cli = logger_cli
config = config
diff --git a/cfg_checker/common/config_file.py b/cfg_checker/common/config_file.py
index 87f4759..c70e5a6 100644
--- a/cfg_checker/common/config_file.py
+++ b/cfg_checker/common/config_file.py
@@ -1,7 +1,7 @@
import configparser
import os
-from cfg_checker.common import logger_cli
+from . import logger_cli
class ConfigFile(object):
diff --git a/cfg_checker/common/const.py b/cfg_checker/common/const.py
index 9e5fea2..685c79a 100644
--- a/cfg_checker/common/const.py
+++ b/cfg_checker/common/const.py
@@ -75,7 +75,7 @@
"unk": "uknown"
}
-ubuntu_releases = ["trusty", "xenial", "ubuntu"]
+ubuntu_releases = ["trusty", "xenial", "ubuntu", "bionic"]
all_arch = ["amd64"]
repo_types = {
"main": "Officially supported software",
diff --git a/cfg_checker/common/exception.py b/cfg_checker/common/exception.py
index 52aab2d..2536099 100644
--- a/cfg_checker/common/exception.py
+++ b/cfg_checker/common/exception.py
@@ -1,5 +1,3 @@
-from exceptions import Exception
-
class CheckerBaseExceptions(Exception):
pass
diff --git a/cfg_checker/common/file_utils.py b/cfg_checker/common/file_utils.py
index c550184..398ea66 100644
--- a/cfg_checker/common/file_utils.py
+++ b/cfg_checker/common/file_utils.py
@@ -67,7 +67,7 @@
_dict = {
'fd': fd.fileno(),
- 'mode': oct(mode & 0777),
+ 'mode': oct(mode & 0o777),
'device': hex(dev),
'inode': ino,
'hard_links': nlink,
@@ -102,3 +102,11 @@
return "... folder '{}' created".format(_folder)
else:
return "... folder is at '{}'".format(_folder)
+
+
+def ensure_folder_removed(_folder):
+ if os.path.exists(_folder):
+ os.rmdir(_folder)
+ return "... folder '{}' removed".format(_folder)
+ else:
+ return "... folder '{}' not exists".format(_folder)
diff --git a/cfg_checker/common/salt_utils.py b/cfg_checker/common/salt_utils.py
index 7bd6ce7..dd6fbec 100644
--- a/cfg_checker/common/salt_utils.py
+++ b/cfg_checker/common/salt_utils.py
@@ -5,15 +5,15 @@
import os
import time
+import requests
+
from cfg_checker.common import config, logger, logger_cli
from cfg_checker.common.exception import InvalidReturnException, SaltException
from cfg_checker.common.other import shell
-import requests
-
def _extract_password(_raw):
- if not isinstance(_raw, unicode):
+ if not isinstance(_raw, str):
raise InvalidReturnException(_raw)
else:
try:
@@ -69,10 +69,14 @@
:return: password string
"""
- _cmd = "salt-call"
- _args = "--out=json pillar.get _param:salt_api_password"
+ _cmd = []
+ if config.ssh_uses_sudo:
+ _cmd = ["sudo"]
+ # salt commands
+ _cmd.append("salt-call")
+ _cmd.append("--out=json pillar.get _param:salt_api_password")
try:
- _result = shell(" ".join([_cmd, _args]))
+ _result = shell(" ".join(_cmd))
except OSError as e:
raise SaltException(
"Salt error calling '{}': '{}'\n"
@@ -386,7 +390,7 @@
param=path,
kwarg=_kwarg
)
- return salt_output[salt_output.keys()[0]]
+ return [*salt_output.values()][0]
def f_append_master(self, path, strings_list, makedirs=True):
_kwarg = {
@@ -400,7 +404,7 @@
param=_args,
kwarg=_kwarg
)
- return salt_output[salt_output.keys()[0]]
+ return [*salt_output.values()][0]
def mkdir(self, target, path, tgt_type=None):
salt_output = self.cmd(
@@ -452,7 +456,7 @@
param=_arg,
kwarg=_kwarg
)
- return salt_output[salt_output.keys()[0]]
+ return [*salt_output.values()][0]
def cache_file(self, target, source_path):
salt_output = self.cmd(
@@ -460,7 +464,7 @@
"cp.cache_file",
param=source_path
)
- return salt_output[salt_output.keys()[0]]
+ return [*salt_output.values()][0]
def get_file(self, target, source_path, target_path, tgt_type=None):
return self.cmd(
diff --git a/cfg_checker/common/settings.py b/cfg_checker/common/settings.py
index 33e7c25..92c17a5 100644
--- a/cfg_checker/common/settings.py
+++ b/cfg_checker/common/settings.py
@@ -1,10 +1,10 @@
import os
-from exception import ConfigException
+from cfg_checker.common.exception import ConfigException
-from log import logger_cli
+from cfg_checker.common.log import logger_cli
-from other import utils
+from cfg_checker.common.other import utils
pkg_dir = os.path.dirname(__file__)
pkg_dir = os.path.join(pkg_dir, os.pardir, os.pardir)
diff --git a/cfg_checker/helpers/console_utils.py b/cfg_checker/helpers/console_utils.py
index 994143c..1a2c184 100644
--- a/cfg_checker/helpers/console_utils.py
+++ b/cfg_checker/helpers/console_utils.py
@@ -16,8 +16,8 @@
new_size = len(note)
if self._note_size > new_size:
_suffix = ' '*(self._note_size - new_size)
- _percent = (100 * index) / self.total
- _index = (self.bar_size * index) / self.total
+ _percent = (100 * index) // self.total
+ _index = (self.bar_size * index) // self.total
# clear the line
sys.stdout.write('\r')
# print new progress
diff --git a/cfg_checker/helpers/errors.py b/cfg_checker/helpers/errors.py
index 95e1495..27ed242 100644
--- a/cfg_checker/helpers/errors.py
+++ b/cfg_checker/helpers/errors.py
@@ -25,11 +25,15 @@
0: "Unknown error"
}
- def __init__(self, area_code, delimiter='-'):
+ def __init__(self, area_code, delimiter='-', folder=None):
self._area_code = area_code
self._delimiter = delimiter
self._index += 1
+ # save folder
+ if folder:
+ self._error_logs_folder_name = folder
+
# init the error log storage folder
_folder = os.path.join(pkg_dir, self._error_logs_folder_name)
self._conf_filename = os.path.join(
@@ -98,7 +102,7 @@
_code = self._format_error_code(index)
# prepare data as string list
_d = self._errors[index]['data']
- _data = [" {}: {}".format(_k, _v) for _k, _v in _d.iteritems()]
+ _data = [" {}: {}".format(_k, _v) for _k, _v in _d.items()]
# format message
_msg = "### {}:\n Description: {}\n{}".format(
_code,
@@ -170,9 +174,11 @@
for _type in self._types.keys():
_len = len(
- filter(
- lambda i: self._errors[i]['type'] == _type,
- self._errors
+ list(
+ filter(
+ lambda i: self._errors[i]['type'] == _type,
+ self._errors
+ )
)
)
if _len:
diff --git a/cfg_checker/helpers/tgz.py b/cfg_checker/helpers/tgz.py
index 754f0de..5be00e2 100644
--- a/cfg_checker/helpers/tgz.py
+++ b/cfg_checker/helpers/tgz.py
@@ -50,11 +50,14 @@
else:
self.basefile = _filepath
- def get_file(self, name):
+ def get_file(self, name, decode=False):
if self.has_file(name):
with tarfile.open(self.basefile, "r:gz") as tgz:
_tgzitem = tgz.extractfile(tgz.getmember(name))
- return _tgzitem.read()
+ if decode:
+ return _tgzitem.read().decode('utf-8')
+ else:
+ return _tgzitem.read()
else:
return None
diff --git a/cfg_checker/helpers/zip.py b/cfg_checker/helpers/zip.py
deleted file mode 100644
index b050030..0000000
--- a/cfg_checker/helpers/zip.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os
-import zipfile
-
-
-class ZIPFile(object):
- def __init__(self, _filepath, label=None):
-
- return
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index 482bdfa..59f3781 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -124,7 +124,7 @@
continue
# build map based on IPs and save info too
- for if_name, _dat in _pillar.iteritems():
+ for if_name, _dat in _pillar.items():
# get proper IF name
_if_name = if_name if 'name' not in _dat else _dat['name']
# place it
@@ -195,11 +195,11 @@
logger_cli.info("-> mapping IPs")
# match interfaces by IP subnets
- for host, node_data in salt_master.nodes.iteritems():
+ for host, node_data in salt_master.nodes.items():
if not salt_master.is_node_available(host):
continue
- for net_name, net_data in node_data['networks'].iteritems():
+ for net_name, net_data in node_data['networks'].items():
# cut net name
_i = net_name.find('@')
_name = net_name if _i < 0 else net_name[:_i]
@@ -321,7 +321,7 @@
# debug, print built tree
# logger_cli.debug("# '{}'".format(_ifname))
- lvls = _tree.keys()
+ lvls = list(_tree.keys())
lvls.sort()
n = len(lvls)
m = max([len(_tree[k].keys()) for k in _tree.keys()])
@@ -330,11 +330,14 @@
while True:
_lv = lvls.pop(0)
# get all interfaces on this level
- nets = _tree[_lv].keys()
+ nets = iter(_tree[_lv].keys())
while True:
y = 0
# get next interface
- _net = nets.pop(0)
+ try:
+ _net = next(nets)
+ except StopIteration:
+ break
# all nets
_a = [_net]
# put current interface if this is only one left
diff --git a/cfg_checker/modules/network/network_errors.py b/cfg_checker/modules/network/network_errors.py
index 6c41021..7159a36 100644
--- a/cfg_checker/modules/network/network_errors.py
+++ b/cfg_checker/modules/network/network_errors.py
@@ -23,9 +23,9 @@
NET_PING_ERROR = next(_c)
NET_PING_NOT_RESOLVED = next(_c)
- def __init__(self):
- super(NetworkErrors, self).__init__("NET")
+ _initialized = False
+ def _add_types(self):
self.add_error_type(
self.NET_MTU_MISMATCH,
"MTU mismatch on runtime interface and in reclass"
@@ -82,6 +82,21 @@
self.NET_PING_NOT_RESOLVED,
"Host not resolved while conducting Ping"
)
+ self._initialized = True
+
+ def __init__(self, folder=None):
+ super(NetworkErrors, self).__init__("NET", folder=folder)
+
+ if not self._initialized:
+ self._add_types()
+ self._initialized = True
+
+ def __call__(self):
+ if not self._initialized:
+ self._add_types()
+ self._initialized = True
+
+ return self
del _c
diff --git a/cfg_checker/modules/network/pinger.py b/cfg_checker/modules/network/pinger.py
index 266727b..0500284 100644
--- a/cfg_checker/modules/network/pinger.py
+++ b/cfg_checker/modules/network/pinger.py
@@ -44,7 +44,7 @@
def ping_nodes(self, network_cidr_str):
# Conduct actual ping using network CIDR
logger_cli.info("# Collecting node pairs")
- _fake_if = ipaddress.IPv4Interface(unicode(network_cidr_str))
+ _fake_if = ipaddress.IPv4Interface(str(network_cidr_str))
_net = _fake_if.network
# collect nodes and ips from reclass
nodes = self._collect_node_addresses(_net)
@@ -69,7 +69,7 @@
"targets": {}
}
- for tgt_host, tgt_data in nodes.iteritems():
+ for tgt_host, tgt_data in nodes.items():
_t = _packets[src_host]["targets"]
for tgt_if in tgt_data:
tgt_if_name = tgt_if['name']
@@ -110,7 +110,7 @@
_progress = Progress(_count)
_progress_index = 0
_node_index = 0
- for src, src_data in _packets.iteritems():
+ for src, src_data in _packets.items():
_targets = src_data["targets"]
_node_index += 1
# create 'targets.json' on source host
@@ -154,7 +154,7 @@
)
continue
# Handle return codes
- for tgt_node, _tgt_ips in _result.iteritems():
+ for tgt_node, _tgt_ips in _result.items():
for _params in _tgt_ips:
_body = "{}({}) --{}--> {}({}@{})\n".format(
src,
diff --git a/cfg_checker/modules/packages/__init__.py b/cfg_checker/modules/packages/__init__.py
index 9d55c05..41dfca1 100644
--- a/cfg_checker/modules/packages/__init__.py
+++ b/cfg_checker/modules/packages/__init__.py
@@ -1,7 +1,7 @@
from cfg_checker.helpers import args_utils
from cfg_checker.modules.packages.repos import RepoManager
-import checker
+from . import checker
command_help = "Package versions check (Candidate vs Installed)"
diff --git a/cfg_checker/modules/packages/checker.py b/cfg_checker/modules/packages/checker.py
index 92d9e1c..8f30f3c 100644
--- a/cfg_checker/modules/packages/checker.py
+++ b/cfg_checker/modules/packages/checker.py
@@ -8,7 +8,7 @@
from cfg_checker.nodes import salt_master
from cfg_checker.reports import reporter
-from versions import DebianVersion, PkgVersions, VersionCmpResult
+from .versions import DebianVersion, PkgVersions, VersionCmpResult
class CloudPackageChecker(object):
@@ -78,6 +78,7 @@
# sort packages
_pn, _val = all_packages.popitem()
_c = _val['desc']['section']
+ _rkeys = _val['results'].keys()
if not full:
# Check if this packet has errors
@@ -125,9 +126,9 @@
_data['unlisted'].update({
_pn: _val
})
- _eu += _val['results'].keys().count(const.VERSION_ERR)
- _wu += _val['results'].keys().count(const.VERSION_WARN)
- _du += _val['results'].keys().count(const.VERSION_DOWN)
+ _eu += sum(x == const.VERSION_ERR for x in _rkeys)
+ _wu += sum(x == const.VERSION_WARN for x in _rkeys)
+ _du += sum(x == const.VERSION_DOWN for x in _rkeys)
# mirantis/critical
# elif len(_c) > 0 and _c != 'System':
elif _val['is_mirantis']:
@@ -135,25 +136,25 @@
_data['critical'].update({
_pn: _val
})
- _ec += _val['results'].keys().count(const.VERSION_ERR)
- _wc += _val['results'].keys().count(const.VERSION_WARN)
- _dc += _val['results'].keys().count(const.VERSION_DOWN)
+ _ec += sum(x == const.VERSION_ERR for x in _rkeys)
+ _wc += sum(x == const.VERSION_WARN for x in _rkeys)
+ _dc += sum(x == const.VERSION_DOWN for x in _rkeys)
# system
elif _c == 'System':
_data['system'].update({
_pn: _val
})
- _es += _val['results'].keys().count(const.VERSION_ERR)
- _ws += _val['results'].keys().count(const.VERSION_WARN)
- _ds += _val['results'].keys().count(const.VERSION_DOWN)
+ _es += sum(x == const.VERSION_ERR for x in _rkeys)
+ _ws += sum(x == const.VERSION_WARN for x in _rkeys)
+ _ds += sum(x == const.VERSION_DOWN for x in _rkeys)
# rest
else:
_data['other'].update({
_pn: _val
})
- _eo += _val['results'].keys().count(const.VERSION_ERR)
- _wo += _val['results'].keys().count(const.VERSION_WARN)
- _do += _val['results'].keys().count(const.VERSION_DOWN)
+ _eo += sum(x == const.VERSION_ERR for x in _rkeys)
+ _wo += sum(x == const.VERSION_WARN for x in _rkeys)
+ _do += sum(x == const.VERSION_DOWN for x in _rkeys)
_progress.end()
@@ -244,7 +245,7 @@
_total_processed = 0
# Collect packages from all of the nodes in flat dict
_all_packages = {}
- for node_name, node_value in salt_master.nodes.iteritems():
+ for node_name, node_value in salt_master.nodes.items():
_uniq_len = len(_all_packages.keys())
_progress_index += 1
# progress updates shown before next node only
@@ -256,7 +257,7 @@
_total_processed
)
)
- for _name, _value in node_value['packages'].iteritems():
+ for _name, _value in node_value['packages'].items():
_total_processed += 1
# Parse versions from nodes
_ver_ins = DebianVersion(_value['installed'])
@@ -309,9 +310,9 @@
_vs = {}
_sections = {}
_apps = {}
- for s, apps in _r.iteritems():
- for a, versions in apps.iteritems():
- for v, repos in versions.iteritems():
+ for s, apps in _r.items():
+ for a, versions in apps.items():
+ for v, repos in versions.items():
for repo in repos:
if v not in _vs:
_vs[v] = []
@@ -324,11 +325,13 @@
_apps[v].append(a)
# search for the newest version among filtered
_r_desc = []
- _vs_keys = _vs.keys()
- if _vs_keys:
- _newest = _newest = DebianVersion(_vs_keys.pop())
- else:
+ _vs_keys = iter(_vs.keys())
+ # get next version, if any
+ try:
+ _newest = DebianVersion(next(_vs_keys))
+ except StopIteration:
_newest = DebianVersion('')
+ # iterate others, if any
for v in _vs_keys:
_this = DebianVersion(v)
if _this > _newest:
diff --git a/cfg_checker/modules/packages/repos.py b/cfg_checker/modules/packages/repos.py
index 00c438f..57d8b9e 100644
--- a/cfg_checker/modules/packages/repos.py
+++ b/cfg_checker/modules/packages/repos.py
@@ -43,32 +43,39 @@
def _get_value_index(_di, value, header=None):
# Mainteiner names often uses specific chars
- # so make sure that value saved is unicode not str
- _val = unicode(value, 'utf-8') if isinstance(value, str) else value
+ # so make sure that the value saved is a decoded str, not bytes
+ # Python2
+ # _val = str(value, 'utf-8') if isinstance(value, str) else value
+ # Python3 has always utf-8 decoded value
+ _val = value
if header:
- if not filter(lambda i: _di[i]["header"] == header, _di):
- _index = unicode(len(_di.keys()) + 1)
+ try:
+ _ = next(filter(lambda i: _di[i]["header"] == header, _di))
+ # iterator not empty, find index
+ for _k, _v in _di.items():
+ if _v["header"] == header:
+ _index = _k
+ except StopIteration:
+ _index = str(len(_di.keys()) + 1)
_di[_index] = {
"header": header,
"props": _val
}
- else:
- for _k, _v in _di.iteritems():
- if _v["header"] == header:
- _index = _k
-
- return _index
+ finally:
+ return _index
else:
- if not filter(lambda i: _di[i] == _val, _di):
- _index = unicode(len(_di.keys()) + 1)
- # on save, cast it as unicode
- _di[_index] = _val
- else:
- for _k, _v in _di.iteritems():
+ try:
+ _ = next(filter(lambda i: _di[i] == _val, _di))
+ # iterator not empty, find index
+ for _k, _v in _di.items():
if _v == _val:
_index = _k
-
- return _index
+ except StopIteration:
+ _index = str(len(_di.keys()) + 1)
+ # on save, cast it as str
+ _di[_index] = _val
+ finally:
+ return _index
def _safe_load(_f, _a):
@@ -79,7 +86,7 @@
_f
)
)
- return json.loads(_a.get_file(_f))
+ return json.loads(_a.get_file(_f, decode=True))
else:
return {}
@@ -92,8 +99,33 @@
class ReposInfo(object):
- repos = []
- _repofile = os.path.join(pkg_dir, "versions", _repos_info_archive)
+ init_done = False
+
+ def _init_vars(self):
+ self.repos = []
+
+ def _init_folders(self, arch_folder=None):
+ if arch_folder:
+ self._arch_folder = arch_folder
+ self._repofile = os.path.join(arch_folder, _repos_info_archive)
+ else:
+ self._arch_folder = os.path.join(pkg_dir, "versions")
+ self._repofile = os.path.join(
+ self._arch_folder,
+ _repos_info_archive
+ )
+
+ def __init__(self, arch_folder=None):
+ # perform inits
+ self._init_vars()
+ self._init_folders(arch_folder)
+ self.init_done = True
+
+ def __call__(self, *args, **kwargs):
+ if self.init_done:
+ return self
+ else:
+ return self.__init__(*args, **kwargs) or self
@staticmethod
def _ls_repo_page(url):
@@ -189,8 +221,10 @@
else:
# gather all of them
_tags, _ = self._ls_repo_page(base_url)
- _tags.remove('hotfix')
- _tags.remove('update')
+ if "hotfix" in _tags:
+ _tags.remove('hotfix')
+ if "update" in _tags:
+ _tags.remove('update')
# search tags in subfolders
_h_tags, _ = self._ls_repo_page(base_url + 'hotfix')
_u_tags, _ = self._ls_repo_page(base_url + 'update')
@@ -334,30 +368,46 @@
def get_repoinfo(self, tag):
_tgz = TGZFile(self._repofile)
- _buf = _tgz.get_file(tag + ext)
+ _buf = _tgz.get_file(tag + ext, decode=True)
return json.loads(_buf)
class RepoManager(object):
- # archives
- _arch_folder = os.path.join(pkg_dir, "versions")
- _versions_arch = os.path.join(_arch_folder, _repos_versions_archive)
- _desc_arch = os.path.join(_arch_folder, _pkg_desc_archive)
- _apps_filename = "apps.json"
+ init_done = False
- # repository index
- _repo_index = {}
- _mainteiners_index = {}
+ def _init_folders(self, arch_folder=None):
+ # override arch folder if needed
+ if arch_folder:
+ self._arch_folder = arch_folder
+ else:
+ self._arch_folder = os.path.join(pkg_dir, "versions")
- _apps = {}
+ self._versions_arch = os.path.join(
+ self._arch_folder,
+ _repos_versions_archive
+ )
+ self._desc_arch = os.path.join(self._arch_folder, _pkg_desc_archive)
- # init package versions storage
- _versions_mirantis = {}
- _versions_other = {}
+ def _init_vars(self, info_class):
+ # RepoInfo instance init
+ if info_class:
+ self._info_class = info_class
+ else:
+ self._info_class = ReposInfo()
+ # archives
+ self._apps_filename = "apps.json"
- def __init__(self):
- # Ensure that versions folder exists
- logger_cli.debug(ensure_folder_exists(self._arch_folder))
+ # repository index
+ self._repo_index = {}
+ self._mainteiners_index = {}
+
+ self._apps = {}
+
+ # init package versions storage
+ self._versions_mirantis = {}
+ self._versions_other = {}
+
+ def _init_archives(self):
# Init version files
self.versionstgz = TGZFile(
self._versions_arch,
@@ -394,6 +444,22 @@
self.versionstgz
)
+ def __init__(self, arch_folder=None, info_class=None):
+ # Perform inits
+ self._init_vars(info_class)
+ self._init_folders(arch_folder)
+ # Ensure that versions folder exists
+ logger_cli.debug(ensure_folder_exists(self._arch_folder))
+ # Preload/create archives
+ self._init_archives()
+ self.init_done = True
+
+ def __call__(self, *args, **kwargs):
+ if self.init_done:
+ return self
+ else:
+ return self.__init__(*args, **kwargs) or self
+
def _create_repo_header(self, p):
_header = "_".join([
p['tag'],
@@ -504,14 +570,14 @@
due to huge resulting file size and slow processing
"""
# init gzip and downloader
- _info = ReposInfo().get_repoinfo(tag)
+ _info = self._info_class.get_repoinfo(tag)
# calculate Packages.gz files to process
_baseurl = _info.pop("baseurl")
_total_components = len(_info.keys()) - 1
_ubuntu_package_repos = 0
_other_repos = 0
- for _c, _d in _info.iteritems():
- for _ur, _l in _d.iteritems():
+ for _c, _d in _info.items():
+ for _ur, _l in _d.items():
if _ur in ubuntu_releases:
_ubuntu_package_repos += len(_l)
elif _ur != 'url':
@@ -531,12 +597,12 @@
_index = 0
_processed = 0
_new = 0
- for _c, _d in _info.iteritems():
+ for _c, _d in _info.items():
# we do not need url here, just get rid of it
if 'url' in _d:
_d.pop('url')
# _url = if 'url' in _d else _baseurl + _c
- for _ur, _l in _d.iteritems():
+ for _ur, _l in _d.items():
# iterate package collections
for _p in _l:
# descriptions
@@ -564,6 +630,8 @@
)
)
continue
+ else:
+ _raw = _raw.decode("utf-8")
_progress.write_progress(
_index,
note="/ {} {} {} {} {}, {}/{}".format(
@@ -728,11 +796,9 @@
def build_repos(self, url, tag=None):
"""Builds versions data for selected tag, or for all of them
"""
- # Init the ReposInfo class and check if all files are present
- _repos = ReposInfo()
# recoursively walk the mirrors
# and gather all of the repos for 'tag' or all of the tags
- _repos.fetch_repos(url, tag=tag)
+ self._info_class.fetch_repos(url, tag=tag)
def _build_action(self, url, tags):
for t in tags:
@@ -741,7 +807,7 @@
def get_available_tags(self, tag=None):
# Populate action tags
- major, updates, hotfix = ReposInfo().list_tags(splitted=True)
+ major, updates, hotfix = self._info_class.list_tags(splitted=True)
_tags = []
if tag in major:
@@ -767,14 +833,14 @@
logger_cli.info("# No action set, nothing to do")
# See if this is a list action
if action == "list":
- _all = ReposInfo().list_tags()
+ _all = self._info_class.list_tags()
if _all:
# Print pretty list and exit
logger_cli.info("# Tags available at '{}':".format(url))
for t in _all:
_ri = self._repo_index
_isparsed = any(
- [k for k, v in _ri.iteritems()
+ [k for k, v in _ri.items()
if v['props']['tag'] == t]
)
if _isparsed:
@@ -862,8 +928,8 @@
_rows = []
for _p in versions.keys():
_vs = versions[_p]
- for _v, _d1 in _vs.iteritems():
- for _md5, _info in _d1.iteritems():
+ for _v, _d1 in _vs.items():
+ for _md5, _info in _d1.items():
if _all or name == _info['app']:
_s_max = max(len(_info['section']), _s_max)
_a_max = max(len(_info['app']), _a_max)
@@ -993,21 +1059,21 @@
and filters them using keys above
"""
if tag:
- tag = unicode(tag) if not isinstance(tag, unicode) else tag
+ tag = str(tag) if not isinstance(tag, str) else tag
_out = {}
_vs = self.get_package_versions(name, tagged=True)
# iterate to filter out keywords
- for s, apps in _vs.iteritems():
- for a, _tt in apps.iteritems():
- for t, vs in _tt.iteritems():
+ for s, apps in _vs.items():
+ for a, _tt in apps.items():
+ for t, vs in _tt.items():
# filter tags
if tag and t != tag and t.rsplit('.', 1)[0] != tag:
continue
# Skip hotfix tag
if t == tag + ".hotfix":
continue
- for v, rp in vs.iteritems():
- for h, p in rp.iteritems():
+ for v, rp in vs.items():
+ for h, p in rp.items():
# filter headers with all keywords matching
_h = re.split(r"[\-\_]+", h)
_included = all([kw in _h for kw in include])
@@ -1038,9 +1104,9 @@
# insert repo data, insert props into headers place
_package = {}
if tagged:
- for _v, _d1 in _vs.iteritems():
+ for _v, _d1 in _vs.items():
# use tag as a next step
- for _md5, _info in _d1.iteritems():
+ for _md5, _info in _d1.items():
_s = _info['section']
_a = _info['app']
for _pair in _info['repo']:
@@ -1061,8 +1127,8 @@
_rp
)
else:
- for _v, _d1 in _vs.iteritems():
- for _md5, _info in _d1.iteritems():
+ for _v, _d1 in _vs.items():
+ for _md5, _info in _d1.items():
_s = _info['section']
_a = _info['app']
for _pair in _info['repo']:
@@ -1079,7 +1145,7 @@
def parse_repos(self):
# all tags to check
- major, updates, hotfix = ReposInfo().list_tags(splitted=True)
+ major, updates, hotfix = self._info_class.list_tags(splitted=True)
# major tags
logger_cli.info("# Processing major tags")
diff --git a/cfg_checker/modules/packages/versions.py b/cfg_checker/modules/packages/versions.py
index 7fae9fc..542c0e4 100644
--- a/cfg_checker/modules/packages/versions.py
+++ b/cfg_checker/modules/packages/versions.py
@@ -351,8 +351,8 @@
self.target = r
elif i > r:
# both are newer, same target
- self.status = const.VERSION_UP
- self.action = const.ACT_NA
+ self.status = const.VERSION_WARN
+ self.action = const.ACT_REPO
elif i == r:
# all is ok
self.status = const.VERSION_OK
@@ -372,10 +372,3 @@
# and we need to update per-part status
self.source.update_parts(self.target, self.status)
-
- @staticmethod
- def deb_lower(_s, _t):
- if _t.debian and _t.debian > _s.debian:
- return True
- else:
- return False
diff --git a/cfg_checker/modules/reclass/__init__.py b/cfg_checker/modules/reclass/__init__.py
index 4b8b667..88b287e 100644
--- a/cfg_checker/modules/reclass/__init__.py
+++ b/cfg_checker/modules/reclass/__init__.py
@@ -4,9 +4,9 @@
from cfg_checker.helpers import args_utils
from cfg_checker.reports import reporter
-import comparer
+from . import comparer
-import validator
+from . import validator
command_help = "Reclass related checks and reports"
diff --git a/cfg_checker/modules/reclass/comparer.py b/cfg_checker/modules/reclass/comparer.py
index 8ef8894..47e4baf 100644
--- a/cfg_checker/modules/reclass/comparer.py
+++ b/cfg_checker/modules/reclass/comparer.py
@@ -5,8 +5,9 @@
import itertools
import os
+from functools import reduce
+
from cfg_checker.common import logger, logger_cli
-from cfg_checker.reports import reporter
import yaml
@@ -197,13 +198,13 @@
# use ifilterfalse to compare lists of dicts
try:
_removed = list(
- itertools.ifilterfalse(
+ itertools.filterfalse(
lambda x: x in dict2[k],
dict1[k]
)
)
_added = list(
- itertools.ifilterfalse(
+ itertools.filterfalse(
lambda x: x in dict1[k],
dict2[k]
)
@@ -271,9 +272,10 @@
except TypeError as e:
logger.warning(
"One of the values is not a dict: "
- "{}, {}".format(
+ "{}, {}; {}".format(
str(dict1),
- str(dict2)
+ str(dict2),
+ str(e)
))
match = False
if not match:
@@ -331,38 +333,3 @@
_diff_report["diff_names"] = [self.model_name_1, self.model_name_2]
return _diff_report
-
- def compare_models(self):
- # Do actual compare using model names from the class
- self.load_model_tree(
- self.model_name_1,
- self.model_path_1
- )
- self.load_model_tree(
- self.model_name_2,
- self.model_path_2
- )
- # Models should have similar structure to be compared
- # classes/system
- # classes/cluster
- # nodes
-
- diffs = self.generate_model_report_tree()
-
- report_file = \
- self.model_name_1 + "-vs-" + self.model_name_2 + ".html"
- # HTML report class is post-callable
- report = reporter.ReportToFile(
- reporter.HTMLModelCompare(),
- report_file
- )
- logger_cli.info("...generating report to {}".format(report_file))
- # report will have tabs for each of the comparable entities in diffs
- report({
- "nodes": {},
- "rc_diffs": diffs,
- })
- # with open("./gen_tree.json", "w+") as _out:
- # _out.write(json.dumps(mComparer.generate_model_report_tree))
-
- return
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index ec20f6a..c261752 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -35,7 +35,7 @@
try:
_keys = self.salt.list_keys()
_str = []
- for _k, _v in _keys.iteritems():
+ for _k, _v in _keys.items():
_str.append("{}: {}".format(_k, len(_v)))
logger_cli.info("-> keys collected: {}".format(", ".join(_str)))
@@ -92,10 +92,12 @@
)
)
# get master node fqdn
- _filtered = filter(
- lambda nd: self.nodes[nd]['role'] == const.all_roles_map['cfg'],
- self.nodes
- )
+ # _filtered = filter(
+ # lambda nd: self.nodes[nd]['role'] == const.all_roles_map['cfg'],
+ # self.nodes
+ # )
+ _role = const.all_roles_map['cfg']
+ _filtered = [n for n, v in self.nodes.items() if v['role'] == _role]
if len(_filtered) < 1:
raise SaltException(
"No master node detected! Check/Update node role map."
@@ -161,7 +163,7 @@
else:
_nodes = self.nodes
_result = self.execute_cmd_on_active_nodes(cmd, nodes=nodes)
- for node, data in _nodes.iteritems():
+ for node, data in _nodes.items():
if node in self.skip_list:
logger_cli.debug(
@@ -200,7 +202,7 @@
)
_result = self.salt.pillar_get(self.active_nodes_compound, pillar_path)
self.not_responded = []
- for node, data in self.nodes.iteritems():
+ for node, data in self.nodes.items():
if node in self.skip_list:
logger_cli.debug(
"... '{}' skipped while collecting '{}'".format(
diff --git a/cfg_checker/reports/reporter.py b/cfg_checker/reports/reporter.py
index fa5fc72..3d3ede3 100644
--- a/cfg_checker/reports/reporter.py
+++ b/cfg_checker/reports/reporter.py
@@ -110,9 +110,7 @@
r['arch']
]) + ", "
# maintainer w/o email
- _m = r['maintainer'][:r['maintainer'].find('<')-1]
- _m_ascii = _m.encode('ascii', errors="xmlcharrefreplace")
- _text += _m_ascii
+ _text += ascii(r['maintainer'][:r['maintainer'].find('<')-1])
# newline
_text += "<br />"
return _text
@@ -215,17 +213,21 @@
def _extend_data(self, data):
def get_bytes(value):
- if value[-1] == 'G':
- return int(float(value[:-1]) * 1024 * 1024 * 1024)
- elif value[-1] == 'M':
- return int(float(value[:-1]) * 1024 * 1024)
- elif value[-1] == 'K':
- return int(float(value[:-1]) * 1024)
- else:
+ _char = value[-1]
+ _ord = ord(_char)
+ if _ord > 47 and _ord < 58:
+ # bytes comes with no Char
return int(value)
+ else:
+ _sizes = ["*", "K", "M", "G", "T"]
+ _flo = float(value[:-1])
+ _pwr = 1
+ if _char in _sizes:
+ _pwr = _sizes.index(_char)
+ return int(_flo * 1024**_pwr)
def _dmidecode(_dict, type=0):
- _key = "dmi"
+ # _key = "dmi"
_key_r = "dmi_r"
_f_cmd = salt_master.get_cmd_for_nodes
_cmd = "dmidecode -t {}".format(type)
@@ -234,7 +236,7 @@
pass
def _lsblk(_dict):
- _key = "lsblk"
+ # _key = "lsblk"
_key_r = "lsblk_raw"
_f_cmd = salt_master.get_cmd_for_nodes
_columns = [
@@ -261,7 +263,7 @@
_cmd = "lscpu | sed -n '/\\:/s/ \\+/ /gp'"
_f_cmd(_cmd, _key_r, target_dict=_dict)
# parse them and put into dict
- for node, dt in _dict.iteritems():
+ for node, dt in _dict.items():
dt[_key] = {}
if dt['status'] == DOWN:
continue
@@ -291,7 +293,7 @@
_cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp'"
_f_cmd(_cmd, _key_r, target_dict=_dict)
# parse them and put into dict
- for node, dt in _dict.iteritems():
+ for node, dt in _dict.items():
dt[_key] = {}
if dt['status'] == DOWN:
continue
@@ -323,7 +325,7 @@
_f_cmd = salt_master.get_cmd_for_nodes
_cmd = "service --status-all"
_f_cmd(_cmd, _key_r, target_dict=_dict)
- for node, dt in _dict.iteritems():
+ for node, dt in _dict.items():
dt[_key] = {}
if dt['status'] == DOWN:
continue
@@ -386,7 +388,7 @@
_softnet_interval
)
_f_cmd(_cmd, _key_r, target_dict=_dict)
- for node, dt in _dict.iteritems():
+ for node, dt in _dict.items():
_cpuindex = 1
_add_mode = True
# totals for start mark
@@ -438,7 +440,7 @@
dt[_key]["total"][i] += _c[i]
_cpuindex += 1
# finally, subtract initial totals
- for k, v in dt[_key].iteritems():
+ for k, v in dt[_key].items():
if k != "total":
dt[_key][k] = [v[i] / 5. for i in range(len(v))]
else:
@@ -483,7 +485,7 @@
"disk_raw",
target_dict=data["nodes"]
)
- for dt in data["nodes"].itervalues():
+ for dt in data["nodes"].values():
dt["disk"] = {}
dt["disk_max_dev"] = None
if dt['status'] == DOWN:
@@ -516,8 +518,8 @@
_d[_t[0]]['f'] = ""
# prepare networks data for report
- for net, net_v in data['map'].iteritems():
- for node, ifs in net_v.iteritems():
+ for net, net_v in data['map'].items():
+ for node, ifs in net_v.items():
for d in ifs:
_err = "fail"
d['interface_error'] = _err if d['interface_error'] else ""
diff --git a/cover.sh b/cover.sh
old mode 100644
new mode 100755
index 49c73e5..de3ce8f
--- a/cover.sh
+++ b/cover.sh
@@ -1,2 +1,3 @@
#!/bin/bash
-coverage run --source cfg_checker setup.py test && coverage report
+PYTHONPATH=. coverage run --source=cfg_checker ./runtests.py
+coverage xml && coverage report
diff --git a/lastprofile.dat b/lastprofile.dat
new file mode 100644
index 0000000..f60981c
--- /dev/null
+++ b/lastprofile.dat
Binary files differ
diff --git a/profile.sh b/profile.sh
new file mode 100755
index 0000000..2f3f725
--- /dev/null
+++ b/profile.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+python -m cProfile -o lastprofile.dat ./runtests.py
+#echo -e "sort tottime\nstats 30" | python -m pstats lastprofile.dat
diff --git a/pstats.sh b/pstats.sh
new file mode 100755
index 0000000..1b2fa68
--- /dev/null
+++ b/pstats.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+#python -m cProfile -o lastprofile.dat ./tests/runtests.py
+echo -e "sort tottime\nstats 50" | python -m pstats lastprofile.dat
diff --git a/runtests.py b/runtests.py
new file mode 100644
index 0000000..3da567f
--- /dev/null
+++ b/runtests.py
@@ -0,0 +1,55 @@
+import os
+import shutil
+import unittest
+
+from unittest import TextTestResult, TextTestRunner
+from tests.test_base import tests_dir
+from tests.test_packages import _res_dir
+
+
+class MyTestResult(TextTestResult):
+ def getDescription(self, test):
+ # return super().getDescription(test)
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return '\n'.join((str(test), doc_first_line))
+ else:
+ # return str(test)
+ return "{}.{}.{}".format(
+ test.__class__.__module__,
+ test.__class__.__name__,
+ test._testMethodName
+ )
+
+
+class MyTestRunner(TextTestRunner):
+ resultclass = MyTestResult
+
+
+def _cleanup():
+ _fpath = [
+ "repo.info.tgz",
+ "repo.versions.tgz",
+ "pkg.descriptions.tgz"
+ ]
+ for _p in _fpath:
+ _fp = os.path.join(_res_dir, _p)
+ if os.path.exists(_fp):
+ os.remove(_fp)
+
+ _ferr = os.path.join(_res_dir, "fakeerrors")
+ if os.path.exists(_ferr):
+ shutil.rmtree(_ferr)
+
+
+if __name__ == '__main__':
+ # remove old files if exists
+ _cleanup()
+
+ # start tests
+ suite = unittest.TestLoader().discover(tests_dir, "test_*", tests_dir)
+ runner = MyTestRunner(verbosity=3)
+ runner.run(suite)
+
+ # cleanup after testrun
+ _cleanup()
diff --git a/scripts/ping.py b/scripts/ping.py
index 4e2778d..fae4f65 100644
--- a/scripts/ping.py
+++ b/scripts/ping.py
@@ -122,7 +122,7 @@
pool = Pool(15)
# prepare threaded map
_param_map = []
- for _node, _data in _packets.iteritems():
+ for _node, _data in _packets.items():
if isinstance(_data, list):
for target in _data:
_param_map.append(target)
diff --git a/scripts/sniffer.py b/scripts/sniffer.py
index 9b6830c..8657f8a 100644
--- a/scripts/sniffer.py
+++ b/scripts/sniffer.py
@@ -121,15 +121,15 @@
print "\n\n=== [+] ------------ Ethernet Header----- [+]"
# print data on terminal
- for i in u.eth_header(pkt[0][0:14]).iteritems():
+ for i in u.eth_header(pkt[0][0:14]).items():
a, b = i
print "{} : {} | ".format(a, b),
print "\n=== [+] ------------ IP Header ------------[+]"
- for i in u.ip_header(pkt[0][14:34]).iteritems():
+ for i in u.ip_header(pkt[0][14:34]).items():
a, b = i
print "{} : {} | ".format(a, b),
print "\n== [+] ------------ Tcp Header ----------- [+]"
- for i in u.tcp_header(pkt[0][34:54]).iteritems():
+ for i in u.tcp_header(pkt[0][34:54]).items():
a, b = i
print "{} : {} | ".format(a, b),
print "\n===== Data ===="
diff --git a/tests/mocks.py b/tests/mocks.py
new file mode 100644
index 0000000..863def5
--- /dev/null
+++ b/tests/mocks.py
@@ -0,0 +1,292 @@
+import json
+import os
+
+from tests.test_base import tests_dir
+
+
+# Prepare fake filenames and files
+_res_dir = os.path.join(tests_dir, 'res')
+
+
+# preload file from res
+def _load_from_res(_filename, mode='rt'):
+ fake_file_path = os.path.join(_res_dir, _filename)
+ _patch_buf = []
+ with open(fake_file_path, mode) as _f:
+ _patch_buf = _f.read()
+
+ return _patch_buf
+
+
+_fakepage_template = _load_from_res(os.path.join(_res_dir, "_fakepage.html"))
+_fakepage_empty = _load_from_res(os.path.join(_res_dir, "_fakeempty.html"))
+_fake_keys = json.loads(_load_from_res("_fake_keys.json"))
+_fake_pkg_versions = _load_from_res("_fake_pkg_versions.json")
+_fake_network_data = _load_from_res("_fake_net_data.json")
+
+
+def _prepare_result_for_target(_tgt, result=True):
+ # prepare True answer for target node if we have it in fakes list
+ _nodes = _fake_keys["return"]["minions"]
+ _m = {}
+ if _tgt == "*":
+ for _n in _nodes:
+ _m[_n] = result
+ elif _tgt in _nodes:
+ # single target
+ _m[_tgt] = result
+ elif " or " in _tgt:
+ # compound
+ _t_list = _tgt.split(" or ")
+ for _t in _t_list:
+ _m[_t] = result
+ return _m
+
+
+class MockResponse:
+ def __init__(self, _buffer, status_code):
+ if _buffer is None:
+ self.content = _buffer
+ self.text = _buffer
+ self._json = _buffer
+ elif isinstance(_buffer, bytes):
+ self.content = _buffer
+ self.text = None
+ self._json = None
+ elif isinstance(_buffer, dict):
+ _dump = json.dumps(_buffer)
+ self.content = _dump.encode('utf-8')
+ self.text = _dump
+ self._json = _buffer
+ else:
+ self.content = _buffer.encode('utf-8')
+ self.text = _buffer
+ self._json = None
+
+ self.status_code = status_code
+ self.reason = "OK" if self.status_code == 200 else "FAIL"
+
+ def content(self):
+ return self.content
+
+ def text(self):
+ return self.text
+
+ def json(self):
+ if not self._json:
+ try:
+ _j = json.loads(self.text)
+ except Exception:
+ raise Exception("Failed to create json {}".format(self.text))
+ return _j
+ else:
+ return self._json
+
+ def reason(self):
+ return self.reason
+
+ def ok(self):
+ return True if self.status_code == 200 else False
+
+ def cookies(self):
+ return None
+
+
+def mocked_salt_post(*args, **kwargs):
+ _rest_handle = args[0].split('/', 3)[3]
+ if _rest_handle == "login":
+ # return fake token
+ _fake_token = {
+ "return":
+ [
+ {
+ "perms": [
+ ".*",
+ "@local",
+ "@wheel",
+ "@runner",
+ "@jobs"
+ ],
+ "start": 0,
+ "token": "faketoken",
+ "expire": 0,
+ "user": "salt",
+ "eauth": "pam"
+ }
+ ]
+ }
+
+ return MockResponse(_fake_token, 200)
+ elif not _rest_handle and "json" in kwargs:
+ # handle functions
+ _funs = kwargs["json"]
+ if isinstance(_funs, list):
+ if len(_funs) > 1:
+ raise Exception("Multiple commands in salt request")
+ else:
+ _f = _funs[0]
+ _t = _f["tgt"]
+ _a = _f["arg"] if "arg" in _f else ""
+ _f = _f["fun"]
+ if _f == "test.ping":
+ # prepare answer to ping
+ _val = _prepare_result_for_target(_t)
+ return MockResponse({"return": [_val]}, 200)
+ elif _f == "pillar.get":
+ # pillar get response, preload data
+ _j = json.loads(_load_from_res("_fake_pillars.json"))
+ _result = {"return": []}
+ if _t in _j.keys():
+ # target is single
+ _j = _j[_t]
+ _r = {_t: _j[_a]} if _a in _j else {}
+ else:
+ # target is a compound
+ _t_list = _t.split(" or ")
+ _r = {}
+ for _t in _t_list:
+ _val = _j[_t][_a] if _a in _j[_t] else {}
+ _r[_t] = _val
+ _result["return"].append(_r)
+ return MockResponse(_result, 200)
+ elif _f == "cmd.run":
+ # determine which script is called
+ _args = _a.split()
+ if _args[0] == "python" and _args[1].endswith("pkg_versions.py"):
+ _val = _prepare_result_for_target(_t, _fake_pkg_versions)
+ elif _args[0] == "python" and _args[1].endswith("ifs_data.py"):
+ _val = _prepare_result_for_target(_t, _fake_network_data)
+ elif _args[0] == "uname":
+ _val = _prepare_result_for_target(_t, "FakeLinux")
+ elif _args[0] == "lscpu":
+ _val = _prepare_result_for_target(
+ _t,
+ _load_from_res("_fake_lscpu.txt")
+ )
+ elif _args[0] == "free":
+ _val = _prepare_result_for_target(
+ _t,
+ "Mem: 1.9G 1.4G 84M 22M 524M 343M"
+ )
+ elif _args[0] == "df":
+ _val = _prepare_result_for_target(
+ _t,
+ _load_from_res("_fake_df.txt")
+ )
+ elif _args[0] == "service":
+ _val = _prepare_result_for_target(
+ _t,
+ _load_from_res("_fake_service_status.txt")
+ )
+ elif _args[0] == "virsh":
+ _val = _prepare_result_for_target(
+ _t,
+ _load_from_res("_fake_kvm_instances.txt")
+ )
+ elif _args[0] == "cat" and \
+ _args[1].endswith("/proc/net/softnet_stat;"):
+ _val = _prepare_result_for_target(
+ _t,
+ _load_from_res("_fake_softnet_stats.txt")
+ )
+ return MockResponse({"return": [_val]}, 200)
+ elif _f in ["file.mkdir", "file.touch", "file.write", "cp.get_file"]:
+ _val = _prepare_result_for_target(_t)
+ return MockResponse({"return": [_val]}, 200)
+
+ return MockResponse(None, 404)
+
+
+def mocked_salt_get(*args, **kwargs):
+ _rest_handle = args[0].split('/', 3)[3]
+ if _rest_handle == "keys":
+ # return list of minions
+ _fake_keys = _load_from_res("_fake_keys.json")
+ return MockResponse(_fake_keys, 200)
+ elif _rest_handle == "minions":
+ # list of minions
+ _list = _load_from_res("_fake_minions.json")
+ return MockResponse(_list, 200)
+ return MockResponse(None, 404)
+
+
+def mocked_package_get(*args, **kwargs):
+ # fake page _placeholder_
+ _placeholder = "_placeholder_"
+ _type = "_type_"
+ # fake domain
+ _url = "http://fakedomain.com"
+ # folders list and file
+ _folders = [
+ "2099.0.0",
+ "ubuntu",
+ "dists",
+ "trusty",
+ "main",
+ "binary-amd64"
+ ]
+ _file = "Packages.gz"
+
+ # if this is a fakedomain for mirrors
+ if args[0].startswith(_url):
+ # cut url
+ _u = args[0].replace(_url, "")
+ # detect folder
+ _split_res = _u.rsplit('/', 2)
+ if len(_split_res) > 2 and _u[-1] != '/':
+ _current_page = _u.rsplit('/', 2)[2]
+ else:
+ _current_page = _u.rsplit('/', 2)[1]
+ # if this is main index page, take first
+ if len(_current_page) == 0:
+ # initial folder
+ _p = _fakepage_template
+ _p = _p.replace(_placeholder, _folders[0] + "/")
+ _p = _p.replace(_type, "-")
+ # return fake page
+ return MockResponse(_p, 200)
+ # index in array
+ elif _current_page in _folders:
+ # simulate folder walk
+ _ind = _folders.index(_current_page)
+ # get next one
+ if _ind+1 < len(_folders):
+ # folder
+ _p = _fakepage_template
+ _p = _p.replace(_placeholder, _folders[_ind+1] + "/")
+ _p = _p.replace(_type, "-")
+ else:
+ # file
+ _p = _fakepage_template
+ _p = _p.replace(_placeholder, _file)
+ # type is detected as '-' for folder
+ # and <number> for file
+ _p = _p.replace(_type, "999")
+ # supply next fake page
+ return MockResponse(_p, 200)
+ elif _current_page == _file:
+ # just package.gz file
+ # preload file
+ _gzfile = _load_from_res("Packages.gz", mode='rb')
+ return MockResponse(_gzfile, 200)
+ elif _current_page == "hotfix" or _current_page == "update":
+ return MockResponse(_fakepage_empty, 200)
+
+ return MockResponse(None, 404)
+
+
+_shell_salt_path = "cfg_checker.common.salt_utils.shell"
+
+
+def mocked_shell(*args, **kwargs):
+ _args = args[0].split()
+ # _fake_salt_response = ["cfg01.fakedomain.com"]
+ _args = _args[1:] if _args[0] == "sudo" else _args
+ if _args[0].startswith("salt-call"):
+ # local calls
+ _json = {"local": None}
+ if _args[-1].startswith("_param:salt_api_password"):
+ _json["local"] = "fakepassword"
+ return json.dumps(_json)
+
+ return "emptyfakeresponse"
diff --git a/tests/res/2019.2.7.json b/tests/res/2019.2.7.json
new file mode 100644
index 0000000..bb13dbe
--- /dev/null
+++ b/tests/res/2019.2.7.json
@@ -0,0 +1,798 @@
+{
+ "all": {
+ "url": "http://mirror.mirantis.com/2019.2.7/ubuntu/",
+ "ubuntu": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-backports/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-backports"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-backports/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-backports"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-backports/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-backports"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-backports/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-backports"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-proposed/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-proposed"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-proposed/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-proposed"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-proposed/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-proposed"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-proposed/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-proposed"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-security/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-security"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-security/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-security"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-security/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-security"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-security/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-security"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-updates/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-updates"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-updates/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-updates"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-updates/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-updates"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/trusty-updates/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty-updates"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-backports/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-backports"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-backports/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-backports"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-backports/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-backports"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-backports/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-backports"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-proposed/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-proposed"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-proposed/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-proposed"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-proposed/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-proposed"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-proposed/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-proposed"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-security/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-security"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-security/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-security"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-security/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-security"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-security/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-security"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-updates/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-updates"
+ },
+ {
+ "type": "multiverse",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-updates/multiverse/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-updates"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-updates/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-updates"
+ },
+ {
+ "type": "universe",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ubuntu/dists/xenial-updates/universe/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial-updates"
+ }
+ ]
+ },
+ "glusterfs-3.8": {
+ "url": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.8/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.8/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "extra": {
+ "url": "http://mirror.mirantis.com/2019.2.7/extra/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/extra/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/extra/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "glusterfs-5": {
+ "url": "http://mirror.mirantis.com/2019.2.7/glusterfs-5/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/glusterfs-5/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "saltstack-2016.11": {
+ "url": "http://mirror.mirantis.com/2019.2.7/saltstack-2016.11/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack-2016.11/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "openstack-pike": {
+ "url": "http://mirror.mirantis.com/2019.2.7/openstack-pike/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-pike/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-pike/xenial/dists/xenial/main/binary-arm64/Packages.gz",
+ "arch": "arm64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "mcp-tools": {
+ "url": "http://mirror.mirantis.com/2019.2.7/mcp-tools/",
+ "xenial": [
+ {
+ "type": "extra",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/mcp-tools/xenial/dists/nightly/extra/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "nightly"
+ }
+ ]
+ },
+ "kubernetes-extra": {
+ "url": "http://mirror.mirantis.com/2019.2.7/kubernetes-extra/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/kubernetes-extra/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "cassandra": {
+ "url": "http://mirror.mirantis.com/2019.2.7/cassandra/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/cassandra/trusty/dists/21x/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "21x"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/cassandra/xenial/dists/21x/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "21x"
+ }
+ ]
+ },
+ "saltstack-2017.7": {
+ "url": "http://mirror.mirantis.com/2019.2.7/saltstack-2017.7/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack-2017.7/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack-2017.7/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "opencontrail-4.1": {
+ "url": "http://mirror.mirantis.com/2019.2.7/opencontrail-4.1/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/opencontrail-4.1/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "opencontrail-4.0": {
+ "url": "http://mirror.mirantis.com/2019.2.7/opencontrail-4.0/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/opencontrail-4.0/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "openstack-ocata": {
+ "url": "http://mirror.mirantis.com/2019.2.7/openstack-ocata/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-ocata/xenial/dists/ocata/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "ocata"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-ocata/xenial/dists/ocata/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "ocata"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-ocata/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-ocata/xenial/dists/xenial/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "confluent-4.1": {
+ "url": "http://mirror.mirantis.com/2019.2.7/confluent-4.1/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/confluent-4.1/xenial/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ]
+ },
+ "openstack-queens": {
+ "url": "http://mirror.mirantis.com/2019.2.7/openstack-queens/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-queens/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-queens/xenial/dists/xenial/main/binary-arm64/Packages.gz",
+ "arch": "arm64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "docker-1.x": {
+ "url": "http://mirror.mirantis.com/2019.2.7/docker-1.x/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/docker-1.x/xenial/dists/ubuntu-xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "ubuntu-xenial"
+ }
+ ]
+ },
+ "openstack-mitaka": {
+ "url": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/trusty/dists/mitaka/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "mitaka"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/trusty/dists/mitaka/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "mitaka"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/trusty/dists/trusty/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/xenial/dists/mitaka/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "mitaka"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/xenial/dists/mitaka/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "mitaka"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-mitaka/xenial/dists/xenial/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "jenkins": {
+ "url": "http://mirror.mirantis.com/2019.2.7/jenkins/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/jenkins/xenial/dists/binary/main/binary-all/Packages.gz",
+ "arch": "all",
+ "ubuntu-release": "binary"
+ }
+ ]
+ },
+ "glusterfs-3.12": {
+ "url": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.12/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.12/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "glusterfs-3.10": {
+ "url": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.10/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.10/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/glusterfs-3.10/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "maas-ephemeral-v3": {
+ "url": "http://mirror.mirantis.com/2019.2.7/maas-ephemeral-v3/",
+ "xenial": []
+ },
+ "baseurl": "http://mirror.mirantis.com/2019.2.7/",
+ "influxdb": {
+ "url": "http://mirror.mirantis.com/2019.2.7/influxdb/",
+ "xenial": [
+ {
+ "type": "stable",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/influxdb/xenial/dists/xenial/stable/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "percona": {
+ "url": "http://mirror.mirantis.com/2019.2.7/percona/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/percona/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "opencontrail-3.2": {
+ "url": "http://mirror.mirantis.com/2019.2.7/opencontrail-3.2/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/opencontrail-3.2/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/opencontrail-3.2/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "maas": {
+ "url": "http://mirror.mirantis.com/2019.2.7/maas/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/maas/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "saltstack": {
+ "url": "http://mirror.mirantis.com/2019.2.7/saltstack/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "td-agent": {
+ "url": "http://mirror.mirantis.com/2019.2.7/td-agent/",
+ "trusty": [
+ {
+ "type": "contrib",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/td-agent/trusty/dists/trusty/contrib/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "contrib",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/td-agent/xenial/dists/xenial/contrib/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "ceph-luminous": {
+ "url": "http://mirror.mirantis.com/2019.2.7/ceph-luminous/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/ceph-luminous/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "saltstack-2016.3": {
+ "url": "http://mirror.mirantis.com/2019.2.7/saltstack-2016.3/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack-2016.3/trusty/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/saltstack-2016.3/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "openstack-newton": {
+ "url": "http://mirror.mirantis.com/2019.2.7/openstack-newton/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-newton/xenial/dists/newton/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "newton"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-newton/xenial/dists/newton/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "newton"
+ },
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-newton/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ },
+ {
+ "type": "restricted",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/openstack-newton/xenial/dists/xenial/restricted/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "elasticsearch-6.x": {
+ "url": "http://mirror.mirantis.com/2019.2.7/elasticsearch-6.x/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/elasticsearch-6.x/xenial/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ]
+ },
+ "aptly": {
+ "url": "http://mirror.mirantis.com/2019.2.7/aptly/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/aptly/trusty/dists/squeeze/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "squeeze"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/aptly/xenial/dists/squeeze/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "squeeze"
+ }
+ ]
+ },
+ "salt-formulas": {
+ "url": "http://mirror.mirantis.com/2019.2.7/salt-formulas/",
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/salt-formulas/xenial/dists/xenial/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ },
+ "elasticsearch-5.x": {
+ "url": "http://mirror.mirantis.com/2019.2.7/elasticsearch-5.x/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/elasticsearch-5.x/trusty/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/elasticsearch-5.x/xenial/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ]
+ },
+ "elasticsearch-curator-5": {
+ "url": "http://mirror.mirantis.com/2019.2.7/elasticsearch-curator-5/",
+ "trusty": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/elasticsearch-curator-5/trusty/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ],
+ "xenial": [
+ {
+ "type": "main",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/elasticsearch-curator-5/xenial/dists/stable/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "stable"
+ }
+ ]
+ },
+ "docker": {
+ "url": "http://mirror.mirantis.com/2019.2.7/docker/",
+ "xenial": [
+ {
+ "type": "stable",
+ "filepath": "http://mirror.mirantis.com/2019.2.7/docker/xenial/dists/xenial/stable/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "xenial"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/tests/res/2099.0.0.json b/tests/res/2099.0.0.json
new file mode 100644
index 0000000..97530f6
--- /dev/null
+++ b/tests/res/2099.0.0.json
@@ -0,0 +1,13 @@
+{
+ "all": {
+ "url": "http://fakedomain.com/2099.0.0/fakerepo/",
+ "ubuntu": [
+ {
+ "type": "main",
+ "filepath": "http://fakedomain.com/2099.0.0/ubuntu/dists/trusty/main/binary-amd64/Packages.gz",
+ "arch": "amd64",
+ "ubuntu-release": "trusty"
+ }
+ ]
+ }
+}
diff --git a/tests/res/Packages.gz b/tests/res/Packages.gz
new file mode 100644
index 0000000..39d619f
--- /dev/null
+++ b/tests/res/Packages.gz
Binary files differ
diff --git a/tests/res/_fake_df.txt b/tests/res/_fake_df.txt
new file mode 100644
index 0000000..7a4fd35
--- /dev/null
+++ b/tests/res/_fake_df.txt
@@ -0,0 +1,7 @@
+/dev/mapper/vg0-root 20G 14G 5.2G 73%
+/dev/vda2 971M 70M 852M 8%
+/dev/mapper/vg0-home 93M 1.7M 85M 2%
+/dev/mapper/vg0-tmp 481M 2.3M 454M 1%
+/dev/mapper/vg0-var_tmp 477M 2.3M 448M 1%
+/dev/mapper/vg0-var_log 5.7G 551M 4.9G 10%
+/dev/mapper/vg0-var_log_audit 481M 2.3M 454M 1%
\ No newline at end of file
diff --git a/tests/res/_fake_keys.json b/tests/res/_fake_keys.json
new file mode 100644
index 0000000..027ea64
--- /dev/null
+++ b/tests/res/_fake_keys.json
@@ -0,0 +1,16 @@
+{
+ "return": {
+ "minions_pre": [],
+ "minions_rejected": [],
+ "minions_denied": [],
+ "local": [
+ "master.pem",
+ "master.pub"
+ ],
+ "minions": [
+ "cfg01.fakedomain.local",
+ "cmp01.fakedomain.local",
+ "ctl01.fakedomain.local"
+ ]
+ }
+}
diff --git a/tests/res/_fake_kvm_instances.txt b/tests/res/_fake_kvm_instances.txt
new file mode 100644
index 0000000..db8a4b6
--- /dev/null
+++ b/tests/res/_fake_kvm_instances.txt
@@ -0,0 +1,14 @@
+ 1 cid01.harlandclarke.local running
+ 2 dns01.harlandclarke.local running
+ 3 kmn01.harlandclarke.local running
+ 4 mdb01.harlandclarke.local running
+ 5 mtr01.harlandclarke.local running
+ 6 bmt01.harlandclarke.local running
+ 7 log01.harlandclarke.local running
+ 8 rgw01.harlandclarke.local running
+ 9 ctl01.harlandclarke.local running
+ 10 cmn01.harlandclarke.local running
+ 11 gtw01.harlandclarke.local running
+ 12 dbs01.harlandclarke.local running
+ 13 msg01.harlandclarke.local running
+ 14 mon01.harlandclarke.local running
\ No newline at end of file
diff --git a/tests/res/_fake_lscpu.txt b/tests/res/_fake_lscpu.txt
new file mode 100644
index 0000000..6f80c83
--- /dev/null
+++ b/tests/res/_fake_lscpu.txt
@@ -0,0 +1,25 @@
+Architecture: x86_64
+CPU op-mode(s): 32-bit, 64-bit
+Byte Order: Little Endian
+CPU(s): 1
+On-line CPU(s) list: 0
+Thread(s) per core: 1
+Core(s) per socket: 1
+Socket(s): 1
+NUMA node(s): 1
+Vendor ID: GenuineIntel
+CPU family: 6
+Model: 63
+Model name: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
+Stepping: 2
+CPU MHz: 2299.998
+BogoMIPS: 4599.99
+Virtualization: VT-x
+Hypervisor vendor: KVM
+Virtualization type: full
+L1d cache: 32K
+L1i cache: 32K
+L2 cache: 4096K
+L3 cache: 16384K
+NUMA node0 CPU(s): 0
+Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm cpuid_fault invpcid_single pti ssbd ibrs ibpb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat
\ No newline at end of file
diff --git a/tests/res/_fake_minions.json b/tests/res/_fake_minions.json
new file mode 100644
index 0000000..1cb356f
--- /dev/null
+++ b/tests/res/_fake_minions.json
@@ -0,0 +1,7064 @@
+{
+ "return": [
+ {
+ "cmp01.fakedomain.local": {
+ "biosversion": "1.10.2-1.1~u16.04+mcp2",
+ "kernel": "Linux",
+ "domain": "ozhurba-os-oc-cicd-sl.local",
+ "uid": 0,
+ "zmqversion": "4.1.4",
+ "kernelrelease": "4.15.0-43-generic",
+ "pythonpath": [
+ "/usr/bin",
+ "/usr/lib/python2.7",
+ "/usr/lib/python2.7/plat-x86_64-linux-gnu",
+ "/usr/lib/python2.7/lib-tk",
+ "/usr/lib/python2.7/lib-old",
+ "/usr/lib/python2.7/lib-dynload",
+ "/usr/local/lib/python2.7/dist-packages",
+ "/usr/lib/python2.7/dist-packages"
+ ],
+ "serialnumber": "c7b67a52-b1a5-4d50-9069-599cc0aad878",
+ "pid": 2253,
+ "telegraf": {
+ "remote_agent": {
+ "input": {},
+ "processor": {},
+ "dir": {
+ "config": "/srv/volumes/local/telegraf",
+ "config_d": "/srv/volumes/local/telegraf/telegraf.d"
+ },
+ "output": {}
+ },
+ "agent": {
+ "metric_batch_size": 1000,
+ "collection_jitter": 2,
+ "interval": 15,
+ "enabled": true,
+ "pkgs": [
+ "telegraf"
+ ],
+ "round_interval": false,
+ "output": {
+ "prometheus_client": {
+ "engine": "prometheus",
+ "bind": {
+ "port": 9126,
+ "address": "0.0.0.0"
+ },
+ "string_as_label": false
+ }
+ },
+ "input": {
+ "kernel": null,
+ "processes": null,
+ "nstat": {
+ "fieldpass": [
+ "packet_drop",
+ "time_squeeze"
+ ]
+ },
+ "swap": null,
+ "mem": null,
+ "ntp": {
+ "template": "ntp/files/telegraf.conf"
+ },
+ "system": null,
+ "http_listener": {
+ "read_timeout": "10s",
+ "bind": {
+ "port": 8186,
+ "address": "127.0.0.1"
+ },
+ "tagexclude": [
+ "hostname"
+ ],
+ "write_timeout": "10s"
+ },
+ "linux_sysctl_fs": null,
+ "diskio": null,
+ "procstat": {
+ "process": {
+ "contrail-vrouter-agent": {
+ "pattern": "contrail-vrouter-agent"
+ },
+ "sshd": {
+ "exe": "sshd"
+ },
+ "salt-minion": {
+ "pattern": "salt-minion"
+ },
+ "cron": {
+ "exe": "cron"
+ },
+ "contrail-nodemgr-vrouter": {
+ "pattern": "python.*contrail-nodemgr.*-vrouter"
+ },
+ "ntpd": {
+ "exe": "ntpd"
+ }
+ }
+ },
+ "net": null,
+ "disk": {
+ "ignore_fs": [
+ "aufs",
+ "rootfs",
+ "sysfs",
+ "proc",
+ "devtmpfs",
+ "devpts",
+ "tmpfs",
+ "fusectl",
+ "cgroup",
+ "overlay"
+ ]
+ },
+ "cpu": {
+ "totalcpu": true,
+ "percpu": false
+ },
+ "http_response": {
+ "contrail-node-manager": {
+ "address": "http://127.0.0.1:8102/"
+ },
+ "contrail-vrouter": {
+ "address": "http://127.0.0.1:8085/"
+ }
+ }
+ },
+ "metric_buffer_limit": 10000,
+ "processor": {},
+ "dir": {
+ "config": "/etc/telegraf",
+ "config_d": "/etc/telegraf/telegraf.d"
+ }
+ }
+ },
+ "ip_interfaces": {
+ "vethec13d320-3": [
+ "fe80::c87c:a9ff:fe17:a601"
+ ],
+ "veth880f8ede-a": [
+ "fe80::f0b6:bbff:fe72:d0cc"
+ ],
+ "veth256f51fd-c": [
+ "fe80::688f:17ff:fe12:c526"
+ ],
+ "veth18c9ae6b-9": [
+ "fe80::c2c:88ff:fe42:b3c8"
+ ],
+ "veth0ee2fae3-b": [
+ "fe80::70a6:c4ff:fef3:6955"
+ ],
+ "ens4": [
+ "10.10.100.5",
+ "fe80::f816:3eff:feee:9510"
+ ],
+ "ens5": [
+ "fe80::f816:3eff:fe6a:ff78"
+ ],
+ "ens6": [
+ "10.13.100.26",
+ "fe80::f816:3eff:fe3c:67ee"
+ ],
+ "ens3": [
+ "10.11.1.1",
+ "fe80::f816:3eff:fed0:8639"
+ ],
+ "veth7739e3f4-f": [
+ "fe80::a8f8:2eff:fe83:6dc5"
+ ],
+ "lo": [
+ "127.0.0.1",
+ "::1"
+ ],
+ "vethccc56015-8": [
+ "fe80::ccbf:97ff:fe1e:c4b4"
+ ],
+ "vethc03db88c-1": [
+ "fe80::7b:e3ff:fea7:a8ff"
+ ],
+ "veth67e6d989-e": [
+ "fe80::6c3f:19ff:feaf:edba"
+ ],
+ "vhost0": [
+ "10.12.1.1",
+ "fe80::f816:3eff:fe6a:ff78"
+ ],
+ "veth8342151a-7": [
+ "fe80::1c3e:f3ff:fed5:5872"
+ ],
+ "vethd53dd8ab-d": [
+ "fe80::e4e8:42ff:fec2:b2ed"
+ ],
+ "veth108c39b4-b": [
+ "fe80::784c:7cff:fee8:d005"
+ ],
+ "pkt0": [
+ "fe80::c52:1bff:feec:2a07"
+ ],
+ "veth625ca23d-8": [
+ "fe80::1c9d:65ff:fe06:979c"
+ ],
+ "vethcabb806e-4": [
+ "fe80::5c08:5aff:fe1b:2d3a"
+ ],
+ "vethc1f26111-e": [
+ "fe80::bc15:baff:fed1:b96e"
+ ],
+ "pkt3": [],
+ "pkt2": [],
+ "pkt1": [],
+ "veth783896a6-1": [
+ "fe80::ec0d:87ff:fefb:9241"
+ ]
+ },
+ "groupname": "root",
+ "fqdn_ip6": [],
+ "mem_total": 7976,
+ "saltversioninfo": [
+ 2017,
+ 7,
+ 8,
+ 0
+ ],
+ "SSDs": [],
+ "mdadm": [],
+ "id": "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "manufacturer": "OpenStack Foundation",
+ "osrelease": "16.04",
+ "ps": "ps -efHww",
+ "systemd": {
+ "version": "229",
+ "features": "+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ -LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN"
+ },
+ "fqdn": "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "uuid": "62cb8077-9932-42e2-95fc-27f611de39f6",
+ "ip6_interfaces": {
+ "vethec13d320-3": [
+ "fe80::c87c:a9ff:fe17:a601"
+ ],
+ "veth880f8ede-a": [
+ "fe80::f0b6:bbff:fe72:d0cc"
+ ],
+ "veth256f51fd-c": [
+ "fe80::688f:17ff:fe12:c526"
+ ],
+ "veth18c9ae6b-9": [
+ "fe80::c2c:88ff:fe42:b3c8"
+ ],
+ "veth0ee2fae3-b": [
+ "fe80::70a6:c4ff:fef3:6955"
+ ],
+ "ens4": [
+ "fe80::f816:3eff:feee:9510"
+ ],
+ "ens5": [
+ "fe80::f816:3eff:fe6a:ff78"
+ ],
+ "ens6": [
+ "fe80::f816:3eff:fe3c:67ee"
+ ],
+ "ens3": [
+ "fe80::f816:3eff:fed0:8639"
+ ],
+ "veth7739e3f4-f": [
+ "fe80::a8f8:2eff:fe83:6dc5"
+ ],
+ "lo": [
+ "::1"
+ ],
+ "vethccc56015-8": [
+ "fe80::ccbf:97ff:fe1e:c4b4"
+ ],
+ "vethc03db88c-1": [
+ "fe80::7b:e3ff:fea7:a8ff"
+ ],
+ "veth67e6d989-e": [
+ "fe80::6c3f:19ff:feaf:edba"
+ ],
+ "vhost0": [
+ "fe80::f816:3eff:fe6a:ff78"
+ ],
+ "veth8342151a-7": [
+ "fe80::1c3e:f3ff:fed5:5872"
+ ],
+ "vethd53dd8ab-d": [
+ "fe80::e4e8:42ff:fec2:b2ed"
+ ],
+ "veth108c39b4-b": [
+ "fe80::784c:7cff:fee8:d005"
+ ],
+ "pkt0": [
+ "fe80::c52:1bff:feec:2a07"
+ ],
+ "veth625ca23d-8": [
+ "fe80::1c9d:65ff:fe06:979c"
+ ],
+ "vethcabb806e-4": [
+ "fe80::5c08:5aff:fe1b:2d3a"
+ ],
+ "vethc1f26111-e": [
+ "fe80::bc15:baff:fed1:b96e"
+ ],
+ "pkt3": [],
+ "pkt2": [],
+ "pkt1": [],
+ "veth783896a6-1": [
+ "fe80::ec0d:87ff:fefb:9241"
+ ]
+ },
+ "num_cpus": 4,
+ "hwaddr_interfaces": {
+ "vethec13d320-3": "ca:7c:a9:17:a6:01",
+ "veth880f8ede-a": "f2:b6:bb:72:d0:cc",
+ "veth256f51fd-c": "6a:8f:17:12:c5:26",
+ "veth18c9ae6b-9": "0e:2c:88:42:b3:c8",
+ "veth0ee2fae3-b": "72:a6:c4:f3:69:55",
+ "ens4": "fa:16:3e:ee:95:10",
+ "ens5": "fa:16:3e:6a:ff:78",
+ "ens6": "fa:16:3e:3c:67:ee",
+ "ens3": "fa:16:3e:d0:86:39",
+ "veth7739e3f4-f": "aa:f8:2e:83:6d:c5",
+ "lo": "00:00:00:00:00:00",
+ "vethccc56015-8": "ce:bf:97:1e:c4:b4",
+ "vethc03db88c-1": "02:7b:e3:a7:a8:ff",
+ "veth67e6d989-e": "6e:3f:19:af:ed:ba",
+ "vhost0": "fa:16:3e:6a:ff:78",
+ "veth8342151a-7": "1e:3e:f3:d5:58:72",
+ "vethd53dd8ab-d": "e6:e8:42:c2:b2:ed",
+ "veth108c39b4-b": "7a:4c:7c:e8:d0:05",
+ "pkt0": "0e:52:1b:ec:2a:07",
+ "veth625ca23d-8": "1e:9d:65:06:97:9c",
+ "vethcabb806e-4": "5e:08:5a:1b:2d:3a",
+ "vethc1f26111-e": "be:15:ba:d1:b9:6e",
+ "pkt3": "62:2a:6b:43:22:23",
+ "pkt2": "9a:6f:3c:e5:f1:68",
+ "pkt1": "b6:fa:51:a7:bf:3c",
+ "veth783896a6-1": "ee:0d:87:fb:92:41"
+ },
+ "init": "systemd",
+ "ip4_interfaces": {
+ "vethec13d320-3": [],
+ "veth880f8ede-a": [],
+ "veth256f51fd-c": [],
+ "veth18c9ae6b-9": [],
+ "veth0ee2fae3-b": [],
+ "ens4": [
+ "10.10.100.5"
+ ],
+ "ens5": [],
+ "ens6": [
+ "10.13.100.26"
+ ],
+ "ens3": [
+ "10.11.1.1"
+ ],
+ "veth7739e3f4-f": [],
+ "lo": [
+ "127.0.0.1"
+ ],
+ "vethccc56015-8": [],
+ "vethc03db88c-1": [],
+ "veth67e6d989-e": [],
+ "vhost0": [
+ "10.12.1.1"
+ ],
+ "veth8342151a-7": [],
+ "vethd53dd8ab-d": [],
+ "veth108c39b4-b": [],
+ "pkt0": [],
+ "veth625ca23d-8": [],
+ "vethcabb806e-4": [],
+ "vethc1f26111-e": [],
+ "pkt3": [],
+ "pkt2": [],
+ "pkt1": [],
+ "veth783896a6-1": []
+ },
+ "grafana": {
+ "dashboard": {
+ "openstack_tenants_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/openstack_tenants_prometheus.json",
+ "format": "json"
+ },
+ "contrail_controller_prometheus": {
+ "datasource": "prometheus",
+ "template": "opencontrail/files/grafana_dashboards/contrail_4_controller_prometheus.json",
+ "format": "json"
+ },
+ "cassandra_influxdb": {
+ "datasource": "influxdb",
+ "template": "opencontrail/files/grafana_dashboards/cassandra_influxdb.json",
+ "format": "json"
+ },
+ "ntp_prometheus": {
+ "datasource": "prometheus",
+ "template": "ntp/files/grafana_dashboards/ntp_prometheus.json",
+ "format": "json"
+ },
+ "cinder_prometheus": {
+ "datasource": "prometheus",
+ "template": "cinder/files/grafana_dashboards/cinder_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "nova_utilization_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_utilization_prometheus.json",
+ "format": "json"
+ },
+ "hypervisor_influxdb": {
+ "datasource": "influxdb",
+ "template": "nova/files/grafana_dashboards/hypervisor_influxdb.json",
+ "format": "json"
+ },
+ "contrail_vrouter_prometheus": {
+ "datasource": "prometheus",
+ "template": "opencontrail/files/grafana_dashboards/contrail_4_vrouter_prometheus.json",
+ "format": "json"
+ },
+ "cinder_influxdb": {
+ "datasource": "influxdb",
+ "template": "cinder/files/grafana_dashboards/cinder_influxdb.json",
+ "format": "json"
+ },
+ "zookeeper_prometheus": {
+ "datasource": "prometheus",
+ "template": "opencontrail/files/grafana_dashboards/zookeeper_prometheus.json",
+ "format": "json"
+ },
+ "openstack_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/openstack_overview_prometheus.json",
+ "format": "json"
+ },
+ "linux_network_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_network_prometheus.json",
+ "format": "json"
+ },
+ "service_level": {
+ "datasource": "influxdb",
+ "row": {
+ "cinder-service-level": {
+ "title": "Cinder Service Levels",
+ "panel": {
+ "cinder-api-requests": {
+ "target": {
+ "cinder-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ },
+ "cinder-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ },
+ "cinder-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ }
+ },
+ "title": "Cinder API Requests"
+ },
+ "cinder-control-uptime": {
+ "target": {
+ "cinder-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "cinder-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND value = 0 AND $timeFilter"
+ },
+ "cinder-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND value = 4 AND $timeFilter"
+ }
+ },
+ "title": "Cinder Control Uptime"
+ },
+ "cinder-data-uptime": {
+ "target": {
+ "cinder-data-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND value = 0 AND $timeFilter"
+ },
+ "cinder-data-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND value = 4 AND $timeFilter"
+ },
+ "cinder-data-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ }
+ },
+ "title": "Cinder Data Uptime"
+ },
+ "cinder-api-availability": {
+ "target": {
+ "cinder-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'cinder-api' OR service = 'cinder-v2-api') AND value = 1 AND $timeFilter"
+ },
+ "cinder-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'cinder-api' OR service = 'cinder-v2-api') AND value = 0 AND $timeFilter"
+ }
+ },
+ "title": "Cinder API Availability (V1 & V2)"
+ }
+ }
+ },
+ "nova-service-level": {
+ "title": "Nova Service Levels",
+ "panel": {
+ "nova-control-uptime": {
+ "target": {
+ "nova-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND value = 0 AND $timeFilter"
+ },
+ "nova-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND value = 4 AND $timeFilter"
+ },
+ "nova-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ }
+ },
+ "title": "Nova Control Uptime"
+ },
+ "nova-data-uptime": {
+ "target": {
+ "nova-data-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND value = 0 AND $timeFilter"
+ },
+ "nova-data-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "nova-data-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND value = 4 AND $timeFilter"
+ }
+ },
+ "title": "Nova Data Uptime"
+ },
+ "nova-api-availability": {
+ "target": {
+ "nova-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'nova-api' AND value = 0 AND $timeFilter"
+ },
+ "nova-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'nova-api' AND value = 1 AND $timeFilter"
+ }
+ },
+ "title": "Nova API Availability"
+ },
+ "nova-api-requests": {
+ "target": {
+ "nova-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ },
+ "nova-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ },
+ "nova-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ }
+ },
+ "title": "Nova API Requests"
+ }
+ }
+ }
+ }
+ },
+ "linux_influxdb": {
+ "datasource": "influxdb",
+ "template": "linux/files/grafana_dashboards/system_influxdb.json",
+ "format": "json"
+ },
+ "nova_instances_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_instances_prometheus.json",
+ "format": "json"
+ },
+ "nova_influxdb": {
+ "datasource": "influxdb",
+ "template": "nova/files/grafana_dashboards/nova_influxdb.json",
+ "format": "json"
+ },
+ "main_prometheus": {
+ "datasource": "prometheus",
+ "row": {
+ "ost-control-plane": {
+ "title": "OpenStack Control Plane",
+ "panel": {
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"cinder.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=\"nova\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ }
+ }
+ }
+ }
+ },
+ "contrail_influxdb": {
+ "datasource": "influxdb",
+ "template": "opencontrail/files/grafana_dashboards/contrail_influxdb.json",
+ "format": "json"
+ },
+ "linux_disk_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_disk_prometheus.json",
+ "format": "json"
+ },
+ "cassandra_prometheus": {
+ "datasource": "prometheus",
+ "template": "opencontrail/files/grafana_dashboards/cassandra_prometheus.json",
+ "format": "json"
+ },
+ "zookeeper_influxdb": {
+ "datasource": "influxdb",
+ "template": "opencontrail/files/grafana_dashboards/zookeeper_influxdb.json",
+ "format": "json"
+ },
+ "nova_hypervisor_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_hypervisor_prometheus.json",
+ "format": "json"
+ },
+ "main_influxdb": {
+ "datasource": "influxdb",
+ "row": {
+ "ost-data-plane": {
+ "title": "OpenStack Data Plane",
+ "panel": {
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'cinder-data' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'nova-data' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ }
+ }
+ },
+ "ost-control-plane": {
+ "title": "OpenStack Control Plane",
+ "panel": {
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'cinder-control' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'nova-control' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ }
+ }
+ }
+ }
+ },
+ "nova_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_overview_prometheus.json",
+ "format": "json"
+ },
+ "linux_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_overview_prometheus.json",
+ "format": "json"
+ }
+ },
+ "parameters": null
+ },
+ "ssh_fingerprints": {
+ "rsa": "b2:cd:0e:6d:38:fa:37:de:b6:0f:89:81:85:72:77:a2",
+ "ecdsa": "22:8e:8d:63:f5:6b:3b:60:3e:20:fd:0c:68:22:2c:0c",
+ "dsa": "d2:03:00:39:78:2b:af:dc:81:18:b5:e0:d3:e1:f0:74"
+ },
+ "gid": 0,
+ "master": "10.10.0.15",
+ "ipv4": [
+ "10.10.100.5",
+ "10.11.1.1",
+ "10.12.1.1",
+ "10.13.100.26",
+ "127.0.0.1"
+ ],
+ "dns": {
+ "domain": "",
+ "sortlist": [],
+ "nameservers": [
+ "172.18.176.6",
+ "172.17.44.91"
+ ],
+ "ip4_nameservers": [
+ "172.18.176.6",
+ "172.17.44.91"
+ ],
+ "search": [
+ "openstacklocal"
+ ],
+ "ip6_nameservers": [],
+ "options": []
+ },
+ "ipv6": [
+ "::1",
+ "fe80::7b:e3ff:fea7:a8ff",
+ "fe80::c2c:88ff:fe42:b3c8",
+ "fe80::c52:1bff:feec:2a07",
+ "fe80::1c3e:f3ff:fed5:5872",
+ "fe80::1c9d:65ff:fe06:979c",
+ "fe80::5c08:5aff:fe1b:2d3a",
+ "fe80::688f:17ff:fe12:c526",
+ "fe80::6c3f:19ff:feaf:edba",
+ "fe80::70a6:c4ff:fef3:6955",
+ "fe80::784c:7cff:fee8:d005",
+ "fe80::a8f8:2eff:fe83:6dc5",
+ "fe80::bc15:baff:fed1:b96e",
+ "fe80::c87c:a9ff:fe17:a601",
+ "fe80::ccbf:97ff:fe1e:c4b4",
+ "fe80::e4e8:42ff:fec2:b2ed",
+ "fe80::ec0d:87ff:fefb:9241",
+ "fe80::f0b6:bbff:fe72:d0cc",
+ "fe80::f816:3eff:fe3c:67ee",
+ "fe80::f816:3eff:fe6a:ff78",
+ "fe80::f816:3eff:fed0:8639",
+ "fe80::f816:3eff:feee:9510"
+ ],
+ "server_id": 1847579797,
+ "cpu_flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "vmx",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "cpuid_fault",
+ "invpcid_single",
+ "pti",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "hle",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "rtm",
+ "rdseed",
+ "adx",
+ "smap",
+ "xsaveopt",
+ "arat"
+ ],
+ "osfullname": "Ubuntu",
+ "localhost": "cmp1",
+ "lsb_distrib_id": "Ubuntu",
+ "username": "root",
+ "fqdn_ip4": [
+ "10.11.1.1"
+ ],
+ "shell": "/bin/sh",
+ "nodename": "cmp1",
+ "saltversion": "2017.7.8",
+ "lsb_distrib_release": "16.04",
+ "saltpath": "/usr/lib/python2.7/dist-packages/salt",
+ "pythonversion": [
+ 2,
+ 7,
+ 12,
+ "final",
+ 0
+ ],
+ "host": "cmp1",
+ "os_family": "Debian",
+ "oscodename": "xenial",
+ "services": [
+ "fluentd",
+ "telegraf",
+ "rsyslog",
+ "openssh",
+ "ntp",
+ "opencontrail",
+ "nova",
+ "grafana",
+ "prometheus",
+ "logrotate",
+ "_reclass_",
+ "linux",
+ "cinder",
+ "salt",
+ "openscap"
+ ],
+ "osfinger": "Ubuntu-16.04",
+ "biosreleasedate": "04/01/2014",
+ "dns_records": [
+ {
+ "names": [
+ "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "cmp1"
+ ],
+ "address": "10.11.1.1"
+ }
+ ],
+ "lsb_distrib_description": "Ubuntu 16.04.5 LTS",
+ "sphinx": {
+ "doc": {
+ "cinder": {
+ "role": {
+ "volume": {
+ "name": "volume",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack@10.11.0.10:5672/openstack"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "backends": {
+ "value": "* lvm-driver:\n * storage engine: lvm\n * volume type: lvm-driver"
+ },
+ "identity_host": {
+ "name": "Identity service",
+ "value": "cinder@10.11.0.10:35357"
+ },
+ "packages": {
+ "value": "* cinder-volume: 2:12.0.4-2~u16.04+mcp96\n* lvm2: 2.02.133-1ubuntu10\n* sysfsutils: 2.1.0+repack-4\n* sg3-utils: 1.40-0ubuntu1\n* python-cinder: 2:12.0.4-2~u16.04+mcp96\n* python-mysqldb: 1.3.7-1build2\n* p7zip: 9.20.1~dfsg.1-4.2\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-pycadf: 2.6.0-1~u16.04+mcp2\n"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "cinder@10.11.0.50:3306//cinder"
+ }
+ }
+ }
+ },
+ "description": "Cinder provides an infrastructure for managing volumes in OpenStack. It was originally a Nova component called nova-volume, but has become an independent project since the Folsom release.",
+ "name": "Cinder"
+ },
+ "opencontrail": {
+ "role": {
+ "vrouter": {
+ "name": "vrouter",
+ "param": {
+ "disable_flow_collection": {
+ "name": "Disable flow collection",
+ "value": true
+ },
+ "compute_interface": {
+ "name": "vrouter vhost0 interface",
+ "value": "* interface binded: ens5\n* ip address: 10.12.1.1\n* MTU: 9000\n"
+ },
+ "packages": {
+ "value": "* contrail-utils: 4.0~20190123144438-0\n* iproute2: 4.3.0-1ubuntu3.16.04.4\n* haproxy: 1.6.3-1ubuntu0.1\n* linux-headers-4.15.0-43-generic: 4.15.0-43.46~16.04.1\n* contrail-nova-driver: 4.0~20190123144438-0"
+ },
+ "version": {
+ "name": "Contrail version",
+ "value": 4.0
+ }
+ }
+ }
+ },
+ "description": "OpenContrail is an open source network virtualization platform for the cloud.",
+ "name": "OpenContrail"
+ },
+ "salt": {
+ "role": {
+ "minion": {
+ "name": "minion",
+ "param": {
+ "version": {
+ "value": "2017.7.8 (Nitrogen)"
+ }
+ }
+ }
+ },
+ "description": "Salt is a new approach to infrastructure management. Easy enough to get running in minutes, scalable enough to manage tens of thousands of servers, and fast enough to communicate with them in seconds.",
+ "name": "Salt"
+ },
+ "nova": {
+ "role": {
+ "compute": {
+ "name": "compute",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "network_host": {
+ "name": "Network service",
+ "value": "10.11.0.10:9696"
+ },
+ "vncproxy_url": {
+ "name": "VNC proxy URL",
+ "value": "https://10.13.250.9:6080"
+ },
+ "reserved_host_memory_mb": {
+ "name": "Reserved Host Memmory",
+ "value": 1100
+ },
+ "glance_host": {
+ "name": "Image service",
+ "value": "10.11.0.10:9292"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "identity_host": {
+ "name": "Identity host ip",
+ "value": "nova@10.11.0.10:35357"
+ },
+ "packages": {
+ "value": "* nova-common: 2:17.0.9-6~u16.01+mcp93\n* nova-compute-kvm: 2:17.0.9-6~u16.01+mcp93\n* python-novaclient: 2:9.1.1-1~u16.04+mcp6\n* pm-utils: 1.4.1-16\n* sysfsutils: 2.1.0+repack-4\n* sg3-utils: 1.40-0ubuntu1\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-guestfs: 1:1.32.2-4ubuntu2\n* gettext-base: 0.19.7-2ubuntu3.1"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "nova@10.11.0.50:3306/nova"
+ }
+ }
+ }
+ },
+ "description": "OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more.",
+ "name": "Nova"
+ },
+ "linux": {
+ "role": {
+ "network": {
+ "name": "Network",
+ "param": {
+ "ip": {
+ "name": "IP Addresses",
+ "value": [
+ "10.10.100.5",
+ "10.11.1.1",
+ "10.12.1.1",
+ "10.13.100.26",
+ "127.0.0.1"
+ ]
+ },
+ "fqdn": {
+ "name": "FQDN",
+ "value": "cmp1.ozhurba-os-oc-cicd-sl.local"
+ }
+ }
+ },
+ "system": {
+ "name": "System",
+ "param": {
+ "kernel": {
+ "value": "Linux 4.15.0-43-generic"
+ },
+ "distribution": {
+ "value": "Ubuntu 16.04.5 LTS"
+ },
+ "name": {
+ "value": "cmp1"
+ }
+ }
+ }
+ },
+ "description": "Linux is a high performance, yet completely free, Unix-like operating system that is suitable for use on a wide range of computers and other products.",
+ "name": "Linux"
+ }
+ }
+ },
+ "num_gpus": 1,
+ "roles": [
+ "fluentd.agent",
+ "telegraf.agent",
+ "rsyslog.client",
+ "openssh.client",
+ "openssh.server",
+ "ntp.client",
+ "opencontrail.client",
+ "opencontrail.compute",
+ "nova.compute",
+ "grafana.collector",
+ "prometheus.collector",
+ "logrotate.server",
+ "linux.storage",
+ "linux.system",
+ "linux.network",
+ "cinder.volume",
+ "salt.minion",
+ "openscap.service"
+ ],
+ "virtual": "kvm",
+ "os": "Ubuntu",
+ "disks": [
+ "loop1",
+ "dm-1",
+ "loop6",
+ "dm-6",
+ "vdb",
+ "loop4",
+ "dm-4",
+ "loop2",
+ "dm-2",
+ "loop0",
+ "dm-0",
+ "loop7",
+ "loop5",
+ "dm-5",
+ "vda",
+ "loop3",
+ "dm-3"
+ ],
+ "cpu_model": "Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz",
+ "osmajorrelease": 16,
+ "pythonexecutable": "/usr/bin/python",
+ "productname": "OpenStack Nova",
+ "osarch": "amd64",
+ "cpuarch": "x86_64",
+ "lsb_distrib_codename": "xenial",
+ "osrelease_info": [
+ 16,
+ 4
+ ],
+ "locale_info": {
+ "detectedencoding": "UTF-8",
+ "defaultlanguage": "en_US",
+ "defaultencoding": "UTF-8"
+ },
+ "gpus": [
+ {
+ "model": "GD 5446",
+ "vendor": "unknown"
+ }
+ ],
+ "prometheus": {
+ "exporters": {
+ "libvirt": {
+ "services": {
+ "qemu": {
+ "bind": {
+ "port": 9177,
+ "address": "0.0.0.0"
+ },
+ "enabled": true
+ }
+ },
+ "packages": [
+ "libvirt-exporter"
+ ],
+ "enabled": true
+ }
+ },
+ "server": {
+ "recording": {},
+ "target": {
+ "static": {
+ "fluentd": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.1.1:24231",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cmp1"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 24231,
+ "address": "10.11.1.1"
+ }
+ ]
+ },
+ "telegraf": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.1.1:9126",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cmp1"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 9126,
+ "address": "10.11.1.1"
+ }
+ ]
+ },
+ "libvirt_qemu_exporter": {
+ "metric_relabel": [
+ {
+ "regex": "10.11.1.1:9177",
+ "source_labels": "instance",
+ "target_label": "host",
+ "replacement": "cmp1"
+ }
+ ],
+ "relabel_configs": [
+ {
+ "regex": "10.11.1.1:9177",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cmp1"
+ }
+ ],
+ "endpoint": [
+ {
+ "port": 9177,
+ "address": "10.11.1.1"
+ }
+ ]
+ }
+ }
+ },
+ "alert": {
+ "ContrailApiDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The {{ $labels.name }} API endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} API endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"contrail.*\"} == 0"
+ },
+ "ContrailApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The {{ $labels.name }} API is not accessible for all available endpoints for 2 minutes.",
+ "summary": "{{ $labels.name }} API outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"contrail.*\"} == 0) by (name) == count(http_response_status{name=~\"contrail.*\"}) by (name)"
+ },
+ "ContrailVrouterXMPPSessionsChangesTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The OpenContrail vRouter XMPP sessions on the {{ $labels.host }} node have changed {{ $value }} times.",
+ "summary": "OpenContrail vRouter XMPP sessions changes reached the limit of5"
+ },
+ "if": "abs(delta(contrail_vrouter_xmpp[2m])) >= 5"
+ },
+ "CinderErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Cinder logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Cinder logs"
+ },
+ "if": "sum(rate(log_messages{service=\"cinder\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "ContrailApiDownMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.name }} API endpoints (>= 30.0%) are not accessible for 2 minutes.",
+ "summary": "30.0% of {{ $labels.name }} API endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"contrail.*\"} == 0) by (name) >= count(http_response_status{name=~\"contrail.*\"}) by (name) *0.3"
+ },
+ "ContrailVrouterXMPPSessionsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter XMPP sessions are open on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "OpenContrail vRouter XMPP sessions reached the limit of 10"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_xmpp) by (host) >= 10"
+ },
+ "LibvirtDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "libvirt"
+ },
+ "annotations": {
+ "description": "The Libvirt metric exporter fails to gather metrics on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "Failure to gather Libvirt metrics"
+ },
+ "for": "2m",
+ "if": "libvirt_up == 0"
+ },
+ "CronProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The cron process on the {{ $labels.host }} node is down.",
+ "summary": "Cron process is down"
+ },
+ "if": "procstat_running{process_name=\"cron\"} == 0"
+ },
+ "ContrailApiDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.name }} API endpoints (>= 60.0%) are not accessible for 2 minutes.",
+ "summary": " 60.0% of {{ $labels.name }} API endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"contrail.*\"} == 0) by (name) >= count(http_response_status{name=~\"contrail.*\"}) by (name) *0.6"
+ },
+ "ContrailBGPSessionsNoEstablished": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "There are no established OpenContrail BGP sessions on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "No established OpenContrail BGP sessions"
+ },
+ "for": "2m",
+ "if": "max(contrail_bgp_session_count) by (host) == 0"
+ },
+ "ContrailXMPPSessionsChangesTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The OpenContrail XMPP sessions on the {{ $labels.host }} node have changed {{ $value }} times.",
+ "summary": "OpenContrail XMPP sessions changes reached the limit of100"
+ },
+ "if": "abs(delta(contrail_xmpp_session_count[2m])) >= 100"
+ },
+ "SystemRxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets received by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "60 received packets were dropped"
+ },
+ "if": "increase(net_drop_in[1m]) > 60 unless on (host,interface) bond_slave_active == 0"
+ },
+ "ContrailFlowsLabelInvalidTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter flows on the {{ $labels.host }} node had an invalid composite interface for 2 minutes.",
+ "summary": "OpenContrail vRouter flows with an invalid label reached the limit of 100"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_flows_invalid_label) by (host) >= 100"
+ },
+ "ContrailVrouterLLSSessionsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter LLS sessions are open on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "OpenContrail vRouter LLS sessions reached the limit of 10"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_lls) by (host) >= 10"
+ },
+ "ContrailFlowsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "enabled": false,
+ "annotations": {
+ "description": "The average per-second rate of dropped OpenContrail vRouter flows on the {{ $labels.host }} node is {{ $value }} for 2 minutes.",
+ "summary": "OpenContrail vRouter dropped flows reached the limit of 0.2/s"
+ },
+ "for": "2m",
+ "if": "rate(contrail_vrouter_flows_flow_action_drop[5m]) >= 0.2"
+ },
+ "ContrailXMPPSessionsMissing": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail XMPP sessions are missing on the compute cluster for 2 minutes.",
+ "summary": "Missing OpenContrail XMPP sessions"
+ },
+ "for": "2m",
+ "if": "count(contrail_vrouter_xmpp) * 2 - sum(contrail_xmpp_session_count) > 0"
+ },
+ "ContrailProcessDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.process_name }} processes (>= 60.0%) are down.",
+ "summary": "60.0% of {{ $labels.process_name }} processes are down"
+ },
+ "if": "count(procstat_running{process_name=~\"contrail.*\"} == 0) by (process_name) >= 0.6*count(procstat_running{process_name=~\"contrail.*\"}) by (process_name)"
+ },
+ "ContrailFlowsQueueSizeExceededTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The average per-second rate of OpenContrail vRouter flows exceeding the queue size on the {{ $labels.host }} node is {{ $value }} for 2 minutes.",
+ "summary": "OpenContrail vRouter flows exceeding the queue size reached the limit of 0.1/s"
+ },
+ "for": "2m",
+ "if": "rate(contrail_vrouter_flows_flow_queue_limit_exceeded[5m]) >= 0.1"
+ },
+ "SystemLoadTooHighWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
+ "summary": "System load is1.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 1.0"
+ },
+ "PacketsDroppedByCpuMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 24 hours.",
+ "summary": "CPU dropped 100 packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[24h])) > 100"
+ },
+ "SshdProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The SSH process on the {{ $labels.host }} node is down.",
+ "summary": "SSH process is down"
+ },
+ "if": "procstat_running{process_name=\"sshd\"} == 0"
+ },
+ "SystemSwapFullMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The swap on the {{ $labels.host }} node is {{ $value }}% used for 2 minutes.",
+ "summary": "90.0% of swap is used"
+ },
+ "for": "2m",
+ "if": "swap_used_percent >= 90.0"
+ },
+ "NovaErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Nova logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Nova logs"
+ },
+ "if": "sum(rate(log_messages{service=\"nova\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "ContrailXMPPSessionsDown": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail XMPP sessions on the {{ $labels.host }} node are down for 2 minutes.",
+ "summary": "OpenContrail XMPP sessions are down"
+ },
+ "for": "2m",
+ "if": "min(contrail_xmpp_session_down_count) by (host) > 0"
+ },
+ "NetdevBudgetRanOutsWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The rate of net_rx_action loops terminations on the {{ $labels.host }} node is {{ $value }} per second during the last 7 minutes. Modify the net.core.netdev_budget and net.core.netdev_budget_usecs kernel parameters.",
+ "summary": "CPU terminated 0.1 net_rx_action loops per second"
+ },
+ "for": "7m",
+ "if": "max(rate(nstat_time_squeeze[5m])) without (cpu) > 0.1"
+ },
+ "ContrailFlowsDiscardedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The average per-second rate of discarded OpenContrail vRouter flows on the {{ $labels.host }} node is {{ $value }} for 2 minutes.",
+ "summary": "OpenContrail vRouter discarded flows reached the limit of 0.1/s"
+ },
+ "for": "2m",
+ "if": "rate(contrail_vrouter_flows_discard[5m]) >= 0.1"
+ },
+ "SystemLoadTooHighCritical": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
+ "summary": "System load is2.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 2.0"
+ },
+ "SystemDiskInodesFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "85.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 85.0"
+ },
+ "NtpOffsetTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "ntp"
+ },
+ "annotations": {
+ "description": "The NTP offset on the {{ $labels.host }} node is {{ $value }}ms for 2 minutes.",
+ "summary": "NTP offset reached the limit of 200ms"
+ },
+ "for": "2m",
+ "if": "ntpq_offset >= 200"
+ },
+ "ContrailBGPSessionsNoActive": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "There are no active OpenContrail BGP sessions on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "No active OpenContrail BGP sessions"
+ },
+ "for": "2m",
+ "if": "max(contrail_bgp_session_up_count) by (host) == 0"
+ },
+ "PacketsDroppedByCpuMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 24 hours.",
+ "summary": "CPU dropped 0 packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[24h])) > 0"
+ },
+ "ContrailProcessDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The {{ $labels.process_name }} process on the {{ $labels.host }} node is down.",
+ "summary": "{{ $labels.process_name }} process is down"
+ },
+ "if": "procstat_running{process_name=~\"contrail.*\"} == 0"
+ },
+ "SaltMinionServiceDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "salt"
+ },
+ "annotations": {
+ "description": "The salt-minion service on the {{ $labels.host }} node is down.",
+ "summary": "Salt-minion service is down"
+ },
+ "if": "procstat_running{process_name=\"salt-minion\"} == 0"
+ },
+ "ContrailXMPPSessionsMissingEstablished": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} established OpenContrail XMPP sessions are missing on the compute cluster for 2 minutes.",
+ "summary": "Missing established OpenContrail XMPP sessions"
+ },
+ "for": "2m",
+ "if": "count(contrail_vrouter_xmpp) * 2 - sum(contrail_xmpp_session_up_count) > 0"
+ },
+ "ContrailProcessOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "All {{ $labels.process_name }} processes are down.",
+ "summary": "{{ $labels.name }} service outage"
+ },
+ "if": "count(procstat_running{process_name=~\"contrail.*\"} == 0) by (process_name) == count(procstat_running{process_name=~\"contrail.*\"}) by (process_name)"
+ },
+ "SystemDiskFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 85.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 85.0"
+ },
+ "SshFailedLoginsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} failed SSH login attempts on the {{ $labels.host }} node during the last 5 minutes.",
+ "summary": "5 failed SSH logins"
+ },
+ "if": "increase(failed_logins_total[5m]) > 5"
+ },
+ "ContrailFlowsNextHopInvalidTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The average per-second rate of OpenContrail vRouter flows with an invalid next hop on the {{ $labels.host }} node is {{ $value }} for 2 minutes.",
+ "summary": "OpenContrail vRouter flows with an invalid next hop reached the limit of 0.1/s"
+ },
+ "for": "2m",
+ "if": "rate(contrail_vrouter_flows_invalid_nh[5m]) >= 0.1"
+ },
+ "SystemDiskInodesFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "95.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 95.0"
+ },
+ "SystemMemoryFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "90.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 90.0"
+ },
+ "ContrailBGPSessionsDown": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail BGP sessions on the {{ $labels.host }} node are down for 2 minutes.",
+ "summary": "OpenContrail BGP sessions are down"
+ },
+ "for": "2m",
+ "if": "min(contrail_bgp_session_down_count) by (host) > 0"
+ },
+ "ContrailFlowsActiveTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter flows per second on the {{ $labels.host }} node are active for 2 minutes.",
+ "summary": "OpenContrail vRouter active flows reached the limit of 100"
+ },
+ "for": "2m",
+ "if": "deriv(contrail_vrouter_flows_active[5m]) >= 100"
+ },
+ "SystemTxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets transmitted by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "100 transmitted packets were dropped"
+ },
+ "if": "increase(net_drop_out[1m]) > 100"
+ },
+ "ContrailVrouterLLSSessionsChangesTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The OpenContrail vRouter LLS sessions on the {{ $labels.host }} node have changed {{ $value }} times.",
+ "summary": "OpenContrail vRouter LLS sessions changes reached the limit of 5"
+ },
+ "if": "abs(delta(contrail_vrouter_lls[2m])) >= 5"
+ },
+ "SystemDiskFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 95.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 95.0"
+ },
+ "ContrailProcessDownMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.process_name }} processes (>= 30.0%) are down.",
+ "summary": "30.0% of {{ $labels.process_name }} processes are down"
+ },
+ "if": "count(procstat_running{process_name=~\"contrail.*\"} == 0) by (process_name) >= 0.3*count(procstat_running{process_name=~\"contrail.*\"}) by (process_name)"
+ },
+ "SystemCpuFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The average CPU usage on the {{ $labels.host }} node is {{ $value }}% for 2 minutes.",
+ "summary": "90.0% CPU usage"
+ },
+ "for": "2m",
+ "if": "100 - avg_over_time(cpu_usage_idle{cpu=\"cpu-total\"}[5m]) > 90.0"
+ },
+ "ContrailFlowsInterfaceInvalidTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "The average per-second rate of OpenContrail vRouter flows with an invalid composite interface on the {{ $labels.host }} node is {{ $value }} for 2 minutes.",
+ "summary": "OpenContrail vRouter flows with an invalid composite interface reached the limit of 0.05/s"
+ },
+ "for": "2m",
+ "if": "rate(contrail_vrouter_flows_composite_invalid_interface[5m]) >= 0.05"
+ },
+ "ContrailFlowsTableFullTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter flows on the {{ $labels.host }} node had a full table for 2 minutes.",
+ "summary": "OpenContrail vRouter flows with full table reached the limit of 100"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_flows_flow_table_full) by (host) >= 100"
+ },
+ "ContrailXMPPSessionsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail XMPP sessions on the {{ $labels.host }} node are open for 2 minutes.",
+ "summary": "OpenContrail XMPP sessions reached the limit of 500"
+ },
+ "for": "2m",
+ "if": "min(contrail_xmpp_session_count) by (host) >= 500"
+ },
+ "ContrailFlowsFragErrTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "{{ $value }} OpenContrail vRouter flows on the {{ $labels.host }} node had fragment errors for 2 minutes.",
+ "summary": "OpenContrail vRouter flows with fragment errors reached the limit of 0.2"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_flows_frag_err) by (host) >= 100"
+ },
+ "SystemDiskErrorsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.device }} disk on the {{ $labels.host }} node is reporting errors for 5 minutes.",
+ "summary": "Disk {{ $labels.device }} is failing"
+ },
+ "for": "5m",
+ "if": "increase(hdd_errors_total[1m]) > 0"
+ },
+ "SystemSwapFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The swap on the {{ $labels.host }} node is {{ $value }}% used for 2 minutes.",
+ "summary": "50.0% of swap is used"
+ },
+ "for": "2m",
+ "if": "swap_used_percent >= 50.0"
+ },
+ "SystemMemoryFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "95.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 95.0"
+ },
+ "ContrailVrouterXMPPSessionsZero": {
+ "labels": {
+ "severity": "warning",
+ "service": "contrail"
+ },
+ "annotations": {
+ "description": "There are no OpenContrail vRouter XMPP sessions on the {{ $labels.host }} node for 2 minutes.",
+ "summary": "No OpenContrail vRouter XMPP sessions"
+ },
+ "for": "2m",
+ "if": "min(contrail_vrouter_xmpp) by (host) == 0"
+ }
+ }
+ }
+ },
+ "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "machine_id": "62cb8077993242e295fc27f611de39f6",
+ "salt": {
+ "graph": [
+ {
+ "host": "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "ntp.client",
+ "relations": [
+ {
+ "host_external": "udp://10.10.0.15",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ },
+ {
+ "host_external": "udp://pool.ntp.org",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ }
+ ]
+ },
+ {
+ "host": "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "linux.system",
+ "relations": [
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//opencontrail-4.0//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-updates main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ }
+ ]
+ },
+ {
+ "host": "cmp1.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-config",
+ "service": "salt.minion",
+ "relations": [
+ {
+ "direction": "source",
+ "type": "tcp-0mq",
+ "service": "salt.master",
+ "host_from_target": "10.10.0.15"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "ctl01.fakedomain.local": {
+ "biosversion": "1.10.2-1.1~u16.04+mcp2",
+ "kernel": "Linux",
+ "domain": "ozhurba-os-oc-cicd-sl.local",
+ "uid": 0,
+ "zmqversion": "4.1.4",
+ "kernelrelease": "4.15.0-43-generic",
+ "pythonpath": [
+ "/usr/bin",
+ "/usr/lib/python2.7",
+ "/usr/lib/python2.7/plat-x86_64-linux-gnu",
+ "/usr/lib/python2.7/lib-tk",
+ "/usr/lib/python2.7/lib-old",
+ "/usr/lib/python2.7/lib-dynload",
+ "/usr/local/lib/python2.7/dist-packages",
+ "/usr/lib/python2.7/dist-packages"
+ ],
+ "serialnumber": "a29a9e8f-3421-47ae-aaeb-1d02391f7e62",
+ "neutron_policy": {
+ "create_router:distributed": "rule:admin_only",
+ "get_policy_dscp_marking_rule": "rule:regular_user",
+ "shared_subnetpools": "field:subnetpools:shared=True",
+ "context_is_advsvc": "role:advsvc",
+ "get_floatingip": "rule:admin_or_owner",
+ "context_is_admin": "role:admin",
+ "update_router:external_gateway_info:network_id": "rule:admin_or_owner",
+ "update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
+ "get_loadbalancer-pools": "rule:admin_only",
+ "get_service_provider": "rule:regular_user",
+ "create_network:router:external": "rule:admin_only",
+ "create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "get_service_profiles": "rule:admin_only",
+ "create_qos_queue": "rule:admin_only",
+ "get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
+ "delete_address_scope": "rule:admin_or_owner",
+ "delete_agent": "rule:admin_only",
+ "update_policy_dscp_marking_rule": "rule:admin_only",
+ "update_address_scope:shared": "rule:admin_only",
+ "create_router": "rule:regular_user",
+ "delete_metering_label_rule": "rule:admin_only",
+ "update_network:segments": "rule:admin_only",
+ "update_network:provider:segmentation_id": "rule:admin_only",
+ "create_network:segments": "rule:admin_only",
+ "delete_log": "rule:admin_only",
+ "get_policy_profiles": "",
+ "get_port:binding:host_id": "rule:admin_only",
+ "get_rule_type": "rule:regular_user",
+ "update_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "create_segment": "rule:admin_only",
+ "get_security_group_rule": "rule:admin_or_owner",
+ "update_port:binding:host_id": "rule:admin_only",
+ "delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
+ "delete_rbac_policy": "rule:admin_or_owner",
+ "get_policy_profile": "",
+ "update_network:router:external": "rule:admin_only",
+ "create_network:provider:segmentation_id": "rule:admin_only",
+ "get_subnet": "rule:admin_or_owner or rule:shared",
+ "create_port": "",
+ "create_subnet": "rule:admin_or_network_owner",
+ "update_policy": "rule:admin_only",
+ "get_network:queue_id": "rule:admin_only",
+ "get_policy": "rule:regular_user",
+ "update_network": "rule:admin_or_owner",
+ "delete_network": "rule:admin_or_owner",
+ "get_service_profile": "rule:admin_only",
+ "get_policy_minimum_bandwidth_rule": "rule:regular_user",
+ "update_subnet": "rule:admin_or_network_owner",
+ "update_router:ha": "rule:admin_only",
+ "update_subnet:service_types": "rule:admin_only",
+ "get_flavor": "rule:regular_user",
+ "create_policy_bandwidth_limit_rule": "rule:admin_only",
+ "create_subnetpool": "",
+ "get_metering_label_rule": "rule:admin_only",
+ "add_router_interface": "rule:admin_or_owner",
+ "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
+ "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
+ "create_rbac_policy:target_tenant": "rule:restrict_wildcard",
+ "get_port:binding:vif_details": "rule:admin_only",
+ "get_router:ha": "rule:admin_only",
+ "update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
+ "update_log": "rule:admin_only",
+ "update_subnetpool:is_default": "rule:admin_only",
+ "update_policy_minimum_bandwidth_rule": "rule:admin_only",
+ "get_subports": "",
+ "get_port:binding:vif_type": "rule:admin_only",
+ "shared": "field:networks:shared=True",
+ "update_security_group": "rule:admin_or_owner",
+ "get_logs": "rule:admin_only",
+ "get_agent": "rule:admin_only",
+ "create_floatingip:floating_ip_address": "rule:admin_only",
+ "delete_floatingip": "rule:admin_or_owner",
+ "delete_flavor_service_profile": "rule:admin_only",
+ "create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "update_service_profile": "rule:admin_only",
+ "update_network:shared": "rule:admin_only",
+ "update_router:distributed": "rule:admin_only",
+ "create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
+ "delete_policy": "rule:admin_only",
+ "regular_user": "",
+ "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
+ "update_segment": "rule:admin_only",
+ "get_network:segments": "rule:admin_only",
+ "create_network:provider:physical_network": "rule:admin_only",
+ "shared_address_scopes": "field:address_scopes:shared=True",
+ "get_network:provider:segmentation_id": "rule:admin_only",
+ "get_router": "rule:admin_or_owner",
+ "get_l3-agents": "rule:admin_only",
+ "get_router:distributed": "rule:admin_only",
+ "create_lsn": "rule:admin_only",
+ "create_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "update_network:provider:network_type": "rule:admin_only",
+ "create_metering_label": "rule:admin_only",
+ "create_metering_label_rule": "rule:admin_only",
+ "create_flavor_service_profile": "rule:admin_only",
+ "delete_security_group_rule": "rule:admin_or_owner",
+ "delete_policy_bandwidth_limit_rule": "rule:admin_only",
+ "create_l3-router": "rule:admin_only",
+ "create_policy_minimum_bandwidth_rule": "rule:admin_only",
+ "update_router": "rule:admin_or_owner",
+ "create_network:shared": "rule:admin_only",
+ "get_subnet:segment_id": "rule:admin_only",
+ "get_qos_queue": "rule:admin_only",
+ "create_dhcp-network": "rule:admin_only",
+ "get_dhcp-networks": "rule:admin_only",
+ "add_subports": "rule:admin_or_owner",
+ "create_port:binding:host_id": "rule:admin_only",
+ "update_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "owner": "tenant_id:%(tenant_id)s",
+ "get_network_ip_availabilities": "rule:admin_only",
+ "remove_router_interface": "rule:admin_or_owner",
+ "update_router:external_gateway_info": "rule:admin_or_owner",
+ "get_network:provider:physical_network": "rule:admin_only",
+ "get_policy_bandwidth_limit_rule": "rule:regular_user",
+ "create_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "delete_l3-router": "rule:admin_only",
+ "create_security_group": "rule:admin_or_owner",
+ "delete_router": "rule:admin_or_owner",
+ "delete_policy_dscp_marking_rule": "rule:admin_only",
+ "get_rbac_policy": "rule:admin_or_owner",
+ "update_floatingip": "rule:admin_or_owner",
+ "delete_network_profile": "rule:admin_only",
+ "create_policy_dscp_marking_rule": "rule:admin_only",
+ "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
+ "admin_only": "rule:context_is_admin",
+ "update_port:allowed_address_pairs": "rule:admin_or_network_owner",
+ "get_lsn": "rule:admin_only",
+ "update_address_scope": "rule:admin_or_owner",
+ "create_network:is_default": "rule:admin_only",
+ "external": "field:networks:router:external=True",
+ "get_network_profile": "",
+ "create_address_scope": "",
+ "create_floatingip": "rule:regular_user",
+ "get_loadbalancer-agent": "rule:admin_only",
+ "get_network:router:external": "rule:regular_user",
+ "create_address_scope:shared": "rule:admin_only",
+ "create_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared",
+ "create_network_profile": "rule:admin_only",
+ "delete_subnet": "rule:admin_or_network_owner",
+ "get_network_profiles": "",
+ "delete_trunk": "rule:admin_or_owner",
+ "create_network": "",
+ "get_auto_allocated_topology": "rule:admin_or_owner",
+ "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
+ "create_security_group_rule": "rule:admin_or_owner",
+ "get_loadbalancer-hosting-agent": "rule:admin_only",
+ "delete_security_group": "rule:admin_or_owner",
+ "get_network:provider:network_type": "rule:admin_only",
+ "create_trunk": "rule:regular_user",
+ "update_policy_bandwidth_limit_rule": "rule:admin_only",
+ "get_network_ip_availability": "rule:admin_only",
+ "get_metering_label": "rule:admin_only",
+ "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
+ "update_agent": "rule:admin_only",
+ "create_flavor": "rule:admin_only",
+ "create_port:binding:profile": "rule:admin_only",
+ "get_port:binding:profile": "rule:admin_only",
+ "update_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared",
+ "create_network:provider:network_type": "rule:admin_only",
+ "update_port:data_plane_status": "rule:admin_or_data_plane_int",
+ "create_port:allowed_address_pairs": "rule:admin_or_network_owner",
+ "create_router:ha": "rule:admin_only",
+ "update_network_profile": "rule:admin_only",
+ "delete_dhcp-network": "rule:admin_only",
+ "create_policy": "rule:admin_only",
+ "create_log": "rule:admin_only",
+ "update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
+ "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
+ "get_security_groups": "rule:admin_or_owner",
+ "admin_or_data_plane_int": "rule:context_is_admin or role:data_plane_integrator",
+ "default": "rule:admin_or_owner",
+ "get_trunk": "rule:admin_or_owner",
+ "network_device": "field:port:device_owner=~^network:",
+ "delete_policy_minimum_bandwidth_rule": "rule:admin_only",
+ "get_port:queue_id": "rule:admin_only",
+ "update_port:binding:profile": "rule:admin_only",
+ "remove_subports": "rule:admin_or_owner",
+ "get_loggable_resources": "rule:admin_only",
+ "update_network:provider:physical_network": "rule:admin_only",
+ "get_flavor_service_profile": "rule:regular_user",
+ "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
+ "update_flavor": "rule:admin_only",
+ "get_security_group_rules": "rule:admin_or_owner",
+ "get_security_group": "rule:admin_or_owner",
+ "delete_flavor": "rule:admin_only",
+ "create_subnetpool:is_default": "rule:admin_only",
+ "update_policy_profiles": "rule:admin_only",
+ "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
+ "create_subnet:service_types": "rule:admin_only",
+ "get_l3-routers": "rule:admin_only",
+ "create_rbac_policy": "",
+ "delete_metering_label": "rule:admin_only",
+ "update_subnetpool": "rule:admin_or_owner",
+ "get_dhcp-agents": "rule:admin_only",
+ "get_log": "rule:admin_only",
+ "get_flavors": "rule:regular_user",
+ "create_subnetpool:shared": "rule:admin_only",
+ "get_agent-loadbalancers": "rule:admin_only",
+ "delete_subnetpool": "rule:admin_or_owner",
+ "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
+ "get_segment": "rule:admin_only",
+ "delete_service_profile": "rule:admin_only",
+ "create_subnet:segment_id": "rule:admin_only",
+ "create_service_profile": "rule:admin_only",
+ "delete_segment": "rule:admin_only",
+ "update_rbac_policy": "rule:admin_or_owner",
+ "admin_or_owner": "rule:context_is_admin or rule:owner"
+ },
+ "telegraf": {
+ "remote_agent": {
+ "input": {
+ "openstack": {
+ "username": "admin",
+ "cpu_ratio": "16.0",
+ "region": "RegionOne",
+ "interval": "3m",
+ "project": "admin",
+ "monitor_agents": "true",
+ "password": "lkgQzExIrlxueh57sLEm1vkcOfQgDuWh",
+ "tenant": "admin",
+ "identity_endpoint": "http://10.11.0.10:35357/"
+ }
+ },
+ "processor": {},
+ "dir": {
+ "config": "/srv/volumes/local/telegraf",
+ "config_d": "/srv/volumes/local/telegraf/telegraf.d"
+ },
+ "output": {
+ "prometheus_client_openstack": {
+ "engine": "prometheus",
+ "bind": {
+ "port": "9127",
+ "address": "0.0.0.0"
+ },
+ "expiration_interval": "6m",
+ "namepass": [
+ "openstack*"
+ ],
+ "template": "telegraf/files/output/prometheus_client.conf"
+ },
+ "prometheus_client": {
+ "namedrop": [
+ "openstack*"
+ ]
+ }
+ }
+ },
+ "agent": {
+ "metric_batch_size": 1000,
+ "collection_jitter": 2,
+ "interval": 15,
+ "enabled": true,
+ "pkgs": [
+ "telegraf"
+ ],
+ "round_interval": false,
+ "output": {
+ "prometheus_client": {
+ "engine": "prometheus",
+ "bind": {
+ "port": 9126,
+ "address": "0.0.0.0"
+ },
+ "string_as_label": false
+ }
+ },
+ "input": {
+ "haproxy": {
+ "fielddrop": [
+ "addr",
+ "agent_status",
+ "check_status",
+ "cookie",
+ "last_agt",
+ "last_chk",
+ "mode",
+ "status",
+ "tracked"
+ ],
+ "servers": [
+ "/run/haproxy/admin.sock"
+ ]
+ },
+ "kernel": null,
+ "processes": null,
+ "nstat": {
+ "fieldpass": [
+ "packet_drop",
+ "time_squeeze"
+ ]
+ },
+ "ipcheck": {
+ "ips": [
+ "10.11.0.10"
+ ],
+ "interface_name_regexp_exclude": "^docker.*",
+ "template": "keepalived/files/telegraf.conf"
+ },
+ "x509": {
+ "sources": [
+ "/srv/salt/pki/ozhurba-os-oc-cicd-sl/10.13.250.9.crt"
+ ]
+ },
+ "ntp": {
+ "template": "ntp/files/telegraf.conf"
+ },
+ "mem": null,
+ "keepalived": {
+ "template": "keepalived/files/vrrp_telegraf.conf"
+ },
+ "system": null,
+ "http_listener": {
+ "read_timeout": "10s",
+ "bind": {
+ "port": 8186,
+ "address": "127.0.0.1"
+ },
+ "tagexclude": [
+ "hostname"
+ ],
+ "write_timeout": "10s"
+ },
+ "swap": null,
+ "linux_sysctl_fs": null,
+ "diskio": null,
+ "procstat": {
+ "process": {
+ "sshd": {
+ "exe": "sshd"
+ },
+ "salt-minion": {
+ "pattern": "salt-minion"
+ },
+ "keepalived": {
+ "exe": "keepalived"
+ },
+ "cron": {
+ "exe": "cron"
+ },
+ "memcached": {
+ "exe": "memcached"
+ },
+ "ntpd": {
+ "exe": "ntpd"
+ }
+ }
+ },
+ "apache": {
+ "urls": [
+ "http://127.0.0.1:80/server-status?auto"
+ ],
+ "template": "apache/files/telegraf.conf"
+ },
+ "net": null,
+ "disk": {
+ "ignore_fs": [
+ "aufs",
+ "rootfs",
+ "sysfs",
+ "proc",
+ "devtmpfs",
+ "devpts",
+ "tmpfs",
+ "fusectl",
+ "cgroup",
+ "overlay"
+ ]
+ },
+ "memcached": {
+ "servers": [
+ {
+ "port": 11211,
+ "address": "127.0.0.1"
+ }
+ ]
+ },
+ "cpu": {
+ "totalcpu": true,
+ "percpu": false
+ },
+ "http_response": {
+ "glance-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:9292/"
+ },
+ "cinder-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:8776/"
+ },
+ "heat-cfn-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:8000/"
+ },
+ "nova-api": {
+ "expected_code": 200,
+ "address": "http://10.11.0.11:8774/"
+ },
+ "neutron-api": {
+ "expected_code": 200,
+ "address": "http://10.11.0.11:9696/"
+ },
+ "keystone-public-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:5000/"
+ },
+ "glance-registry": {
+ "expected_code": 401,
+ "address": "http://10.11.0.11:9191/"
+ },
+ "heat-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:8004/"
+ },
+ "keystone-admin-api": {
+ "expected_code": 300,
+ "address": "http://10.11.0.11:35357/"
+ }
+ }
+ },
+ "metric_buffer_limit": 10000,
+ "processor": {},
+ "dir": {
+ "config": "/etc/telegraf",
+ "config_d": "/etc/telegraf/telegraf.d"
+ }
+ }
+ },
+ "ip_interfaces": {
+ "ens4": [
+ "10.10.100.7",
+ "fe80::f816:3eff:fe4e:f3c8"
+ ],
+ "lo": [
+ "127.0.0.1",
+ "::1"
+ ],
+ "ens6": [
+ "10.13.100.22",
+ "fe80::f816:3eff:fe06:f3b"
+ ],
+ "ens5": [
+ "10.12.100.14",
+ "fe80::f816:3eff:fedb:db4f"
+ ],
+ "ens3": [
+ "10.11.0.11",
+ "fe80::f816:3eff:fe0c:1532"
+ ]
+ },
+ "groupname": "root",
+ "fqdn_ip6": [],
+ "mem_total": 9992,
+ "saltversioninfo": [
+ 2017,
+ 7,
+ 8,
+ 0
+ ],
+ "SSDs": [],
+ "mdadm": [],
+ "id": "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "manufacturer": "OpenStack Foundation",
+ "osrelease": "16.04",
+ "ps": "ps -efHww",
+ "systemd": {
+ "version": "229",
+ "features": "+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ -LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN"
+ },
+ "fqdn": "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "uuid": "b7410784-ac6d-44c3-a9c3-e2ae2aaad69e",
+ "ip6_interfaces": {
+ "ens4": [
+ "fe80::f816:3eff:fe4e:f3c8"
+ ],
+ "lo": [
+ "::1"
+ ],
+ "ens6": [
+ "fe80::f816:3eff:fe06:f3b"
+ ],
+ "ens5": [
+ "fe80::f816:3eff:fedb:db4f"
+ ],
+ "ens3": [
+ "fe80::f816:3eff:fe0c:1532"
+ ]
+ },
+ "num_cpus": 4,
+ "hwaddr_interfaces": {
+ "ens4": "fa:16:3e:4e:f3:c8",
+ "lo": "00:00:00:00:00:00",
+ "ens6": "fa:16:3e:06:0f:3b",
+ "ens5": "fa:16:3e:db:db:4f",
+ "ens3": "fa:16:3e:0c:15:32"
+ },
+ "init": "systemd",
+ "ip4_interfaces": {
+ "ens4": [
+ "10.10.100.7"
+ ],
+ "lo": [
+ "127.0.0.1"
+ ],
+ "ens6": [
+ "10.13.100.22"
+ ],
+ "ens5": [
+ "10.12.100.14"
+ ],
+ "ens3": [
+ "10.11.0.11"
+ ]
+ },
+ "grafana": {
+ "dashboard": {
+ "openstack_tenants_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/openstack_tenants_prometheus.json",
+ "format": "json"
+ },
+ "neutron_influxdb": {
+ "datasource": "influxdb",
+ "template": "neutron/files/grafana_dashboards/neutron_influxdb.json",
+ "format": "json"
+ },
+ "ntp_prometheus": {
+ "datasource": "prometheus",
+ "template": "ntp/files/grafana_dashboards/ntp_prometheus.json",
+ "format": "json"
+ },
+ "heat_prometheus": {
+ "datasource": "prometheus",
+ "template": "heat/files/grafana_dashboards/heat_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "cinder_prometheus": {
+ "datasource": "prometheus",
+ "template": "cinder/files/grafana_dashboards/cinder_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "neutron_prometheus": {
+ "datasource": "prometheus",
+ "template": "neutron/files/grafana_dashboards/neutron_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "glance_prometheus": {
+ "datasource": "prometheus",
+ "template": "glance/files/grafana_dashboards/glance_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "hypervisor_influxdb": {
+ "datasource": "influxdb",
+ "template": "nova/files/grafana_dashboards/hypervisor_influxdb.json",
+ "format": "json"
+ },
+ "nova_utilization_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_utilization_prometheus.json",
+ "format": "json"
+ },
+ "haproxy_prometheus": {
+ "datasource": "prometheus",
+ "template": "haproxy/files/grafana_dashboards/haproxy_prometheus.json",
+ "format": "json"
+ },
+ "glusterfs_prometheus": {
+ "datasource": "prometheus",
+ "template": "glusterfs/files/grafana_dashboards/glusterfs_prometheus.json",
+ "format": "json"
+ },
+ "apache_influxdb": {
+ "datasource": "influxdb",
+ "template": "apache/files/grafana_dashboards/apache_influxdb.json",
+ "format": "json"
+ },
+ "glusterfs_influxdb": {
+ "datasource": "influxdb",
+ "template": "glusterfs/files/grafana_dashboards/glusterfs_influxdb.json",
+ "format": "json"
+ },
+ "cinder_influxdb": {
+ "datasource": "influxdb",
+ "template": "cinder/files/grafana_dashboards/cinder_influxdb.json",
+ "format": "json"
+ },
+ "openstack_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/openstack_overview_prometheus.json",
+ "format": "json"
+ },
+ "linux_network_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_network_prometheus.json",
+ "format": "json"
+ },
+ "keystone_influxdb": {
+ "datasource": "influxdb",
+ "template": "keystone/files/grafana_dashboards/keystone_influxdb.json",
+ "format": "json"
+ },
+ "linux_disk_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_disk_prometheus.json",
+ "format": "json"
+ },
+ "service_level": {
+ "datasource": "influxdb",
+ "row": {
+ "neutron-service-level": {
+ "title": "Neutron Service Levels",
+ "panel": {
+ "neutron-control-uptime": {
+ "target": {
+ "neutron-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-control' AND value = 4 AND $timeFilter"
+ },
+ "neutron-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-control' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "neutron-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-control' AND value = 0 AND $timeFilter"
+ }
+ },
+ "title": "Neutron Control Uptime"
+ },
+ "neutron-api-requests": {
+ "target": {
+ "neutron-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_neutron_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ },
+ "neutron-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_neutron_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ },
+ "neutron-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_neutron_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ }
+ },
+ "title": "Neutron API Requests"
+ },
+ "neutron-data-uptime": {
+ "target": {
+ "neutron-data-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-data' AND value = 0 AND $timeFilter"
+ },
+ "neutron-data-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-data' AND value = 4 AND $timeFilter"
+ },
+ "neutron-data-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'neutron-data' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ }
+ },
+ "title": "Neutron Data Uptime"
+ },
+ "neutron-api-availability": {
+ "target": {
+ "neutron-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'neutron-api' AND value = 1 AND $timeFilter"
+ },
+ "neutron-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'neutron-api' AND value = 0 AND $timeFilter"
+ }
+ },
+ "title": "Neutron API Availability"
+ }
+ }
+ },
+ "heat-service-level": {
+ "title": "Heat Service Levels",
+ "panel": {
+ "heat-api-requests": {
+ "target": {
+ "heat-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_heat_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ },
+ "heat-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_heat_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ },
+ "heat-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_heat_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ }
+ },
+ "title": "Heat API Requests"
+ },
+ "heat-control-uptime": {
+ "target": {
+ "heat-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'heat' AND value = 4 AND $timeFilter"
+ },
+ "heat-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'heat' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "heat-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'heat' AND value = 0 AND $timeFilter"
+ }
+ },
+ "title": "Heat Control Uptime"
+ },
+ "heat-api-availability": {
+ "target": {
+ "heat-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'heat-api' OR service = 'heat-cfn-api') AND value = 0 AND $timeFilter"
+ },
+ "heat-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'heat-api' OR service = 'heat-cfn-api') AND value = 1 AND $timeFilter"
+ }
+ },
+ "title": "Heat API Availability (including cfn-api)"
+ }
+ }
+ },
+ "cinder-service-level": {
+ "title": "Cinder Service Levels",
+ "panel": {
+ "cinder-api-requests": {
+ "target": {
+ "cinder-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ },
+ "cinder-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ },
+ "cinder-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_cinder_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ }
+ },
+ "title": "Cinder API Requests"
+ },
+ "cinder-control-uptime": {
+ "target": {
+ "cinder-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "cinder-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND value = 0 AND $timeFilter"
+ },
+ "cinder-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-control' AND value = 4 AND $timeFilter"
+ }
+ },
+ "title": "Cinder Control Uptime"
+ },
+ "cinder-data-uptime": {
+ "target": {
+ "cinder-data-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND value = 0 AND $timeFilter"
+ },
+ "cinder-data-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND value = 4 AND $timeFilter"
+ },
+ "cinder-data-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'cinder-data' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ }
+ },
+ "title": "Cinder Data Uptime"
+ },
+ "cinder-api-availability": {
+ "target": {
+ "cinder-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'cinder-api' OR service = 'cinder-v2-api') AND value = 1 AND $timeFilter"
+ },
+ "cinder-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND (service = 'cinder-api' OR service = 'cinder-v2-api') AND value = 0 AND $timeFilter"
+ }
+ },
+ "title": "Cinder API Availability (V1 & V2)"
+ }
+ }
+ },
+ "keystone-service-level": {
+ "title": "Keystone Service Levels",
+ "panel": {
+ "keystone-control-uptime": {
+ "target": {
+ "keystone-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'keystone' AND value = 0 AND $timeFilter"
+ },
+ "keystone-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'keystone' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "keystone-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'keystone' AND value = 4 AND $timeFilter"
+ }
+ },
+ "title": "Keystone Control Uptime"
+ },
+ "keystone-api-availability": {
+ "target": {
+ "keystone-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'keystone-public-api' AND value = 0 AND $timeFilter"
+ },
+ "keystone-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'keystone-public-api' AND value = 1 AND $timeFilter"
+ }
+ },
+ "title": "Keystone API Availability"
+ }
+ }
+ },
+ "nova-service-level": {
+ "title": "Nova Service Levels",
+ "panel": {
+ "nova-control-uptime": {
+ "target": {
+ "nova-control-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND value = 0 AND $timeFilter"
+ },
+ "nova-control-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND value = 4 AND $timeFilter"
+ },
+ "nova-control-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-control' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ }
+ },
+ "title": "Nova Control Uptime"
+ },
+ "nova-data-uptime": {
+ "target": {
+ "nova-data-healthy-status": {
+ "alias": "Healthy",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND value = 0 AND $timeFilter"
+ },
+ "nova-data-degraded-unknwon-status": {
+ "alias": "Degraded or Unknwon",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND (value = 1 OR value = 2 OR value = 3) AND $timeFilter"
+ },
+ "nova-data-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM cluster_status WHERE environment_label = '$environment' AND cluster_name = 'nova-data' AND value = 4 AND $timeFilter"
+ }
+ },
+ "title": "Nova Data Uptime"
+ },
+ "nova-api-availability": {
+ "target": {
+ "nova-api-down-status": {
+ "alias": "Down",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'nova-api' AND value = 0 AND $timeFilter"
+ },
+ "nova-api-up-status": {
+ "alias": "Up",
+ "rawQuery": true,
+ "query": "SELECT count(value) FROM openstack_check_api WHERE environment_label = '$environment' AND service = 'nova-api' AND value = 1 AND $timeFilter"
+ }
+ },
+ "title": "Nova API Availability"
+ },
+ "nova-api-requests": {
+ "target": {
+ "nova-api-okay-status": {
+ "alias": "Okay",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND (http_status = '2xx' OR http_status = '3xx') AND $timeFilter"
+ },
+ "nova-api-error-status": {
+ "alias": "Error",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND http_status = '4xx' AND $timeFilter"
+ },
+ "nova-api-fatal-status": {
+ "alias": "Fatal",
+ "rawQuery": true,
+ "query": "SELECT count(max) FROM openstack_nova_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter"
+ }
+ },
+ "title": "Nova API Requests"
+ }
+ }
+ }
+ }
+ },
+ "linux_influxdb": {
+ "datasource": "influxdb",
+ "template": "linux/files/grafana_dashboards/system_influxdb.json",
+ "format": "json"
+ },
+ "memcached_prometheus": {
+ "datasource": "prometheus",
+ "template": "memcached/files/grafana_dashboards/memcached_prometheus.json",
+ "format": "json"
+ },
+ "nova_instances_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_instances_prometheus.json",
+ "format": "json"
+ },
+ "apache_prometheus": {
+ "datasource": "prometheus",
+ "template": "apache/files/grafana_dashboards/apache_prometheus.json",
+ "format": "json"
+ },
+ "nova_influxdb": {
+ "datasource": "influxdb",
+ "template": "nova/files/grafana_dashboards/nova_influxdb.json",
+ "format": "json"
+ },
+ "main_prometheus": {
+ "datasource": "prometheus",
+ "row": {
+ "ost-middleware": {
+ "title": "Middleware",
+ "panel": {
+ "apache": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(apache_up) by (name)"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Apache",
+ "title": "Apache"
+ }
+ ],
+ "title": "Apache"
+ },
+ "haproxy": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(haproxy_up) by (name)"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "HAProxy",
+ "title": "HAProxy"
+ }
+ ],
+ "title": "HAProxy"
+ },
+ "glusterfs": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(glusterfs_up) by (name)"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "GlusterFS",
+ "title": "GlusterFS"
+ }
+ ],
+ "title": "GlusterFS"
+ },
+ "memcached": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(memcached_up) by (name)"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Memcached",
+ "title": "Memcached"
+ }
+ ],
+ "title": "Memcached"
+ }
+ }
+ },
+ "ost-control-plane": {
+ "title": "OpenStack Control Plane",
+ "panel": {
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=\"nova\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ },
+ "heat": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"heat.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Heat",
+ "title": "Heat"
+ }
+ ],
+ "title": "Heat"
+ },
+ "keystone": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"keystone.*public.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Keystone",
+ "title": "Keystone"
+ }
+ ],
+ "title": "Keystone"
+ },
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"cinder.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "glance": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"glance.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Glance",
+ "title": "Glance"
+ }
+ ],
+ "title": "Glance"
+ },
+ "neutron": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(openstack_api_check_status{service=~\"neutron.*\"})"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Neutron",
+ "title": "Neutron"
+ }
+ ],
+ "title": "Neutron"
+ }
+ }
+ }
+ }
+ },
+ "haproxy_influxdb": {
+ "datasource": "influxdb",
+ "template": "haproxy/files/grafana_dashboards/haproxy_influxdb.json",
+ "format": "json"
+ },
+ "heat_influxdb": {
+ "datasource": "influxdb",
+ "template": "heat/files/grafana_dashboards/heat_influxdb.json",
+ "format": "json"
+ },
+ "keepalived_prometheus": {
+ "datasource": "prometheus",
+ "template": "keepalived/files/grafana_dashboards/keepalived_prometheus.json",
+ "format": "json"
+ },
+ "nova_hypervisor_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_hypervisor_prometheus.json",
+ "format": "json"
+ },
+ "main_influxdb": {
+ "datasource": "influxdb",
+ "row": {
+ "ost-data-plane": {
+ "title": "OpenStack Data Plane",
+ "panel": {
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'cinder-data' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'nova-data' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ }
+ }
+ },
+ "ost-middleware": {
+ "title": "Middleware",
+ "panel": {
+ "apache": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'apache' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Apache",
+ "title": "Apache"
+ }
+ ],
+ "title": "Apache"
+ },
+ "haproxy": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'haproxy-openstack' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "HAProxy",
+ "title": "HAProxy"
+ }
+ ],
+ "title": "HAProxy"
+ },
+ "memcached": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'memcached' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Memcached",
+ "title": "Memcached"
+ }
+ ],
+ "title": "Memcached"
+ }
+ }
+ },
+ "ost-control-plane": {
+ "title": "OpenStack Control Plane",
+ "panel": {
+ "nova": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'nova-control' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Nova",
+ "title": "Nova"
+ }
+ ],
+ "title": "Nova"
+ },
+ "heat": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'heat' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Heat",
+ "title": "Heat"
+ }
+ ],
+ "title": "Heat"
+ },
+ "keystone": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'keystone' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Keystone",
+ "title": "Keystone"
+ }
+ ],
+ "title": "Keystone"
+ },
+ "cinder": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'cinder-control' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Cinder",
+ "title": "Cinder"
+ }
+ ],
+ "title": "Cinder"
+ },
+ "glance": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'glance' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Glance",
+ "title": "Glance"
+ }
+ ],
+ "title": "Glance"
+ },
+ "neutron": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'neutron-control' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Neutron",
+ "title": "Neutron"
+ }
+ ],
+ "title": "Neutron"
+ }
+ }
+ }
+ }
+ },
+ "keystone_prometheus": {
+ "datasource": "prometheus",
+ "template": "keystone/files/grafana_dashboards/keystone_prometheus_fluentd.json",
+ "format": "json"
+ },
+ "glance_influxdb": {
+ "datasource": "influxdb",
+ "template": "glance/files/grafana_dashboards/glance_influxdb.json",
+ "format": "json"
+ },
+ "nova_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "nova/files/grafana_dashboards/nova_overview_prometheus.json",
+ "format": "json"
+ },
+ "memcached_influxdb": {
+ "datasource": "influxdb",
+ "template": "memcached/files/grafana_dashboards/memcached_influxdb.json",
+ "format": "json"
+ },
+ "linux_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_overview_prometheus.json",
+ "format": "json"
+ }
+ },
+ "parameters": null
+ },
+ "ssh_fingerprints": {
+ "rsa": "90:35:c6:81:d9:59:58:86:4a:ba:a0:f4:dd:cc:c6:e3",
+ "ecdsa": "c1:30:3a:40:42:a8:cc:40:a7:3b:9a:5b:35:d5:4e:f1",
+ "dsa": "a2:7a:52:8c:8b:4f:ee:68:99:69:4a:ae:a0:c9:e8:e0"
+ },
+ "gid": 0,
+ "master": "10.10.0.15",
+ "ipv4": [
+ "10.10.100.7",
+ "10.11.0.11",
+ "10.12.100.14",
+ "10.13.100.22",
+ "127.0.0.1"
+ ],
+ "dns": {
+ "domain": "",
+ "sortlist": [],
+ "nameservers": [
+ "172.18.176.6"
+ ],
+ "ip4_nameservers": [
+ "172.18.176.6"
+ ],
+ "search": [
+ "openstacklocal"
+ ],
+ "ip6_nameservers": [],
+ "options": []
+ },
+ "ipv6": [
+ "::1",
+ "fe80::f816:3eff:fe06:f3b",
+ "fe80::f816:3eff:fe0c:1532",
+ "fe80::f816:3eff:fe4e:f3c8",
+ "fe80::f816:3eff:fedb:db4f"
+ ],
+ "cinder_policy": {
+ "volume:create_volume_metadata": "rule:admin_or_owner",
+ "volume:delete_snapshot_metadata": "rule:admin_or_owner",
+ "volume:failover_host": "rule:admin_api",
+ "volume_extension:capabilities": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
+ "backup:delete": "rule:admin_or_owner",
+ "volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
+ "clusters:get": "rule:admin_api",
+ "message:get": "rule:admin_or_owner",
+ "volume_extension:quotas:update": "rule:admin_api",
+ "snapshot_extension:snapshot_actions:update_snapshot_status": "",
+ "scheduler_extension:scheduler_stats:get_pools": "rule:admin_api",
+ "backup:restore": "rule:admin_or_owner",
+ "volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
+ "volume_extension:services:update": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "group:create_group_snapshot": "",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+ "volume:update_readonly_flag": "rule:admin_or_owner",
+ "backup:backup-export": "rule:admin_api",
+ "group:group_types_manage": "rule:admin_api",
+ "volume:create_from_image": "",
+ "backup:backup-import": "rule:admin_api",
+ "backup:get_all": "rule:admin_or_owner",
+ "group:delete": "rule:admin_or_owner",
+ "volume_extension:types_manage": "rule:admin_api",
+ "group:get_all_group_snapshots": "rule:admin_or_owner",
+ "volume:update_volume_metadata": "rule:admin_or_owner",
+ "volume:accept_transfer": "",
+ "default": "rule:admin_or_owner",
+ "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
+ "volume:force_delete": "rule:admin_api",
+ "volume_extension:volume_mig_status_attribute": "rule:admin_api",
+ "admin_api": "is_admin:True or (role:admin and is_admin_project:True)",
+ "consistencygroup:get_all": "group:nobody",
+ "volume_extension:quota_classes": "rule:admin_api",
+ "backup:create": "",
+ "volume:get_all": "rule:admin_or_owner",
+ "snapshot_extension:snapshot_unmanage": "rule:admin_api",
+ "volume_extension:volume_image_metadata": "rule:admin_or_owner",
+ "volume:update_snapshot_metadata": "rule:admin_or_owner",
+ "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api",
+ "group:create": "",
+ "volume:extend": "rule:admin_or_owner",
+ "volume:get_snapshot_metadata": "rule:admin_or_owner",
+ "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
+ "consistencygroup:create": "group:nobody",
+ "workers:cleanup": "rule:admin_api",
+ "volume:get_transfer": "rule:admin_or_owner",
+ "group:reset_status": "rule:admin_api",
+ "group:get": "rule:admin_or_owner",
+ "group:update": "rule:admin_or_owner",
+ "volume_extension:volume_manage": "rule:admin_api",
+ "volume:get_snapshot": "rule:admin_or_owner",
+ "volume:create": "",
+ "volume:update_snapshot": "rule:admin_or_owner",
+ "volume_extension:quotas:show": "",
+ "volume_extension:hosts": "rule:admin_api",
+ "group:update_group_snapshot": "rule:admin_or_owner",
+ "volume_extension:volume_type_access": "rule:admin_or_owner",
+ "volume:get_all_snapshots": "rule:admin_or_owner",
+ "group:get_group_snapshot": "rule:admin_or_owner",
+ "volume:get_all_transfers": "rule:admin_or_owner",
+ "volume:freeze_host": "rule:admin_api",
+ "consistencygroup:get": "group:nobody",
+ "consistencygroup:create_cgsnapshot": "group:nobody",
+ "volume_extension:types_extra_specs": "rule:admin_api",
+ "volume:get": "rule:admin_or_owner",
+ "volume:get_volume_metadata": "rule:admin_or_owner",
+ "volume_extension:backup_admin_actions:force_delete": "rule:admin_api",
+ "consistencygroup:update": "group:nobody",
+ "group:access_group_types_specs": "rule:admin_api",
+ "volume_extension:volume_unmanage": "rule:admin_api",
+ "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
+ "group:get_all": "rule:admin_or_owner",
+ "group:reset_group_snapshot_status": "rule:admin_api",
+ "volume:update_volume_admin_metadata": "rule:admin_api",
+ "clusters:update": "rule:admin_api",
+ "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s",
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_actions:upload_image": "rule:admin_or_owner",
+ "volume_extension:volume_actions:upload_public": "rule:admin_api",
+ "volume_extension:list_manageable": "rule:admin_api",
+ "volume_extension:extended_snapshot_attributes": "rule:admin_or_owner",
+ "volume:delete_transfer": "rule:admin_or_owner",
+ "volume:create_snapshot": "rule:admin_or_owner",
+ "snapshot_extension:list_manageable": "rule:admin_api",
+ "volume_extension:quotas:delete": "rule:admin_api",
+ "consistencygroup:delete": "group:nobody",
+ "consistencygroup:delete_cgsnapshot": "group:nobody",
+ "volume_extension:volume_host_attribute": "rule:admin_api",
+ "volume:delete_volume_metadata": "rule:admin_or_owner",
+ "backup:get": "rule:admin_or_owner",
+ "backup:backup_project_attribute": "rule:admin_api",
+ "volume:create_transfer": "rule:admin_or_owner",
+ "volume:retype": "rule:admin_or_owner",
+ "clusters:get_all": "rule:admin_api",
+ "volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api",
+ "backup:update": "rule:admin_or_owner",
+ "volume:delete_snapshot": "rule:admin_or_owner",
+ "snapshot_extension:snapshot_manage": "rule:admin_api",
+ "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
+ "volume_extension:services:index": "rule:admin_api",
+ "volume:delete": "rule:admin_or_owner",
+ "volume_extension:access_types_extra_specs": "rule:admin_api",
+ "volume_extension:volume_type_encryption": "rule:admin_api",
+ "consistencygroup:get_all_cgsnapshots": "group:nobody",
+ "group:group_type_access": "rule:admin_or_owner",
+ "message:delete": "rule:admin_or_owner",
+ "message:get_all": "rule:admin_or_owner",
+ "group:group_types_specs": "rule:admin_api",
+ "volume_extension:access_types_qos_specs_id": "rule:admin_api",
+ "volume:update": "rule:admin_or_owner",
+ "consistencygroup:get_cgsnapshot": "group:nobody",
+ "group:delete_group_snapshot": "rule:admin_or_owner",
+ "volume:get_volume_admin_metadata": "rule:admin_api",
+ "volume_extension:volume_type_access:addProjectAccess": "rule:admin_api",
+ "volume:thaw_host": "rule:admin_api"
+ },
+ "server_id": 82054315,
+ "cpu_flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "vmx",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "cpuid_fault",
+ "invpcid_single",
+ "pti",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "hle",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "rtm",
+ "rdseed",
+ "adx",
+ "smap",
+ "xsaveopt",
+ "arat"
+ ],
+ "osfullname": "Ubuntu",
+ "localhost": "ctl01",
+ "lsb_distrib_id": "Ubuntu",
+ "username": "root",
+ "fqdn_ip4": [
+ "10.11.0.11"
+ ],
+ "shell": "/bin/sh",
+ "nodename": "ctl01",
+ "saltversion": "2017.7.8",
+ "lsb_distrib_release": "16.04",
+ "pid": 1775,
+ "saltpath": "/usr/lib/python2.7/dist-packages/salt",
+ "pythonversion": [
+ 2,
+ 7,
+ 12,
+ "final",
+ 0
+ ],
+ "host": "ctl01",
+ "os_family": "Debian",
+ "oscodename": "xenial",
+ "services": [
+ "fluentd",
+ "telegraf",
+ "glusterfs",
+ "rsyslog",
+ "linux",
+ "glance",
+ "keepalived",
+ "nova",
+ "grafana",
+ "keystone",
+ "memcached",
+ "openscap",
+ "neutron",
+ "ntp",
+ "nginx",
+ "heat",
+ "apache",
+ "haproxy",
+ "openssh",
+ "opencontrail",
+ "logrotate",
+ "_reclass_",
+ "prometheus",
+ "cinder",
+ "salt"
+ ],
+ "osfinger": "Ubuntu-16.04",
+ "biosreleasedate": "04/01/2014",
+ "dns_records": [
+ {
+ "names": [
+ "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "ctl01"
+ ],
+ "address": "10.11.0.11"
+ }
+ ],
+ "lsb_distrib_description": "Ubuntu 16.04.5 LTS",
+ "sphinx": {
+ "doc": {
+ "haproxy": {
+ "role": {
+ "proxy": {
+ "endpoint": {
+ "glance_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "glance_api",
+ "address": "http://10.11.0.10:9292/"
+ },
+ "nova_metadata_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "nova_metadata_api",
+ "address": "http://10.11.0.10:8775/"
+ },
+ "heat_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "heat_api",
+ "address": "http://10.11.0.10:8004/"
+ },
+ "cinder_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "cinder_api",
+ "address": "http://10.11.0.10:8776/"
+ },
+ "heat_cloudwatch_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "heat_cloudwatch_api",
+ "address": "http://10.11.0.10:8003/"
+ },
+ "heat_cfn_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "heat_cfn_api",
+ "address": "http://10.11.0.10:8000/"
+ },
+ "nova_novnc_general-service": {
+ "type": "general-service",
+ "protocol": "http",
+ "name": "nova_novnc",
+ "address": "http://10.11.0.10:6080/"
+ },
+ "placement_api_http": {
+ "type": "http",
+ "protocol": "http",
+ "mode": "http",
+ "name": "placement_api",
+ "address": "http://10.11.0.10:8778/"
+ },
+ "keystone_public_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "keystone_public_api",
+ "address": "http://10.11.0.10:5000/"
+ },
+ "keystone_admin_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "keystone_admin_api",
+ "address": "http://10.11.0.10:35357/"
+ },
+ "glance_registry_api_general-service": {
+ "type": "general-service",
+ "protocol": "http",
+ "name": "glance_registry_api",
+ "address": "http://10.11.0.10:9191/"
+ },
+ "neutron_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "neutron_api",
+ "address": "http://10.11.0.10:9696/"
+ },
+ "nova_api_openstack-service": {
+ "type": "openstack-service",
+ "protocol": "http",
+ "name": "nova_api",
+ "address": "http://10.11.0.10:8774/"
+ }
+ },
+ "name": "proxy",
+ "param": {
+ "version": {
+ "value": "1.6.3 2015/12/25 Copyright 2000-2015 Willy Tarreau <willy@haproxy.org>"
+ }
+ }
+ }
+ },
+ "description": "The Reliable, High Performance TCP/HTTP Load Balancer.",
+ "name": "HAProxy"
+ },
+ "keepalived": {
+ "role": {
+ "cluster": {
+ "name": "cluster",
+ "param": {
+ "router_VIP": {
+ "name": "Instance VIP",
+ "value": "10.11.0.10 at ens3, priority 103 of router 50"
+ }
+ }
+ }
+ },
+ "description": "The main goal of this project is to provide simple and robust facilities for loadbalancing and high-availability to Linux system and Linux based infrastructures.",
+ "name": "Keepalived"
+ },
+ "heat": {
+ "role": {
+ "server": {
+ "endpoint": {
+ "heat_api_cfn": {
+ "type": "heat-api-cfn",
+ "protocol": "http",
+ "name": "heat-api-cfn",
+ "address": "http://10.11.0.11:8003"
+ },
+ "heat_api_cloudwatch": {
+ "type": "heat-api-cloudwatch",
+ "protocol": "http",
+ "name": "heat-api-cloudwatch",
+ "address": "http://10.11.0.11:8000"
+ },
+ "heat_api": {
+ "type": "heat-api",
+ "protocol": "http",
+ "name": "heat-api",
+ "address": "http://10.11.0.11:8004"
+ }
+ },
+ "name": "server",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "bind": {
+ "value": "10.11.0.11:8004"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "identity_host": {
+ "name": "Identity service",
+ "value": "heat@10.11.0.10:35357"
+ },
+ "packages": {
+ "value": "* heat-api: 1:10.0.3-1.0~u16.04+mcp59\n* heat-api-cfn: 1:10.0.3-1.0~u16.04+mcp59\n* heat-engine: 1:10.0.3-1.0~u16.04+mcp59\n* heat-common: 1:10.0.3-1.0~u16.04+mcp59\n* python-heatclient: 1.14.0-1.0~u16.04+mcp6\n* gettext-base: 0.19.7-2ubuntu3.1"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "heat@10.11.0.50:3306//heat"
+ }
+ }
+ }
+ },
+ "description": "Heat implements an orchestration engine to launch multiple composite cloud applications based on templates in the form of text files that can be treated like code.",
+ "name": "Heat"
+ },
+ "nova": {
+ "role": {
+ "controller": {
+ "endpoint": {
+ "nova_ec2_api": {
+ "type": "nova-ec2-api",
+ "protocol": "http",
+ "name": "nova-ec2-api",
+ "address": "http://10.11.0.11:8773"
+ },
+ "nova_api": {
+ "type": "nova-api",
+ "protocol": "http",
+ "name": "nova-api",
+ "address": "http://10.11.0.11:8774"
+ },
+ "nova_metadata": {
+ "type": "nova-metadata",
+ "protocol": "http",
+ "name": "nova-metadata",
+ "address": "http://10.11.0.11:8775"
+ }
+ },
+ "name": "controller",
+ "param": {
+ "network_host": {
+ "name": "Network service",
+ "value": "10.11.0.10:9696"
+ },
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "networking": {
+ "name": "Networking plugin",
+ "value": "default"
+ },
+ "identity_host": {
+ "name": "Identity host ip",
+ "value": "nova@10.11.0.10:35357"
+ },
+ "dhcp_domain": {
+ "name": "DHCP domain",
+ "value": "novalocal"
+ },
+ "bind": {
+ "value": "10.11.0.11"
+ },
+ "workers": {
+ "name": "Number of osapi and conductor workers",
+ "value": 8
+ },
+ "vncproxy_url": {
+ "name": "VNC proxy URL",
+ "value": "https://10.13.250.9:6080"
+ },
+ "ram_allocation_ratio": {
+ "name": "RAM allocation ratio",
+ "value": 1.5
+ },
+ "glance_host": {
+ "name": "Glance service",
+ "value": "10.11.0.10:9292"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "scheduler_default_filters": {
+ "name": "Scheduler default filters",
+ "value": "DifferentHostFilter SameHostFilter RetryFilter AvailabilityZoneFilter RamFilter CoreFilter DiskFilter ComputeFilter ComputeCapabilitiesFilter ImagePropertiesFilter ServerGroupAntiAffinityFilter ServerGroupAffinityFilter PciPassthroughFilter NUMATopologyFilter AggregateInstanceExtraSpecsFilter"
+ },
+ "disk_allocation_ratio": {
+ "name": "Disk allocation ratio",
+ "value": 1.0
+ },
+ "cpu_allocation_ratio": {
+ "name": "CPU allocation ratio",
+ "value": 16.0
+ },
+ "packages": {
+ "value": "* nova-common: 2:17.0.10-7~u16.01+mcp188\n* nova-consoleproxy: 2:17.0.10-7~u16.01+mcp188\n* novnc: 1:0.6.1-1.1~u16.04+mcp3\n* nova-api: 2:17.0.10-7~u16.01+mcp188\n* nova-conductor: 2:17.0.10-7~u16.01+mcp188\n* nova-consoleauth: 2:17.0.10-7~u16.01+mcp188\n* nova-doc: 2:17.0.9-6~u16.01+mcp189\n* nova-scheduler: 2:17.0.10-7~u16.01+mcp188\n* python-novaclient: 2:9.1.1-1~u16.04+mcp6\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-pycadf: 2.7.0-1~u16.04+mcp3"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "nova@10.11.0.50:3306/nova"
+ }
+ }
+ }
+ },
+ "description": "OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more.",
+ "name": "Nova"
+ },
+ "glusterfs": {
+ "role": {
+ "client": {
+ "name": "client",
+ "param": {
+ "mounts": {
+ "value": {
+ "glance": "/var/lib/glance/images"
+ }
+ }
+ }
+ }
+ },
+ "description": "An open source, distributed file system capable of scaling to several petabytes and handling thousands of clients.",
+ "name": "GlusterFS"
+ },
+ "keystone": {
+ "role": {
+ "client": {
+ "name": "client",
+ "param": {}
+ },
+ "server": {
+ "endpoint": {
+ "keystone_api_admin": {
+ "type": "keystone-api-admin",
+ "protocol": "http",
+ "name": "keystone-api-admin",
+ "address": "http://10.11.0.11:35357"
+ },
+ "keystone_api_public": {
+ "type": "keystone-api-public",
+ "protocol": "http",
+ "name": "keystone-api-public",
+ "address": "http://10.11.0.11:5000"
+ }
+ },
+ "name": "server",
+ "param": {
+ "service_tenant": {
+ "value": "service"
+ },
+ "token_engine": {
+ "value": "fernet"
+ },
+ "bind": {
+ "value": "10.11.0.11:5000"
+ },
+ "region": {
+ "name": "Region",
+ "value": "RegionOne"
+ },
+ "private_bind": {
+ "value": "10.11.0.11:35357"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "services": {
+ "value": ""
+ },
+ "packages": {
+ "value": "* keystone: 2:13.0.2-3~u16.04+mcp19\n* python-keystone: 2:13.0.2-3~u16.04+mcp19\n* python-keystoneclient: 1:3.15.0-1.0~u16.04+mcp12\n* python-psycopg2: 2.7.4-1.0~u16.04+mcp1\n* python-mysqldb: 1.3.7-1build2\n* mysql-client: 5.6.41-1~u16.04+mcp1\n* python-six: 1.10.0-3\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-openstackclient: 3.14.3-1.0~u16.04+mcp18\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-pycadf: 2.7.0-1~u16.04+mcp3"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "keystone@10.11.0.50:3306/keystone"
+ }
+ }
+ }
+ },
+ "description": "Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family.",
+ "name": "Keystone"
+ },
+ "linux": {
+ "role": {
+ "network": {
+ "name": "Network",
+ "param": {
+ "ip": {
+ "name": "IP Addresses",
+ "value": [
+ "10.10.100.7",
+ "10.11.0.10",
+ "10.11.0.11",
+ "10.12.100.14",
+ "10.13.100.22",
+ "127.0.0.1"
+ ]
+ },
+ "fqdn": {
+ "name": "FQDN",
+ "value": "ctl01.ozhurba-os-oc-cicd-sl.local"
+ }
+ }
+ },
+ "system": {
+ "name": "System",
+ "param": {
+ "kernel": {
+ "value": "Linux 4.15.0-43-generic"
+ },
+ "distribution": {
+ "value": "Ubuntu 16.04.5 LTS"
+ },
+ "name": {
+ "value": "ctl01"
+ }
+ }
+ }
+ },
+ "description": "Linux is a high performance, yet completely free, Unix-like operating system that is suitable for use on a wide range of computers and other products.",
+ "name": "Linux"
+ },
+ "apache": {
+ "role": {
+ "server": {
+ "endpoint": {
+ "keystone_keystone": {
+ "type": "keystone",
+ "protocol": "http",
+ "name": "keystone",
+ "address": "http://ctl01.ozhurba-os-oc-cicd-sl.local:80/"
+ }
+ },
+ "name": "server",
+ "param": {
+ "version": {
+ "value": "2.4.18 (Ubuntu)"
+ }
+ }
+ }
+ },
+ "description": "The Apache HTTP Server Project is a collaborative software development effort aimed at creating a robust, commercial-grade, featureful, and freely-available source code implementation of an HTTP (Web) server",
+ "name": "Apache"
+ },
+ "cinder": {
+ "role": {
+ "volume": {
+ "name": "volume",
+ "param": {
+ "backends": {
+ "value": ""
+ },
+ "packages": {
+ "value": "* cinder-volume: dpkg-query: no packages found matching cinder-volume\n* lvm2: 2.02.133-1ubuntu10\n* sysfsutils: 2.1.0+repack-4\n* sg3-utils: dpkg-query: no packages found matching sg3-utils\n* python-cinder: 2:12.0.7-2~u16.04+mcp100\n* python-mysqldb: 1.3.7-1build2\n* p7zip: dpkg-query: no packages found matching p7zip\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-pycadf: 2.7.0-1~u16.04+mcp3\n"
+ }
+ }
+ },
+ "controller": {
+ "endpoint": {
+ "glance_api": {
+ "type": "cinder-api",
+ "protocol": "http",
+ "name": "cinder-api",
+ "address": "http://10.11.0.11:8776"
+ }
+ },
+ "name": "controller",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "bind": {
+ "value": "10.11.0.11:8776"
+ },
+ "glance_host": {
+ "name": "Glance service",
+ "value": "10.11.0.10:9292"
+ },
+ "backends": {
+ "value": ""
+ },
+ "identity_host": {
+ "name": "Identity service",
+ "value": "cinder@10.11.0.10:35357"
+ },
+ "packages": {
+ "value": "* cinder-api: dpkg-query: no packages found matching cinder-api\n* cinder-scheduler: 2:12.0.7-2~u16.04+mcp100\n* lvm2: 2.02.133-1ubuntu10\n* python-cinder: 2:12.0.7-2~u16.04+mcp100\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-pycadf: 2.7.0-1~u16.04+mcp3\n"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "cinder@10.11.0.50:3306//cinder"
+ }
+ }
+ }
+ },
+ "description": "Cinder provides an infrastructure for managing volumes in OpenStack. It was originally a Nova component called nova-volume, but has become an independent project since the Folsom release.",
+ "name": "Cinder"
+ },
+ "glance": {
+ "role": {
+ "server": {
+ "endpoint": {
+ "glance_registry": {
+ "type": "glance-registry",
+ "protocol": "http",
+ "name": "glance-registry",
+ "address": "http://10.11.0.10:9191"
+ },
+ "glance_api": {
+ "type": "glance-api",
+ "protocol": "http",
+ "name": "glance-api",
+ "address": "http://10.11.0.11:9292"
+ }
+ },
+ "name": "server",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "bind": {
+ "value": "10.11.0.11:9292"
+ },
+ "workers": {
+ "name": "Number of workers",
+ "value": 8
+ },
+ "storage_engine": {
+ "name": "Glance storage engine",
+ "value": "file"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "identity_host": {
+ "name": "Identity service",
+ "value": "glance@10.11.0.10:35357"
+ },
+ "packages": {
+ "value": "* glance: 2:16.0.1-2~u16.04+mcp23\n* glance-api: 2:16.0.1-2~u16.04+mcp23\n* glance-registry: 2:16.0.1-2~u16.04+mcp23\n* glance-common: 2:16.0.1-2~u16.04+mcp23\n* python-glance: 2:16.0.1-2~u16.04+mcp23\n* python-glance-store: 0.23.0-2~u16.04+mcp6\n* python-glanceclient: 1:2.10.1-1.0~u16.04+mcp4\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-memcache: 1.57+fixed-1~u16.04+mcp1\n* python-pycadf: 2.7.0-1~u16.04+mcp3"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "glance@10.11.0.50:3306//glance"
+ }
+ }
+ }
+ },
+ "description": "The Glance project provides services for discovering, registering, and retrieving virtual machine images.",
+ "name": "Glance"
+ },
+ "salt": {
+ "role": {
+ "minion": {
+ "name": "minion",
+ "param": {
+ "version": {
+ "value": "2017.7.8 (Nitrogen)"
+ }
+ }
+ }
+ },
+ "description": "Salt is a new approach to infrastructure management. Easy enough to get running in minutes, scalable enough to manage tens of thousands of servers, and fast enough to communicate with them in seconds.",
+ "name": "Salt"
+ },
+ "neutron": {
+ "role": {
+ "server": {
+ "endpoint": {
+ "neutron_server": {
+ "type": "neutron-server",
+ "protocol": "http",
+ "name": "neutron-server",
+ "address": "http://10.11.0.11:9696"
+ }
+ },
+ "name": "server",
+ "param": {
+ "message_queue_ip": {
+ "name": "Message queue",
+ "value": "openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.41:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.42:5672,openstack:vsCFaby8sPgbSu4YdtkaL912mkWAJw9b@10.11.0.43:5672//openstack"
+ },
+ "plugin": {
+ "value": "contrail"
+ },
+ "bind": {
+ "value": "10.11.0.11:9696"
+ },
+ "version": {
+ "name": "Version",
+ "value": "queens"
+ },
+ "identity_host": {
+ "name": "Identity service",
+ "value": "neutron@10.11.0.10:35357"
+ },
+ "compute_host": {
+ "name": "Compute service",
+ "value": "nova@10.11.0.10"
+ },
+ "packages": {
+ "value": "* neutron-server: 2:12.0.5-5~u16.04+mcp155\n* python-neutron-lbaas: 2:12.0.0-2~u16.04+mcp34\n* gettext-base: 0.19.7-2ubuntu3.1\n* python-pycadf: 2.7.0-1~u16.04+mcp3"
+ },
+ "database_host": {
+ "name": "Database",
+ "value": "neutron@10.11.0.50:3306//neutron"
+ }
+ }
+ }
+ },
+ "description": "Neutron is an OpenStack project to provide networking as a service between interface devices managed by other Openstack services.",
+ "name": "Neutron"
+ }
+ }
+ },
+ "num_gpus": 1,
+ "roles": [
+ "fluentd.agent",
+ "telegraf.agent",
+ "glusterfs.client",
+ "rsyslog.client",
+ "linux.storage",
+ "linux.system",
+ "linux.network",
+ "glance.client",
+ "glance.server",
+ "keepalived.cluster",
+ "nova.controller",
+ "grafana.collector",
+ "keystone.client",
+ "keystone.server",
+ "memcached.server",
+ "openscap.service",
+ "neutron.client",
+ "neutron.server",
+ "ntp.client",
+ "nginx.server",
+ "heat.server",
+ "apache.server",
+ "haproxy.proxy",
+ "openssh.server",
+ "opencontrail.client",
+ "logrotate.server",
+ "prometheus.collector",
+ "cinder.controller",
+ "salt.minion"
+ ],
+ "virtual": "kvm",
+ "os": "Ubuntu",
+ "disks": [
+ "loop1",
+ "dm-1",
+ "loop6",
+ "vdb",
+ "loop4",
+ "dm-4",
+ "loop2",
+ "dm-2",
+ "loop0",
+ "dm-0",
+ "loop7",
+ "loop5",
+ "dm-5",
+ "vda",
+ "loop3",
+ "dm-3"
+ ],
+ "cpu_model": "Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz",
+ "osmajorrelease": 16,
+ "pythonexecutable": "/usr/bin/python",
+ "productname": "OpenStack Nova",
+ "osarch": "amd64",
+ "cpuarch": "x86_64",
+ "lsb_distrib_codename": "xenial",
+ "osrelease_info": [
+ 16,
+ 4
+ ],
+ "locale_info": {
+ "detectedencoding": "UTF-8",
+ "defaultlanguage": "en_US",
+ "defaultencoding": "UTF-8"
+ },
+ "gpus": [
+ {
+ "model": "GD 5446",
+ "vendor": "unknown"
+ }
+ ],
+ "prometheus": {
+ "server": {
+ "recording": {},
+ "target": {
+ "static": {
+ "fluentd": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.0.11:24231",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "ctl01"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 24231,
+ "address": "10.11.0.11"
+ }
+ ]
+ },
+ "telegraf": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.0.11:9126",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "ctl01"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 9126,
+ "address": "10.11.0.11"
+ }
+ ]
+ }
+ },
+ "dns": {
+ "endpoint": [
+ {
+ "domain": [
+ "tasks.monitoring_remote_agent"
+ ],
+ "type": "A",
+ "name": "remote_agent_openstack",
+ "port": "9127"
+ }
+ ],
+ "enabled": true
+ }
+ },
+ "alert": {
+ "GlareApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "Glare API is not accessible for the Glare endpoint in the OpenStack service catalog.",
+ "summary": "Glare API outage"
+ },
+ "if": "openstack_api_check_status{name=\"glare\"} == 0"
+ },
+ "ApacheWorkersAbsent": {
+ "labels": {
+ "severity": "minor",
+ "service": "apache"
+ },
+ "annotations": {
+ "description": "The Apache service on the {{ $labels.host }} node has no available workers for 2 minutes.",
+ "summary": "Apache has no available idle workers"
+ },
+ "for": "2m",
+ "if": "apache_IdleWorkers == 0"
+ },
+ "KeepalivedProcessNotResponsive": {
+ "labels": {
+ "severity": "major",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "The Keepalived process on the {{ $labels.host }} node is not responding.",
+ "summary": "Keepalived process is not responding"
+ },
+ "if": "keepalived_up == 0"
+ },
+ "NetdevBudgetRanOutsWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The rate of net_rx_action loops terminations on the {{ $labels.host }} node is {{ $value }} per second during the last 7 minutes. Modify the net.core.netdev_budget and net.core.netdev_budget_usecs kernel parameters.",
+ "summary": "CPU terminated 0.1 net_rx_action loops per second"
+ },
+ "for": "7m",
+ "if": "max(rate(nstat_time_squeeze[5m])) without (cpu) > 0.1"
+ },
+ "HaproxyHTTPResponse5xxTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "The average per-second rate of 5xx HTTP errors on the {{ $labels.host }} node for the {{ $labels.proxy }} back end is {{ $value }} (as measured over the last 2 minutes).",
+ "summary": "HTTP 5xx responses on the {{ $labels.proxy }} back end"
+ },
+ "if": "rate(haproxy_http_response_5xx{sv=\"FRONTEND\"}[2m]) > 1"
+ },
+ "NovaServicesDownMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "More than 30% {{ $labels.binary }} services are down.",
+ "summary": "30% of {{ $labels.binary }} services are down"
+ },
+ "if": "count(openstack_nova_service_state{binary!~\"nova-compute\"} == 0) by (binary) >= on (binary) count(openstack_nova_service_state{binary!~\"nova-compute\"}) by (binary) * 0.3"
+ },
+ "HeatApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "All available {{ $labels.name }} endpoints are not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"heat.*-api\"} == 0) by (name) == count(http_response_status{name=~\"heat.*-api\"}) by (name)"
+ },
+ "NovaServiceDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "The {{ $labels.binary }} service on the {{ $labels.hostname }} node is down.",
+ "summary": "{{ $labels.binary }} service is down"
+ },
+ "if": "openstack_nova_service_state == 0"
+ },
+ "GlanceApiEndpointsDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.name }} endpoints (>= 50.0%) are not accessible for 2 minutes.",
+ "summary": "50.0% of {{ $labels.name }} endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"glance.*\"} == 0) by (name) >= count(http_response_status{name=~\"glance.*\"}) by (name) * 0.5"
+ },
+ "SystemDiskFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 85.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 85.0"
+ },
+ "SystemMemoryFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "90.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 90.0"
+ },
+ "HaproxyServiceDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "The HAProxy service on the {{ $labels.host }} node is down.",
+ "summary": "HAProxy service is down"
+ },
+ "if": "haproxy_up != 1"
+ },
+ "ApacheServiceDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "apache"
+ },
+ "annotations": {
+ "description": "The Apache service on the {{ $labels.host }} node is down.",
+ "summary": "Apache service is down"
+ },
+ "if": "apache_up != 1"
+ },
+ "HaproxyBackendDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "The {{ $labels.proxy }} back end on the {{ $labels.host }} node is down.",
+ "summary": "{{ $labels.proxy }} back end is down"
+ },
+ "if": "increase(haproxy_chkdown{sv=\"BACKEND\"}[1m]) > 0"
+ },
+ "CinderApiEndpointsDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "{{ $value }} cinder-api endpoints (>= 50.0%) are not accessible for 2 minutes.",
+ "summary": "50.0% of cinder-api endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"cinder-api\"} == 0) >= count(http_response_status{name=~\"cinder-api\"}) * 0.5"
+ },
+ "HeatApiEndpointsDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.name }} endpoints (>= 50.0%) are not accessible for 2 minutes.",
+ "summary": "50.0% of {{ $labels.name }} endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"heat.*-api\"} == 0) by (name) >= count(http_response_status{name=~\"heat.*-api\"}) by (name) * 0.5"
+ },
+ "CinderApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "All available cinder-api endpoints are not accessible for 2 minutes.",
+ "summary": "Cinder-api endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"cinder-api\"} == 0) == count(http_response_status{name=~\"cinder-api\"})"
+ },
+ "HaproxyServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "All HAProxy services within the {{ $labels.cluster }} are down.",
+ "summary": "HAProxy service outage"
+ },
+ "if": "count(label_replace(haproxy_up, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\") != 1) by (cluster) == count(label_replace(haproxy_up, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster)"
+ },
+ "HeatErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Heat logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Heat logs"
+ },
+ "if": "sum(rate(log_messages{service=\"heat\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "NovaApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "Nova API is not accessible for all available Nova endpoints in the OpenStack service catalog.",
+ "summary": "Nova API outage"
+ },
+ "if": "max(openstack_api_check_status{name=~\"nova.*|placement\"}) == 0"
+ },
+ "CinderApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "The cinder-api endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "Cinder-api endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"cinder-api\"} == 0"
+ },
+ "NovaComputeServicesDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "More than 50% of nova-compute services are down.",
+ "summary": "More than 50% of nova-compute services are down"
+ },
+ "if": "count(openstack_nova_service_state{binary=\"nova-compute\"} == 0) >= count(openstack_nova_service_state{binary=\"nova-compute\"}) * 0.5"
+ },
+ "PacketsDroppedByCpuWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 10 minutes.",
+ "summary": "Increased number of CPU dropped packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[10m])) > 0"
+ },
+ "KeepalivedUnknownState": {
+ "labels": {
+ "severity": "minor",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "The Keepalived VRRP {{ $labels.name }} is in the UNKNOWN state on the {{ $labels.host }} node.",
+ "summary": "Keepalived VRRP state is UNKNOWN"
+ },
+ "if": "keepalived_state == -1"
+ },
+ "HaproxyBackendOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "All {{ $labels.proxy }} back ends are down.",
+ "summary": "{{ $labels.proxy }} back end outage"
+ },
+ "if": "max(haproxy_active_servers{sv=\"BACKEND\"}) by (proxy) + max(haproxy_backup_servers{sv=\"BACKEND\"}) by (proxy) == 0"
+ },
+ "KeystoneApiResponseTimeTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "The Keystone API response time for GET and POST requests on the {{ $labels.host }} node is higher than 3.0s for 2 minutes.",
+ "summary": "High response time of Keystone API"
+ },
+ "for": "2m",
+ "if": "max by(host) (openstack_http_response_times{service='keystone',quantile=\"0.9\",http_method=~\"^(GET|POST)$\",http_status=~\"^2..$\"}) >= 3.0"
+ },
+ "KeystoneErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Keystone logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Keystone logs"
+ },
+ "if": "sum(rate(log_messages{service=\"keystone\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "KeepalivedMultipleIPAddr": {
+ "labels": {
+ "severity": "major",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "The Keepalived {{ $labels.ip }} virtual IP is assigned more than once.",
+ "summary": "Keepalived VIP is assigned more than once"
+ },
+ "for": "2m",
+ "if": "count(ipcheck_assigned) by (ip) > 1"
+ },
+ "SystemLoadTooHighWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
                "summary": "System load is 1.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 1.0"
+ },
+ "NovaApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "All available nova-api endpoints are not accessible for 2 minutes.",
+ "summary": "nova-api endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"nova-api\"} == 0) == count(http_response_status{name=~\"nova-api\"})"
+ },
+ "SystemDiskInodesFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "85.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 85.0"
+ },
+ "NovaComputeServicesDownMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "More than 25% of nova-compute services are down.",
+ "summary": "More than 25% of nova-compute services are down"
+ },
+ "if": "count(openstack_nova_service_state{binary=\"nova-compute\"} == 0) >= count(openstack_nova_service_state{binary=\"nova-compute\"}) * 0.25"
+ },
+ "SshFailedLoginsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} failed SSH login attempts on the {{ $labels.host }} node during the last 5 minutes.",
+ "summary": "5 failed SSH logins"
+ },
+ "if": "increase(failed_logins_total[5m]) > 5"
+ },
+ "NovaApiDown": {
+ "labels": {
+ "severity": "major",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "Nova API is not accessible for the {{ $labels.name }} endpoint.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "if": "openstack_api_check_status{name=~\"nova.*|placement\"} == 0"
+ },
+ "MemcachedConnectionsNoneMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "memcached"
+ },
+ "annotations": {
+ "description": "The Memcached database on the {{ $labels.host }} node has no open connections.",
+ "summary": "Memcached has no open connections"
+ },
+ "if": "memcached_curr_connections == 0"
+ },
+ "NeutronApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "neutron"
+ },
+ "annotations": {
+ "description": "Neutron API is not accessible for the Neutron endpoint in the OpenStack service catalog.",
+ "summary": "Neutron API outage"
+ },
+ "if": "openstack_api_check_status{name=\"neutron\"} == 0"
+ },
+ "SystemMemoryFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "95.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 95.0"
+ },
+ "SystemCpuFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The average CPU usage on the {{ $labels.host }} node is {{ $value }}% for 2 minutes.",
+ "summary": "90.0% CPU usage"
+ },
+ "for": "2m",
+ "if": "100 - avg_over_time(cpu_usage_idle{cpu=\"cpu-total\"}[5m]) > 90.0"
+ },
+ "CinderErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Cinder logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Cinder logs"
+ },
+ "if": "sum(rate(log_messages{service=\"cinder\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "KeystoneApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "All available {{ $labels.name }} endpoints are not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"keystone.*\"} == 0) by (name) == count(http_response_status{name=~\"keystone.*\"}) by (name)"
+ },
+ "NeutronApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "neutron"
+ },
+ "annotations": {
+ "description": "The neutron-api endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "neutron-api endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=\"neutron-api\"} == 0"
+ },
+ "MemcachedConnectionsNoneMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "memcached"
+ },
+ "annotations": {
+ "description": "The Memcached database has no open connections on all nodes.",
+ "summary": "Memcached has no open connections on all nodes"
+ },
+ "if": "count(memcached_curr_connections == 0) == count(memcached_up)"
+ },
+ "CinderServicesDownMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.binary }} services (>=30.0%) are down.",
+ "summary": "30.0% of {{ $labels.binary }} services are down"
+ },
+ "if": "count(openstack_cinder_service_state == 0) by (binary) >= on (binary) count(openstack_cinder_service_state) by (binary) * 0.3"
+ },
+ "KeepalivedFailedState": {
+ "labels": {
+ "severity": "minor",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "The Keepalived VRRP {{ $labels.name }} is in the FAILED state on the {{ $labels.host }} node.",
+ "summary": "Keepalived VRRP state is FAILED"
+ },
+ "if": "keepalived_state == 0"
+ },
+ "NeutronApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "neutron"
+ },
+ "annotations": {
+ "description": "All available neutron-api endpoints are not accessible for 2 minutes.",
+ "summary": "neutron-api endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=\"neutron-api\"} == 0) == count(http_response_status{name=\"neutron-api\"})"
+ },
+ "NovaServicesDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "More than 60% of {{ $labels.binary }} services are down.",
+ "summary": "More than 60% of {{ $labels.binary }} services are down"
+ },
+ "if": "count(openstack_nova_service_state{binary!~\"nova-compute\"} == 0) by (binary) >= on (binary) count(openstack_nova_service_state{binary!~\"nova-compute\"}) by (binary) * 0.6"
+ },
+ "CertificateExpirationWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.source }} certificate on the {{ $labels.host }} node expires in less than 60 days.",
+ "summary": "The certificate expires in less than 60 days"
+ },
+ "if": "x509_cert_expiry / (24 * 60 * 60) < 60"
+ },
+ "GlanceApiEndpointsOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "All available {{ $labels.name }} endpoints are not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoints outage"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"glance.*\"} == 0) by (name) == count(http_response_status{name=~\"glance.*\"}) by (name)"
+ },
+ "SaltMinionServiceDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "salt"
+ },
+ "annotations": {
+ "description": "The salt-minion service on the {{ $labels.host }} node is down.",
+ "summary": "Salt-minion service is down"
+ },
+ "if": "procstat_running{process_name=\"salt-minion\"} == 0"
+ },
+ "CinderServiceDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "The {{ $labels.binary }} service on the {{ $labels.hostname }} node is down.",
+ "summary": "{{ $labels.binary }} service is down"
+ },
+ "if": "openstack_cinder_service_state == 0"
+ },
+ "NovaApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "The nova-api endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "nova-api endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"nova-api\"} == 0"
+ },
+ "SystemLoadTooHighCritical": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
                "summary": "System load is 2.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 2.0"
+ },
+ "HaproxyServiceDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "{{ $value }} HAProxy services (>= 50%) within the {{ $labels.cluster }} cluster are down.",
+ "summary": "50% of HAProxy services are down"
+ },
+ "if": "count(label_replace(haproxy_up, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\") != 1) by (cluster) >= 0.5 * count(label_replace(haproxy_up, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster)"
+ },
+ "PacketsDroppedByCpuMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 10 minutes.",
+ "summary": "CPU dropped more than 100 packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[10m])) > 100"
+ },
+ "CronProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The cron process on the {{ $labels.host }} node is down.",
+ "summary": "Cron process is down"
+ },
+ "if": "procstat_running{process_name=\"cron\"} == 0"
+ },
+ "SshdProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The SSH process on the {{ $labels.host }} node is down.",
+ "summary": "SSH process is down"
+ },
+ "if": "procstat_running{process_name=\"sshd\"} == 0"
+ },
+ "CinderServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "All {{ $labels.binary }} services are down.",
+ "summary": "{{ $labels.binary }} service outage"
+ },
+ "if": "count(openstack_cinder_service_state == 0) by (binary) == on (binary) count(openstack_cinder_service_state) by (binary)"
+ },
+ "SystemDiskInodesFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "95.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 95.0"
+ },
+ "NovaErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "The average rate of errors in Nova logs on the {{ $labels.host }} node is more than 0.2 error messages per second (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Nova logs"
+ },
+ "if": "sum(rate(log_messages{service=\"nova\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "KeystoneApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "The {{ $labels.name }} endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"keystone.*\"} == 0"
+ },
+ "SystemDiskFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 95.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 95.0"
+ },
+ "MemcachedConnectionThrottled": {
+ "labels": {
+ "severity": "warning",
+ "service": "memcached"
+ },
+ "annotations": {
+ "description": "An average of {{ $value }} client connections to the Memcached database on the {{ $labels.host }} node throttle for 2 minutes.",
                "summary": "5 throttled Memcached connections"
+ },
+ "for": "2m",
+ "if": "increase(memcached_conn_yields[1m]) > 5"
+ },
+ "KeystoneApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "Keystone API is not accessible for the Keystone endpoint in the OpenStack service catalog.",
+ "summary": "Keystone API outage"
+ },
+ "if": "openstack_api_check_status{name=~\"keystone.*\"} == 0"
+ },
+ "GlanceApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "The {{ $labels.name }} endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"glance.*\"} == 0"
+ },
+ "HeatApiEndpointDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "The {{ $labels.name }} endpoint on the {{ $labels.host }} node is not accessible for 2 minutes.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "for": "2m",
+ "if": "http_response_status{name=~\"heat.*-api\"} == 0"
+ },
+ "GlanceErrorLogsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "The average per-second rate of errors in Glance logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes).",
+ "summary": "High number of errors in Glance logs"
+ },
+ "if": "sum(rate(log_messages{service=\"glance\",level=~\"(?i:(error|emergency|fatal))\"}[5m])) without (level) > 0.2"
+ },
+ "NeutronApiEndpointsDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "neutron"
+ },
+ "annotations": {
+ "description": "{{ $value }} neutron-api endpoints (>= 50.0%) are not accessible for 2 minutes.",
+ "summary": "50.0% of neutron-api endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=\"neutron-api\"} == 0) >= count(http_response_status{name=\"neutron-api\"}) * 0.5"
+ },
+ "KeepalivedServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "All Keepalived processes within the {{ $labels.cluster }} cluster are down.",
+ "summary": "Keepalived service outage"
+ },
+ "if": "count(label_replace(procstat_running{process_name=\"keepalived\"}, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster) == count(label_replace(procstat_running{process_name=\"keepalived\"} == 0, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster)"
+ },
+ "MemcachedEvictionsLimit": {
+ "labels": {
+ "severity": "warning",
+ "service": "memcached"
+ },
+ "annotations": {
+ "description": "An average of {{ $value }} evictions in the Memcached database occurred on the {{ $labels.host }} node during the last minute.",
+ "summary": "10 Memcached evictions"
+ },
+ "if": "increase(memcached_evictions[1m]) > 10"
+ },
+ "NovaApiEndpointsDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "More than 60% of nova-api endpoints are not accessible for 2 minutes.",
+ "summary": "60% of nova-api endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"nova-api\"} == 0) >= count(http_response_status{name=~\"nova-api\"}) * 0.6"
+ },
+ "HeatApiDown": {
+ "labels": {
+ "severity": "major",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "Heat API is not accessible for the {{ $labels.name }} endpoint.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "if": "openstack_api_check_status{name=~\"heat.*\"} == 0"
+ },
+ "NovaServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "nova"
+ },
+ "annotations": {
+ "description": "All {{ $labels.binary }} services are down.",
+ "summary": "{{ $labels.binary }} service outage"
+ },
+ "if": "count(openstack_nova_service_state == 0) by (binary) == on (binary) count(openstack_nova_service_state) by (binary)"
+ },
+ "SystemDiskErrorsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.device }} disk on the {{ $labels.host }} node is reporting errors for 5 minutes.",
+ "summary": "Disk {{ $labels.device }} is failing"
+ },
+ "for": "5m",
+ "if": "increase(hdd_errors_total[1m]) > 0"
+ },
+ "CinderApiDown": {
+ "labels": {
+ "severity": "major",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "Cinder API is not accessible for the {{ $labels.name }} endpoint.",
+ "summary": "{{ $labels.name }} endpoint is not accessible"
+ },
+ "if": "openstack_api_check_status{name=~\"cinder.*\"} == 0"
+ },
+ "CinderServicesDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.binary }} services (>=60.0%) are down.",
+ "summary": "60.0% of {{ $labels.binary }} services are down"
+ },
+ "if": "count(openstack_cinder_service_state == 0) by (binary) >= on (binary) count(openstack_cinder_service_state) by (binary) * 0.6"
+ },
+ "MemcachedServiceDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "memcached"
+ },
+ "annotations": {
+ "description": "The Memcached service on the {{ $labels.host }} node is down.",
+ "summary": "Memcached service is down"
+ },
+ "if": "memcached_up == 0"
+ },
+ "NtpOffsetTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "ntp"
+ },
+ "annotations": {
+ "description": "The NTP offset on the {{ $labels.host }} node is {{ $value }}ms for 2 minutes.",
+ "summary": "NTP offset reached the limit of 200ms"
+ },
+ "for": "2m",
+ "if": "ntpq_offset >= 200"
+ },
+ "GlanceApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "glance"
+ },
+ "annotations": {
+ "description": "Glance API is not accessible for the Glance endpoint in the OpenStack service catalog.",
+ "summary": "Glance API outage"
+ },
+ "if": "openstack_api_check_status{name=\"glance\"} == 0"
+ },
+ "SystemRxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets received by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "60 received packets were dropped"
+ },
+ "if": "increase(net_drop_in[1m]) > 60 unless on (host,interface) bond_slave_active == 0"
+ },
+ "KeystoneApiEndpointssDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "keystone"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.name }} endpoints (>= 50.0%) are not accessible for 2 minutes.",
+ "summary": "50.0% of {{ $labels.name }} endpoints are not accessible"
+ },
+ "for": "2m",
+ "if": "count(http_response_status{name=~\"keystone.*\"} == 0) by (name) >= count(http_response_status{name=~\"keystone.*\"}) by (name) * 0.5"
+ },
+ "SystemTxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets transmitted by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "100 transmitted packets were dropped"
+ },
+ "if": "increase(net_drop_out[1m]) > 100"
+ },
+ "ApacheServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "apache"
+ },
+ "annotations": {
+ "description": "The Apache services within the {{ $labels.cluster }} cluster are down.",
+ "summary": "Apache service outage"
+ },
+ "if": "count(label_replace(apache_up, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster) == count(label_replace(apache_up == 0, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster)"
+ },
+ "HaproxyBackendDownMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "haproxy"
+ },
+ "annotations": {
+ "description": "{{ $value }} {{ $labels.proxy }} back ends (>= 50%) are down.",
+ "summary": "50% of {{ $labels.proxy }} back ends are down"
+ },
+ "if": "0.5 * avg(sum(haproxy_active_servers{type=\"server\"}) by (host, proxy) + sum(haproxy_backup_servers{type=\"server\"}) by (host, proxy)) by (proxy) >= avg(sum(haproxy_active_servers{type=\"backend\"}) by (host, proxy) + sum(haproxy_backup_servers{type=\"backend\"}) by (host, proxy)) by (proxy)"
+ },
+ "HeatApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "heat"
+ },
+ "annotations": {
+ "description": "Heat API is not accessible for all available Heat endpoints in the OpenStack service catalog.",
+ "summary": "Heat API outage"
+ },
+ "if": "max(openstack_api_check_status{name=~\"heat.*\"}) == 0"
+ },
+ "KeepalivedProcessDown": {
+ "labels": {
+ "severity": "major",
+ "service": "keepalived"
+ },
+ "annotations": {
+ "description": "The Keepalived process on the {{ $labels.host }} node is down.",
+ "summary": "Keepalived process is down"
+ },
+ "if": "procstat_running{process_name=\"keepalived\"} == 0"
+ },
+ "CertificateExpirationCritical": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.source }} certificate on the {{ $labels.host }} node expires in less than 30 days.",
+ "summary": "The certificate expires in less than 30 days"
+ },
+ "if": "x509_cert_expiry / (24 * 60 * 60) < 30"
+ },
+ "CinderApiOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "cinder"
+ },
+ "annotations": {
+ "description": "Cinder API is not accessible for all available Cinder endpoints in the OpenStack service catalog.",
+ "summary": "Cinder API outage"
+ },
+ "if": "max(openstack_api_check_status{name=~\"cinder.*\"}) == 0"
+ }
+ }
+ }
+ },
+ "glance_policy": {
+ "get_task": "",
+ "modify_metadef_namespace": "",
+ "add_metadef_resource_type_association": "",
+ "get_metadef_object": "",
+ "upload_image": "",
+ "get_image_location": "",
+ "list_metadef_resource_types": "",
+ "add_image": "",
+ "get_metadef_namespace": "",
+ "manage_image_cache": "role:admin",
+ "delete_member": "",
+ "deactivate": "",
+ "add_metadef_object": "",
+ "get_images": "",
+ "get_metadef_resource_type": "",
+ "delete_image": "",
+ "get_metadef_namespaces": "",
+ "get_metadef_objects": "",
+ "modify_metadef_property": "",
+ "publicize_image": "role:admin",
+ "add_metadef_namespace": "",
+ "get_member": "",
+ "add_member": "",
+ "set_image_location": "",
+ "communitize_image": "",
+ "tasks_api_access": "role:admin",
+ "modify_task": "",
+ "add_task": "",
+ "modify_member": "",
+ "context_is_admin": "role:admin",
+ "modify_image": "",
+ "add_metadef_property": "",
+ "get_metadef_properties": "",
+ "get_members": "",
+ "get_tasks": "",
+ "get_metadef_tag": "",
+ "reactivate": "",
+ "modify_metadef_tag": "",
+ "get_metadef_property": "",
+ "delete_image_location": "",
+ "copy_from": "",
+ "add_metadef_tags": "",
+ "default": "role:admin",
+ "download_image": "",
+ "modify_metadef_object": "",
+ "get_image": "",
+ "add_metadef_tag": "",
+ "get_metadef_tags": ""
+ },
+ "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "machine_id": "7b17cfb3a5724e06a2b1b8d17cc0e2cb",
+ "salt": {
+ "graph": [
+ {
+ "host": "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "linux.system",
+ "relations": [
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//glusterfs-3.8//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//opencontrail-4.0//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-updates main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ }
+ ]
+ },
+ {
+ "host": "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "ntp.client",
+ "relations": [
+ {
+ "host_external": "udp://10.10.0.15",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ },
+ {
+ "host_external": "udp://pool.ntp.org",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ }
+ ]
+ },
+ {
+ "host": "ctl01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-config",
+ "service": "salt.minion",
+ "relations": [
+ {
+ "direction": "source",
+ "type": "tcp-0mq",
+ "service": "salt.master",
+ "host_from_target": "10.10.0.15"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "cfg01.fakedomain.local": {
+ "biosversion": "1.10.2-1.1~u16.04+mcp2",
+ "kernel": "Linux",
+ "domain": "ozhurba-os-oc-cicd-sl.local",
+ "uid": 0,
+ "zmqversion": "4.1.4",
+ "kernelrelease": "4.15.0-43-generic",
+ "pythonpath": [
+ "/usr/bin",
+ "/usr/lib/python2.7",
+ "/usr/lib/python2.7/plat-x86_64-linux-gnu",
+ "/usr/lib/python2.7/lib-tk",
+ "/usr/lib/python2.7/lib-old",
+ "/usr/lib/python2.7/lib-dynload",
+ "/usr/local/lib/python2.7/dist-packages",
+ "/usr/local/lib/python2.7/dist-packages/configparser-3.7.4-py2.7.egg",
+ "/usr/lib/python2.7/dist-packages"
+ ],
+ "serialnumber": "157967c6-9d9f-49dc-9f27-73d403afd492",
+ "pid": 19577,
+ "telegraf": {
+ "remote_agent": {
+ "input": {},
+ "processor": {},
+ "dir": {
+ "config": "/srv/volumes/local/telegraf",
+ "config_d": "/srv/volumes/local/telegraf/telegraf.d"
+ },
+ "output": {}
+ },
+ "agent": {
+ "metric_batch_size": 1000,
+ "collection_jitter": 2,
+ "interval": 15,
+ "enabled": true,
+ "pkgs": [
+ "telegraf"
+ ],
+ "round_interval": false,
+ "output": {
+ "prometheus_client": {
+ "engine": "prometheus",
+ "bind": {
+ "port": 9126,
+ "address": "0.0.0.0"
+ },
+ "string_as_label": false
+ }
+ },
+ "input": {
+ "kernel": null,
+ "processes": null,
+ "nstat": {
+ "fieldpass": [
+ "packet_drop",
+ "time_squeeze"
+ ]
+ },
+ "x509": {
+ "sources": [
+ "/srv/salt/pki/ozhurba-os-oc-cicd-sl/10.13.250.9.crt"
+ ]
+ },
+ "ntp": {
+ "template": "ntp/files/telegraf.conf"
+ },
+ "system": null,
+ "http_listener": {
+ "read_timeout": "10s",
+ "bind": {
+ "port": 8186,
+ "address": "127.0.0.1"
+ },
+ "tagexclude": [
+ "hostname"
+ ],
+ "write_timeout": "10s"
+ },
+ "cpu": {
+ "totalcpu": true,
+ "percpu": false
+ },
+ "linux_sysctl_fs": null,
+ "diskio": null,
+ "procstat": {
+ "process": {
+ "sshd": {
+ "exe": "sshd"
+ },
+ "salt-minion": {
+ "pattern": "salt-minion"
+ },
+ "cron": {
+ "exe": "cron"
+ },
+ "dockerd": {
+ "exe": "dockerd"
+ },
+ "salt-master": {
+ "pattern": "salt-master"
+ },
+ "ntpd": {
+ "exe": "ntpd"
+ }
+ }
+ },
+ "net": null,
+ "disk": {
+ "ignore_fs": [
+ "aufs",
+ "rootfs",
+ "sysfs",
+ "proc",
+ "devtmpfs",
+ "devpts",
+ "tmpfs",
+ "fusectl",
+ "cgroup",
+ "overlay"
+ ]
+ },
+ "mem": null,
+ "docker": {
+ "endpoint": "unix:///var/run/docker.sock",
+ "container_name_exclude": [
+ "*"
+ ],
+ "timeout": 5,
+ "namepass": [
+ "docker",
+ "docker_swarm"
+ ],
+ "perdevice": true,
+ "gather_services": true,
+ "total": false
+ },
+ "swap": null
+ },
+ "metric_buffer_limit": 10000,
+ "processor": {},
+ "dir": {
+ "config": "/etc/telegraf",
+ "config_d": "/etc/telegraf/telegraf.d"
+ }
+ }
+ },
+ "ip_interfaces": {
+ "ens4": [
+ "10.11.0.15",
+ "fe80::f816:3eff:fe8d:eff3"
+ ],
+ "ens5": [
+ "10.12.100.8",
+ "fe80::f816:3eff:fe39:9558"
+ ],
+ "ens6": [
+ "10.13.0.15",
+ "fe80::f816:3eff:fea6:7cae"
+ ],
+ "vethaca0b7a": [
+ "fe80::38b7:15ff:fecc:9e2a"
+ ],
+ "lo": [
+ "127.0.0.1",
+ "::1"
+ ],
+ "ens3": [
+ "10.10.0.15",
+ "fe80::f816:3eff:fe79:d748"
+ ],
+ "docker_gwbridge": [
+ "10.20.0.1",
+ "fe80::42:beff:fe69:f35d"
+ ],
+ "veth31bea8f": [
+ "fe80::40ac:75ff:fe71:f26f"
+ ]
+ },
+ "groupname": "root",
+ "fqdn_ip6": [],
+ "mem_total": 16040,
+ "saltversioninfo": [
+ 2017,
+ 7,
+ 8,
+ 0
+ ],
+ "SSDs": [],
+ "mdadm": [],
+ "id": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "manufacturer": "OpenStack Foundation",
+ "osrelease": "16.04",
+ "ps": "ps -efHww",
+ "systemd": {
+ "version": "229",
+ "features": "+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ -LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN"
+ },
+ "fqdn": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "uuid": "6cf16038-e8ef-45da-8f9d-13f0ca824a31",
+ "ip6_interfaces": {
+ "ens4": [
+ "fe80::f816:3eff:fe8d:eff3"
+ ],
+ "ens5": [
+ "fe80::f816:3eff:fe39:9558"
+ ],
+ "ens6": [
+ "fe80::f816:3eff:fea6:7cae"
+ ],
+ "vethaca0b7a": [
+ "fe80::38b7:15ff:fecc:9e2a"
+ ],
+ "lo": [
+ "::1"
+ ],
+ "ens3": [
+ "fe80::f816:3eff:fe79:d748"
+ ],
+ "docker_gwbridge": [
+ "fe80::42:beff:fe69:f35d"
+ ],
+ "veth31bea8f": [
+ "fe80::40ac:75ff:fe71:f26f"
+ ]
+ },
+ "num_cpus": 4,
+ "hwaddr_interfaces": {
+ "ens4": "fa:16:3e:8d:ef:f3",
+ "ens5": "fa:16:3e:39:95:58",
+ "ens6": "fa:16:3e:a6:7c:ae",
+ "vethaca0b7a": "3a:b7:15:cc:9e:2a",
+ "lo": "00:00:00:00:00:00",
+ "ens3": "fa:16:3e:79:d7:48",
+ "docker_gwbridge": "02:42:be:69:f3:5d",
+ "veth31bea8f": "42:ac:75:71:f2:6f"
+ },
+ "init": "systemd",
+ "ip4_interfaces": {
+ "ens4": [
+ "10.11.0.15"
+ ],
+ "ens5": [
+ "10.12.100.8"
+ ],
+ "ens6": [
+ "10.13.0.15"
+ ],
+ "vethaca0b7a": [],
+ "lo": [
+ "127.0.0.1"
+ ],
+ "ens3": [
+ "10.10.0.15"
+ ],
+ "docker_gwbridge": [
+ "10.20.0.1"
+ ],
+ "veth31bea8f": []
+ },
+ "grafana": {
+ "dashboard": {
+ "linux_disk_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_disk_prometheus.json",
+ "format": "json"
+ },
+ "linux_influxdb": {
+ "datasource": "influxdb",
+ "template": "linux/files/grafana_dashboards/system_influxdb.json",
+ "format": "json"
+ },
+ "glusterfs_prometheus": {
+ "datasource": "prometheus",
+ "template": "glusterfs/files/grafana_dashboards/glusterfs_prometheus.json",
+ "format": "json"
+ },
+ "jenkins_prometheus": {
+ "datasource": "prometheus",
+ "template": "jenkins/files/grafana_dashboards/jenkins_prometheus.json",
+ "format": "json"
+ },
+ "main_prometheus": {
+ "datasource": "prometheus",
+ "row": {
+ "ost-middleware": {
+ "title": "Middleware",
+ "panel": {
+ "glusterfs": {
+ "target": {
+ "cluster_status": {
+ "expr": "avg(glusterfs_up) by (name)"
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "GlusterFS",
+ "title": "GlusterFS"
+ }
+ ],
+ "title": "GlusterFS"
+ }
+ }
+ }
+ }
+ },
+ "glusterfs_influxdb": {
+ "datasource": "influxdb",
+ "template": "glusterfs/files/grafana_dashboards/glusterfs_influxdb.json",
+ "format": "json"
+ },
+ "linux_overview_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_overview_prometheus.json",
+ "format": "json"
+ },
+ "docker_prometheus": {
+ "datasource": "prometheus",
+ "template": "docker/files/grafana_dashboards/docker_prometheus.json",
+ "format": "json"
+ },
+ "ntp_prometheus": {
+ "datasource": "prometheus",
+ "template": "ntp/files/grafana_dashboards/ntp_prometheus.json",
+ "format": "json"
+ },
+ "main": {
+ "datasource": "influxdb",
+ "row": {
+ "docker-data-plane": {
+ "title": "Docker",
+ "panel": {
+ "docker": {
+ "target": {
+ "cluster_status": {
+ "query": "SELECT last(value) FROM cluster_status WHERE cluster_name = 'docker' AND environment_label = '$environment' AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true
+ }
+ },
+ "links": [
+ {
+ "type": "dashboard",
+ "dashboard": "Docker",
+ "title": "Docker"
+ }
+ ],
+ "title": "Docker"
+ }
+ }
+ }
+ }
+ },
+ "linux_network_prometheus": {
+ "datasource": "prometheus",
+ "template": "linux/files/grafana_dashboards/system_network_prometheus.json",
+ "format": "json"
+ },
+ "docker_influxdb": {
+ "datasource": "influxdb",
+ "template": "docker/files/grafana_dashboards/docker_influxdb.json",
+ "format": "json"
+ }
+ }
+ },
+ "ssh_fingerprints": {
+ "rsa": "76:1d:3d:51:56:9b:6c:2f:47:2a:e0:46:28:ce:f5:86",
+ "ecdsa": "e6:d4:2f:25:fa:5b:dd:35:00:9e:5c:65:d5:91:ac:8c",
+ "dsa": "53:2c:67:0f:03:57:ae:26:7d:65:de:bf:8c:41:1e:05"
+ },
+ "gid": 0,
+ "master": "10.10.0.15",
+ "ipv4": [
+ "10.10.0.15",
+ "10.11.0.15",
+ "10.12.100.8",
+ "10.13.0.15",
+ "10.20.0.1",
+ "127.0.0.1"
+ ],
+ "dns": {
+ "domain": "",
+ "sortlist": [],
+ "nameservers": [
+ "172.18.176.6",
+ "172.17.44.91"
+ ],
+ "ip4_nameservers": [
+ "172.18.176.6",
+ "172.17.44.91"
+ ],
+ "search": [
+ "openstacklocal"
+ ],
+ "ip6_nameservers": [],
+ "options": []
+ },
+ "ipv6": [
+ "::1",
+ "fe80::42:beff:fe69:f35d",
+ "fe80::38b7:15ff:fecc:9e2a",
+ "fe80::40ac:75ff:fe71:f26f",
+ "fe80::f816:3eff:fe39:9558",
+ "fe80::f816:3eff:fe79:d748",
+ "fe80::f816:3eff:fe8d:eff3",
+ "fe80::f816:3eff:fea6:7cae"
+ ],
+ "server_id": 1095353950,
+ "cpu_flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "vmx",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "cpuid_fault",
+ "invpcid_single",
+ "pti",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "hle",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "rtm",
+ "rdseed",
+ "adx",
+ "smap",
+ "xsaveopt",
+ "arat"
+ ],
+ "osfullname": "Ubuntu",
+ "localhost": "cfg01",
+ "lsb_distrib_id": "Ubuntu",
+ "username": "root",
+ "fqdn_ip4": [
+ "10.11.0.15"
+ ],
+ "shell": "/bin/sh",
+ "nodename": "cfg01",
+ "saltversion": "2017.7.8",
+ "lsb_distrib_release": "16.04",
+ "saltpath": "/usr/lib/python2.7/dist-packages/salt",
+ "pythonversion": [
+ 2,
+ 7,
+ 12,
+ "final",
+ 0
+ ],
+ "host": "cfg01",
+ "os_family": "Debian",
+ "oscodename": "xenial",
+ "services": [
+ "fluentd",
+ "telegraf",
+ "runtest",
+ "glusterfs",
+ "rsyslog",
+ "linux",
+ "glance",
+ "git",
+ "reclass",
+ "nova",
+ "grafana",
+ "keystone",
+ "jenkins",
+ "openscap",
+ "neutron",
+ "ntp",
+ "maas",
+ "nginx",
+ "prometheus",
+ "postgresql",
+ "openssh",
+ "logrotate",
+ "_reclass_",
+ "docker",
+ "salt"
+ ],
+ "osfinger": "Ubuntu-16.04",
+ "biosreleasedate": "04/01/2014",
+ "dns_records": [
+ {
+ "names": [
+ "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "cfg01"
+ ],
+ "address": "10.11.0.15"
+ }
+ ],
+ "lsb_distrib_description": "Ubuntu 16.04.5 LTS",
+ "sphinx": {
+ "doc": {
+ "reclass": {
+ "role": {
+ "storage": {
+ "name": "storage",
+ "param": {
+ "version": {
+ "value": "1.5.6"
+ }
+ }
+ }
+ },
+ "description": "reclass is an external node classifier (ENC) used with automation tools, such as Puppet, Salt, and Ansible.",
+ "name": "Reclass"
+ },
+ "maas": {
+ "role": {
+ "server": {
+ "name": "server",
+ "param": {
+ "some_param": {
+ "name": "Some name",
+ "value": "some value"
+ }
+ }
+ }
+ },
+ "description": "Some service info",
+ "name": "maas"
+ },
+ "glusterfs": {
+ "role": {
+ "client": {
+ "name": "client",
+ "param": {
+ "mounts": {
+ "value": {
+ "salt_pki": "/srv/salt/pki"
+ }
+ }
+ }
+ }
+ },
+ "description": "An open source, distributed file system capable of scaling to several petabytes and handling thousands of clients.",
+ "name": "GlusterFS"
+ },
+ "linux": {
+ "role": {
+ "network": {
+ "name": "Network",
+ "param": {
+ "ip": {
+ "name": "IP Addresses",
+ "value": [
+ "10.10.0.15",
+ "10.11.0.15",
+ "10.12.100.8",
+ "10.13.0.15",
+ "10.20.0.1",
+ "127.0.0.1"
+ ]
+ },
+ "fqdn": {
+ "name": "FQDN",
+ "value": "cfg01.ozhurba-os-oc-cicd-sl.local"
+ }
+ }
+ },
+ "system": {
+ "name": "System",
+ "param": {
+ "kernel": {
+ "value": "Linux 4.15.0-43-generic"
+ },
+ "distribution": {
+ "value": "Ubuntu 16.04.5 LTS"
+ },
+ "name": {
+ "value": "cfg01"
+ }
+ }
+ }
+ },
+ "description": "Linux is a high performance, yet completely free, Unix-like operating system that is suitable for use on a wide range of computers and other products.",
+ "name": "Linux"
+ },
+ "jenkins": {
+ "role": {
+ "client": {
+ "name": "client",
+ "param": {
+ "master": {
+ "value": "10.10.0.15:8081"
+ },
+ "jobs": {
+ "value": [
+ "cvp-func",
+ "validate_openstack",
+ "cvp-ha",
+ "cvp-stacklight",
+ "cvp-perf",
+ "deploy_openstack",
+ "cvp-spt",
+ "cvp-sanity",
+ "deploy-openstack-compute"
+ ]
+ }
+ }
+ },
+ "master": {
+ "name": "master",
+ "param": {}
+ }
+ },
+ "description": "Jenkins is an application that monitors executions of repeated jobs, such as building a software project or jobs run by cron.",
+ "name": "Jenkins"
+ },
+ "docker": {
+ "role": {
+ "swarm": {
+ "name": "swarm",
+ "param": {
+ "role": {
+ "value": "master"
+ },
+ "advertise_addr": {
+ "value": "10.11.0.15"
+ }
+ }
+ },
+ "host": {
+ "name": "host",
+ "param": {
+ "version": {
+ "value": "Docker version 18.09.0, build 4d60db4"
+ }
+ }
+ },
+ "client": {
+ "name": "client",
+ "param": {
+ "source": {
+ "value": {
+ "engine": "pkg"
+ }
+ },
+ "stacks": {
+ "value": [
+ "jenkins-master (image docker-prod-local.docker.mirantis.net/mirantis/cicd/jenkins:2019.2.0)",
+ "jenkins-slave01 (image docker-prod-local.docker.mirantis.net/mirantis/cicd/jnlp-slave:2019.2.0)",
+ "postgresql-postgresql-db (image docker-prod-local.docker.mirantis.net/mirantis/external/library/postgres:9.6.10)"
+ ]
+ }
+ }
+ }
+ },
+ "description": "Docker is an open source project to pack, ship and run any application as a lightweight container.",
+ "name": "Docker"
+ },
+ "salt": {
+ "role": {
+ "minion": {
+ "name": "minion",
+ "param": {
+ "version": {
+ "value": "2017.7.8 (Nitrogen)"
+ }
+ }
+ },
+ "master": {
+ "name": "master",
+ "param": {
+ "version": {
+ "value": "2017.7.8 (Nitrogen)"
+ }
+ }
+ }
+ },
+ "description": "Salt is a new approach to infrastructure management. Easy enough to get running in minutes, scalable enough to manage tens of thousands of servers, and fast enough to communicate with them in seconds.",
+ "name": "Salt"
+ }
+ }
+ },
+ "num_gpus": 1,
+ "roles": [
+ "fluentd.agent",
+ "telegraf.agent",
+ "runtest.artifact_collector",
+ "runtest.tempest",
+ "runtest.salttest",
+ "glusterfs.client",
+ "rsyslog.client",
+ "linux.storage",
+ "linux.system",
+ "linux.network",
+ "glance.client",
+ "git.client",
+ "reclass.storage",
+ "nova.client",
+ "grafana.collector",
+ "keystone.client",
+ "jenkins.client",
+ "openscap.service",
+ "neutron.client",
+ "ntp.client",
+ "maas.cluster",
+ "maas.region",
+ "nginx.server",
+ "prometheus.collector",
+ "postgresql.server",
+ "openssh.server",
+ "logrotate.server",
+ "docker.host",
+ "docker.client",
+ "salt.minion",
+ "salt.api",
+ "salt.master"
+ ],
+ "virtual": "kvm",
+ "os": "Ubuntu",
+ "disks": [
+ "loop1",
+ "loop6",
+ "vdb",
+ "loop4",
+ "loop2",
+ "loop0",
+ "loop7",
+ "loop5",
+ "vda",
+ "loop3"
+ ],
+ "cpu_model": "Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz",
+ "osmajorrelease": 16,
+ "pythonexecutable": "/usr/bin/python",
+ "productname": "OpenStack Nova",
+ "osarch": "amd64",
+ "cpuarch": "x86_64",
+ "lsb_distrib_codename": "xenial",
+ "osrelease_info": [
+ 16,
+ 4
+ ],
+ "locale_info": {
+ "detectedencoding": "UTF-8",
+ "defaultlanguage": "en_US",
+ "defaultencoding": "UTF-8"
+ },
+ "gpus": [
+ {
+ "model": "GD 5446",
+ "vendor": "unknown"
+ }
+ ],
+ "prometheus": {
+ "server": {
+ "recording": {},
+ "target": {
+ "static": {
+ "fluentd": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.0.15:24231",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cfg01"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 24231,
+ "address": "10.11.0.15"
+ }
+ ]
+ },
+ "telegraf": {
+ "relabel_configs": [
+ {
+ "regex": "10.11.0.15:9126",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cfg01"
+ }
+ ],
+ "honor_labels": true,
+ "endpoint": [
+ {
+ "port": 9126,
+ "address": "10.11.0.15"
+ }
+ ]
+ },
+ "jenkins": {
+ "relabel_configs": [
+ {
+ "regex": "10.10.0.15:8081",
+ "source_labels": "__address__",
+ "target_label": "host",
+ "replacement": "cfg01"
+ }
+ ],
+ "endpoint": [
+ {
+ "port": 8081,
+ "address": "10.10.0.15"
+ }
+ ],
+ "metrics_path": "/prometheus/"
+ }
+ }
+ },
+ "alert": {
+ "SystemCpuFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The average CPU usage on the {{ $labels.host }} node is {{ $value }}% for 2 minutes.",
+ "summary": "90.0% CPU usage"
+ },
+ "for": "2m",
+ "if": "100 - avg_over_time(cpu_usage_idle{cpu=\"cpu-total\"}[5m]) > 90.0"
+ },
+ "DockerServiceJenkinsMasterOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "docker"
+ },
+ "annotations": {
+ "description": "All Docker Swarm 'jenkins_master' replicas are down for 2 minutes.",
+ "summary": "Docker Swarm 'jenkins_master' service outage"
+ },
+ "for": "2m",
+ "if": "docker_swarm_tasks_running{service_name=\"jenkins_master\"} == 0 or absent(docker_swarm_tasks_running{service_name=\"jenkins_master\"}) == 1"
+ },
+ "NetdevBudgetRanOutsWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The rate of net_rx_action loops terminations on the {{ $labels.host }} node is {{ $value }} per second during the last 7 minutes. Modify the net.core.netdev_budget and net.core.netdev_budget_usecs kernel parameters.",
+ "summary": "CPU terminated 0.1 net_rx_action loops per second"
+ },
+ "for": "7m",
+ "if": "max(rate(nstat_time_squeeze[5m])) without (cpu) > 0.1"
+ },
+ "PacketsDroppedByCpuWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 10 minutes.",
+ "summary": "Increased number of CPU dropped packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[10m])) > 0"
+ },
+ "SystemRxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets received by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "60 received packets were dropped"
+ },
+ "if": "increase(net_drop_in[1m]) > 60 unless on (host,interface) bond_slave_active == 0"
+ },
+ "DockerServiceJenkinsSlave01Outage": {
+ "labels": {
+ "severity": "critical",
+ "service": "docker"
+ },
+ "annotations": {
+ "description": "All Docker Swarm 'jenkins_slave01' replicas are down for 2 minutes.",
+ "summary": "Docker Swarm 'jenkins_slave01' service outage"
+ },
+ "for": "2m",
+ "if": "docker_swarm_tasks_running{service_name=\"jenkins_slave01\"} == 0 or absent(docker_swarm_tasks_running{service_name=\"jenkins_slave01\"}) == 1"
+ },
+ "CertificateExpirationWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.source }} certificate on the {{ $labels.host }} node expires in less than 60 days.",
+ "summary": "The certificate expires in less than 60 days"
+ },
+ "if": "x509_cert_expiry / (24 * 60 * 60) < 60"
+ },
+ "DockerServiceOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "docker"
+ },
+ "annotations": {
+ "description": "All dockerd processes within the {{ $labels.cluster }} cluster are down.",
+ "summary": "Docker cluster outage"
+ },
+ "if": "count(label_replace(procstat_running{process_name=\"dockerd\"}, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster) == count(label_replace(procstat_running{process_name=\"dockerd\"} == 0, \"cluster\", \"$1\", \"host\", \"([^0-9]+).+\")) by (cluster)"
+ },
+ "SystemDiskErrorsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.device }} disk on the {{ $labels.host }} node is reporting errors for 5 minutes.",
+ "summary": "Disk {{ $labels.device }} is failing"
+ },
+ "for": "5m",
+ "if": "increase(hdd_errors_total[1m]) > 0"
+ },
+ "SystemLoadTooHighWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
+ "summary": "System load is1.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 1.0"
+ },
+ "SaltMinionServiceDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "salt"
+ },
+ "annotations": {
+ "description": "The salt-minion service on the {{ $labels.host }} node is down.",
+ "summary": "Salt-minion service is down"
+ },
+ "if": "procstat_running{process_name=\"salt-minion\"} == 0"
+ },
+ "SystemLoadTooHighCritical": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The system load per CPU on the {{ $labels.host }} node is {{ $value }} for 5 minutes.",
+ "summary": "System load is2.0"
+ },
+ "for": "5m",
+ "if": "system_load5 / system_n_cpus > 2.0"
+ },
+ "SystemDiskInodesFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "85.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 85.0"
+ },
+ "NtpOffsetTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "ntp"
+ },
+ "annotations": {
+ "description": "The NTP offset on the {{ $labels.host }} node is {{ $value }}ms for 2 minutes.",
+ "summary": "NTP offset reached the limit of 200ms"
+ },
+ "for": "2m",
+ "if": "ntpq_offset >= 200"
+ },
+ "PacketsDroppedByCpuMinor": {
+ "labels": {
+ "severity": "minor",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.cpu }} CPU on the {{ $labels.host }} node dropped {{ $value }} packets during the last 10 minutes.",
+ "summary": "CPU dropped more than 100 packets"
+ },
+ "if": "floor(increase(nstat_packet_drop[10m])) > 100"
+ },
+ "CronProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The cron process on the {{ $labels.host }} node is down.",
+ "summary": "Cron process is down"
+ },
+ "if": "procstat_running{process_name=\"cron\"} == 0"
+ },
+ "SshdProcessDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The SSH process on the {{ $labels.host }} node is down.",
+ "summary": "SSH process is down"
+ },
+ "if": "procstat_running{process_name=\"sshd\"} == 0"
+ },
+ "SystemDiskFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 85.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 85.0"
+ },
+ "SshFailedLoginsTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} failed SSH login attempts on the {{ $labels.host }} node during the last 5 minutes.",
+ "summary": "5 failed SSH logins"
+ },
+ "if": "increase(failed_logins_total[5m]) > 5"
+ },
+ "DockerdProcessDown": {
+ "labels": {
+ "severity": "minor",
+ "service": "docker"
+ },
+ "annotations": {
+ "description": "The dockerd process on the {{ $labels.host }} node is down.",
+ "summary": "Dockerd process is down"
+ },
+ "if": "procstat_running{process_name=\"dockerd\"} == 0"
+ },
+ "SystemDiskInodesFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of disk inodes in the {{ $labels.path }} volume for 2 minutes.",
+ "summary": "95.0% of inodes for {{ $labels.path }} are used"
+ },
+ "for": "2m",
+ "if": "100 * disk_inodes_used / disk_inodes_total >= 95.0"
+ },
+ "SystemMemoryFullWarning": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "90.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 90.0"
+ },
+ "SystemTxPacketsDroppedTooHigh": {
+ "labels": {
+ "severity": "warning",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "{{ $value }} packets transmitted by the {{ $labels.interface }} interface on the {{ $labels.host }} node were dropped during the last minute.",
+ "summary": "100 transmitted packets were dropped"
+ },
+ "if": "increase(net_drop_out[1m]) > 100"
+ },
+ "SystemDiskFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The disk partition ({{ $labels.path }}) on the {{ $labels.host }} node is {{ $value }}% full for 2 minutes.",
+ "summary": "Disk partition {{ $labels.path }} is 95.0% full"
+ },
+ "for": "2m",
+ "if": "disk_used_percent >= 95.0"
+ },
+ "DockerServicePostgresqlPostgresqldbOutage": {
+ "labels": {
+ "severity": "critical",
+ "service": "docker"
+ },
+ "annotations": {
+ "description": "All Docker Swarm 'postgresql_postgresql-db' replicas are down for 2 minutes.",
+ "summary": "Docker Swarm 'postgresql_postgresql-db' service outage"
+ },
+ "for": "2m",
+ "if": "docker_swarm_tasks_running{service_name=\"postgresql_postgresql-db\"} == 0 or absent(docker_swarm_tasks_running{service_name=\"postgresql_postgresql-db\"}) == 1"
+ },
+ "SaltMasterServiceDown": {
+ "labels": {
+ "severity": "critical",
+ "service": "salt"
+ },
+ "annotations": {
+ "description": "The salt-master service on the {{ $labels.host }} node is down.",
+ "summary": "Salt-master service is down"
+ },
+ "if": "procstat_running{process_name=\"salt-master\"} == 0"
+ },
+ "CertificateExpirationCritical": {
+ "labels": {
+ "severity": "critical",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.source }} certificate on the {{ $labels.host }} node expires in less than 30 days.",
+ "summary": "The certificate expires in less than 30 days"
+ },
+ "if": "x509_cert_expiry / (24 * 60 * 60) < 30"
+ },
+ "SystemMemoryFullMajor": {
+ "labels": {
+ "severity": "major",
+ "service": "system"
+ },
+ "annotations": {
+ "description": "The {{ $labels.host }} node uses {{ $value }}% of memory for 2 minutes.",
+ "summary": "95.0% of memory is used"
+ },
+ "for": "2m",
+ "if": "mem_used_percent >= 95.0"
+ }
+ }
+ }
+ },
+ "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "machine_id": "e1d7cc5551ee409b895ce38bb098ed07",
+ "salt": {
+ "graph": [
+ {
+ "host": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "linux.system",
+ "relations": [
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//salt-formulas//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//td-agent//xenial xenial contrib",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//saltstack-2017.7//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//maas//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//percona//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//glusterfs-3.8//xenial/ xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//docker//xenial/ xenial stable",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//docker//xenial/ xenial stable",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/2019.2.0//extra//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-updates main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/2019.2.0//ubuntu/ xenial-security main restricted universe",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb [arch=amd64] http://mirror.mirantis.com/update/2019.2.0//salt-formulas//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ },
+ {
+ "host_external": "deb http://mirror.mirantis.com/update/2019.2.0//openstack-queens//xenial xenial main",
+ "direction": "source",
+ "type": "tcp-http",
+ "service": "apt.repo"
+ }
+ ]
+ },
+ {
+ "host": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-config",
+ "service": "reclass.storage",
+ "relations": null
+ },
+ {
+ "host": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-system",
+ "service": "ntp.client",
+ "relations": [
+ {
+ "host_external": "udp://ntp.cesnet.cz",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ },
+ {
+ "host_external": "udp://pool.ntp.org",
+ "direction": "source",
+ "type": "udp",
+ "service": "other-service"
+ }
+ ]
+ },
+ {
+ "host": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-config",
+ "service": "salt.minion",
+ "relations": [
+ {
+ "direction": "source",
+ "type": "tcp-0mq",
+ "service": "salt.master",
+ "host_from_target": "10.10.0.15"
+ }
+ ]
+ },
+ {
+ "host": "cfg01.ozhurba-os-oc-cicd-sl.local",
+ "type": "software-config",
+ "service": "salt.master",
+ "relations": null
+ }
+ ]
+ },
+ "backupninja": {
+ "client": {
+ "addresses": [
+ "10.11.0.15"
+ ]
+ },
+ "backup": {
+ "jenkins": {
+ "fs_includes": [
+ "/var/lib/jenkins"
+ ],
+ "fs_excludes": []
+ },
+ "salt": {
+ "fs_includes": [
+ "/srv/salt/reclass",
+ "/etc/salt/pki",
+ "/etc/pki/ca"
+ ],
+ "fs_excludes": []
+ },
+ "maas": {
+ "fs_includes": [
+ "/etc/maas",
+ "/var/lib/maas",
+ "/var/backups/postgresql"
+ ],
+ "fs_excludes": [
+ "/var/lib/maas/boot-resources"
+ ]
+ }
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/tests/res/_fake_net_data.json b/tests/res/_fake_net_data.json
new file mode 100644
index 0000000..eba2f42
--- /dev/null
+++ b/tests/res/_fake_net_data.json
@@ -0,0 +1,212 @@
+{
+ "ens4": {
+ "upper": null,
+ "qlen": "1000",
+ "if_index": "3",
+ "lower": null,
+ "mtu": "9050",
+ "state": "UP",
+ "other": [
+ "<BROADCAST,MULTICAST,UP,LOWER_UP>",
+ "qdisc",
+ "pfifo_fast",
+ "group",
+ "default"
+ ],
+ "link": {
+ "fa:16:3e:4e:f3:c8": {
+ "brd": "ff:ff:ff:ff:ff:ff",
+ "link-netnsid": "n/a",
+ "other": []
+ }
+ },
+ "at": "",
+ "type": "physical",
+ "ipv4": {
+ "10.10.100.7/16": {
+ "brd": "10.10.255.255",
+ "other": [
+ "scope",
+ "global",
+ "ens4"
+ ]
+ }
+ }
+ },
+ "ens5": {
+ "upper": null,
+ "qlen": "1000",
+ "if_index": "4",
+ "lower": null,
+ "mtu": "9050",
+ "state": "UP",
+ "other": [
+ "<BROADCAST,MULTICAST,UP,LOWER_UP>",
+ "qdisc",
+ "pfifo_fast",
+ "group",
+ "default"
+ ],
+ "link": {
+ "fa:16:3e:db:db:4f": {
+ "brd": "ff:ff:ff:ff:ff:ff",
+ "link-netnsid": "n/a",
+ "other": []
+ }
+ },
+ "at": "",
+ "type": "physical",
+ "ipv4": {
+ "10.12.100.14/16": {
+ "brd": "10.12.255.255",
+ "other": [
+ "scope",
+ "global",
+ "ens5"
+ ]
+ }
+ }
+ },
+ "ens6": {
+ "upper": null,
+ "qlen": "1000",
+ "if_index": "5",
+ "lower": null,
+ "mtu": "9050",
+ "state": "UP",
+ "other": [
+ "<BROADCAST,MULTICAST,UP,LOWER_UP>",
+ "qdisc",
+ "pfifo_fast",
+ "group",
+ "default"
+ ],
+ "link": {
+ "fa:16:3e:06:0f:3b": {
+ "brd": "ff:ff:ff:ff:ff:ff",
+ "link-netnsid": "n/a",
+ "other": []
+ }
+ },
+ "at": "",
+ "type": "physical",
+ "ipv4": {
+ "10.13.100.22/16": {
+ "brd": "10.13.255.255",
+ "other": [
+ "scope",
+ "global",
+ "ens6"
+ ]
+ }
+ }
+ },
+ "ens3": {
+ "upper": null,
+ "qlen": "1000",
+ "if_index": "2",
+ "lower": null,
+ "mtu": "9050",
+ "state": "UP",
+ "other": [
+ "<BROADCAST,MULTICAST,UP,LOWER_UP>",
+ "qdisc",
+ "pfifo_fast",
+ "group",
+ "default"
+ ],
+ "link": {
+ "fa:16:3e:0c:15:32": {
+ "brd": "ff:ff:ff:ff:ff:ff",
+ "link-netnsid": "n/a",
+ "other": []
+ }
+ },
+ "at": "",
+ "type": "physical",
+ "ipv4": {
+ "10.11.0.11/16": {
+ "brd": "10.11.255.255",
+ "other": [
+ "scope",
+ "global",
+ "ens3"
+ ]
+ },
+ "10.11.0.10/32": {
+ "brd": "n/a",
+ "other": [
+ "scope",
+ "global",
+ "ens3"
+ ]
+ }
+ }
+ },
+ "lo": {
+ "upper": null,
+ "qlen": "1000",
+ "if_index": "1",
+ "lower": null,
+ "mtu": "65536",
+ "state": "UNKNOWN",
+ "other": [
+ "<LOOPBACK,UP,LOWER_UP>",
+ "qdisc",
+ "noqueue",
+ "group",
+ "default"
+ ],
+ "link": {},
+ "at": "",
+ "type": "virtual",
+ "ipv4": {
+ "127.0.0.1/8": {
+ "brd": "n/a",
+ "other": [
+ "scope",
+ "host",
+ "lo"
+ ]
+ }
+ }
+ },
+ "routes": {
+ "10.12.0.0/16": {
+ "device": "ens5",
+ "source": "10.12.100.14",
+ "args": " proto kernel scope link ",
+ "gateway": null
+ },
+ "10.11.0.0/16": {
+ "device": "ens3",
+ "source": "10.11.0.11",
+ "args": " proto kernel scope link ",
+ "gateway": null
+ },
+ "default": {
+ "device": "ens4",
+ "args": "",
+ "gateway": "10.10.0.1"
+ },
+ "10.10.0.0/16": {
+ "device": "ens4",
+ "source": "10.10.100.7",
+ "args": " proto kernel scope link ",
+ "gateway": null
+ },
+ "raw": [],
+ "169.254.169.254": {
+ "device": "ens5",
+ "source": "n/a",
+ "args": "",
+ "gateway": "10.12.100.30"
+ },
+ "10.13.0.0/16": {
+ "device": "ens6",
+ "source": "10.13.100.22",
+ "args": " proto kernel scope link ",
+ "gateway": null
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/res/_fake_pillars.json b/tests/res/_fake_pillars.json
new file mode 100644
index 0000000..eaaf37a
--- /dev/null
+++ b/tests/res/_fake_pillars.json
@@ -0,0 +1,20 @@
+{
+ "cmp01.fakedomain.local": {
+ "_param:apt_mk_version": "2099.0.0",
+ "_param:openstack_version": "queens",
+ "_param:linux_system_codename": "xenial",
+ "_param:linux_system_architecture": "amd64"
+ },
+ "ctl01.fakedomain.local": {
+ "_param:apt_mk_version": "2099.0.0",
+ "_param:openstack_version": "queens",
+ "_param:linux_system_codename": "xenial",
+ "_param:linux_system_architecture": "amd64"
+ },
+ "cfg01.fakedomain.local": {
+ "_param:apt_mk_version": "2099.0.0",
+ "_param:openstack_version": "queens",
+ "_param:linux_system_codename": "xenial",
+ "_param:linux_system_architecture": "amd64"
+ }
+}
diff --git a/tests/res/_fake_pkg_versions.json b/tests/res/_fake_pkg_versions.json
new file mode 100644
index 0000000..984a8ee
--- /dev/null
+++ b/tests/res/_fake_pkg_versions.json
@@ -0,0 +1,16 @@
+{
+ "fakepackage-o": {
+ "candidate": "1.2.3-0ubuntu4",
+ "installed": "1.2.3-0ubuntu4",
+ "raw": "fakepackage-o:\n Installed: 1.2.3-0ubuntu4\n Candidate: 1.2.3-0ubuntu4\n Version table:\n *** 1.2.3-0ubuntu4 500\n 500 http://mirrors.fakedomain.com/2099.0.0//ubuntu trusty/main amd64 Packages\n 100 /var/lib/dpkg/status\n 1.2.2-0ubuntu4 500\n 500 http://mirrors.fakedomain.com/2099.0.0//ubuntu trusty/main amd64 Packages"
+ },
+ "fakepackage-m": {
+ "candidate": "1.2.3-0ubuntu4",
+ "installed": "1.2.2-0ubuntu4",
+ "raw": "fakepackage-m:\n Installed: 1.2.2-0ubuntu4\n Candidate: 1.2.3-0ubuntu4\n Version table:\n 1.2.3-0ubuntu4 500\n 500 http://mirrors.fakedomain.com/2099.0.0//ubuntu trusty/main amd64 Packages\n 100 /var/lib/dpkg/status\n *** 1.2.2-0ubuntu4 500\n 500 http://mirrors.fakedomain.com/2099.0.0//ubuntu trusty/main amd64 Packages"
+ }
+}
+
+
+
+
diff --git a/tests/res/_fake_service_status.txt b/tests/res/_fake_service_status.txt
new file mode 100644
index 0000000..bb6c018
--- /dev/null
+++ b/tests/res/_fake_service_status.txt
@@ -0,0 +1,44 @@
+ [ + ] acpid
+ [ + ] atd
+ [ - ] bootmisc.sh
+ [ - ] checkfs.sh
+ [ - ] checkroot-bootclean.sh
+ [ - ] checkroot.sh
+ [ + ] console-setup
+ [ + ] cron
+ [ + ] dbus
+ [ + ] grub-common
+ [ - ] hostname.sh
+ [ - ] hwclock.sh
+ [ + ] keepalived
+ [ + ] keyboard-setup
+ [ - ] killprocs
+ [ + ] kmod
+ [ - ] lvm2
+ [ + ] lvm2-lvmetad
+ [ + ] lvm2-lvmpolld
+ [ + ] mcelog
+ [ + ] mongodb
+ [ - ] mountall-bootclean.sh
+ [ - ] mountall.sh
+ [ - ] mountdevsubfs.sh
+ [ - ] mountkernfs.sh
+ [ - ] mountnfs-bootclean.sh
+ [ - ] mountnfs.sh
+ [ + ] networking
+ [ + ] ntp
+ [ - ] ondemand
+ [ + ] procps
+ [ + ] rc.local
+ [ + ] resolvconf
+ [ + ] rsyslog
+ [ + ] salt-minion
+ [ - ] sendsigs
+ [ + ] ssh
+ [ + ] sysfsutils
+ [ + ] td-agent
+ [ + ] udev
+ [ - ] umountfs
+ [ - ] umountnfs.sh
+ [ - ] umountroot
+ [ + ] urandom
\ No newline at end of file
diff --git a/tests/res/_fake_softnet_stats.txt b/tests/res/_fake_softnet_stats.txt
new file mode 100644
index 0000000..b8cbd88
--- /dev/null
+++ b/tests/res/_fake_softnet_stats.txt
@@ -0,0 +1,129 @@
+000110f2 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000224a4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0015d13e 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0016bd8f 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00058d31 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00247fcf 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001711b9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00344a08 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0009c5e1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+003140ea 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0003e1df 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0008f3e2 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000f2ed9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000529d9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001f8dc4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001b5e35 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0030e4b7 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000bbb13 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00095aae 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000e3ebf 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0025468b 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000586db 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0006fff9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00153629 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+003a3675 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0006e13b 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001c1063 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0004f1e0 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00149c23 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0002ba14 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000784dd 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0024cc9c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0004df22 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0002531d 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00050f7d 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00211e7c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000e43d6 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000c1a49 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00296867 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00219b1a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00126150 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0023137b 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000598f4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00049744 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001e3e36 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0018232e 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+002188d1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0005db7c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000f4f31 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0023c773 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000382d2 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00029686 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00093107 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00043d26 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0012f3e7 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0015fbd6 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0021e92c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000c06df 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00228552 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00099546 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0023c149 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000d8e92 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00119ec3 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001d8883 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+#
+000110fc 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000224ab 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0015d162 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0016bda2 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00058fd7 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+002480ef 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0017121f 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00344b00 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0009c610 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+003144c6 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0003e1e3 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0008f439 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000f2f34 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000529db 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001f8e50 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001b5ed4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0030e66f 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000bbb5c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00095ad8 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000e3eed 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00254aef 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000586e9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0007000a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0015362f 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+003a36a6 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0006e13c 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001c106a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0004f216 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00149c52 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0002ba31 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00078501 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0024cd98 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0004df25 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0002534e 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00050f9b 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00211ec2 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000e441d 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000c1aea 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+002968f3 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00219b2d 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001261d3 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+002314cd 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000598f8 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00049749 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001e3f71 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001823b9 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00218bff 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0005db81 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000f500b 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0023c7e6 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00038325 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0002968a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00093145 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00043d26 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0012f3f5 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0015fca3 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0021e9af 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000c0a53 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+002285ac 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000995e7 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+0023c3c1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+000d8fd1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+00119fc8 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+001d88bd 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
\ No newline at end of file
diff --git a/tests/res/_fakeempty.html b/tests/res/_fakeempty.html
new file mode 100644
index 0000000..65c2d70
--- /dev/null
+++ b/tests/res/_fakeempty.html
@@ -0,0 +1,13 @@
+<html>
+<head>
+<title>Index of /</title>
+</head>
+<body bgcolor="white">
+<h1>Index of /</h1>
+<hr>
+<pre>
+<a href="../">../</a>
+<hr>
+</pre>
+</body>
+</html>
diff --git a/tests/res/_fakepage.html b/tests/res/_fakepage.html
new file mode 100644
index 0000000..9d23d5b
--- /dev/null
+++ b/tests/res/_fakepage.html
@@ -0,0 +1,14 @@
+<html>
+<head>
+<title>Index of /</title>
+</head>
+<body bgcolor="white">
+<h1>Index of /</h1>
+<hr>
+<pre>
+<a href="../">../</a>
+<a href="_placeholder_">_placeholder_/</a> 11-Jan-2020 00:00 _type_
+<hr>
+</pre>
+</body>
+</html>
diff --git a/tests/res/_packages.txt b/tests/res/_packages.txt
new file mode 100644
index 0000000..3380512
--- /dev/null
+++ b/tests/res/_packages.txt
@@ -0,0 +1,48 @@
+Package: fakepackage-o
+Priority: optional
+Section: fakesection
+Installed-Size: 941
+Maintainer: Some non-mirantis developer <someuser@fakedomain.com>
+Original-Maintainer: Some Debian maintainer
+Architecture: amd64
+Source: fakesource
+Version: 1.2.3-0ubuntu4
+Replaces: fakepackage-r
+Depends: fakedependency
+Breaks: fakedependency
+Filename: pool/main/e/fakesource/fake
+Size: 8838
+MD5sum: f7f709442600bb60bc5e230f905773ae
+SHA1: 588d6aa693e4b9a9bda228360bc65dfab4527ed8
+SHA256: 4a34e416bb37191d0b8e6855b27cdbf7cd63fec182ab415cc9e71b19cfe55e48
+Description: Unit tests fake package
+Homepage: http://fakepage.org
+Description-md5: 1a2069e5dd5f4777061642b2d7c9a76a
+Bugs: https://fakebugs.net
+Origin: Unittests
+Supported: 1y
+Task: None
+
+Package: fakepackage-m
+Priority: optional
+Section: fakesection
+Installed-Size: 941
+Maintainer: Some mirantis developer <someuser@mirantis.com>
+Architecture: amd64
+Source: fakesource
+Version: 1.2.3-0ubuntu4
+Replaces: fakepackage-r
+Depends: fakedependency
+Breaks: fakedependency
+Filename: pool/main/e/fakesource/fake
+Size: 8838
+MD5sum: f7f709442600bb60bc5e230f905773ae
+SHA1: 588d6aa693e4b9a9bda228360bc65dfab4527ed8
+SHA256: 4a34e416bb37191d0b8e6855b27cdbf7cd63fec182ab415cc9e71b19cfe55e48
+Description: Unit tests fake package
+Homepage: http://fakepage.org
+Description-md5: 1a2069e5dd5f4777061642b2d7c9a76a
+Bugs: https://fakebugs.net
+Origin: Unittests
+Supported: 1y
+Task: None
\ No newline at end of file
diff --git a/tests/res/fake.csv b/tests/res/fake.csv
new file mode 100644
index 0000000..b3ea843
--- /dev/null
+++ b/tests/res/fake.csv
@@ -0,0 +1,8 @@
+type,package_name,node,status,action,installed,candidate,release
+mirantis,fakepackage-m,cfg01.fakedomain.local,OK,Upgrade possible,1.2.2-0ubuntu4,1.2.3-0ubuntu4,n/a
+mirantis,fakepackage-m,cmp01.fakedomain.local,OK,Upgrade possible,1.2.2-0ubuntu4,1.2.3-0ubuntu4,n/a
+mirantis,fakepackage-m,ctl01.fakedomain.local,OK,Upgrade possible,1.2.2-0ubuntu4,1.2.3-0ubuntu4,n/a
+
+
+
+
diff --git a/tests/res/models/model01/classes/cluster/model01/cicd/empty.yml b/tests/res/models/model01/classes/cluster/model01/cicd/empty.yml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/res/models/model01/classes/cluster/model01/cicd/empty.yml
diff --git a/tests/res/models/model01/classes/cluster/model01/non_yml.yaml b/tests/res/models/model01/classes/cluster/model01/non_yml.yaml
new file mode 100644
index 0000000..7c7ffbc
--- /dev/null
+++ b/tests/res/models/model01/classes/cluster/model01/non_yml.yaml
@@ -0,0 +1,2 @@
+# Some non yml file
+echo 1
diff --git a/tests/res/models/model01/classes/cluster/model01/openstack/init.yml b/tests/res/models/model01/classes/cluster/model01/openstack/init.yml
new file mode 100644
index 0000000..5db4df3
--- /dev/null
+++ b/tests/res/models/model01/classes/cluster/model01/openstack/init.yml
@@ -0,0 +1,225 @@
+classes:
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.update.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ceph
+ - system.linux.system.repo.mcp.apt_mirantis.update.ceph
+ - cluster.model01.kernel
+parameters:
+ _param:
+ openstack_region: RegionOne
+ admin_email: root@localhost
+ ##Neutron osv/nodvr + dpdk
+ neutron_global_physnet_mtu: 1500
+ neutron_external_mtu: 1500
+
+ neutron_enable_qos: False
+ neutron_enable_vlan_aware_vms: False
+ neutron_enable_bgp_vpn: False
+ neutron_bgp_vpn_driver: bagpipe
+ neutron_compute_external_access: False
+ neutron_control_dvr: False
+ neutron_gateway_dvr: False
+ neutron_compute_dvr: False
+
+ neutron_gateway_agent_mode: legacy
+ neutron_compute_agent_mode: legacy
+
+ neutron_l3_ha: True
+ neutron_tenant_network_types: "flat,vlan,vxlan"
+
+ cluster_internal_protocol: http
+
+ galera_server_cluster_name: openstack_cluster
+ galera_server_maintenance_password: ${_param:galera_server_maintenance_password_generated}
+ galera_server_admin_password: ${_param:galera_server_admin_password_generated}
+ galera_server_sst_password: ${_param:galera_server_sst_password_generated}
+ rabbitmq_secret_key: ${_param:rabbitmq_secret_key_generated}
+ rabbitmq_admin_password: ${_param:rabbitmq_admin_password_generated}
+ rabbitmq_openstack_password: ${_param:rabbitmq_openstack_password_generated}
+ glance_service_host: ${_param:openstack_service_host}
+ keystone_service_host: ${_param:openstack_service_host}
+ heat_service_host: ${_param:openstack_service_host}
+ heat_domain_admin_password: ${_param:heat_domain_admin_password_generated}
+ cinder_service_host: ${_param:openstack_service_host}
+ aodh_service_host: ${_param:openstack_service_host}
+ ceilometer_service_host: ${_param:openstack_service_host}
+ ceilometer_influxdb_password: ${_param:ceilometer_influxdb_password_generated}
+ nova_service_host: ${_param:openstack_service_host}
+ placement_service_host: ${_param:openstack_service_host}
+ neutron_service_host: ${_param:openstack_service_host}
+ glusterfs_service_host: ${_param:infra_kvm_address}
+
+ mysql_admin_user: root
+ mysql_admin_password: ${_param:galera_server_admin_password}
+ mysql_aodh_username: username
+ mysql_cinder_username: username
+ mysql_ceilometer_username: username
+ mysql_gnocchi_username: username
+ mysql_panko_username: username
+ mysql_glance_username: username
+ mysql_heat_username: username
+ mysql_keystone_username: username
+ mysql_neutron_username: username
+ mysql_nova_username: username
+ keystone_service_token: ${_param:keystone_service_token_generated}
+ keystone_ceilometer_username: username
+ keystone_panko_username: username
+ keystone_gnocchi_username: username
+ keystone_cinder_username: username
+ keystone_glance_username: username
+ keystone_heat_username: username
+ keystone_neutron_username: username
+ keystone_nova_username: username
+ keystone_aodh_username: username
+ ceilometer_secret_key: ${_param:ceilometer_secret_key_generated}
+ horizon_secret_key: ${_param:horizon_secret_key_generated}
+ horizon_identity_host: ${_param:openstack_control_address}
+ horizon_identity_encryption: none
+ keystone_service_protocol: ${_param:cluster_internal_protocol}
+ glance_service_protocol: ${_param:cluster_internal_protocol}
+ nova_service_protocol: ${_param:cluster_internal_protocol}
+ neutron_service_protocol: ${_param:cluster_internal_protocol}
+ heat_service_protocol: ${_param:cluster_internal_protocol}
+ cinder_service_protocol: ${_param:cluster_internal_protocol}
+ mongodb_server_replica_set: ceilometer
+ mongodb_ceilometer_password: ${_param:mongodb_ceilometer_password_generated}
+ mongodb_admin_password: ${_param:mongodb_admin_password_generated}
+ mongodb_shared_key: ${_param:mongodb_shared_key_generated}
+ metadata_password: ${_param:openstack_metadata_password_generated}
+ salt_pki_proxy_alt_names: DNS:${_param:cluster_public_host},IP:${_param:openstack_proxy_address},DNS:horizon.${_param:cluster_domain}
+ stacklight_notification_topic: stacklight_notifications
+ openstack_notification_topics: ${_param:stacklight_notification_topic}
+
+ # OpenStack Compute
+ openstack_compute_rack01_hostname: mhv
+
+ # openstack service hostnames
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_hostname: prx02
+ openstack_control_hostname: ctl
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_hostname: ctl03
+ openstack_database_hostname: dbs
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_hostname: dbs03
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_hostname: msg03
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node03_hostname: gtw03
+
+ nginx_proxy_gerrit_server_site_host: ${_param:cluster_public_host}
+ nginx_proxy_gerrit_server_site_port: 8070
+ openstack_log_appender: true
+ openstack_fluentd_handler_enabled: true
+ # Unversioned endpoint for pike, queens
+ keystone_public_path: "/"
+ keystone_internal_path: "/"
+ keystone_admin_path: "/"
+
+ # Memcache security
+ openstack_heat_cloudwatch_api_enabled: False
+
+ # Barbican
+ barbican_integration_enabled: False
+ barbican_enabled: False
+ linux:
+ network:
+ host:
+ prx:
+ address: ${_param:openstack_proxy_address}
+ names:
+ - admin
+ - ${_param:openstack_admin_url}
+ - ${_param:openstack_proxy_hostname}
+ - ${_param:openstack_proxy_hostname}.${_param:cluster_domain}
+ prx01:
+ address: ${_param:openstack_proxy_node01_address}
+ names:
+ - ${_param:openstack_proxy_node01_hostname}
+ - ${_param:openstack_proxy_node01_hostname}.${_param:cluster_domain}
+ prx02:
+ address: ${_param:openstack_proxy_node02_address}
+ names:
+ - ${_param:openstack_proxy_node02_hostname}
+ - ${_param:openstack_proxy_node02_hostname}.${_param:cluster_domain}
+ ctl:
+ address: ${_param:openstack_control_address}
+ names:
+ - ${_param:openstack_control_hostname}
+ - ${_param:openstack_control_hostname}.${_param:cluster_domain}
+ ctl01:
+ address: ${_param:openstack_control_node01_address}
+ names:
+ - ${_param:openstack_control_node01_hostname}
+ - ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
+ ctl02:
+ address: ${_param:openstack_control_node02_address}
+ names:
+ - ${_param:openstack_control_node02_hostname}
+ - ${_param:openstack_control_node02_hostname}.${_param:cluster_domain}
+ ctl03:
+ address: ${_param:openstack_control_node03_address}
+ names:
+ - ${_param:openstack_control_node03_hostname}
+ - ${_param:openstack_control_node03_hostname}.${_param:cluster_domain}
+ msg:
+ address: ${_param:openstack_message_queue_address}
+ names:
+ - ${_param:openstack_message_queue_hostname}
+ - ${_param:openstack_message_queue_hostname}.${_param:cluster_domain}
+ msg01:
+ address: ${_param:openstack_message_queue_node01_address}
+ names:
+ - ${_param:openstack_message_queue_node01_hostname}
+ - ${_param:openstack_message_queue_node01_hostname}.${_param:cluster_domain}
+ msg02:
+ address: ${_param:openstack_message_queue_node02_address}
+ names:
+ - ${_param:openstack_message_queue_node02_hostname}
+ - ${_param:openstack_message_queue_node02_hostname}.${_param:cluster_domain}
+ msg03:
+ address: ${_param:openstack_message_queue_node03_address}
+ names:
+ - ${_param:openstack_message_queue_node03_hostname}
+ - ${_param:openstack_message_queue_node03_hostname}.${_param:cluster_domain}
+ dbs:
+ address: ${_param:openstack_database_address}
+ names:
+ - ${_param:openstack_database_hostname}
+ - ${_param:openstack_database_hostname}.${_param:cluster_domain}
+ dbs01:
+ address: ${_param:openstack_database_node01_address}
+ names:
+ - ${_param:openstack_database_node01_hostname}
+ - ${_param:openstack_database_node01_hostname}.${_param:cluster_domain}
+ dbs02:
+ address: ${_param:openstack_database_node02_address}
+ names:
+ - ${_param:openstack_database_node02_hostname}
+ - ${_param:openstack_database_node02_hostname}.${_param:cluster_domain}
+ dbs03:
+ address: ${_param:openstack_database_node03_address}
+ names:
+ - ${_param:openstack_database_node03_hostname}
+ - ${_param:openstack_database_node03_hostname}.${_param:cluster_domain}
+ gtw01:
+ address: ${_param:openstack_gateway_node01_address}
+ names:
+ - ${_param:openstack_gateway_node01_hostname}
+ - ${_param:openstack_gateway_node01_hostname}.${_param:cluster_domain}
+ gtw02:
+ address: ${_param:openstack_gateway_node02_address}
+ names:
+ - ${_param:openstack_gateway_node02_hostname}
+ - ${_param:openstack_gateway_node02_hostname}.${_param:cluster_domain}
+ gtw03:
+ address: ${_param:openstack_gateway_node03_address}
+ names:
+ - ${_param:openstack_gateway_node03_hostname}
+ - ${_param:openstack_gateway_node03_hostname}.${_param:cluster_domain}
diff --git a/tests/res/models/model01/classes/system/empty.yml b/tests/res/models/model01/classes/system/empty.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/tests/res/models/model01/classes/system/empty.yml
@@ -0,0 +1 @@
+---
diff --git a/tests/res/models/model01/nodes/cfg01.az2.n9.jungle.tech.yml b/tests/res/models/model01/nodes/cfg01.az2.n9.jungle.tech.yml
new file mode 100644
index 0000000..4dfc666
--- /dev/null
+++ b/tests/res/models/model01/nodes/cfg01.az2.n9.jungle.tech.yml
@@ -0,0 +1,11 @@
+classes:
+- cluster.sm_sjc.infra.config
+parameters:
+ _param:
+ linux_system_codename: xenial
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: az2.n9.jungle.tech
+
\ No newline at end of file
diff --git a/tests/res/models/model02/classes/cluster/model02/cicd/empty.yml b/tests/res/models/model02/classes/cluster/model02/cicd/empty.yml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/res/models/model02/classes/cluster/model02/cicd/empty.yml
diff --git a/tests/res/models/model02/classes/cluster/model02/cicd/nonyml.txt b/tests/res/models/model02/classes/cluster/model02/cicd/nonyml.txt
new file mode 100644
index 0000000..605904d
--- /dev/null
+++ b/tests/res/models/model02/classes/cluster/model02/cicd/nonyml.txt
@@ -0,0 +1,2 @@
+# Some non yml file
+{ "a": 1 }
\ No newline at end of file
diff --git a/tests/res/models/model02/classes/cluster/model02/openstack/init.yml b/tests/res/models/model02/classes/cluster/model02/openstack/init.yml
new file mode 100644
index 0000000..5081da8
--- /dev/null
+++ b/tests/res/models/model02/classes/cluster/model02/openstack/init.yml
@@ -0,0 +1,224 @@
+classes:
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.update.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ceph
+ - system.linux.system.repo.mcp.apt_mirantis.update.ceph
+ - cluster.model02.kernel
+parameters:
+ _param:
+ openstack_region: RegionOne
+ admin_email: root@localhost
+ neutron_global_physnet_mtu: 1500
+ neutron_external_mtu: 1500
+
+ neutron_enable_qos: False
+ neutron_enable_vlan_aware_vms: False
+ neutron_enable_bgp_vpn: False
+ neutron_bgp_vpn_driver: bagpipe
+ neutron_compute_external_access: False
+ neutron_control_dvr: False
+ neutron_gateway_dvr: False
+ neutron_compute_dvr: False
+
+ neutron_gateway_agent_mode: legacy
+ neutron_compute_agent_mode: legacy
+
+ neutron_l3_ha: True
+ neutron_tenant_network_types: "flat,vlan,vxlan"
+
+ cluster_internal_protocol: http
+
+ galera_server_cluster_name: openstack_cluster
+ galera_server_maintenance_password: ${_param:galera_server_maintenance_password_generated}
+ galera_server_admin_password: ${_param:galera_server_admin_password_generated}
+ galera_server_sst_password: ${_param:galera_server_sst_password_generated}
+ rabbitmq_secret_key: ${_param:rabbitmq_secret_key_generated}
+ rabbitmq_admin_password: ${_param:rabbitmq_admin_password_generated}
+ rabbitmq_openstack_password: ${_param:rabbitmq_openstack_password_generated}
+ glance_service_host: ${_param:openstack_service_host}
+ keystone_service_host: ${_param:openstack_service_host}
+ heat_service_host: ${_param:openstack_service_host}
+ heat_domain_admin_password: ${_param:heat_domain_admin_password_generated}
+ cinder_service_host: ${_param:openstack_service_host}
+ aodh_service_host: ${_param:openstack_service_host}
+ ceilometer_service_host: ${_param:openstack_service_host}
+ ceilometer_influxdb_password: ${_param:ceilometer_influxdb_password_generated}
+ nova_service_host: ${_param:openstack_service_host}
+ placement_service_host: ${_param:openstack_service_host}
+ neutron_service_host: ${_param:openstack_service_host}
+ glusterfs_service_host: ${_param:infra_kvm_address}
+
+ mysql_admin_user: root
+ mysql_admin_password: ${_param:galera_server_admin_password}
+ mysql_aodh_username: username
+ mysql_cinder_username: username
+ mysql_ceilometer_username: username
+ mysql_gnocchi_username: username
+ mysql_panko_username: username
+ mysql_glance_username: other_user
+ mysql_heat_username: username
+ mysql_keystone_username: username
+ mysql_neutron_username: username
+ mysql_nova_username: username
+ keystone_service_token: ${_param:keystone_service_token_generated}
+ keystone_ceilometer_username: username
+ keystone_panko_username: username
+ keystone_gnocchi_username: username
+ keystone_cinder_username: username
+ keystone_glance_username: username
+ keystone_heat_username: username
+ keystone_neutron_username: username
+ keystone_nova_username: username
+ keystone_aodh_username: username
+ ceilometer_secret_key: ${_param:ceilometer_secret_key_generated}
+ horizon_secret_key: ${_param:horizon_secret_key_generated}
+ horizon_identity_host: ${_param:openstack_control_address}
+ horizon_identity_encryption: none
+ keystone_service_protocol: ${_param:cluster_internal_protocol}
+ glance_service_protocol: ${_param:cluster_internal_protocol}
+ nova_service_protocol: ${_param:cluster_internal_protocol}
+ neutron_service_protocol: ${_param:cluster_internal_protocol}
+ heat_service_protocol: ${_param:cluster_internal_protocol}
+ cinder_service_protocol: ${_param:cluster_internal_protocol}
+ mongodb_server_replica_set: ceilometer
+ mongodb_ceilometer_password: ${_param:mongodb_ceilometer_password_generated}
+ mongodb_admin_password: ${_param:mongodb_admin_password_generated}
+ mongodb_shared_key: ${_param:mongodb_shared_key_generated}
+ metadata_password: ${_param:openstack_metadata_password_generated}
+ salt_pki_proxy_alt_names: DNS:${_param:cluster_public_host},IP:${_param:openstack_proxy_address},DNS:horizon.${_param:cluster_domain}
+ stacklight_notification_topic: stacklight_notifications
+ openstack_notification_topics: ${_param:stacklight_notification_topic}
+
+ # OpenStack Compute
+ openstack_compute_rack01_hostname: mhv
+
+ # openstack service hostnames
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_hostname: prx02
+ openstack_control_hostname: ctl
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_hostname: ctl03
+ openstack_database_hostname: dbs
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_hostname: dbs03
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_hostname: msg03
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node03_hostname: gtw03
+
+ nginx_proxy_gerrit_server_site_host: ${_param:cluster_public_host}
+ nginx_proxy_gerrit_server_site_port: 8070
+ openstack_log_appender: true
+ openstack_fluentd_handler_enabled: true
+ # Unversioned endpoint for pike, queens
+ keystone_public_path: "/"
+ keystone_internal_path: "/"
+ keystone_admin_path: "/"
+
+ # Memcache security
+ openstack_heat_cloudwatch_api_enabled: False
+
+ # Barbican
+ barbican_integration_enabled: False
+ barbican_enabled: False
+ linux:
+ network:
+ host:
+ prx:
+ address: ${_param:openstack_proxy_address}
+ names:
+ - admin
+ - ${_param:openstack_admin_url}
+ - ${_param:openstack_proxy_hostname}
+ - ${_param:openstack_proxy_hostname}.${_param:cluster_domain}
+ prx01:
+ address: ${_param:openstack_proxy_node01_address}
+ names:
+ - ${_param:openstack_proxy_node01_hostname}
+ - ${_param:openstack_proxy_node01_hostname}.${_param:cluster_domain}
+ prx02:
+ address: ${_param:openstack_proxy_node02_address}
+ names:
+ - ${_param:openstack_proxy_node02_hostname}
+ - ${_param:openstack_proxy_node02_hostname}.${_param:cluster_domain}
+ ctl:
+ address: ${_param:openstack_control_address}
+ names:
+ - ${_param:openstack_control_hostname}
+ - ${_param:openstack_control_hostname}.${_param:cluster_domain}
+ ctl01:
+ address: ${_param:openstack_control_node01_address}
+ names:
+ - ${_param:openstack_control_node01_hostname}
+ - ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
+ ctl02:
+ address: ${_param:openstack_control_node02_address}
+ names:
+ - ${_param:openstack_control_node02_hostname}
+ - ${_param:openstack_control_node02_hostname}.${_param:cluster_domain}
+ ctl03:
+ address: ${_param:openstack_control_node03_address}
+ names:
+ - ${_param:openstack_control_node03_hostname}
+ - ${_param:openstack_control_node03_hostname}.${_param:cluster_domain}
+ msg:
+ address: ${_param:openstack_message_queue_address}
+ names:
+ - ${_param:openstack_message_queue_hostname}
+ - ${_param:openstack_message_queue_hostname}.${_param:cluster_domain}
+ msg01:
+ address: ${_param:openstack_message_queue_node01_address}
+ names:
+ - ${_param:openstack_message_queue_node01_hostname}
+ - ${_param:openstack_message_queue_node01_hostname}.${_param:cluster_domain}
+ msg02:
+ address: ${_param:openstack_message_queue_node02_address}
+ names:
+ - ${_param:openstack_message_queue_node02_hostname}
+ - ${_param:openstack_message_queue_node02_hostname}.${_param:cluster_domain}
+ msg03:
+ address: ${_param:openstack_message_queue_node03_address}
+ names:
+ - ${_param:openstack_message_queue_node03_hostname}
+ - ${_param:openstack_message_queue_node03_hostname}.${_param:cluster_domain}
+ dbs:
+ address: ${_param:openstack_database_address}
+ names:
+ - ${_param:openstack_database_hostname}
+ - ${_param:openstack_database_hostname}.${_param:cluster_domain}
+ dbs01:
+ address: ${_param:openstack_database_node01_address}
+ names:
+ - ${_param:openstack_database_node01_hostname}
+ - ${_param:openstack_database_node01_hostname}.${_param:cluster_domain}
+ dbs02:
+ address: ${_param:openstack_database_node02_address}
+ names:
+ - ${_param:openstack_database_node02_hostname}
+ - ${_param:openstack_database_node02_hostname}.${_param:cluster_domain}
+ dbs03:
+ address: ${_param:openstack_database_node03_address}
+ names:
+ - ${_param:openstack_database_node03_hostname}
+ - ${_param:openstack_database_node03_hostname}.${_param:cluster_domain}
+ gtw01:
+ address: ${_param:openstack_gateway_node01_address}
+ names:
+ - ${_param:openstack_gateway_node01_hostname}
+ - ${_param:openstack_gateway_node01_hostname}.${_param:cluster_domain}
+ gtw02:
+ address: ${_param:openstack_gateway_node02_address}
+ names:
+ - ${_param:openstack_gateway_node02_hostname}
+ - ${_param:openstack_gateway_node02_hostname}.${_param:cluster_domain}
+ gtw03:
+ address: ${_param:openstack_gateway_node03_address}
+ names:
+ - ${_param:openstack_gateway_node03_hostname}
+ - ${_param:openstack_gateway_node03_hostname}.${_param:cluster_domain}
diff --git a/tests/res/models/model02/classes/system/empty.yml b/tests/res/models/model02/classes/system/empty.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/tests/res/models/model02/classes/system/empty.yml
@@ -0,0 +1 @@
+---
diff --git a/tests/res/models/model02/nodes/cfg01.az2.n9.jungle.tech.yml b/tests/res/models/model02/nodes/cfg01.az2.n9.jungle.tech.yml
new file mode 100644
index 0000000..4dfc666
--- /dev/null
+++ b/tests/res/models/model02/nodes/cfg01.az2.n9.jungle.tech.yml
@@ -0,0 +1,11 @@
+classes:
+- cluster.sm_sjc.infra.config
+parameters:
+ _param:
+ linux_system_codename: xenial
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: az2.n9.jungle.tech
+
\ No newline at end of file
diff --git a/tests/res/textfile.txt.gz b/tests/res/textfile.txt.gz
new file mode 100644
index 0000000..3fb30b3
--- /dev/null
+++ b/tests/res/textfile.txt.gz
Binary files differ
diff --git a/tests/test_base.py b/tests/test_base.py
index c4a627f..b30a6e5 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -1,29 +1,122 @@
import contextlib
import io
+import os
import sys
import unittest
+import logging
+
+from copy import deepcopy
+
+tests_dir = os.path.dirname(__file__)
+tests_dir = os.path.normpath(tests_dir)
+tests_dir = os.path.abspath(tests_dir)
class CfgCheckerTestBase(unittest.TestCase):
dummy_base_var = 0
+ last_stderr = ""
+ last_stdout = ""
- def _safe_import_module(self, _str):
+ def _safe_import(self, _str):
+ if "." not in _str:
+ return self._safe_import_module(_str)
+ else:
+ return self._safe_import_class(_str)
+
+ def _safe_import_class(self, _str):
+ _import_msg = ""
+ attrs = _str.split('.')
+ _import_msg, _module = self._safe_import_module(attrs[0])
+ if _import_msg:
+ return _import_msg, _module
+ else:
+ for attr_name in attrs[1:]:
+ _module = getattr(_module, attr_name)
+ return "", _module
+
+ @staticmethod
+ def _safe_import_module(_str, *args, **kwargs):
_import_msg = ""
_module = None
try:
- _module = __import__(_str)
+ _module = __import__(_str, *args, **kwargs)
except ImportError as e:
_import_msg = e.message
return _import_msg, _module
+ @staticmethod
+ def _safe_run(_obj, *args, **kwargs):
+ _m = ""
+ try:
+ _r = _obj(*args, **kwargs)
+ except Exception as ex:
+ if hasattr(ex, 'message'):
+ _m = "{}: {}".format(str(_obj), ex.message)
+ elif hasattr(ex, 'msg'):
+ _m = "{}: {}".format(str(_obj), ex.msg)
+ else:
+ _m = "{}: {}".format(str(_obj), "<no message>")
+ return _r, _m
+
+ def run_main(self, args_list):
+ _module_name = 'cfg_checker.cfg_check'
+ _m = self._try_import(_module_name)
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ sys.argv = ["fake.py"] + args_list
+ _m.cfg_check.config_check_entrypoint()
+ return ep.exception.code
+
+ def run_cli(self, command, args_list):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv = ["fake.py"] + args_list
+ _m.cli.command.cli_command(
+ "Fake run for '{} {}'".format(
+ command,
+ " ".join(args_list)
+ ),
+ command
+ )
+ return ep.exception.code
+
@contextlib.contextmanager
def redirect_output(self):
save_stdout = sys.stdout
save_stderr = sys.stderr
- sys.stdout = io.BytesIO()
- sys.stderr = io.BytesIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
+ logging.disable(logging.CRITICAL)
yield
+ self.last_stderr = sys.stderr.read()
+ self.last_stdout = sys.stdout.read()
sys.stdout = save_stdout
sys.stderr = save_stderr
+
+ @contextlib.contextmanager
+ def save_arguments(self):
+ _argv = deepcopy(sys.argv)
+ yield
+ sys.argv = _argv
+
+ def _try_import(self, module_name):
+ with self.redirect_output():
+ _msg, _m = self._safe_import_module(module_name)
+
+ self.assertEqual(
+ len(_msg),
+ 0,
+ "Error importing '{}': {}".format(
+ module_name,
+ _msg
+ )
+ )
+
+ return _m
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000..74928e3
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,159 @@
+from unittest import mock
+
+from tests.test_base import CfgCheckerTestBase
+
+
+class TestCliCommands(CfgCheckerTestBase):
+ def test_do_cli_main_command(self):
+ _module_name = 'cfg_checker.cfg_check'
+ _m = self._try_import(_module_name)
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv = ["fake.py", "reclass", "list", "-p", "/tmp"]
+ _m.cfg_check.config_check_entrypoint()
+
+ self.assertEqual(
+ ep.exception.code,
+ 0,
+ "'mcp-checker reclass list -p /tmp' command failed"
+ )
+
+ def test_do_cli_main_command_debug(self):
+ _module_name = 'cfg_checker.cfg_check'
+ _m = self._try_import(_module_name)
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv = [
+ "fake.py",
+ "-d",
+ "reclass",
+ "list",
+ "-p",
+ "/tmp"
+ ]
+ _m.cfg_check.config_check_entrypoint()
+
+ self.assertEqual(
+ ep.exception.code,
+ 0,
+ "mcp-checker command failes"
+ )
+
+ def test_cli_main_unknown_argument(self):
+ _module_name = 'cfg_checker.cfg_check'
+ _m = self._try_import(_module_name)
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv.append("reclass")
+ sys.argv.append("list")
+ _m.cfg_check.config_check_entrypoint()
+
+ self.assertEqual(
+ ep.exception.code,
+ 1,
+ "Unknown argument not handled"
+ )
+
+ def test_do_cli_module_command(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _command = "reclass"
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv = ["fake.py", "list", "-p", "/tmp"]
+ _m.cli.command.cli_command(
+ "Fake Reclass Comparer",
+ _command
+ )
+
+ self.assertEqual(
+ ep.exception.code,
+ 0,
+ "Cli command execution failed"
+ )
+
+ def test_do_cli_module_command_with_error(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _command = "reclass"
+ with self.save_arguments():
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ import sys
+ sys.argv = ["fake.py", "list", "-p", "/notexistingfolder"]
+ _m.cli.command.cli_command(
+ "Fake Reclass Comparer",
+ _command
+ )
+
+ self.assertEqual(
+ ep.exception.code,
+ 1,
+ "Cli command execution failed"
+ )
+
+ def test_cli_module_unknown_command(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _fake_args = mock.MagicMock(name="FakeArgsClass")
+ _command = "unknowncommand"
+ with self.redirect_output():
+ _r_value = _m.cli.command.execute_command(_fake_args, _command)
+
+ self.assertEqual(
+ _r_value,
+ 1,
+ "Unknown command 'type' not handled"
+ )
+
+ def test_cli_module_no_type(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _type = {}
+ _command = "unknowncommand"
+ with self.redirect_output():
+ _r_value = _m.cli.command.execute_command(_type, _command)
+
+ self.assertEqual(
+ _r_value,
+ 1,
+ "Unknown command not handled"
+ )
+
+ def test_cli_module_unknown_type(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _fake_args = mock.MagicMock(name="FakeArgsClass")
+ _command = "reclass"
+ with self.redirect_output():
+ _r_value = _m.cli.command.execute_command(_fake_args, _command)
+
+ self.assertEqual(
+ _r_value,
+ 1,
+ "Unknown command not handled"
+ )
+
+ def test_cli_module_unknown_argument(self):
+ _module_name = 'cfg_checker.cli.command'
+ _m = self._try_import(_module_name)
+ _command = "reclass"
+ with self.redirect_output():
+ with self.assertRaises(SystemExit) as ep:
+ _m.cli.command.cli_command(
+ "Fake Reclass Comparer",
+ _command
+ )
+
+ self.assertEqual(
+ ep.exception.code,
+ 1,
+ "Unknown argument not handled"
+ )
diff --git a/tests/test_common.py b/tests/test_common.py
new file mode 100644
index 0000000..65159a1
--- /dev/null
+++ b/tests/test_common.py
@@ -0,0 +1,208 @@
+import inspect
+import os
+import sys
+from unittest import mock
+
+
+from tests.test_base import CfgCheckerTestBase
+from tests.test_base import tests_dir
+
+gzip_filename = "textfile.txt.gz"
+fake_gzip_file_path = os.path.join(tests_dir, 'res', gzip_filename)
+_patch_buf = []
+with open(fake_gzip_file_path, 'rb') as _f:
+ _patch_buf = _f.read()
+
+
+def mocked_requests_get(*args, **kwargs):
+ class MockResponse:
+ def __init__(self, content, status_code):
+ self.content = content
+ self.status_code = status_code
+
+ def content(self):
+ return self.content
+
+ if args[0] == fake_gzip_file_path:
+ return MockResponse(_patch_buf, 200)
+
+ return MockResponse(None, 404)
+
+
+class TestCommonModules(CfgCheckerTestBase):
+ def test_exceptions(self):
+ _m = self._try_import("cfg_checker.common.exception")
+ # Get all classes from the exceptions module
+ _classes = inspect.getmembers(
+ sys.modules[_m.common.exception.__name__],
+ inspect.isclass
+ )
+ # Create instance for all detected classes except for the Base one
+ _errors = []
+ for _name, _class in _classes:
+ if _name.startswith("CheckerBase"):
+ continue
+ _, _msg = self._safe_run(_class, "Fake exception message")
+ if _msg:
+ _errors.append(_msg)
+
+ self.assertEqual(
+ len(_errors),
+ 0,
+ "Invalid Exception classes detected: \n{}".format(
+ "\n".join(_errors)
+ )
+ )
+
+ def test_file_utils(self):
+ # File operations itself is not to be tested
+ # Only classes that provide api methods
+ # I.e. no exceptions - no errors,
+ # file contents is not to be checked, only return types
+ _m = self._try_import("cfg_checker.common.file_utils")
+ _futils = _m.common.file_utils
+ _filename = "/tmp/fakefile.txt"
+ _fakestr = "Fake String in the file"
+ _errors = []
+
+ # write_str_to_file
+ _, _msg = self._safe_run(
+ _futils.write_str_to_file,
+ _filename,
+ _fakestr
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ # append_str_to_file
+ _, _msg = self._safe_run(
+ _futils.append_str_to_file,
+ _filename,
+ _fakestr
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ # remove_file
+ _, _msg = self._safe_run(_futils.remove_file, _filename)
+ if _msg:
+ _errors.append(_msg)
+
+ # write_lines_to_file
+ _, _msg = self._safe_run(
+ _futils.write_lines_to_file,
+ _filename,
+ [_fakestr]
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ # append_lines_to_file
+ _, _msg = self._safe_run(
+ _futils.append_lines_to_file,
+ _filename,
+ [_fakestr]
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ # append_line_to_file
+ _, _msg = self._safe_run(
+ _futils.append_line_to_file,
+ _filename,
+ _fakestr
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ # read_file
+ _r, _msg = self._safe_run(_futils.read_file, _filename)
+ if _msg:
+ _errors.append(_msg)
+ self.assertNotEqual(
+ len(_r),
+ 0,
+ "Empty buffer returned by 'read_file'"
+ )
+
+ # read_file_as_lines
+ _r, _msg = self._safe_run(_futils.read_file_as_lines, _filename)
+ if _msg:
+ _errors.append(_msg)
+ self.assertNotEqual(
+ len(_r),
+ 0,
+ "Empty buffer returned by 'read_file_as_lines'"
+ )
+ self.assertIsInstance(
+ _r,
+ list,
+ "Non-list type returned by 'read_file_as_lines'"
+ )
+ # get_file_info_fd
+ with open(_filename) as _fd:
+ _r, _msg = self._safe_run(_futils.get_file_info_fd, _fd)
+ if _msg:
+ _errors.append(_msg)
+ self.assertIsInstance(
+ _r,
+ dict,
+ "Non-dict type returned by get_file_info_fd"
+ )
+ _, _msg = self._safe_run(_futils.remove_file, _filename)
+
+ # get_gzipped_file
+
+ _folder = "/tmp/cfgcheckertmpfolder"
+ # ensure_folder_exists
+ _, _msg = self._safe_run(_futils.ensure_folder_exists, _folder)
+ if _msg:
+ _errors.append(_msg)
+ _, _msg = self._safe_run(_futils.ensure_folder_exists, _folder)
+ if _msg:
+ _errors.append(_msg)
+
+ # ensure_folder_removed
+ _, _msg = self._safe_run(_futils.ensure_folder_removed, _folder)
+ if _msg:
+ _errors.append(_msg)
+ _, _msg = self._safe_run(_futils.ensure_folder_removed, _folder)
+ if _msg:
+ _errors.append(_msg)
+
+ self.assertEqual(
+ len(_errors),
+ 0,
+ "Invalid file operations: \n{}".format(
+ "\n".join(_errors)
+ )
+ )
+
+ @mock.patch(
+ 'requests.get',
+ side_effect=mocked_requests_get
+ )
+ def test_get_gzip_file(self, mock_get):
+ _m = self._try_import("cfg_checker.common.file_utils")
+ _futils = _m.common.file_utils
+ _fakecontent = b"fakecontent\n"
+ _errors = []
+
+ # Call the method with patched data
+ _buf, _msg = self._safe_run(
+ _futils.get_gzipped_file,
+ fake_gzip_file_path
+ )
+ if _msg:
+ _errors.append(_msg)
+
+ self.assertNotEqual(
+ len(_buf),
+ 0,
+ "Empty buffer returned by 'get_gzipped_file'"
+ )
+ self.assertEqual(
+ _buf,
+ _fakecontent,
+ "Incorrect content returned by 'get_gzipped_file'"
+ )
diff --git a/tests/test_entrypoints.py b/tests/test_entrypoints.py
index eb463cc..6cbee3d 100644
--- a/tests/test_entrypoints.py
+++ b/tests/test_entrypoints.py
@@ -1,4 +1,4 @@
-from test_base import CfgCheckerTestBase
+from tests.test_base import CfgCheckerTestBase
class TestEntrypoints(CfgCheckerTestBase):
diff --git a/tests/test_network.py b/tests/test_network.py
new file mode 100644
index 0000000..2dc07b3
--- /dev/null
+++ b/tests/test_network.py
@@ -0,0 +1,97 @@
+import os
+
+from unittest.mock import patch
+
+from tests.mocks import mocked_salt_post, mocked_salt_get
+from tests.mocks import _res_dir
+from tests.mocks import mocked_shell, _shell_salt_path
+from tests.test_base import CfgCheckerTestBase
+
+from cfg_checker.modules.network.network_errors import NetworkErrors
+
+
+# Fake ErrorIndex
+# Dotted paths of the classes to be patched in the tests below.
+# NOTE(review): _ErrorIndex_path is not referenced in this file — presumably
+# kept for future tests; confirm before removing.
+_ErrorIndex_path = "cfg_checker.helpers.errors.ErrorIndex"
+_NErrors_path = "cfg_checker.modules.network.network_errors.NetworkErrors"
+
+# Shared fake NetworkErrors instance pointed at the test resources folder,
+# so test runs never write error logs into the real location.
+_fake_nerrors = NetworkErrors(folder="tests/res/fakeerrors")
+_fake_nerrors._error_logs_folder_name = "tests/res/fakeerrors"
+
+
+class TestNetworkModule(CfgCheckerTestBase):
+    """CLI-level tests for the 'network' command group.
+
+    All salt HTTP traffic ('requests.get'/'requests.post') and shell calls
+    are mocked, and NetworkErrors is replaced with a shared fake instance
+    writing into the test resources folder. Each test asserts only that the
+    command exits with code 0.
+
+    NOTE(review): unittest.mock injects mock arguments innermost-first, so
+    the parameter names (m_get, m_post, m_shell) do not match the decorator
+    order; harmless here since the mocks are never used directly — confirm
+    before renaming.
+    """
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    @patch(_NErrors_path, new=_fake_nerrors)
+    def test_network_list(self, m_get, m_post, m_shell):
+        """'network list' exits with 0 against mocked salt data."""
+        _args = ["list"]
+        # mapper module imports NetworkErrors under its own name,
+        # so it must be patched separately
+        _pm = "cfg_checker.modules.network.mapper.NetworkErrors"
+        with patch(_pm, new=_fake_nerrors):
+            _r_code = self.run_cli(
+                "network",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-net {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    @patch(_NErrors_path, new=_fake_nerrors)
+    def test_network_map(self, m_get, m_post, m_shell):
+        """'network map' exits with 0 against mocked salt data."""
+        _args = ["map"]
+        with patch(
+            "cfg_checker.modules.network.mapper.NetworkErrors",
+            new=_fake_nerrors
+        ):
+            _r_code = self.run_cli(
+                "network",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-net {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    @patch(_NErrors_path, new=_fake_nerrors)
+    def test_network_check(self, m_get, m_post, m_shell):
+        """'network check' exits with 0 against mocked salt data."""
+        _args = ["check"]
+        # checker module holds its own NetworkErrors reference
+        with patch(
+            "cfg_checker.modules.network.checker.NetworkErrors",
+            new=_fake_nerrors
+        ):
+            _r_code = self.run_cli(
+                "network",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-net {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    @patch(_NErrors_path, new=_fake_nerrors)
+    def test_network_report_html(self, m_get, m_post, m_shell):
+        """'network report --html <file>' exits with 0; report goes to res dir."""
+        _fake_report = os.path.join(_res_dir, "fake.html")
+        _args = ["report", "--html", _fake_report]
+        _pc = "cfg_checker.modules.network.checker.NetworkErrors"
+        with patch(_pc, new=_fake_nerrors):
+            _r_code = self.run_cli(
+                "network",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-net {}' command failed".format(" ".join(_args))
+        )
diff --git a/tests/test_packages.py b/tests/test_packages.py
new file mode 100644
index 0000000..52ec23a
--- /dev/null
+++ b/tests/test_packages.py
@@ -0,0 +1,323 @@
+import os
+
+from unittest.mock import patch
+
+from tests.mocks import mocked_package_get
+from tests.mocks import mocked_salt_post, mocked_salt_get
+from tests.mocks import _res_dir
+from tests.mocks import mocked_shell, _shell_salt_path
+from tests.test_base import CfgCheckerTestBase
+
+from cfg_checker.modules.packages.repos import RepoManager, ReposInfo
+
+
+# init fake module path
+# Dotted paths of the repo classes to be replaced by the fakes below.
+_ReposInfo_path = "cfg_checker.modules.packages.repos.ReposInfo"
+_RepoManager_path = "cfg_checker.modules.packages.repos.RepoManager"
+# init fakes
+# Shared instances backed by the test resources folder so no real repo
+# archives are downloaded or written during the test run.
+_fakeReposInfo = ReposInfo(arch_folder=_res_dir)
+_fakeRepoManager = RepoManager(
+    arch_folder=_res_dir,
+    info_class=_fakeReposInfo
+)
+
+
+class TestPackageModule(CfgCheckerTestBase):
+    """CLI-level tests for the 'packages' command group.
+
+    HTTP traffic is mocked (package archive downloads and salt calls) and
+    ReposInfo/RepoManager are replaced by shared fakes backed by the test
+    resources folder. The CLI tests assert only a zero exit code;
+    test_package_cmp_result_class additionally pins the version-comparison
+    status/action matrix.
+
+    NOTE(review): unittest.mock injects mock arguments innermost-first, so
+    parameter names like (m_get, m_post, m_shell) may not match the decorator
+    order; harmless since the mocks are unused — confirm before renaming.
+    """
+    @patch('requests.get', side_effect=mocked_package_get)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    def test_build_repo_info(self, m_get):
+        """'versions --build-repos' against a fake URL exits with 0."""
+        # init arguments
+        _args = [
+            "versions",
+            "--url",
+            "http://fakedomain.com",
+            # "--tag",
+            # "2099.0.0",
+            "--build-repos"
+        ]
+
+        # the packages module re-imports RepoManager; patch that name too
+        with patch(
+            "cfg_checker.modules.packages.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_package_get)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    def test_build_repo_info_for_tag(self, m_get):
+        """'versions --tag <tag>' against a fake URL exits with 0."""
+        # init arguments
+        _args = [
+            "versions",
+            "--url",
+            "http://fakedomain.com",
+            "--tag",
+            "2099.0.0"
+        ]
+
+        with patch(
+            "cfg_checker.modules.packages.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_package_get)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    def test_package_versions_tags(self, m_get):
+        """'versions --list-tags' exits with 0."""
+        _args = ["versions", "--list-tags"]
+        with patch(
+            "cfg_checker.modules.packages.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_package_get)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    def test_package_versions_show(self, m_get):
+        """'show <package>' for a fake package exits with 0."""
+        _args = ["show", "fakepackage-m"]
+        with patch(
+            "cfg_checker.modules.packages.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_package_get)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    def test_package_versions_show_app(self, m_get):
+        """'show-app <section>' for a fake section exits with 0."""
+        _args = ["show-app", "fakesection"]
+        with patch(
+            "cfg_checker.modules.packages.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    def test_package_report_html(self, m_get, m_post, m_shell):
+        """'report --html <file>' exits with 0; report goes to res dir."""
+        _fake_report = os.path.join(_res_dir, "fake.html")
+        _args = ["report", "--html", _fake_report]
+        # checker module holds its own RepoManager reference
+        with patch(
+            "cfg_checker.modules.packages.checker.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    def test_package_report_html_full(self, m_get, m_post, m_shell):
+        """'report --full --html <file>' exits with 0."""
+        _fake_report = os.path.join(_res_dir, "fake.html")
+        _args = ["report", "--full", "--html", _fake_report]
+        with patch(
+            "cfg_checker.modules.packages.checker.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    @patch('requests.get', side_effect=mocked_salt_get)
+    @patch('requests.post', side_effect=mocked_salt_post)
+    @patch(_ReposInfo_path, new=_fakeReposInfo)
+    @patch(_RepoManager_path, new=_fakeRepoManager)
+    @patch(_shell_salt_path, side_effect=mocked_shell)
+    def test_package_report_csv(self, m_get, m_post, m_shell):
+        """'report --csv <file>' exits with 0."""
+        _fake_report = os.path.join(_res_dir, "fake.csv")
+        _args = ["report", "--csv", _fake_report]
+        with patch(
+            "cfg_checker.modules.packages.checker.RepoManager",
+            new=_fakeRepoManager
+        ):
+            _r_code = self.run_cli(
+                "packages",
+                _args
+            )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'mcp-pkg {}' command failed".format(" ".join(_args))
+        )
+
+    def test_package_cmp_result_class(self):
+        """Pin the VersionCmpResult status/action matrix.
+
+        Walks every ordering of installed (i), candidate (c) and release (r)
+        Debian versions — including the no-release case — and asserts the
+        resulting status and action constants.
+        """
+        from cfg_checker.common.const import VERSION_OK, VERSION_UP, \
+            VERSION_DOWN, VERSION_WARN
+        from cfg_checker.common.const import ACT_NA, ACT_UPGRADE, \
+            ACT_NEED_UP, ACT_NEED_DOWN, ACT_REPO
+
+        # import the classes under test indirectly to report import errors
+        _name = "cfg_checker.modules.packages.versions.VersionCmpResult"
+        _message, _vcmp = self._safe_import_class(_name)
+        _name = "cfg_checker.modules.packages.versions.DebianVersion"
+        _message, dv = self._safe_import_class(_name)
+
+        # assertion message suffixes
+        _ws = ": wrong status"
+        _wa = ": wrong action"
+
+        # Installed = Candidate = Release
+        _b = "i = c = r"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("1:1.2-0u4"), dv("1:1.2-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_NA, _b + _wa)
+
+        # Installed < Candidate, variations
+        _b = "i < c, i = r"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("2:1.3-0u4"), dv("1:1.2-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_UPGRADE, _b + _wa)
+
+        _b = "i < c, i > r"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("1:1.3-0u4"), dv("1:1.1-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_UP, _b + _ws)
+        self.assertEqual(out.action, ACT_UPGRADE, _b + _wa)
+
+        _b = "i < c, i < r, r < c"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("1:1.4-0u4"), dv("1:1.3-0u3")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_WARN, _b + _ws)
+        self.assertEqual(out.action, ACT_NEED_UP, _b + _wa)
+
+        _b = "i < c, i < r, r = c"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("1:1.4-0u4"), dv("1:1.4-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_WARN, _b + _ws)
+        self.assertEqual(out.action, ACT_NEED_UP, _b + _wa)
+
+        _b = "i < c, c < r"
+        _i, _c, _r = dv("1:1.2-0u4"), dv("1:1.3-0u4"), dv("1:1.4-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_WARN, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        # Installed > Candidate, variations
+        _b = "i > c, c = r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.2-0u4"), dv("1:1.2-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_WARN, _b + _ws)
+        self.assertEqual(out.action, ACT_NEED_DOWN, _b + _wa)
+
+        _b = "i > c, c > r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.2-0u4"), dv("0:1.2-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_UP, _b + _ws)
+        self.assertEqual(out.action, ACT_NEED_DOWN, _b + _wa)
+
+        _b = "i > c, c < r, r < i"
+        _i, _c, _r = dv("1:1.3.1-0u4"), dv("1:1.2-0u4"), dv("1:1.3-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_UP, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        _b = "i > c, c < r, r = i"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.2-0u4"), dv("1:1.3-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        _b = "i > c, i < r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.2-0u4"), dv("2:1.4-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_DOWN, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        # Installed = Candidate, variations
+        _b = "i = c, i < r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.3-0u4"), dv("2:1.4-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        _b = "i = c, i > r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.3-0u4"), dv("1:1.1-0u2")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_WARN, _b + _ws)
+        self.assertEqual(out.action, ACT_REPO, _b + _wa)
+
+        _b = "i = c, i = r"
+        _i, _c, _r = dv("1:1.3-0u4"), dv("1:1.3-0u4"), dv("1:1.3-0u4")
+        out = _vcmp(_i, _c, _r)
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_NA, _b + _wa)
+
+        # Installed vs Candidate, no release version
+        _b = "i = c"
+        _i, _c = dv("1:1.3-0u4"), dv("1:1.3-0u4")
+        out = _vcmp(_i, _c, "")
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_NA, _b + _wa)
+
+        _b = "i < c"
+        _i, _c = dv("1:1.3-0u4"), dv("2:1.4-0u4")
+        out = _vcmp(_i, _c, "")
+        self.assertEqual(out.status, VERSION_OK, _b + _ws)
+        self.assertEqual(out.action, ACT_UPGRADE, _b + _wa)
+
+        _b = "i > c"
+        _i, _c = dv("2:1.4-0~u4"), dv("1:1.2-0~u2")
+        out = _vcmp(_i, _c, "")
+        self.assertEqual(out.status, VERSION_UP, _b + _ws)
+        self.assertEqual(out.action, ACT_NEED_DOWN, _b + _wa)
diff --git a/tests/test_reclass_comparer.py b/tests/test_reclass_comparer.py
new file mode 100644
index 0000000..07ff9a6
--- /dev/null
+++ b/tests/test_reclass_comparer.py
@@ -0,0 +1,45 @@
+import os
+
+from tests.mocks import _res_dir
+from tests.test_base import CfgCheckerTestBase
+
+
+class TestReclassModule(CfgCheckerTestBase):
+    """CLI-level tests for the 'reclass' command group.
+
+    Uses the pre-baked fake models under tests/res/models; no mocking
+    needed since reclass comparison is purely file based.
+    """
+    def test_reclass_list(self):
+        """'reclass list -p <dir>' exits with 0 for the fake models dir."""
+        _models_dir = os.path.join(_res_dir, "models")
+        _args = ["list", "-p", _models_dir]
+
+        _r_code = self.run_cli(
+            "reclass",
+            _args
+        )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'cmp-reclass {}' command failed".format(" ".join(_args))
+        )
+
+    def test_reclass_compare(self):
+        """'reclass diff' of two fake models with HTML report exits with 0."""
+        _models_dir = os.path.join(_res_dir, "models")
+        _model01 = os.path.join(_models_dir, "model01")
+        _model02 = os.path.join(_models_dir, "model02")
+        # report is written into the test resources folder
+        _report_path = os.path.join(_res_dir, "_fake.html")
+        _args = [
+            "diff",
+            "--model1",
+            _model01,
+            "--model2",
+            _model02,
+            "--html",
+            _report_path
+        ]
+
+        _r_code = self.run_cli(
+            "reclass",
+            _args
+        )
+        self.assertEqual(
+            _r_code,
+            0,
+            "'cmp-reclass {}' command failed".format(" ".join(_args))
+        )
diff --git a/versions/repo.info.tgz b/versions/repo.info.tgz
index 29682c5..b63ec26 100644
--- a/versions/repo.info.tgz
+++ b/versions/repo.info.tgz
Binary files differ
diff --git a/versions/repo.versions.tgz b/versions/repo.versions.tgz
index d42fb2c..fd75ffb 100644
--- a/versions/repo.versions.tgz
+++ b/versions/repo.versions.tgz
Binary files differ