Fix underlay.get_logs() method
- use salt minions instead of underlay SSH to get logs from
  all registered nodes
- use IP addresses from underlay.config_ssh for nodes whose
  hostnames do not match any minion (see the sketch below)
- archive the full dump of logs to /tmp instead of /root
  (with /root as the destination, the previous dump matched the
  "/root/*.tar.gz" rsync glob and was pulled back into the next
  archive, duplicating its contents; illustrated after the last hunk)
- use ${ENV_NAME} as part of the archive name in CICD jobs
  (path illustration after the pipeline hunks)
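
A minimal sketch of the resulting source-selection logic, condensed from
the underlay_ssh_manager.py hunk below; the helper name is illustrative,
`master` is the SSH client to the cfg01 node, `config_ssh` is the
underlay node list, and yaml.safe_load stands in for the hunk's
yaml.load:

    import yaml

    def log_sources(master, config_ssh):
        # All minion ids registered on the salt master.
        res = master.check_call("salt-key --list all --out=yaml",
                                verbose=True)
        minions = yaml.safe_load(res.stdout_str)['minions']
        # Fall back to the raw IP address for underlay nodes whose
        # hostname matches no minion id (nodes without a salt minion).
        for node in config_ssh:
            if not any(m.startswith(node['node_name']) for m in minions):
                minions.append(str(node['host']))
        return minions
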
Change-Id: I21e6f5cb8eff6c5bccc707c5c88e509a7bf3a166
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 36ea29a..7b2ff15 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -108,7 +108,7 @@
} catch (e) {
common.printMsg("Saltstack cluster deploy is failed", "purple")
if (fileExists(xml_report_name)) {
- shared.download_logs("deploy_salt")
+ shared.download_logs("deploy_salt_${ENV_NAME}")
def String junit_report_xml = readFile(xml_report_name)
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
throw new Exception(junit_report_xml_pretty)
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 58474b9..1939b4d 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -62,7 +62,7 @@
} catch (e) {
common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_drivetrain")
+ shared.download_logs("deploy_drivetrain_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index c854c73..cb26aae 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -62,7 +62,7 @@
} catch (e) {
common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_platform")
+ shared.download_logs("deploy_platform_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 780229d..204aef4 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -74,7 +74,7 @@
""")
def snapshot_name = "test_completed"
- shared.download_logs("test_completed")
+ shared.download_logs("test_completed_${ENV_NAME}")
shared.run_cmd("""\
dos.py suspend ${ENV_NAME}
dos.py snapshot ${ENV_NAME} ${snapshot_name}
@@ -91,7 +91,7 @@
common.printMsg("Job is failed", "purple")
// Downloading logs usually not needed here
// because tests should use the decorator @pytest.mark.grab_versions
- // shared.download_logs("test_failed")
+ // shared.download_logs("test_failed_${ENV_NAME}")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
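
All four pipelines now suffix the artifact name with ${ENV_NAME}, so
concurrent CICD jobs write distinctly named archives. A small Python
illustration of the resulting path, using the format string from the
underlay_ssh_manager.py hunk below ('my-env' is a placeholder for a
real ENV_NAME value):

    # "deploy_platform_${ENV_NAME}" in Groovy arrives as artifact_name.
    artifact_name = 'deploy_platform_' + 'my-env'
    destination_name = '/tmp/{0}_dump.tar.gz'.format(artifact_name)
    assert destination_name == '/tmp/deploy_platform_my-env_dump.tar.gz'
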
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 66f686b..cf1fb2b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -483,23 +483,43 @@
timeout=600)
# create target dir for archives
- master.check_call("mkdir /root/dump/")
+ master.check_call("mkdir -p /root/dump/")
+
+ saltkeys_res = master.check_call(
+ "salt-key --list all --out=yaml", verbose=True)
+
+ saltkeys_all = yaml.load(saltkeys_res.stdout_str)
+ minions = saltkeys_all['minions']
+
+ # add nodes registered in self.config_ssh
+ # to get logs from nodes without salt minions
+ for node in self.config_ssh:
+ # If no minion name starts with the same hostname
+ # as node['node_name']
+ if not any(minion.startswith(node['node_name'])
+ for minion in minions):
+ # Use the IP address from node['host'] to access the node,
+ # because the cfg01 node may not know its hostname.
+ # Note: the SSH public key from system.openssh.server.team.lab
+ # should already be configured on that node
+ # in order to access the node from cfg01
+ minions.append(str(node['host']))
# get archived artifacts to the master node
- for node in self.config_ssh:
- LOG.info("Getting archived artifacts from the node {0}"
- .format(node['node_name']))
+ for minion in minions:
+ LOG.info("Getting archived artifacts from the minion {0}"
+ .format(minion))
master.check_call("rsync -aruv {0}:/root/*.tar.gz "
- "/root/dump/".format(node['node_name']),
+ "/root/dump/".format(minion.strip()),
raise_on_err=False,
timeout=120)
- destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
- # Archive the artifacts from all nodes
+ destination_name = '/tmp/{0}_dump.tar.gz'.format(artifact_name)
+ # Archive the artifacts from all minions
master.check_call(
'cd /root/dump/;'
'tar --absolute-names --warning=no-file-changed -czf '
- ' {0} ./'.format(destination_name))
+ ' {0} ./'.format(destination_name), verbose=True)
# Download the artifact to the host
LOG.info("Downloading the artifact {0}".format(destination_name))