Merge "Add possibility to override `role` via underlay.hot Add possibility to use different roles for nodes from MultipleInstance. Add possibility to use several roles for one node (for the future)"
diff --git a/tcp_tests/helpers/env_config.py b/tcp_tests/helpers/env_config.py
index 5dbc87d..b811030 100644
--- a/tcp_tests/helpers/env_config.py
+++ b/tcp_tests/helpers/env_config.py
@@ -150,7 +150,7 @@
:param path: string
:returns: key string, indexes list
"""
- pattern = re.compile("\[([0-9]*)\]")
+ pattern = re.compile(r"\[([0-9]*)\]")
# find all indexes of possible list object in path
indexes = (lambda x: [int(r) for r in pattern.findall(x)]
if pattern.search(x) else [])
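
Note: the `r` prefix above only silences Python's invalid-escape-sequence warning for `\[`; the compiled pattern is identical. A minimal, self-contained sketch of what the pattern extracts, using a hypothetical path (the key handling here is simplified for illustration, not the helper's actual implementation):

    import re

    pattern = re.compile(r"\[([0-9]*)\]")              # same pattern as above
    path = "nodes[2].interfaces[0]"                    # hypothetical input path
    indexes = [int(i) for i in pattern.findall(path)]  # -> [2, 0]
    key = pattern.sub("", path)                        # -> "nodes.interfaces" (illustrative only)
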
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 0e57c89..5f5f4e8 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -252,22 +252,22 @@
if self.__config.k8s.run_extended_virtlet_conformance:
ci_image = "cloud-images.ubuntu.com/xenial/current/" \
"xenial-server-cloudimg-amd64-disk1.img"
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput {3} "
- "-image {2} -sshuser ubuntu -memoryLimit 1024 "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
+ cmd = (r"set -o pipefail; "
+ r"docker run --net=host {0} /virtlet-e2e-tests "
+ r"-include-cloud-init-tests -junitOutput {3} "
+ r"-image {2} -sshuser ubuntu -memoryLimit 1024 "
+ r"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ r"-ginkgo.focus '\[Conformance\]' "
+ r"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
log_file, ci_image, report_name))
else:
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput {2} "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
+ cmd = (r"set -o pipefail; "
+ r"docker run --net=host {0} /virtlet-e2e-tests "
+ r"-junitOutput {2} "
+ r"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ r"-ginkgo.focus '\[Conformance\]' "
+ r"| tee {1}".format(
self.__config.k8s_deploy.kubernetes_virtlet_image,
log_file, report_name))
LOG.info("Executing: {}".format(cmd))
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 6b5bebb..c2e82d4 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -414,38 +414,40 @@
# Prefix each '$' symbol with backslash '\' to disable
# early interpolation of environment variables on cfg01 node only
dump_commands = (
- "mkdir /root/\$(hostname -f)/;"
- "rsync -aruv /var/log/ /root/\$(hostname -f)/;"
- "dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
- "df -h > /root/\$(hostname -f)/dump_df.txt;"
- "mount > /root/\$(hostname -f)/dump_mount.txt;"
- "blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
- "iptables -t nat -S > /root/\$(hostname -f)/dump_iptables_nat.txt;"
- "iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
- "ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
- "docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
- "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
- "docker service ls > "
- " /root/\$(hostname -f)/dump_docker_services_ls.txt;"
- "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
- " do docker service ps --no-trunc 2>&1 \$SERVICE >> "
- " /root/\$(hostname -f)/dump_docker_service_ps.txt;"
- " done;"
- "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
- " do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
- " /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
- " done;"
- "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
- "lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
- "ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
- "ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
- "netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
- "brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
- "arp -an > /root/\$(hostname -f)/dump_arp.txt;"
- "uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
- "lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
- "cat /proc/interrupts > /root/\$(hostname -f)/dump_interrupts.txt;"
- "cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
+ r"mkdir /root/\$(hostname -f)/;"
+ r"rsync -aruv /var/log/ /root/\$(hostname -f)/;"
+ r"dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
+ r"df -h > /root/\$(hostname -f)/dump_df.txt;"
+ r"mount > /root/\$(hostname -f)/dump_mount.txt;"
+ r"blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
+ r"iptables -t nat -S > "
+ r" /root/\$(hostname -f)/dump_iptables_nat.txt;"
+ r"iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
+ r"ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
+ r"docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
+ r"docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
+ r"docker service ls > "
+ r" /root/\$(hostname -f)/dump_docker_services_ls.txt;"
+ r"for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ r" do docker service ps --no-trunc 2>&1 \$SERVICE >> "
+ r" /root/\$(hostname -f)/dump_docker_service_ps.txt;"
+ r" done;"
+ r"for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ r" do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
+ r" /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
+ r" done;"
+ r"vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
+ r"lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
+ r"ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
+ r"ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
+ r"netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
+ r"brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
+ r"arp -an > /root/\$(hostname -f)/dump_arp.txt;"
+ r"uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
+ r"lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
+ r"cat /proc/interrupts > "
+ r" /root/\$(hostname -f)/dump_interrupts.txt;"
+ r"cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
# OpenStack specific, will fail on other nodes
# "rabbitmqctl report > "
# " /root/\$(hostname -f)/dump_rabbitmqctl.txt;"
@@ -466,16 +468,22 @@
# " do echo Namespace: \${ns}; ip netns exec \${ns} netstat -anp;"
# "done > /root/\$(hostname -f)/dump_netstat_ns.txt;"
- "/usr/bin/haproxy-status.sh > "
- " /root/\$(hostname -f)/dump_haproxy.txt;"
+ r"/usr/bin/haproxy-status.sh > "
+ r" /root/\$(hostname -f)/dump_haproxy.txt;"
# Archive the files
- "cd /root/; tar --absolute-names --warning=no-file-changed "
- " -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
+ r"cd /root/; tar --absolute-names --warning=no-file-changed "
+ r" -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
)
master_host = self.__config.salt.salt_master_host
with self.remote(host=master_host) as master:
+ LOG.info("Make sure that 'rsync' is installed on all nodes")
+ master.check_call("salt '*' cmd.run "
+ " 'apt-get -qq install -y rsync'",
+ raise_on_err=False,
+ timeout=240)
+
# dump files
LOG.info("Archive artifacts on all nodes")
master.check_call('salt "*" cmd.run "{0}"'.format(dump_commands),
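
Note: as the in-code comment says, every `$` is escaped with `\` so that the shell on cfg01 does not expand `$(hostname -f)` locally when the string is wrapped into `salt "*" cmd.run "..."`; each minion then expands it with its own FQDN. The new `r""` prefix only keeps Python from swallowing that backslash. A rough illustration (the commands are simplified stand-ins, not the manager's actual dump commands):

    local_expand  = "echo /root/$(hostname -f)/"    # $(...) would expand on cfg01 before salt sends it
    remote_expand = r"echo /root/\$(hostname -f)/"  # backslash survives Python and cfg01's shell,
                                                    # so each minion expands its own hostname
    # master.check_call('salt "*" cmd.run "{0}"'.format(remote_expand), ...)
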
diff --git a/tcp_tests/report.py b/tcp_tests/report.py
index 0bec6ef..8a2a58e 100644
--- a/tcp_tests/report.py
+++ b/tcp_tests/report.py
@@ -188,7 +188,7 @@
LOG.info("Get results for run - {}".format(run.name))
results = t_client.results(run, result_type)
results_with_test = []
- if result_type is '5':
+ if result_type == '5':
ret = [(run, r) for r in results
if r.raw_data()['status_id'] is int(result_type) and
r.raw_data()['defects'] is None]
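
Note: the `is` -> `==` change above replaces an identity check with the value comparison the code actually needs; `is` on strings only works when both operands happen to be the same interned object, and recent CPython versions warn about `is` with a literal. A quick illustration with a hypothetical value:

    result_type = str(5)         # a str built at runtime (hypothetical stand-in)
    print(result_type == '5')    # True  - compares values, which is what the check needs
    print(result_type is '5')    # usually False in CPython - compares object identity, not value
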
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 6d5b71d..5b3b03b 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -7,6 +7,7 @@
"MCP::Networks": fragments/Networks.yaml
"MCP::SingleInstance": fragments/Instance.yaml
"MCP::FoundationNode": fragments/FoundationNode.yaml
+ "MCP::VsrxNode": fragments/VsrxNode.yaml
parameter_defaults:
@@ -30,6 +31,7 @@
gtw_flavor: system.compact.openstack.gateway
kvm_fake_flavor: system.virtual.fake_kvm
foundation_flavor: system.virtual.foundation
+ vsrx_flavor: oc_vsrx
key_pair: system_key_8133
diff --git a/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml b/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml
new file mode 100644
index 0000000..b3b32ef
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/VsrxNode.yaml
@@ -0,0 +1,96 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ network:
+ type: string
+ instance_flavor:
+ type: string
+ instance_image:
+ type: string
+ instance_name:
+ type: string
+ instance_config_host:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ control_net_static_ip:
+ type: string
+ tenant_net_static_ip:
+ type: string
+ external_net_static_ip:
+ type: string
+ # underlay_userdata:
+ # type: string
+ env_name:
+ type: string
+ mcp_version:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: control_net_static_ip }
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: tenant_net_static_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: external_net_static_ip }
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: instance_flavor }
+ image: { get_param: instance_image }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: env_name } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
+ metadata:
+ roles:
+ - vsrx_node
+
+outputs:
+
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
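
Note: the `roles: [vsrx_node]` metadata on the server is how this fragment ties into the change described in the commit subject: the role is carried on the instance rather than hard-coded per template. A hedged sketch of how such metadata could be used to pick nodes by role (the record layout and the other role name are hypothetical, not the framework's actual API):

    # hypothetical node records, e.g. collected from instance metadata
    nodes = [
        {"name": "cfg01.mcp-env", "metadata": {"roles": ["salt_master"]}},
        {"name": "vsrx.mcp-env",  "metadata": {"roles": ["vsrx_node"]}},
    ]
    vsrx_nodes = [n for n in nodes
                  if "vsrx_node" in n.get("metadata", {}).get("roles", [])]
    # -> [{'name': 'vsrx.mcp-env', ...}]
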
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 763bda5..fc56b80 100644
--- a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -232,7 +232,7 @@
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+ tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.220
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
upstream_proxy_enabled: 'False'
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
index f3f0e12..d9a8971 100644
--- a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
@@ -60,7 +60,8 @@
type: string
foundation_flavor:
type: string
-
+ vsrx_flavor:
+ type: string
net_public:
type: string
@@ -508,6 +509,31 @@
- [ { get_attr: [networks, control_net_prefix] }, '5' ]
instance_config_host: { get_attr: [cfg01_node, instance_address] }
+ vsrx_node:
+ type: MCP::VsrxNode
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: vsrx
+ instance_flavor: {get_param: vsrx_flavor}
+ instance_image: system_vsrx-12.1X46-D20.5
+ network: { get_attr: [networks, network] }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '220' ]
+ tenant_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, tenant_net_prefix] }, '220' ]
+ external_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, external_net_prefix] }, '220' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
outputs:
control_subnet_cidr:
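
Note: the `.220` host part chosen for the vSRX node above is the same octet the cookiecutter context now uses for `tenant_network_gateway` (==IPV4_NET_TENANT_PREFIX==.220), i.e. the vSRX's tenant address becomes the tenant network gateway. A tiny sketch of what the `list_join` expressions evaluate to (the prefix value is hypothetical):

    tenant_net_prefix = "10.167.12"                              # hypothetical get_attr: [networks, tenant_net_prefix]
    tenant_net_static_ip = ".".join([tenant_net_prefix, "220"])  # what list_join: ['.', [prefix, '220']] produces
    print(tenant_net_static_ip)                                  # 10.167.12.220
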
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index 60ac4a7..850cfd1 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -57,9 +57,9 @@
new_minion_vip =\
core_actions.get_keepalived_vip_minion_id(vip)
except Exception:
- time.sleep(15)
- new_minion_vip = \
- core_actions.get_keepalived_vip_minion_id(vip)
+ time.sleep(15)
+ new_minion_vip = \
+ core_actions.get_keepalived_vip_minion_id(vip)
LOG.info("VIP {0} migrated to {1}".format(vip, new_minion_vip))
assert new_minion_vip != minion_vip
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index d10d250..0c74d20 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -356,9 +356,9 @@
pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
try:
assert len(ip) > 0, 'fail to find jenkins ip'
@@ -367,9 +367,9 @@
tgt='cid*', fun='cmd.run',
args='service keepalived restart')
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
assert len(ip) > 0, 'fail to find jenkins ip {}'.format(addresses)
@@ -457,9 +457,9 @@
pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
try:
assert len(ip) > 0, 'fail to find jenkins ip'
@@ -468,9 +468,9 @@
tgt='cid*', fun='cmd.run',
args='service keepalived restart')
addresses = salt_deployed.get_pillar('cid01*', pillar)
- ip = list(set([ip
+ ip = list(set([ipaddr
for item in addresses
- for node, ip in item.items() if ip]))
+ for node, ipaddr in item.items() if ipaddr]))
LOG.info('Jenkins ip is {}'.format(ip))
assert len(ip) > 0, 'fail to find jenkins ip {}'.format(addresses)
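
Note: the `ip` -> `ipaddr` rename above keeps the comprehension target from shadowing the very name the result is assigned to; behaviour is unchanged, but the old form is confusing and is flagged by linters. A condensed sketch with hypothetical pillar data:

    addresses = [{"cid01.mcp-env": "10.167.11.90"}]   # hypothetical get_pillar('cid01*', pillar) result
    ip = list(set([ipaddr
                   for item in addresses
                   for node, ipaddr in item.items() if ipaddr]))
    print(ip)                                         # ['10.167.11.90']
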