Update offline deployment test

* Wait for a usable SSH login instead of a bare TCP ping when the
  devops environment starts
* Add a SIGALRM-based RunLimit context manager to helpers/utils
* Save the Jenkins 'deploy_openstack' console log into LOGS_DIR
* Give a readable error when node addresses do not match the
  management network CIDR
* Extend the offline MaaS/cloud-init templates and reboot the
  baremetal nodes after MaaS deployment

Change-Id: I3c935be8ad6f038ce6c25221de6d09e93619c94d
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index 15f9c8f..46bf9c8 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -18,6 +18,7 @@
import StringIO
import time
import traceback
+import signal
import jinja2
import paramiko
@@ -444,3 +445,28 @@
                       .format(top_fixtures_marks))
     return top_fixtures_marks
+
+
+class RunLimit(object):
+    """Limit the wrapped block to ``seconds``, using SIGALRM.
+
+    Only usable in the main thread of a POSIX process.
+    """
+    def __init__(self, seconds=60, error_message='Timeout'):
+        self.seconds = seconds
+        self.error_message = error_message
+
+    def handle_timeout(self, signum, frame):
+        raise TimeoutException(self.error_message)
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self.handle_timeout)
+        signal.alarm(self.seconds)
+
+    def __exit__(self, exc_type, value, tb):
+        # Always cancel the pending alarm, even if the block raised
+        signal.alarm(0)
+
+
+class TimeoutException(Exception):
+    pass
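For review context, a minimal usage sketch of the new context manager (illustrative only; run_long_operation is a hypothetical stand-in for any blocking call):

    from tcp_tests.helpers.utils import RunLimit, TimeoutException

    try:
        with RunLimit(seconds=30, error_message='operation hung'):
            run_long_operation()  # must run in the main thread (SIGALRM)
    except TimeoutException as exc:
        print('Aborted: {}'.format(exc))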
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 80818cf..1e7f66c 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -16,16 +16,21 @@
from devops import error
from devops.helpers import helpers
+from devops.helpers.helpers import ssh_client
from devops import models
from django import db
from oslo_config import cfg
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadAuthenticationType)
+
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import exceptions
+from tcp_tests.helpers import ext
+from tcp_tests import logger
from tcp_tests import settings
from tcp_tests import settings_oslo
-from tcp_tests.helpers import env_config
-from tcp_tests.helpers import ext
-from tcp_tests.helpers import exceptions
-from tcp_tests import logger
LOG = logger.logger
@@ -305,8 +310,29 @@
for node in self.__env.get_nodes(role__in=underlay_node_roles):
LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
node.name, self.node_ip(node)))
+
+            def _ssh_wait(host,
+                          port,
+                          username=settings.SSH_NODE_CREDENTIALS['login'],
+                          password=settings.SSH_NODE_CREDENTIALS['password']):
+                try:
+                    ssh = ssh_client.SSHClient(
+                        host=host, port=port,
+                        auth=ssh_client.SSHAuth(
+                            username=username,
+                            password=password))
+                # Auth errors still mean sshd is up and answering,
+                except AuthenticationException:
+                    return True
+                except BadAuthenticationType:
+                    return True
+                except Exception:
+                    return False
+
+                return ssh.execute('echo ok')['exit_code'] == 0
+
helpers.wait(
- lambda: helpers.tcp_ping(self.node_ip(node), 22),
+ lambda: _ssh_wait(self.node_ip(node), 22),
timeout=timeout,
timeout_msg="Node '{}' didn't open SSH in {} sec".format(
node.name, timeout
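helpers.wait only needs a zero-argument predicate that returns truthy on success; for reference, the equivalent standalone check, as a sketch assuming the same fuel-devops ssh_client API used above (the host value is illustrative):

    from devops.helpers.helpers import ssh_client
    from tcp_tests import settings

    ssh = ssh_client.SSHClient(
        host='10.10.0.15', port=22,
        auth=ssh_client.SSHAuth(
            username=settings.SSH_NODE_CREDENTIALS['login'],
            password=settings.SSH_NODE_CREDENTIALS['password']))
    # exit code 0 means SSH is fully usable, not merely listening
    assert ssh.execute('echo ok')['exit_code'] == 0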
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index f781305..474713c 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -63,3 +63,6 @@
timeout=timeout,
timeout_msg='Timeout waiting, job {0} are not finished "{1}" build'
' still'.format(name, build_id))
+
+ def get_build_output(self, name, build_id):
+ return self.__client.get_build_console_output(name, build_id)
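A sketch of how the new accessor is consumed (it mirrors the test change below; get_build_console_output is the underlying python-jenkins call, and the credentials here are placeholders):

    jenkins = JenkinsClient(host='http://jenkins.local:8081',
                            username='admin', password='admin_password')
    build = jenkins.run_build('deploy_openstack', params)  # params as in the test
    jenkins.wait_end_of_build(name=build[0], build_id=build[1],
                              timeout=60 * 60 * 2)
    console_log = jenkins.get_build_output('deploy_openstack', build[1])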
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 8f6b140..feb270b 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -95,19 +95,23 @@
self,
target='gtw01', pattern=None,
conf_name='lvm_mcp.conf',
- registry=None):
+ registry=None, node_name=None):
if not registry:
registry = ('{0}/{1}'.format(settings.DOCKER_REGISTRY,
settings.DOCKER_NAME))
- target_name = [node_name for node_name
- in self.__underlay.node_names() if target in node_name]
+ if node_name is None and target is not None:
+ target_name = next(
+ name for name in self.__underlay.node_names()
+ if target in name)
+ else:
+ target_name = node_name
cmd = ("apt-get -y install docker.io")
- with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_name) as node_remote:
result = node_remote.execute(cmd, verbose=True)
cmd_iptables = "iptables --policy FORWARD ACCEPT"
- with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_name) as node_remote:
result = node_remote.execute(cmd_iptables, verbose=True)
with self.__underlay.remote(
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 06e7d0b..2b06dc3 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -198,10 +198,20 @@
'password': settings.SSH_NODE_CREDENTIALS['password']
}
- return [
- host(k, next(i for i in v['ipv4'] if i in pool_net))
- for k, v in hosts.items()
- if next(i for i in v['ipv4'] if i in pool_net)]
+ try:
+ ret = [
+ host(k, next(i for i in v['ipv4'] if i in pool_net))
+ for k, v in hosts.items()
+ if next(i for i in v['ipv4'] if i in pool_net)]
+ LOG.debug("Fetched ssh data from salt grains - {}".format(ret))
+ return ret
+ except StopIteration:
+            msg = ("Can't match nodes' IP addresses with the network CIDR\n"
+                   "Management network - {net}\n"
+                   "Hosts with addresses - {host_list}".format(
+                       net=pool_net,
+                       host_list={k: v['ipv4'] for k, v in hosts.items()}))
+ raise StopIteration(msg)
def service_status(self, tgt, service):
result = self.local(tgt=tgt, fun='service.status', args=service)
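The bare next() calls raise StopIteration as soon as one minion reports no address inside the management network; a tiny sketch of the failure mode the new message covers (assuming pool_net is a netaddr.IPNetwork as in the surrounding code; addresses are illustrative):

    import netaddr

    pool_net = netaddr.IPNetwork('10.167.4.0/24')
    ipv4 = ['192.168.1.5']  # minion with no address in the pool
    next(i for i in ipv4 if i in pool_net)  # raises StopIteration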
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index fce80cc..2c9ed55 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -175,6 +175,7 @@
else:
ssh_data = ssh
if ssh_data is None:
+ LOG.debug("config_ssh - {}".format(self.config_ssh))
raise Exception('Auth data for node was not found using '
'node_name="{}" , host="{}" , address_pool="{}"'
.format(node_name, host, address_pool))
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
index 04185ea..19cc801 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -20,7 +20,32 @@
skip_fail: false
- description: Sync all salt resources on master node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+ cmd: sleep 60; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+
+- description: MaaS auth
+ cmd: maas logout mirantis && maas login mirantis http://localhost:5240/MAAS/api/2.0/ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set upstream DNS in MaaS
+ cmd: sleep 30; maas mirantis maas set-config name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup NTP
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls ntp.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait 60 seconds for salt minions to come up
+ cmd: sleep 60
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
index 6978bd3..838435c 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
@@ -48,6 +48,7 @@
# Node is ready, allow SSH access
- echo "Allow SSH access ..."
- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - touch /is_cloud_init_finish
########################################################
write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
index 6978bd3..b850283 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -19,10 +19,9 @@
bootcmd:
# Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   - cloud-init-per once sshd_deny sh -c "echo 'sshd:ALL' >> /etc/hosts.deny"
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
@@ -39,6 +38,9 @@
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
# Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4
+ - ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- cd /root/config-drive && /bin/bash -xe ./user-data
# Enable root access
@@ -47,7 +49,8 @@
########################################################
# Node is ready, allow SSH access
- echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - "sed -i -e '/sshd:ALL/d' /etc/hosts.deny"
+ - touch /is_cloud_init_finish
########################################################
write_files:
@@ -60,3 +63,13 @@
ServerAliveCountMax 10
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
index aab7cde..0c365ac 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
@@ -66,6 +66,7 @@
#- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- apt-get install linux-generic-hwe-16.04 -y
+ - touch /is_cloud_init_finish
- reboot
########################################################
write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index b0568d3..db9c992 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -131,7 +131,7 @@
address_pools:
managment-pool01:
- net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/24:24') }}
+ net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/16:16') }}
params:
ip_reserved:
gateway: +1
@@ -173,7 +173,7 @@
default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR04 }}
admin-pool01:
- net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/24:24') }}
+ net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/16:16') }}
params:
ip_reserved:
gateway: +1
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index e94188a..51757fd 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -12,9 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
# import pytest
+import time
+
+from collections import Counter
from tcp_tests import logger
from tcp_tests.managers.jenkins.client import JenkinsClient
+from tcp_tests import settings
+
+from tcp_tests import managers
LOG = logger.logger
@@ -22,119 +28,222 @@
class TestOfflineDeployment(object):
"""docstring for TestOfflineDeployment"""
- def test_deploy_day1(self, show_step, underlay, common_services_deployed,
- salt_deployed):
+ def test_deploy_day1(self, show_step, config, underlay, hardware,
+ common_services_deployed, salt_deployed):
"""Test for deploying an mcp from day01 images
Scenario:
- 1. Approve local ssh key to jenkins
- 2. Boot CFG and APT virtual machines
- 3. Setup jq
- 4. Wait salt master
- 5. Addition config of MaaS
- 6. Wait dhcpd server
- 7. Start comissioning node via MaaS
- 8. Wait of comissioning node by MaaS
- 9. Start deploing node via MaaS
- 10. Wait of deploing node by
- 11. Accept all keys
- 12. Run deploy OS job
+            1. Wait for the salt master
+            2. Apply additional MaaS configuration
+            3. Wait for the dhcpd server
+            4. Start commissioning nodes via MaaS
+            5. Wait for MaaS to commission the nodes
+            6. Start deploying nodes via MaaS
+            7. Wait for MaaS to deploy the nodes
+            8. Accept all salt keys
+            9. Configure baremetal nodes after MaaS deployment
+            10. Run the deploy OS job
"""
# group = hardware._get_default_node_group()
nodes = underlay.node_names()
LOG.info("Nodes - {}".format(nodes))
cfg_node = 'cfg01.offline-ocata-vxlan.local'
+ tempest_node = 'gtw01.offline-ocata-vxlan.local'
verbose = True
+ ssh_test_key = config.underlay.ssh_keys[0]['public']
- # show_step(1)
- # cmd = ("mkdir -p /var/lib/jenkins/.ssh && "
- # "ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && "
- # "chown jenkins /var/lib/jenkins/.ssh/known_hosts")
+ show_step(1)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="""timeout 300s /bin/bash -c """
+                """'while ! salt-call test.ping; do """
+                """echo "salt master is not running yet"; sleep 10; done'"""
+        )  # noqa
+
+ show_step(2)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call saltutil.sync_all')
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call state.sls maas.region')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='maas logout mirantis && '
+ 'maas login mirantis '
+ 'http://localhost:5240/MAAS/api/2.0/ '
+ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN' # noqa
+ )
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="maas mirantis maas set-config "
+ "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
+
# underlay.check_call(
- # node_name=cfg_node, verbose=verbose,
- # cmd=cmd)
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd="maas mirantis ipranges create "
+ # "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
+ # "subnet=$(maas mirantis subnets read | jq '.[] | "
+ # "select(.name==\"10.10.0.0/16\") | .id')")
- # show_step(2)
- # underlay.check_call(node_name=cfg_node, verbose=verbose,
- # cmd='salt-key')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="maas mirantis vlan update "
+ "$(maas mirantis subnets read | jq '.[] | "
+ "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
+ "0 dhcp_on=True primary_rack='cfg01'")
- # show_step(3)
- # underlay.check_call(node_name=cfg_node, verbose=verbose,
- # cmd='apt install -y jq')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='maas mirantis sshkeys create '
+ 'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+ show_step(3)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do """
+                """echo "dhcpd is not running yet"; sleep 10; done'""")
show_step(4)
underlay.check_call(
node_name=cfg_node,
verbose=verbose,
- cmd="""timeout 300s /bin/bash -c 'while ! salt-call test.ping; do echo "salt master still isnt running"; sleep 10; done'""") # noqa
-
+ cmd='salt-call state.sls maas.machines')
show_step(5)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call saltutil.sync_all')
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call state.sls maas.region')
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='maas logout mirantis && '
- 'maas login mirantis '
- 'http://localhost/MAAS/api/2.0/ '
- 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
-
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd="maas mirantis ipranges create "
- "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
- "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .id')") # noqa
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd="maas mirantis vlan update "
- "$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') " # noqa
- "0 dhcp_on=True primary_rack='cfg01'")
-
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='maas mirantis sshkeys create '
- 'key="$(cat ~root/.ssh/id_rsa.pub)"')
-
- show_step(6)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do echo "dhcpd still isnt running"; sleep 10; done'""") # noqa
-
- show_step(7)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call state.sls maas.machines')
- show_step(8)
- cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
+ cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
- show_step(9)
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call state.sls maas.machines.assign_ip')
+ show_step(6)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
cmd='salt-call state.sls maas.machines.deploy')
- show_step(10)
+ show_step(7)
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
+ node_name=cfg_node,
+ verbose=verbose,
cmd='salt-call state.sls maas.machines.wait_for_deployed')
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
-
- show_step(11)
underlay.check_call(
- node_name=cfg_node, verbose=verbose, expected=[0, 1],
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+ show_step(8)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ expected=[0, 1],
cmd='salt-key -A -y --include-denied --include-rejected')
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+ show_step(9)
+ cmd = "salt '*' saltutil.refresh_pillar"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ cmd = "salt '*' saltutil.sync_all"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose, cmd="reclass-salt --top")
+
+ cmd = "salt -C " \
+ "'I@salt:control or I@nova:compute or I@neutron:gateway' " \
+ "cmd.run 'touch /run/is_rebooted'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = "salt --async -C " \
+ "'I@salt:control' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = "salt --async -C " \
+ "'I@nova:compute' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = "salt --async -C " \
+ "'I@neutron:gateway' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ time.sleep(360) # TODO: Add ssh waiter
+
+ cmd = "salt -C " \
+ "'I@salt:control or I@nova:compute or I@neutron:gateway'" \
+ " test.ping"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = """salt -C """ \
+ """'I@salt:control or I@nova:compute or I@neutron:gateway' """ \
+ """cmd.run '[ -f "/run/is_rebooted" ] && """ \
+ """echo "Has not been rebooted!" || echo "Rebooted"' """
+ ret = underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ count = Counter(ret['stdout_str'].split())
+
+        assert count['Rebooted'] == 10, "Not all baremetal nodes rebooted"
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
salt_api = \
salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
salt_api = salt_api[0].get(cfg_node)
- show_step(12)
+ show_step(10)
jenkins = JenkinsClient(
host='http://172.16.44.33:8081',
username='admin',
@@ -144,11 +253,56 @@
build = jenkins.run_build('deploy_openstack', params)
jenkins.wait_end_of_build(
- name=build[0],
- build_id=build[1],
- timeout=60 * 60 * 2)
+ name=build[0], build_id=build[1], timeout=60 * 60 * 2)
+
+ with open("{path}/cfg01_jenkins_deploy_openstack_console.log".format(
+ path=settings.LOGS_DIR), 'w') as f:
+            LOG.info("Saving Jenkins console log")
+ console_log = \
+ jenkins.get_build_output('deploy_openstack', build[1])
+ f.write(console_log)
assert \
jenkins.build_info(
name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
"Deploy openstack was failed"
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+ salt_nodes = salt_deployed.get_ssh_data()
+ nodes_list = \
+ [node for node in salt_nodes
+ if not any(node['node_name'] == n['node_name']
+ for n in config.underlay.ssh)]
+ config.underlay.ssh = config.underlay.ssh + nodes_list
+ underlay.add_config_ssh(nodes_list)
+
+ time.sleep(120) # debug sleep
+ cmd = "salt '*' test.ping"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ openstack = managers.openstack_manager.OpenstackManager(
+ config, underlay, hardware, salt_deployed)
+
+ if settings.RUN_TEMPEST:
+ openstack.run_tempest(
+ pattern=settings.PATTERN,
+ node_name=tempest_node)
+ openstack.download_tempest_report()