(WIP) Use dynamic IPv4 network pools for deploy clusters (#18)
* Use dynamic IPv4 network pools for deploy clusters
- Use 'config' object as an additional jinja option to load templates
- Use 'address_pools' from 'config' object in
virtual-mcp11-ovs and virtual-mcp11-dvr
- Fill 'address_pools' in 'config' object from fuel-devops manager
* Use dynamic IPv4 network pools for deploy clusters
- Use 'config' object as an additional jinja option to load templates
- Use 'address_pools' from 'config' object in
virtual-mcp11-ovs and virtual-mcp11-dvr
- Fill 'address_pools' in 'config' object from fuel-devops manager
* restore the previously removed 'test_install_k8s'
* Use 'sed' to replace IP addresses in the models with pre-generated ones
also, two new environment variables are available for mcp11-ovs/dvr:
- SALT_MODELS_REPOSITORY
- SALT_MODELS_COMMIT
* remove temporary env_config.py.bak
diff --git a/tcp_tests/fixtures/common_services_fixtures.py b/tcp_tests/fixtures/common_services_fixtures.py
index eb46f93..21c0e5d 100644
--- a/tcp_tests/fixtures/common_services_fixtures.py
+++ b/tcp_tests/fixtures/common_services_fixtures.py
@@ -19,7 +19,6 @@
from tcp_tests import logger
from tcp_tests.helpers import ext
-from tcp_tests.helpers import utils
from tcp_tests import settings
from tcp_tests.managers import common_services_manager
@@ -72,7 +71,7 @@
# Create Salt cluster
if not config.common_services.common_services_installed:
steps_path = config.common_services_deploy.common_services_steps_path
- commands = utils.read_template(steps_path)
+ commands = underlay.read_template(steps_path)
common_services_actions.install(commands)
hardware.create_snapshot(ext.SNAPSHOT.common_services_deployed)
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index cba8823..6ec700a 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -19,7 +19,6 @@
from tcp_tests import logger
from tcp_tests.helpers import ext
-from tcp_tests.helpers import utils
from tcp_tests import settings
from tcp_tests.managers import openstack_manager
@@ -71,7 +70,7 @@
# Create Salt cluster
if not config.openstack.openstack_installed:
steps_path = config.openstack_deploy.openstack_steps_path
- commands = utils.read_template(steps_path)
+ commands = underlay.read_template(steps_path)
openstack_actions.install(commands)
hardware.create_snapshot(ext.SNAPSHOT.openstack_deployed)
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 57fb656..7efd296 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -19,7 +19,6 @@
from tcp_tests import logger
from tcp_tests.helpers import ext
-from tcp_tests.helpers import utils
from tcp_tests import settings
from tcp_tests.managers import saltmanager
@@ -67,8 +66,13 @@
"""
# Create Salt cluster
if config.salt.salt_master_host == '0.0.0.0':
- commands = utils.read_template(config.salt_deploy.salt_steps_path)
- LOG.info("##################Executing command ####### {0}".format(commands))
+ # Temporary workaround. Underlay should be extended with roles
+ salt_nodes = underlay.node_names()
+ config.salt.salt_master_host = \
+ underlay.host_by_node_name(salt_nodes[0])
+
+ commands = underlay.read_template(config.salt_deploy.salt_steps_path)
+ LOG.info("############ Executing command ####### {0}".format(commands))
salt_actions.install(commands)
hardware.create_snapshot(ext.SNAPSHOT.salt_deployed)
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index d7351bf..43024e3 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -137,17 +137,16 @@
and snapshot_needed:
snapshot_name = utils.extract_name_from_mark(snapshot_needed) or \
"{}_passed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
elif hasattr(request.node, 'rep_setup') and \
request.node.rep_setup.failed and fail_snapshot:
snapshot_name = "{0}_prep_failed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
elif hasattr(request.node, 'rep_call') and \
request.node.rep_call.failed and fail_snapshot:
snapshot_name = "{0}_failed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
+
+ hardware.create_snapshot(snapshot_name)
request.addfinalizer(test_fin)