Fix E* pep8 warnings
List of fixed pep8 error codes:
E122, E127, E128, E226, E231, E241, E265, E302, E305, E306, E501
Change-Id: I89fbc7748f24bfdb7dc765d08624a8898654f698
Reviewed-on: https://review.gerrithub.io/379375
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
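
For reference, the sketch below illustrates the style each listed code
enforces, going by the pycodestyle (formerly pep8) definitions. The
function and names are made up for this note, not taken from the change:

    def pep8_demo(width, height):
        # E265: block comments start with '# ', not '#like this'
        # E501: physical lines stay under 79 characters
        area = width * height      # E226: spaces around arithmetic operators
        point = (width, height)    # E231: a space after each comma
        pair = (1, 2)              # E241: a single space after a comma

        # E306: one blank line before a nested definition
        def shrink(value):
            return value - 1

        # E122/E127/E128: continuation lines align with the opening bracket
        total = sum([area,
                     point[0],
                     pair[1],
                     shrink(area)])
        return total   # E302/E305: two blank lines around top-level defs
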
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 04222d9..f02c843 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -55,10 +55,10 @@
pillar='kubernetes:pool:apiserver:host')
k8s_proxy_ip = set([ip
for item in k8s_proxy_ip_pillars
- for node,ip in item.items() if ip])
+ for node, ip in item.items() if ip])
k8s_hosts = set([ip
- for item in k8s_hosts
- for node,ip in item.items() if ip])
+ for item in k8s_hosts
+ for node, ip in item.items() if ip])
assert len(k8s_hosts) == 1, (
"Found more than one Kubernetes API hosts in pillars:{0}, "
"expected one!").format(k8s_hosts)
@@ -253,10 +253,12 @@
:param deploy_name: str, deploy name
:return: bool
"""
- deploy = self.api.deployments.get(name=deploy_name, namespace=namespace)
+ deploy = self.api.deployments.get(name=deploy_name,
+ namespace=namespace)
        return deploy.status.available_replicas == deploy.status.replicas

-    def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60, interval=5):
+ def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
+ interval=5):
"""Wait until all pods are scheduled on nodes
:param deploy_name: str, deploy name
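
The first hunk above adds the E231 comma spacing and re-aligns the
comprehension's continuation lines under the opening bracket
(E122/E127/E128); the second wraps a long signature for E501. A minimal
sketch of the alignment rule, with made-up data:

    pillar_items = [{'ctl01': '10.0.0.10'}, {'ctl02': '10.0.0.10'}]
    hosts = set([ip
                 for item in pillar_items       # aligned under the '['
                 for node, ip in item.items() if ip])
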
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 5396cef..4969e6e 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -53,16 +53,16 @@
"-e SOURCE_FILE=keystonercv3 "
"-e CUSTOM='--pattern {1}' "
"-v /root/:/home/rally {2}{3} "
- "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output".format(
- conf_name, pattern, registry, image_name))
+ "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output"
+ .format(conf_name, pattern, registry, image_name))
else:
cmd = ("docker run --rm --net=host "
"-e TEMPEST_CONF={0} "
"-e SKIP_LIST=mcp_skip.list "
"-e SOURCE_FILE=keystonercv3 "
"-v /root/:/home/rally {2}{3} "
- "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output".format(
- conf_name, pattern, registry, image_name))
+ "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output"
+ .format(conf_name, pattern, registry, image_name))
with self.__underlay.remote(node_name=target_name[0]) as node_remote:
result = node_remote.execute(cmd)
LOG.debug("Test execution result is {}".format(result))
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 4d55d97..d02d7db 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -48,7 +48,7 @@
        super(SaltManager, self).__init__(config=config, underlay=underlay)

    def install(self, commands):
- #if self.__config.salt.salt_master_host == '0.0.0.0':
+ # if self.__config.salt.salt_master_host == '0.0.0.0':
# # Temporary workaround. Underlay should be extended with roles
# salt_nodes = self.__underlay.node_names()
# self.__config.salt.salt_master_host = \
@@ -66,7 +66,7 @@
if self.__host:
return self.__host
else:
- #TODO(ddmitriev): consider to add a check and raise
+ # TODO(ddmitriev): consider to add a check and raise
# exception if 'salt_master_host' is not initialized.
            return self.__config.salt.salt_master_host

diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 9ed13db..ee835ff 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -48,8 +48,8 @@
tgt='I@keepalived:cluster:enabled:true and not ctl*',
pillar='keepalived:cluster:instance:prometheus_server_vip:address')
sl_vip_ip = set([ip
- for item in sl_vip_address_pillars
- for node,ip in item.items() if ip])
+ for item in sl_vip_address_pillars
+ for node, ip in item.items() if ip])
assert len(sl_vip_ip) == 1, (
"Found more than one SL VIP in pillars:{0}, "
"expected one!").format(sl_vip_ip)
@@ -85,7 +85,8 @@
target_node_name = [node_name for node_name
in self.__underlay.node_names()
if node_to_run in node_name]
- with self.__underlay.remote(node_name=target_node_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_node_name[0]) \
+ as node_remote:
cmd = "pytest -k {}".format(path_tests_to_run)
result = node_remote.execute(cmd)
LOG.debug("Test execution result is {}".format(result))
@@ -113,7 +114,8 @@
.format(node, services_status, expected_services)
for service in expected_services:
assert service in services_status,\
- 'Missing service {0} in {1}'.format(service, services_status)
+ 'Missing service {0} in {1}'.format(service,
+ services_status)
assert '0' not in services_status.get(service),\
                    'Service {0} failed to start'.format(service)

@@ -129,11 +131,11 @@
LOG.info('Restarting keepalived service on mon nodes...')
for node in nodes:
self._salt.local(tgt=node, fun='cmd.run',
- args='systemctl restart keepalived')
+ args='systemctl restart keepalived')
LOG.warning(
'Ip states after force restart {0}'.format(
self._salt.local(tgt='mon*',
- fun='cmd.run', args='ip a')))
+ fun='cmd.run', args='ip a')))
current_targets = prometheus_client.get_targets()
LOG.debug('Current targets after install {0}'