Fixed setting a custom MTU on TF clouds by updating the ports

The MOS 22.5 cloud does not include a TF version that can set a
custom MTU during network creation; that capability is available
only since TF R21.4. Until then, we fall back to the old way of
updating the VM ports, see [1].
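
For reference, a rough sketch of that approach with openstacksdk
(the function name and the exact calls below are illustrative
assumptions; the actual helper in this change is
os_actions.update_network_port_with_custom_mtu):

    import openstack

    def set_port_mtu_via_dhcp_option(conn, vm_id, mtu):
        # Find the Neutron ports attached to the VM and push the
        # custom MTU via DHCP option 26 (interface-mtu), as
        # described in [1].
        for port in conn.network.ports(device_id=vm_id):
            conn.network.update_port(
                port,
                extra_dhcp_opts=[{'opt_name': '26',
                                  'opt_value': str(mtu)}])

    conn = openstack.connect(cloud='envvars')
    # e.g. set_port_mtu_via_dhcp_option(conn, vm.id, custom_mtu)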

Also, added a workaround for the 'mtu' field on MOS TF clouds.
On MOS TF clouds, the network payload returned by the API does not
contain the 'mtu' attribute, so a workaround is needed. It is done
similarly to the CLI: the missing mtu is set to None.
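
A minimal sketch of the idea (normalize_network is a hypothetical
helper, not code from this change):

    def normalize_network(net):
        # MOS TF clouds omit 'mtu' from the network API payload;
        # mirror the CLI behaviour by defaulting the field to None.
        net.setdefault('mtu', None)
        return net

    net1 = normalize_network({'id': 'net1-id', 'name': 'net1'})
    print(net1['mtu'])  # -> None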

Also, made some PEP8 and code style fixes.

[1] https://supportportal.juniper.net/s/article/Contrail-How-to-update-the-interface-MTU-of-an-exsiting-VMI-with-DHCP-option?language=en_US

Related-PROD: PROD-36943

Change-Id: Ieeb0dc50a29b3145b9ffaae2b7673543983fd915
diff --git a/tests/test_vm2vm.py b/tests/test_vm2vm.py
index d387ce1..0a89f1e 100644
--- a/tests/test_vm2vm.py
+++ b/tests/test_vm2vm.py
@@ -36,6 +36,7 @@
     ssh_timeout = int(config.get('ssh_timeout', 500))
     threads = int(config.get('multiple_threads_number', 10))
     iperf_utility = config.get('multiple_threads_iperf_utility', 'iperf3')
+    custom_mtu = config.get('custom_mtu') or 'default'
     utils.check_iperf_utility(iperf_utility)
     result_table = Texttable(max_width=120)
 
@@ -90,6 +91,13 @@
                 list(vms[i].addresses.keys())[0]][0]['addr']
             vm_info.append({'vm': vms[i], 'fip': fip['floating_ip_address'],
                             'private_address': private_address})
+        # Set custom MTU if required
+        if os_actions.is_cloud_tf() and (custom_mtu != "default"):
+            logger.info("Setting up custom MTU at network ports...")
+            for vm in vms:
+                os_actions.update_network_port_with_custom_mtu(vm.id,
+                                                               custom_mtu)
+
         # Check VMs are reachable and prepare iperf3
         transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu',
                                       password='dd', private_key=private_key)
@@ -101,14 +109,14 @@
                 ssh.prepare_iperf(vm_info[i]['fip'], private_key=private_key)
                 mtus.append(transport1.get_mtu_from_vm(
                     vm_info[i]['fip'], private_key=private_key))
-        logger.info("MTU at networks: {}, {}".format(os_resources['net1']['mtu'],
-                                                 os_resources['net2']['mtu']))
+        logger.info("MTU at networks: {}, {}".format(
+            os_resources['net1']['mtu'], os_resources['net2']['mtu']))
         logger.info("MTU at VMs: {}".format(", ".join(mtus)))
 
         # Prepare the result table and run iperf3
-        table_rows = []
-        table_rows.append(['Test Case', 'Host 1', 'Host 2',
-                           'MTU at VMs', 'Result'])
+        table_rows = [[
+            'Test Case', 'Host 1', 'Host 2', 'MTU at VMs', 'Result'
+        ]]
         # Do iperf3 measurement #1
         logger.info("Doing 'VM to VM in same tenant on same node via Private "
                     "IP, 1 thread' measurement...")
@@ -152,8 +160,9 @@
         else:
             iperf_utility = "iperf"
             result3 = transport1.exec_command(
-                '{} -c {} -P {} -t {} | tail -n 1'.format(iperf_utility,
-                    vm_info[2]['private_address'], threads, iperf_time))
+                '{} -c {} -P {} -t {} | tail -n 1'.format(
+                    iperf_utility, vm_info[2]['private_address'],
+                    threads, iperf_time))
             res3 = (b" ".join(result3.split()[-2::])).decode('utf-8')
         logger.info("Result #3 is {}".format(res3))
         table_rows.append(['VM to VM in same tenant on different HW nodes '