Deploy Ocata OVS with Ceph

Change-Id: Id5aa57ef76b9fe9121c81c824ff99cc1f3a30b1f
diff --git a/tcp_tests/fixtures/ceph_fixtures.py b/tcp_tests/fixtures/ceph_fixtures.py
new file mode 100644
index 0000000..c294542
--- /dev/null
+++ b/tcp_tests/fixtures/ceph_fixtures.py
@@ -0,0 +1,84 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+from tcp_tests.managers import ceph_manager
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='function')
+def ceph_actions(config, hardware, underlay, salt_deployed):
+    """Fixture that provides various actions for OpenStack
+
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides hardware manager
+    :param underlay: fixture provides underlay manager
+    :param salt_deployed: fixture provides salt manager
+    :rtype: CephManager
+
+    For use in tests or fixtures to deploy a custom Ceph cluster
+    """
+    return ceph_manager.CephManager(config, underlay, hardware, salt_deployed)
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.ceph_deployed)
+@pytest.fixture(scope='function')
+def ceph_deployed(revert_snapshot, request, config,
+                  hardware, underlay, common_services_deployed,
+                  ceph_actions):
+    """Fixture to get or install Ceph services on environment
+
+    :param revert_snapshot: fixture that reverts snapshot that is specified
+                            in test with @pytest.mark.revert_snapshot(<name>)
+    :param request: fixture provides pytest data
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+    :param underlay: fixture provides underlay manager
+    :param common_services_deployed: fixture provides CommonServicesManager
+    :param ceph_actions: fixture provides CephManager instance
+    :rtype: CephManager
+
+    If config.ceph.ceph_installed is not set, this fixture assumes
+    that the ceph services were not installed, and does the following:
+    - install ceph services
+    - make snapshot with name 'ceph_deployed'
+    - return CephManager instance
+
+    If config.ceph.ceph_installed is set, this fixture assumes that
+    the ceph services were already installed, and does the following:
+    - return CephManager instance
+
+    If you want to revert 'ceph_deployed' snapshot, please use mark:
+    @pytest.mark.revert_snapshot("ceph_deployed")
+    """
+    # Deploy Ceph cluster
+    if not config.ceph.ceph_installed:
+        steps_path = config.ceph_deploy.ceph_steps_path
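+        # Render the YAML steps template into a list of deployment commands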
+        commands = underlay.read_template(steps_path)
+        ceph_actions.install(commands)
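+        # Snapshot the freshly deployed cluster so tests can revert to it
+        # instead of reinstalling Ceph from scratch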
+        hardware.create_snapshot(ext.SNAPSHOT.ceph_deployed)
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        # 3. config.tcp.* options contain access credentials to the already
+        #    installed TCP API endpoint
+        pass
+
+    return ceph_actions
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 99baaff..804d110 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -51,6 +51,7 @@
     'virtlet_ceph_deployed',
     'k8s_deployed',
     'decapod_deployed',
+    'ceph_deployed',
 )
 
 
diff --git a/tcp_tests/managers/ceph_manager.py b/tcp_tests/managers/ceph_manager.py
new file mode 100644
index 0000000..bd68496
--- /dev/null
+++ b/tcp_tests/managers/ceph_manager.py
@@ -0,0 +1,39 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class CephManager(ExecuteCommandsMixin):
+    """docstring for CephManager"""
+
+    __config = None
+    __underlay = None
+    __hardware = None
+
+    def __init__(self, config, underlay, hardware, salt):
+        self.__config = config
+        self.__underlay = underlay
+        self.__hardware = hardware
+        self._salt = salt
+        super(CephManager, self).__init__(
+            config=config, underlay=underlay)
+
+    def install(self, commands):
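+        """Deploy Ceph by running the command steps from the YAML template."""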
+        self.execute_commands(commands,
+                              label='Install Ceph')
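+        # Persist the flag so the ceph_deployed fixture skips reinstallation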
+        self.__config.ceph.ceph_installed = True
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 39e5e6b..293e1cc 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -235,41 +235,12 @@
 ceph_deploy_opts = [
     ct.Cfg('ceph_steps_path', ct.String(),
            help="Path to YAML with steps to deploy sl",
-           default=_default_sl_prepare_tests_steps_path),
-    ct.Cfg('docker_image_alertmanager', ct.String(),
-           default='{}/openstack-docker/alertmanager:latest'.format(
-               settings.DOCKER_REGISTRY)),
-    ct.Cfg('docker_image_pushgateway', ct.String(),
-           default='{}/openstack-docker/pushgateway:latest'.format(
-               settings.DOCKER_REGISTRY)),
-    ct.Cfg('docker_image_prometheus', ct.String(),
-           default='{}/openstack-docker/prometheus:latest'.format(
-               settings.DOCKER_REGISTRY)),
-    ct.Cfg('docker_image_remote_agent', ct.String(),
-           default='{}/openstack-docker/telegraf:latest'.format(
-               settings.DOCKER_REGISTRY)),
-    ct.Cfg('docker_image_remote_storage_adapter', ct.String(),
-           default='{}/openstack-docker/remote_storage_adapter:latest'.format(
-               settings.DOCKER_REGISTRY)),
-    # SalesForce connection options for pushkin
-    ct.Cfg('sfdc_sandbox_enabled', ct.String(), default='False'),
-    ct.Cfg('sfdc_auth_url', ct.String(), default=''),
-    ct.Cfg('sfdc_username', ct.String(), default=''),
-    ct.Cfg('sfdc_password', ct.String(), default=''),
-    ct.Cfg('sfdc_consumer_key', ct.String(), default=''),
-    ct.Cfg('sfdc_consumer_secret', ct.String(), default=''),
-    ct.Cfg('sfdc_organization_id', ct.String(), default=''),
+           default=_default_ceph_prepare_tests_steps_path),
 ]
 
 ceph_opts = [
-    ct.Cfg('sl_installed', ct.Boolean(),
+    ct.Cfg('ceph_installed', ct.Boolean(),
            help="", default=False),
-    ct.Cfg('sl_vip_host', ct.IPAddress(),
-           help="Vip address for SL services", default='0.0.0.0'),
-    ct.Cfg('sl_prometheus_port', ct.String(),
-           help="Prometheus port", default='15010'),
-    ct.Cfg('sl_prometheus_proto', ct.String(),
-           help="Proemtheus protocol", default='http'),
 ]
 
 k8s_deploy_opts = [
@@ -348,6 +319,8 @@
     ('opencontrail', opencontrail_opts),
     ('stack_light', sl_opts),
     ('sl_deploy', sl_deploy_opts),
+    ('ceph', ceph_opts),
+    ('ceph_deploy', ceph_deploy_opts),
     ('k8s_deploy', k8s_deploy_opts),
     ('k8s', k8s_opts),
 ]
@@ -429,6 +402,16 @@
     config.register_group(cfg.OptGroup(name='k8s',
                                        title="K8s config and credentials"))
     config.register_opts(group='k8s', opts=k8s_opts)
+    config.register_group(cfg.OptGroup(name='ceph',
+                                       title="Ceph config"))
+    config.register_opts(group='ceph', opts=ceph_opts)
+
+    config.register_group(
+        cfg.OptGroup(name='ceph_deploy',
+                     title="Ceph deploy config"))
+    config.register_opts(group='ceph_deploy', opts=ceph_deploy_opts)
+
     return config
 
 
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
index 723515d..eb0a042 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
@@ -17,14 +17,14 @@
 
 - description: Sync grains on ceph mon nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon' state.sls saltutil.sync_grains
+    -C 'I@ceph:mon' saltutil.sync_grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine on ceph mons
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls mine.update
+    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -36,9 +36,9 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install ceph mgr if defined
+- description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
-    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgt' ; then
+    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
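+      # ceph-mgr is only defined in Luminous and newer releases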
       salt -C 'I@ceph:mgr' state.sls ceph.mgr
     fi
   node_name: {{ HOSTNAME_CFG01 }}
@@ -54,7 +54,7 @@
 
 - description: Sync grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls saltutil.sync_grains
+    -C 'I@ceph:osd' saltutil.sync_grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -64,18 +64,18 @@
     -C 'I@ceph:osd' state.sls ceph.osd.custom
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: false
+  skip_fail: true
 
 - description: Sync grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls saltutil.sync_grains
+    -C 'I@ceph:osd' saltutil.sync_grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
 - description: Update mine on ceph osd
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls mine.update
+    -C 'I@ceph:osd' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -97,7 +97,7 @@
 - description: Install radosgw if exists
   cmd: |
     if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt -C 'I@ceph:radosgw' state.sls saltutil.sync_grains;
+      salt -C 'I@ceph:radosgw' saltutil.sync_grains;
       salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
       salt -C 'I@keystone:client' state.sls keystone.client;
     fi
@@ -112,32 +112,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Connect ceph to glance if glance is using it
+- description: Connect ceph to glance
   cmd: |
-    if salt -C 'I@ceph:common and I@glance:server' match.pillar 'ceph:common and glance:server' ; then
-      salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-      salt -C 'I@ceph:common and I@glance:server' service.restart glance-api,glance-glare,glance-registry
-    fi
+    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
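+    # service.restart takes a single unit name, so each glance service is
+    # restarted with its own call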
+    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+    salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+    salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
-- description: Connect ceph to cinder if cinder is using it
+- description: Connect ceph to cinder and nova
   cmd: |
-    if salt -C 'I@ceph:common and I@cinder:controller' match.pillar 'ceph:common and cinder:controller' ; then
-      salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Connect ceph to nova
-  cmd: |
-    if salt -C 'I@ceph:common and I@nova:compute' match.pillar 'ceph:common and nova:compute' ; then
-      salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-      salt -C 'I@ceph:common and I@nova:compute' state.sls saltutil.sync_grains;
-      salt -C 'I@ceph:common and I@nova:compute' state.sls nova
-    fi
+    salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
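+    # refresh grains so the nova state below picks up the new ceph keyring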
+    salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+    salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index 678103c..4dd76c5 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -57,14 +57,14 @@
 
 - description: Check keystone service-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
 - description: Check glance image-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -79,7 +79,7 @@
 
 - description: Check nova service-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 5}
   skip_fail: false
@@ -89,12 +89,12 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@cinder:controller' state.sls cinder -b 1
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Check cinder list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -116,7 +116,7 @@
 
 - description: Check neutron agent-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -131,7 +131,7 @@
 
 - description: Check heat service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack orchestration resource type list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 5, delay: 10}
   skip_fail: false
@@ -260,79 +260,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index 5dee9e4..480beef 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -399,6 +399,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -425,6 +428,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -451,9 +457,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 9aa021e..e767663 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from tcp_tests.fixtures.common_fixtures import *  # noqa
+from tcp_tests.fixtures.ceph_fixtures import *  # noqa
 from tcp_tests.fixtures.config_fixtures import *  # noqa
 from tcp_tests.fixtures.underlay_fixtures import *  # noqa
 from tcp_tests.fixtures.rally_fixtures import *  # noqa
@@ -58,6 +59,8 @@
     # stacklight_fixtures
     'sl_actions',
     'sl_deployed',
+    'ceph_deployed',
+    'ceph_actions',
     # k8s fixtures
     'k8s_actions',
     'k8s_deployed'
diff --git a/tcp_tests/tests/system/test_ovs_ocata_ceph.py b/tcp_tests/tests/system/test_ovs_ocata_ceph.py
new file mode 100644
index 0000000..5f5df02
--- /dev/null
+++ b/tcp_tests/tests/system/test_ovs_ocata_ceph.py
@@ -0,0 +1,48 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestInstallOvsOcataCeph(object):
+    """Test class for test openstack with ceph and ovs deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_ocata_ceph_all_ovs_install(self, underlay, openstack_deployed,
+                                        ceph_deployed,
+                                        openstack_actions):
+        """Test for deploying ocata ovs with ceph and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Setup ceph
+        5. Run tempest
+
+        """
+        openstack_actions._salt.local(
+                tgt='*', fun='cmd.run',
+                args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")