Add wally folder for future use and switch cleanup.sh to the openstack client where possible

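A possible follow-up, not part of this change: the list/grep/delete
pattern that cleanup.sh now repeats for every resource type could be
collapsed into one helper. The sketch below is only illustrative; the
purge() name and the "-f value -c ID -c Name" column selection are
assumptions of the sketch, not code taken from the script:

    # hypothetical helper; $mask comes from the script as before
    purge() {
        local resource="$1"   # e.g. volume, network, "volume type"
        # print only ID and Name, keep rows matching $mask, delete by ID
        openstack $resource list -f value -c ID -c Name | grep "$mask" |
        while read -r id name; do
            openstack $resource delete "$id" && echo "deleted $id ($name)"
        done
    }
    # usage: purge server; purge volume; purge "volume type"; purge network
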
Change-Id: I8538dca46db49047154f093256645e141067ab3e
diff --git a/cleanup.sh b/cleanup.sh
index 88c2e8f..9792873 100755
--- a/cleanup.sh
+++ b/cleanup.sh
@@ -10,19 +10,22 @@
 echo "Delete roles"
 for i in `openstack role list | grep $mask | awk '{print $2}'`; do openstack role delete $i; echo deleted $i; done
 
+#echo "Delete projects"
+#for i in `openstack project list | grep $mask | awk '{print $2}'`; do openstack project delete $i; echo deleted $i; done
+
 echo "Delete servers"
 for i in `openstack server list --all | grep $mask | awk '{print $2}'`; do openstack server delete $i; echo deleted $i; done
 
-echo "Delete snapshot"
-for i in `cinder snapshot-list --all | grep $mask | awk '{print $2}'`; do cinder snapshot-reset-state $i; echo snapshot reset state is done for $i; done
-for i in `cinder snapshot-list --all | grep $mask | awk '{print $2}'`; do cinder snapshot-delete $i; echo deleted $i; done
+echo "Reset snapshot state and delete"
+for i in `openstack volume snapshot list --all | grep $mask | awk '{print $2}'`; do openstack volume snapshot set --state available $i; echo snapshot reset state is done for $i; done
+for i in `openstack volume snapshot list --all | grep $mask | awk '{print $2}'`; do openstack volume snapshot delete $i; echo deleted $i; done
 
-echo "Delete volumes"
-for i in `openstack volume list --all | grep $mask | awk '{print $2}'`; do cinder reset-state $i --state available; echo reset state is done for $i; done
+echo "Reset volume state and delete"
+for i in `openstack volume list --all | grep $mask | awk '{print $2}'`; do openstack volume set --state available $i; echo reset state is done for $i; done
 for i in `openstack volume list --all | grep $mask | awk '{print $2}'`; do openstack volume delete $i; echo deleted $i; done
 
 echo "Delete volume types"
-for i in `cinder type-list | grep $mask | awk '{print $2}'`; do cinder type-delete $i; done
+for i in `openstack volume type list | grep $mask | awk '{print $2}'`; do openstack volume type delete $i; done
 
 echo "Delete images"
 for i in `openstack image list | grep $mask | awk '{print $2}'`; do openstack image delete $i; echo deleted $i; done
@@ -34,19 +37,19 @@
 for i in `openstack keypair list | grep $mask | awk '{print $2}'`; do openstack keypair delete $i; echo deleted $i; done
 
 echo "Delete ports"
-for i in `neutron port-list --all | grep $mask | awk '{print $2}'`; do neutron port-delete $i; done
+for i in `openstack port list | grep $mask | awk '{print $2}'`; do openstack port delete $i; done
 
 echo "Delete Router ports (experimental)"
 neutron router-list|grep $mask|awk '{print $2}'|while read line; do echo $line; neutron router-port-list $line|grep subnet_id|awk '{print $11}'|sed 's/^\"//;s/\",//'|while read interface; do neutron router-interface-delete $line $interface; done; done
 
 echo "Delete subnets"
-for i in `neutron subnet-list --all | grep $mask | awk '{print $2}'`; do neutron subnet-delete $i; done
+for i in `openstack subnet list | grep $mask | awk '{print $2}'`; do openstack subnet delete $i; done
 
 echo "Delete nets"
-for i in `neutron net-list --all | grep $mask | awk '{print $2}'`; do neutron net-delete $i; done
+for i in `openstack network list | grep $mask | awk '{print $2}'`; do openstack network delete $i; done
 
 echo "Delete routers"
-for i in `neutron router-list --all | grep $mask | awk '{print $2}'`; do neutron router-delete $i; done
+for i in `openstack router list | grep $mask | awk '{print $2}'`; do openstack router delete $i; done
 
 echo "Delete regions"
 for i in `openstack region list | grep $mask | awk '{print $2}'`; do openstack region delete $i; echo deleted $i; done
diff --git a/wally/ceph_perf_3_vm_example.yaml b/wally/ceph_perf_3_vm_example.yaml
new file mode 100644
index 0000000..3204009
--- /dev/null
+++ b/wally/ceph_perf_3_vm_example.yaml
@@ -0,0 +1,76 @@
+# LOGGING SECTION. USE AS-IS BY DEFAULT.
+logging:
+    version: 1
+    disable_existing_loggers: true
+    formatters:
+        simple:
+            format: "%(asctime)s - %(levelname)s - %(message)s"
+            datefmt: "%H:%M:%S"
+    handlers:
+        console:
+            level: INFO
+            class: logging.StreamHandler
+            formatter: simple
+            stream: "ext://sys.stdout"
+        log_file:
+            level: DEBUG
+            class: logging.FileHandler
+            formatter: simple
+            filename: null
+    loggers:
+        cmd:     {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        storage: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        rpc:     {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        cephlib: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        collect: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        agent:   {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        wally:   {"level": "DEBUG", "handlers": ["console", "log_file"]}
+
+# default parameters copied from default.yaml
+# to avoid pulling in parameters that are not needed for this load test
+collect_info: true
+var_dir_root: /tmp/perf_tests
+settings_dir: ~/.wally
+connect_timeout: 30
+max_time_diff_ms: 5000
+rpc_log_level: DEBUG
+default_test_local_folder: "/tmp/wally_{name}_{uuid}"
+keep_raw_files: false  # don't change this value, keeping raw files is not supported at the moment
+download_rpc_logs: true
+results_storage: /tmp/disk_perf_reports
+ceph_opts: nodeep-scrub, noscrub
+
+# default optional roles
+default_dev_roles:
+    - role=testnode:
+        - type=cpu: client_cpu
+        - type=block: client_disk
+        - type=eth: client_net
+        - type=weth: client_net
+
+# predefined nodes or VMs to benchmark Ceph on
+nodes:
+    "ssh://<SSH_USER>@<INSTANCE_IP_1>::<PRIV_KEY>": testnode
+    "ssh://<SSH_USER>@<INSTANCE_IP_2>::<PRIV_KEY>": testnode
+    "ssh://<SSH_USER>@<INSTANCE_IP_3>::<PRIV_KEY>": testnode
+
+# system-cpu, block-io, net-io sensors are used by default
+sensors:
+    online: true
+    roles_mapping:
+        testnode: system-cpu, system-ram, perprocess-cpu, perprocess-ram, block-io, net-io
+
+# tests to run on the VMs
+# use_system_fio: set to true to use the fio binary
+# that was installed manually on the testnodes
+# use_sudo: run fio with sudo, since the test user
+# does not always have access to the mounted drive
+tests:
+    - fio:
+        load: ceph
+        use_system_fio: true
+        use_sudo: true
+        params:
+            FILENAME: /mnt/fio
+            FILESIZE: 400G
+
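Since use_system_fio expects fio to already be installed on the
testnodes by hand, and FILENAME points at a mounted drive that the test
user may only reach via sudo, a quick pre-flight check over ssh can save
a failed run. This is a minimal sketch only; the placeholders follow the
nodes section above, the loop assumes passwordless sudo, and it is not
part of wally or of this change:

    # hypothetical pre-flight check for the three testnodes
    for host in <INSTANCE_IP_1> <INSTANCE_IP_2> <INSTANCE_IP_3>; do
        ssh -i <PRIV_KEY> <SSH_USER>@$host \
            'command -v fio && sudo test -w /mnt && echo "$(hostname): fio and /mnt OK"'
    done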