fix fio runner: scale ceph suite load by queue depth instead of thread count

Switch ceph.cfg to include defaults_qd.cfg and drive load with
iodepth={QD_*} rather than numjobs. Split the 16m pseudo-sequential
test into separate read and write sections with their own QD lists,
and add sync=1 to the single-job direct 4k random write.

diff --git a/wally/suits/io/ceph.cfg b/wally/suits/io/ceph.cfg
index 9287030..a44c749 100644
--- a/wally/suits/io/ceph.cfg
+++ b/wally/suits/io/ceph.cfg
@@ -1,9 +1,10 @@
 [global]
-include defaults.cfg
+include defaults_qd.cfg
 
-NUMJOBS_R={% 1, 5, 10, 15, 25, 40, 80, 120 %}
-NUMJOBS_W={% 1, 5, 10, 15, 25, 40 %}
-NUMJOBS_SEQ_OPS={% 1, 3, 10 %}
+QD_R={% 1, 5, 10, 15, 25, 40, 80, 120 %}
+QD_W={% 1, 5, 10, 15, 25, 40 %}
+QD_SEQ_R={% 1, 3, 10 %}
+QD_SEQ_W={% 1, 2, 4 %}
 
 ramp_time=30
 runtime=180
@@ -14,8 +15,7 @@
 [ceph_{TEST_SUMM}]
 blocksize=4k
 rw=randwrite
-sync=1
-numjobs={NUMJOBS_W}
+iodepth={QD_W}
 
 # ---------------------------------------------------------------------
-# check different thread count, direct read mode. (latency, iops) = func(th_count)
+# check different queue depth, direct read mode. (latency, iops) = func(QD)
@@ -25,24 +25,34 @@
 blocksize=4k
 rw=randread
 direct=1
-numjobs={NUMJOBS_R}
+iodepth={QD_R}
 
 # ---------------------------------------------------------------------
-# direct write
+# sync write
 # ---------------------------------------------------------------------
 [ceph_{TEST_SUMM}]
 blocksize=4k
 rw=randwrite
 direct=1
+sync=1
 numjobs=1
 
 # ---------------------------------------------------------------------
-# this is essentially sequential write/read operations
+# these are essentially sequential write operations
 # we can't use sequential with numjobs > 1 due to caching and block merging
 # ---------------------------------------------------------------------
 [ceph_{TEST_SUMM}]
 blocksize=16m
-rw={% randread, randwrite %}
+rw=randwrite
 direct=1
-numjobs={NUMJOBS_SEQ_OPS}
+iodepth={QD_SEQ_W}
 
+# ---------------------------------------------------------------------
+# these are essentially sequential read operations
+# we can't use sequential with numjobs > 1 due to caching and block merging
+# ---------------------------------------------------------------------
+[ceph_{TEST_SUMM}]
+blocksize=16m
+rw=randread
+direct=1
+iodepth={QD_SEQ_R}
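
For reference, a minimal sketch of what a single rendered job might look
like once the runner expands the template, using the direct random-read
section at QD=40. The rendered section name and the ioengine line are
assumptions here: iodepth only takes effect with an asynchronous engine
such as libaio, which presumably comes from defaults_qd.cfg or the runner
itself.

    [global]
    ramp_time=30
    runtime=180
    # assumed to come from defaults_qd.cfg; with fio's default
    # synchronous engine, iodepth > 1 has no effect
    ioengine=libaio

    # hypothetical expansion of ceph_{TEST_SUMM}
    [ceph_rrd4kqd40]
    blocksize=4k
    rw=randread
    direct=1
    iodepth=40

Assuming each value in a {% ... %} list expands into its own job, as the
old NUMJOBS_* lists did, the random-read ladder runs at eight queue depths
(QD_R) and the random-write ladder at six (QD_W).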