working on reporting; this commit represents a broken code state
diff --git a/wally/suits/io/defaults_qd.cfg b/wally/suits/io/defaults_qd.cfg
index fcf5c16..c3dee19 100644
--- a/wally/suits/io/defaults_qd.cfg
+++ b/wally/suits/io/defaults_qd.cfg
@@ -18,7 +18,6 @@
filename={FILENAME}
size={FILESIZE}
-write_iops_log=fio_iops_log
write_bw_log=fio_bw_log
log_avg_msec=1000
write_hist_log=fio_lat_hist_log
diff --git a/wally/suits/io/fio.py b/wally/suits/io/fio.py
index 7b2c3e3..33e8343 100644
--- a/wally/suits/io/fio.py
+++ b/wally/suits/io/fio.py
@@ -1,14 +1,15 @@
import array
import os.path
import logging
-from typing import cast, Any
+from typing import cast, Any, Tuple, List
import wally
-from ...utils import StopTestError, get_os, ssize2b
+from ...utils import StopTestError, ssize2b, b2ssize
from ...node_interfaces import IRPCNode
+from ...node_utils import get_os
from ..itest import ThreadedTest
-from ...result_classes import TimeSeries, JobMetrics
+from ...result_classes import TimeSeries, DataSource, TestJobConfig
from .fio_task_parser import execution_time, fio_cfg_compile, FioJobConfig, FioParams, get_log_files
from . import rpc_plugin
from .fio_hist import expected_lat_bins
@@ -17,7 +18,7 @@
logger = logging.getLogger("wally")
-class IOPerfTest(ThreadedTest):
+class FioTest(ThreadedTest):
soft_runcycle = 5 * 60
retry_time = 30
configs_dir = os.path.dirname(__file__) # type: str
@@ -27,7 +28,7 @@
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
- get = self.config.params.get
+ get = self.suite.params.get
self.remote_task_file = self.join_remote("task.fio")
self.remote_output_file = self.join_remote("fio_result.json")
@@ -35,7 +36,7 @@
self.use_sudo = get("use_sudo", True) # type: bool
self.force_prefill = get('force_prefill', False) # type: bool
- self.load_profile_name = self.config.params['load'] # type: str
+ self.load_profile_name = self.suite.params['load'] # type: str
if os.path.isfile(self.load_profile_name):
self.load_profile_path = self.load_profile_name # type: str
@@ -47,16 +48,16 @@
if self.use_system_fio:
self.fio_path = "fio" # type: str
else:
- self.fio_path = os.path.join(self.config.remote_dir, "fio")
+ self.fio_path = os.path.join(self.suite.remote_dir, "fio")
- self.load_params = self.config.params['params']
+ self.load_params = self.suite.params['params']
self.file_name = self.load_params['FILENAME']
if 'FILESIZE' not in self.load_params:
logger.debug("Getting test file sizes on all nodes")
try:
- sizes = {node.conn.fs.file_stat(self.file_name)['size']
- for node in self.config.nodes}
+ sizes = {node.conn.fs.file_stat(self.file_name)[b'size']
+ for node in self.suite.nodes}
except Exception:
logger.exception("FILESIZE is not set in config file and fail to detect it." +
"Set FILESIZE or fix error and rerun test")
@@ -68,7 +69,7 @@
raise StopTestError()
self.file_size = list(sizes)[0]
- logger.info("Detected test file size is %s", self.file_size)
+ logger.info("Detected test file size is %sB", b2ssize(self.file_size))
self.load_params['FILESIZE'] = self.file_size
else:
self.file_size = ssize2b(self.load_params['FILESIZE'])
@@ -80,31 +81,41 @@
logger.error("Empty fio config provided")
raise StopTestError()
- self.exec_folder = self.config.remote_dir
+ self.exec_folder = self.suite.remote_dir
def config_node(self, node: IRPCNode) -> None:
plugin_code = open(rpc_plugin.__file__.rsplit(".", 1)[0] + ".py", "rb").read() # type: bytes
node.upload_plugin("fio", plugin_code)
try:
- node.conn.fs.rmtree(self.config.remote_dir)
+ node.conn.fs.rmtree(self.suite.remote_dir)
except Exception:
pass
try:
- node.conn.fs.makedirs(self.config.remote_dir)
+ node.conn.fs.makedirs(self.suite.remote_dir)
except Exception:
- msg = "Failed to recreate folder {} on remote {}.".format(self.config.remote_dir, node)
+ msg = "Failed to recreate folder {} on remote {}.".format(self.suite.remote_dir, node)
logger.exception(msg)
raise StopTestError()
+ # TODO: check this during config validation
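+ # rpc_fill_file (rpc_plugin.py) asserts that the size in MiB is divisible by 4, so reject misaligned sizes up front.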
+ if self.file_size % (4 * (1024 ** 2)) != 0:
+ logger.error("Test file size must be proportional to 4MiB")
+ raise StopTestError()
+
self.install_utils(node)
mb = int(self.file_size / 1024 ** 2)
- logger.info("Filling test file %s with %sMiB of random data", self.file_name, mb)
- fill_bw = node.conn.fio.fill_file(self.file_name, mb, force=self.force_prefill, fio_path=self.fio_path)
- if fill_bw is not None:
- logger.info("Initial fio fill bw is {} MiBps for {}".format(fill_bw, node))
+ logger.info("Filling test file %s on node %s with %sMiB of random data", self.file_name, node.info, mb)
+ is_prefilled, fill_bw = node.conn.fio.fill_file(self.file_name, mb,
+ force=self.force_prefill,
+ fio_path=self.fio_path)
+
+ if not is_prefilled:
+ logger.info("Test file on node %s is already prefilled", node.info)
+ elif fill_bw is not None:
+ logger.info("Initial fio fill bw is %s MiBps for %s", fill_bw, node.info)
def install_utils(self, node: IRPCNode) -> None:
os_info = get_os(node)
@@ -126,19 +137,19 @@
raise StopTestError()
bz_dest = self.join_remote('fio.bz2') # type: str
- node.copy_file(fio_path, bz_dest)
+ node.copy_file(fio_path, bz_dest, compress=False)
node.run("bzip2 --decompress {} ; chmod a+x {}".format(bz_dest, self.join_remote("fio")))
- def get_expected_runtime(self, job_config: FioJobConfig) -> int:
+ def get_expected_runtime(self, job_config: TestJobConfig) -> int:
return execution_time(cast(FioJobConfig, job_config))
- def prepare_iteration(self, node: IRPCNode, job_config: FioJobConfig) -> None:
- node.put_to_file(self.remote_task_file, str(job_config).encode("utf8"))
+ def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
+ node.put_to_file(self.remote_task_file, str(job).encode("utf8"))
# TODO: get a link to substorage as a parameter
- def run_iteration(self, node: IRPCNode, iter_config: FioJobConfig, job_root: str) -> JobMetrics:
- f_iter_config = cast(FioJobConfig, iter_config)
- exec_time = execution_time(f_iter_config)
+ def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
+ exec_time = execution_time(cast(FioJobConfig, job))
+
fio_cmd_templ = "cd {exec_folder}; " + \
"{fio_path} --output-format=json --output={out_file} --alloc-size=262144 {job_file}"
@@ -152,20 +163,26 @@
if must_be_empty:
logger.error("Unexpected fio output: %r", must_be_empty)
- res = {} # type: JobMetrics
-
# put fio output into storage
fio_out = node.get_file_content(self.remote_output_file)
- self.rstorage.put_extra(job_root, node.info.node_id(), "fio_raw", fio_out)
+
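+ # DataSource names this raw fio output in storage: suite / job / node, device 'fio', sensor 'stdout', tagged json.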
+ path = DataSource(suite_id=self.suite.storage_id,
+ job_id=job.storage_id,
+ node_id=node.node_id,
+ dev='fio',
+ sensor='stdout',
+ tag='json')
+
+ self.storage.put_extra(fio_out, path)
node.conn.fs.unlink(self.remote_output_file)
files = [name for name in node.conn.fs.listdir(self.exec_folder)]
-
- for name, path in get_log_files(f_iter_config):
- log_files = [fname for fname in files if fname.startswith(path)]
+ result = []
+ for name, file_path in get_log_files(cast(FioJobConfig, job)):
+ log_files = [fname for fname in files if fname.startswith(file_path)]
if len(log_files) != 1:
logger.error("Found %s files, match log pattern %s(%s) - %s",
- len(log_files), path, name, ",".join(log_files[10:]))
+ len(log_files), file_path, name, ",".join(log_files[:10]))
raise StopTestError()
fname = os.path.join(self.exec_folder, log_files[0])
@@ -203,14 +220,13 @@
logger.exception("Error during parse %s fio log file in line %s: %r", name, idx, line)
raise StopTestError()
- ts = TimeSeries(name=name,
- raw=raw_result,
- second_axis_size=expected_lat_bins if name == 'lat' else 1,
- data=parsed,
- times=times)
- res[(node.info.node_id(), 'fio', name)] = ts
-
- return res
+ result.append(TimeSeries(name=name,
+ raw=raw_result,
+ second_axis_size=expected_lat_bins if name == 'lat' else 1,
+ data=parsed,
+ times=times,
+ source=path(sensor=name, tag=None)))
+ return result
def format_for_console(self, data: Any) -> str:
raise NotImplementedError()
diff --git a/wally/suits/io/fio_task_parser.py b/wally/suits/io/fio_task_parser.py
index 6940aaf..03702ae 100644
--- a/wally/suits/io/fio_task_parser.py
+++ b/wally/suits/io/fio_task_parser.py
@@ -7,13 +7,12 @@
import os.path
import argparse
import itertools
-from typing import Optional, Iterator, Union, Dict, Iterable, List, TypeVar, Callable, Tuple, NamedTuple, Any
+from typing import Optional, Iterator, Union, Dict, Iterable, List, TypeVar, Callable, Tuple, NamedTuple, Any, cast
from collections import OrderedDict
-from ...result_classes import IStorable
from ...result_classes import TestJobConfig
-from ...utils import sec_to_str, ssize2b
+from ...utils import sec_to_str, ssize2b, b2ssize, flatmap
SECTION = 0
@@ -29,23 +28,162 @@
('tp', int),
('name', str),
('val', Any)])
+FioTestSumm = NamedTuple("FioTestSumm",
+ [("oper", str),
+ ("sync_mode", str),
+ ("bsize", int),
+ ("qd", int),
+ ("thcount", int),
+ ("write_perc", Optional[int])])
-TestSumm = NamedTuple("TestSumm",
- [("oper", str),
- ("mode", str),
- ("bsize", int),
- ("iodepth", int),
- ("vm_count", int)])
+
+def is_fio_opt_true(vl: Union[str, int]) -> bool:
+ return str(vl).lower() in ['1', 'true', 't', 'yes', 'y']
class FioJobConfig(TestJobConfig):
- def __init__(self, name: str) -> None:
- TestJobConfig.__init__(self)
- self.vals = OrderedDict() # type: Dict[str, Any]
- self.name = name
- def __eq__(self, other: 'FioJobConfig') -> bool:
- return self.vals == other.vals
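+ # (sync, direct) flags -> one-letter sync-mode code; sync2long below expands it to a human-readable form.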
+ ds2mode = {(True, True): 'x',
+ (True, False): 's',
+ (False, True): 'd',
+ (False, False): 'a'}
+
+ sync2long = {'x': "sync direct",
+ 's': "sync",
+ 'd': "direct",
+ 'a': "buffered"}
+
+ op_type2short = {"randread": "rr",
+ "randwrite": "rw",
+ "read": "sr",
+ "write": "sw",
+ "randrw": "rx"}
+
+ def __init__(self, name: str, idx: int) -> None:
+ TestJobConfig.__init__(self, idx)
+ self.name = name
+ self._sync_mode = None # type: Optional[str]
+ self._ctuple = None # type: Optional[FioTestSumm]
+ self._ctuple_no_qd = None # type: Optional[FioTestSumm]
+
+ # ------------- BASIC PROPERTIES -----------------------------------------------------------------------------------
+
+ @property
+ def write_perc(self) -> Optional[int]:
+ try:
+ return int(self.vals["rwmixwrite"])
+ except (KeyError, TypeError):
+ try:
+ return 100 - int(self.vals["rwmixread"])
+ except (KeyError, TypeError):
+ return None
+
+ @property
+ def qd(self) -> int:
+ return int(self.vals['iodepth'])
+
+ @property
+ def bsize(self) -> int:
+ return ssize2b(self.vals['blocksize']) // 1024
+
+ @property
+ def oper(self) -> str:
+ return self.vals['rw']
+
+ @property
+ def op_type_short(self) -> str:
+ return self.op_type2short[self.vals['rw']]
+
+ @property
+ def thcount(self) -> int:
+ return int(self.vals.get('numjobs', 1))
+
+ @property
+ def sync_mode(self) -> str:
+ if self._sync_mode is None:
+ direct = is_fio_opt_true(self.vals.get('direct', '0')) or \
+ not is_fio_opt_true(self.vals.get('buffered', '0'))
+ sync = is_fio_opt_true(self.vals.get('sync', '0'))
+ self._sync_mode = self.ds2mode[(sync, direct)]
+ return cast(str, self._sync_mode)
+
+ @property
+ def sync_mode_long(self) -> str:
+ return self.sync2long[self.sync_mode]
+
+ # ----------- COMPLEX PROPERTIES -----------------------------------------------------------------------------------
+
+ @property
+ def characterized_tuple(self) -> Tuple:
+ if self._ctuple is None:
+ self._ctuple = FioTestSumm(oper=self.oper,
+ sync_mode=self.sync_mode,
+ bsize=self.bsize,
+ qd=self.qd,
+ thcount=self.thcount,
+ write_perc=self.write_perc)
+
+ return cast(Tuple, self._ctuple)
+
+ @property
+ def characterized_tuple_no_qd(self) -> FioTestSumm:
+ if self._ctuple_no_qd is None:
+ self._ctuple_no_qd = FioTestSumm(oper=self.oper,
+ sync_mode=self.sync_mode,
+ bsize=self.bsize,
+ qd=None,
+ thcount=self.thcount,
+ write_perc=self.write_perc)
+
+ return cast(FioTestSumm, self._ctuple_no_qd)
+
+ @property
+ def long_summary(self) -> str:
+ res = "{0.sync_mode_long} {0.oper} {1} QD={0.qd}".format(self, b2ssize(self.bsize * 1024))
+ if self.thcount != 1:
+ res += " threads={}".format(self.thcount)
+ if self.write_perc is not None:
+ res += " write_perc={}%".format(self.write_perc)
+ return res
+
+ @property
+ def long_summary_no_qd(self) -> str:
+ res = "{0.sync_mode_long} {0.oper} {1}".format(self, b2ssize(self.bsize * 1024))
+ if self.thcount != 1:
+ res += " threads={}".format(self.thcount)
+ if self.write_perc is not None:
+ res += " write_perc={}%".format(self.write_perc)
+ return res
+
+ @property
+ def summary(self) -> str:
+ tpl = cast(FioTestSumm, self.characterized_tuple)
+ res = "{0.oper}{0.sync_mode}{0.bsize}_qd{0.qd}".format(tpl)
+
+ if tpl.thcount != 1:
+ res += "th" + str(tpl.thcount)
+ if tpl.write_perc is not None:
+ res += "wr" + str(tpl.write_perc)
+
+ return res
+
+ @property
+ def summary_no_qd(self) -> str:
+ tpl = cast(FioTestSumm, self.characterized_tuple)
+ res = "{0.oper}{0.sync_mode}{0.bsize}".format(tpl)
+
+ if tpl.thcount != 1:
+ res += "th" + str(tpl.thcount)
+ if tpl.write_perc is not None:
+ res += "wr" + str(tpl.write_perc)
+
+ return res
+ # ------------------------------------------------------------------------------------------------------------------
+
+ def __eq__(self, o: object) -> bool:
+ if not isinstance(o, FioJobConfig):
+ return False
+ return self.vals == cast(FioJobConfig, o).vals
def copy(self) -> 'FioJobConfig':
return copy.deepcopy(self)
@@ -75,17 +213,17 @@
return str(self)
def raw(self) -> Dict[str, Any]:
- return {
- 'vals': [[key, val] for key, val in self.vals.items()],
- 'summary': self.summary,
- 'name': self.name
- }
+ res = self.__dict__.copy()
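+ # _sync_mode is a lazily computed cache (see the sync_mode property), so it is not persisted.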
+ del res['_sync_mode']
+ res['vals'] = [[key, val] for key, val in self.vals.items()]
+ return res
@classmethod
def fromraw(cls, data: Dict[str, Any]) -> 'FioJobConfig':
- obj = cls(data['name'])
- obj.summary = data['summary']
- obj.vals.update(data['vals'])
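+ # Rebuild the instance without calling __init__; all state comes from the serialized dict.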
+ obj = cls.__new__(cls)
+ data['vals'] = OrderedDict(data['vals'])
+ data['_sync_mode'] = None
+ obj.__dict__.update(data)
return obj
@@ -203,6 +341,8 @@
lexed_lines = new_lines
+ suite_section_idx = 0
+
for fname, lineno, oline, tp, name, val in lexed_lines:
if tp == SECTION:
if curr_section is not None:
@@ -215,7 +355,8 @@
in_globals = True
else:
in_globals = False
- curr_section = FioJobConfig(name)
+ curr_section = FioJobConfig(name, idx=suite_section_idx)
+ suite_section_idx += 1
curr_section.vals = glob_vals.copy()
sections_count += 1
else:
@@ -332,68 +473,13 @@
params = sec.vals.copy()
params['UNIQ'] = 'UN{0}'.format(counter[0])
params['COUNTER'] = str(counter[0])
- params['TEST_SUMM'] = get_test_summary(sec)
+ params['TEST_SUMM'] = sec.summary
sec.name = sec.name.format(**params)
counter[0] += 1
return sec
-def get_test_sync_mode(sec: FioJobConfig) -> str:
- if isinstance(sec, dict):
- vals = sec
- else:
- vals = sec.vals
-
- is_sync = str(vals.get("sync", "0")) == "1"
- is_direct = str(vals.get("direct", "0")) == "1"
-
- if is_sync and is_direct:
- return 'x'
- elif is_sync:
- return 's'
- elif is_direct:
- return 'd'
- else:
- return 'a'
-
-
-def get_test_summary_tuple(sec: FioJobConfig, vm_count: int = None) -> TestSumm:
- if isinstance(sec, dict):
- vals = sec
- else:
- vals = sec.vals
-
- rw = {"randread": "rr",
- "randwrite": "rw",
- "read": "sr",
- "write": "sw",
- "randrw": "rm",
- "rw": "sm",
- "readwrite": "sm"}[vals["rw"]]
-
- sync_mode = get_test_sync_mode(sec)
-
- return TestSumm(rw,
- sync_mode,
- vals['blocksize'],
- vals.get('iodepth', '1'),
- vm_count)
-
-
-def get_test_summary(sec: FioJobConfig, vm_count: int = None, noiodepth: bool = False) -> str:
- tpl = get_test_summary_tuple(sec, vm_count)
-
- res = "{0.oper}{0.mode}{0.bsize}".format(tpl)
- if not noiodepth:
- res += "qd{}".format(tpl.iodepth)
-
- if tpl.vm_count is not None:
- res += "vm{}".format(tpl.vm_count)
-
- return res
-
-
def execution_time(sec: FioJobConfig) -> int:
return sec.vals.get('ramp_time', 0) + sec.vals.get('runtime', 0)
@@ -402,23 +488,18 @@
return fio_config_parse(fio_config_lexer(source, fname))
-FM_FUNC_INPUT = TypeVar("FM_FUNC_INPUT")
-FM_FUNC_RES = TypeVar("FM_FUNC_RES")
-
-
-def flatmap(func: Callable[[FM_FUNC_INPUT], Iterable[FM_FUNC_RES]],
- inp_iter: Iterable[FM_FUNC_INPUT]) -> Iterator[FM_FUNC_RES]:
- for val in inp_iter:
- for res in func(val):
- yield res
-
-
-def get_log_files(sec: FioJobConfig) -> List[Tuple[str, str]]:
+def get_log_files(sec: FioJobConfig, iops: bool = False) -> List[Tuple[str, str]]:
res = [] # type: List[Tuple[str, str]]
- for key, name in (('write_iops_log', 'iops'), ('write_bw_log', 'bw'), ('write_hist_log', 'lat')):
+
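+ # defaults_qd.cfg no longer enables write_iops_log, so iops logs are collected only when explicitly requested.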
+ keys = [('write_bw_log', 'bw'), ('write_hist_log', 'lat')]
+ if iops:
+ keys.append(('write_iops_log', 'iops'))
+
+ for key, name in keys:
log = sec.vals.get(key)
if log is not None:
res.append((name, log))
+
return res
@@ -427,7 +508,6 @@
it = (apply_params(sec, test_params) for sec in it)
it = flatmap(process_cycles, it)
for sec in map(final_process, it):
- sec.summary = get_test_summary(sec)
yield sec
diff --git a/wally/suits/io/one_step.cfg b/wally/suits/io/one_step.cfg
new file mode 100644
index 0000000..3e08c8b
--- /dev/null
+++ b/wally/suits/io/one_step.cfg
@@ -0,0 +1,9 @@
+[global]
+include defaults_qd.cfg
+ramp_time=0
+runtime={RUNTIME}
+
+[test_{TEST_SUMM}]
+blocksize=60k
+rw=randread
+iodepth=1
\ No newline at end of file
diff --git a/wally/suits/io/rpc_plugin.py b/wally/suits/io/rpc_plugin.py
index 98e55f0..5f5cfb5 100644
--- a/wally/suits/io/rpc_plugin.py
+++ b/wally/suits/io/rpc_plugin.py
@@ -13,6 +13,7 @@
logger = logging.getLogger("agent.fio")
+# TODO: fix this for the case when the file is a block device
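+# Returns True only if the file exists, is large enough, and randomly sampled 1KiB blocks contain non-zero data.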
def check_file_prefilled(path, used_size_mb):
used_size = used_size_mb * 1024 ** 2
blocks_to_check = 16
@@ -20,9 +21,9 @@
try:
fstats = os.stat(path)
if stat.S_ISREG(fstats.st_mode) and fstats.st_size < used_size:
- return True
+ return False
except EnvironmentError:
- return True
+ return False
offsets = [random.randrange(used_size - 1024) for _ in range(blocks_to_check)]
offsets.append(used_size - 1024)
@@ -32,15 +33,15 @@
for offset in offsets:
fd.seek(offset)
if b"\x00" * 1024 == fd.read(1024):
- return True
+ return False
- return False
+ return True
def rpc_fill_file(fname, size, force=False, fio_path='fio'):
if not force:
- if not check_file_prefilled(fname, size):
- return
+ if check_file_prefilled(fname, size):
+ return False, None
assert size % 4 == 0, "File size must be proportional to 4M"
@@ -50,7 +51,9 @@
subprocess.check_output(cmd_templ.format(fio_path, fname, size), shell=True)
run_time = time.time() - run_time
- return None if run_time < 1.0 else int(size / run_time)
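+ # A sub-second fill is too short to time reliably, so no bandwidth is reported for it.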
+ prefill_bw = None if run_time < 1.0 else int(size / run_time)
+
+ return True, prefill_bw
def rpc_install(name, binary):
diff --git a/wally/suits/io/rrd.cfg b/wally/suits/io/rrd.cfg
index ca3c613..ae2d960 100644
--- a/wally/suits/io/rrd.cfg
+++ b/wally/suits/io/rrd.cfg
@@ -1,7 +1,7 @@
[global]
include defaults_qd.cfg
ramp_time=0
-runtime=10
+runtime={RUNTIME}
[test_{TEST_SUMM}]
blocksize=60k
@@ -12,3 +12,23 @@
iodepth=16
blocksize=60k
rw=randread
+
+[test_{TEST_SUMM}]
+blocksize=60k
+rw=randwrite
+iodepth=1
+
+[test_{TEST_SUMM}]
+iodepth=16
+blocksize=60k
+rw=randwrite
+
+[test_{TEST_SUMM}]
+iodepth=1
+blocksize=1m
+rw=write
+
+[test_{TEST_SUMM}]
+iodepth=1
+blocksize=1m
+rw=read
diff --git a/wally/suits/io/rrd_qd_scan.cfg b/wally/suits/io/rrd_qd_scan.cfg
new file mode 100644
index 0000000..e0937c9
--- /dev/null
+++ b/wally/suits/io/rrd_qd_scan.cfg
@@ -0,0 +1,9 @@
+[global]
+include defaults_qd.cfg
+ramp_time=0
+runtime={RUNTIME}
+
+[test_{TEST_SUMM}]
+blocksize=4k
+rw=randread
+iodepth={QDS}
diff --git a/wally/suits/io/rrd_raw.cfg b/wally/suits/io/rrd_raw.cfg
new file mode 100644
index 0000000..2b0fc74
--- /dev/null
+++ b/wally/suits/io/rrd_raw.cfg
@@ -0,0 +1,21 @@
+[test]
+blocksize=4k
+rw=randread
+iodepth=1
+ramp_time=0
+runtime=120
+buffered=0
+direct=1
+sync=0
+ioengine=libaio
+group_reporting=1
+unified_rw_reporting=1
+norandommap=1
+numjobs=1
+thread=1
+time_based=1
+wait_for_previous=1
+per_job_logs=0
+randrepeat=0
+filename=/dev/sdb
+size=100G
\ No newline at end of file