THRIFT-3611 Add --regex filter to cross test runner
Client: Test
Patch: Nobuaki Sukegawa
This closes #843
diff --git a/test/README.md b/test/README.md
index 066b34f..0682f5d 100755
--- a/test/README.md
+++ b/test/README.md
@@ -31,6 +31,11 @@
test/test.py --server cpp,java --client nodejs
test/test.py --server nodejs --client cpp,java
+Another useful flag is --regex. For example, to run all tests that involve
+Java TBinaryProtocol:
+
+ test/test.py --regex "java.*binary"
+
## Test case definition file
The cross test cases are defined in [tests.json](tests.json).
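The pattern is matched with re.search against the generated test names, which follow the server-client_protocol_transport-socket format used by the runner, so a pattern can target any component and can be combined with the other filters. Two more examples (the protocol/transport combinations are only illustrative):

    test/test.py --regex "nodejs_binary"
    test/test.py --server cpp --regex "compact.*framed"

The first picks every pairing with a nodejs client speaking TBinaryProtocol; the second narrows cpp servers down to compact protocol over framed transport.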
diff --git a/test/crossrunner/collect.py b/test/crossrunner/collect.py
index e91ac0b..d7594cb 100644
--- a/test/crossrunner/collect.py
+++ b/test/crossrunner/collect.py
@@ -22,6 +22,7 @@
from itertools import product
from .util import merge_dict
+from .test import TestEntry
# Those keys are passed to execution as is.
# Note that there are keys other than these, namely:
@@ -144,12 +145,18 @@
}
-def collect_cross_tests(tests_dict, server_match, client_match):
+def _filter_entries(tests, regex):
+ if regex:
+ return filter(lambda t: re.search(regex, TestEntry.get_name(**t)), tests)
+ return tests
+
+
+def collect_cross_tests(tests_dict, server_match, client_match, regex):
sv, cl = _collect_testlibs(tests_dict, server_match, client_match)
- return list(_do_collect_tests(sv, cl))
+ return list(_filter_entries(_do_collect_tests(sv, cl), regex))
-def collect_feature_tests(tests_dict, features_dict, server_match, feature_match):
+def collect_feature_tests(tests_dict, features_dict, server_match, feature_match, regex):
sv, _ = _collect_testlibs(tests_dict, server_match)
ft = collect_features(features_dict, feature_match)
- return list(_do_collect_tests(sv, ft))
+ return list(_filter_entries(_do_collect_tests(sv, ft), regex))
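For illustration, the filtering logic boils down to the following standalone sketch; the inline entries and the name_of helper are stand-ins for the expanded tests.json configurations and TestEntry.get_name:

    import re

    def name_of(server, client, protocol, transport, socket, **kwargs):
        # same format as TestEntry.get_name: server-client_protocol_transport-socket
        return '%s-%s_%s_%s-%s' % (server, client, protocol, transport, socket)

    entries = [
        {'server': 'java', 'client': 'cpp', 'protocol': 'binary',
         'transport': 'buffered', 'socket': 'ip'},
        {'server': 'cpp', 'client': 'java', 'protocol': 'compact',
         'transport': 'framed', 'socket': 'ip'},
    ]

    # keep only the entries whose generated name matches the pattern
    matched = [e for e in entries if re.search('java.*binary', name_of(**e))]
    print([name_of(**e) for e in matched])   # ['java-cpp_binary_buffered-ip']

With no pattern given, _filter_entries simply returns the collected tests unchanged.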
diff --git a/test/crossrunner/test.py b/test/crossrunner/test.py
index dcc8a94..74fd916 100644
--- a/test/crossrunner/test.py
+++ b/test/crossrunner/test.py
@@ -124,8 +124,8 @@
return config
@classmethod
- def get_name(cls, server, client, proto, trans, sock, *args):
- return '%s-%s_%s_%s-%s' % (server, client, proto, trans, sock)
+ def get_name(cls, server, client, protocol, transport, socket, *args, **kwargs):
+ return '%s-%s_%s_%s-%s' % (server, client, protocol, transport, socket)
@property
def name(self):
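The widened signature matters because _filter_entries calls get_name(**t) with the whole test dictionary: the keys there are spelled protocol, transport and socket, and any additional keys have to be absorbed by **kwargs. Roughly (the extra keys below are only hypothetical examples):

    from crossrunner.test import TestEntry  # when run from the test/ directory

    entry = {'server': 'java', 'client': 'cpp', 'protocol': 'binary',
             'transport': 'buffered', 'socket': 'ip',
             'delay': 5, 'timeout': 6}       # extras swallowed by **kwargs
    print(TestEntry.get_name(**entry))       # java-cpp_binary_buffered-ip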
diff --git a/test/test.py b/test/test.py
index 42babeb..9305967 100755
--- a/test/test.py
+++ b/test/test.py
@@ -45,12 +45,12 @@
CONFIG_FILE = 'tests.json'
-def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count):
+def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count, regex):
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
j = json.load(fp)
- tests = crossrunner.collect_cross_tests(j, server_match, client_match)
+ tests = crossrunner.collect_cross_tests(j, server_match, client_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
@@ -74,7 +74,7 @@
return False
-def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, retry_count):
+def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, retry_count, regex):
basedir = path_join(ROOT_DIR, FEATURE_DIR_RELATIVE)
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
@@ -82,7 +82,7 @@
j = json.load(fp)
with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
j2 = json.load(fp)
- tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match)
+ tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
@@ -122,6 +122,7 @@
help='list of clients to test')
parser.add_argument('-F', '--features', nargs='*', default=None,
help='run server feature tests instead of cross language tests')
+ parser.add_argument('-R', '--regex', help='test name pattern to run')
parser.add_argument('-s', '--skip-known-failures', action='store_true', dest='skip_known_failures',
help='do not execute tests that are known to fail')
parser.add_argument('-r', '--retry-count', type=int,
@@ -160,9 +161,9 @@
options.update_failures, options.print_failures)
elif options.features is not None:
features = options.features or ['.*']
- res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count)
+ res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
else:
- res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count)
+ res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
return 0 if res else 1
if __name__ == '__main__':
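Since --regex defaults to None, the runner behaves exactly as before unless a pattern is supplied; _filter_entries only kicks in for a non-empty value. A minimal argparse sketch of just this flag:

    import argparse

    parser = argparse.ArgumentParser()
    # same option as added above; the default of None leaves filtering off
    parser.add_argument('-R', '--regex', help='test name pattern to run')

    print(parser.parse_args([]).regex)                           # None
    print(parser.parse_args(['--regex', 'java.*binary']).regex)  # java.*binary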