THRIFT-3516 Add feature test for THeader TBinaryProtocol

This closes #767
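
Example invocations with the new options (flag names as defined by the argparse
setup below; cpp and java are the language names already used in this script's
help text, and any feature definitions come from test/features/tests.json, which
is not part of this hunk):

  # run every feature test, skip known failures, four parallel jobs
  python test/test.py -F -s -j 4

  # restrict feature tests to selected servers
  python test/test.py --server cpp java -F

  # cross-language tests remain the default when -F is not given
  python test/test.py --server cpp --client java
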
diff --git a/test/test.py b/test/test.py
index 1176369..0c799b9 100755
--- a/test/test.py
+++ b/test/test.py
@@ -25,39 +25,102 @@
 # This script supports python 2.7 and later.
 # python 3.x is recommended for better stability.
 #
-# TODO: eliminate a few 2.7 occurrences to support 2.6 ?
-#
 
+from __future__ import print_function
+from itertools import chain
 import json
 import logging
 import multiprocessing
-import optparse
+import argparse
 import os
 import sys
 
 import crossrunner
+from crossrunner.compat import path_join
 
 TEST_DIR = os.path.realpath(os.path.dirname(__file__))
-CONFIG_PATH = os.path.join(TEST_DIR, 'tests.json')
+CONFIG_FILE = 'tests.json'
 
 
-def prepare(server_match, client_match):
-  with open(CONFIG_PATH, 'r') as fp:
-    j = json.load(fp)
-  return crossrunner.prepare(j, TEST_DIR, server_match, client_match)
-
-
-def run_tests(server_match, client_match, jobs, skip_known_failures):
+def run_tests(collect_func, basedir, server_match, client_match, jobs, skip):
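+  # Generic runner: 'collect_func' maps the parsed config under 'basedir' to the list
+  # of test cases, which are dispatched with up to 'jobs' concurrent executions.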
   logger = multiprocessing.get_logger()
   logger.debug('Collecting tests')
-  with open(CONFIG_PATH, 'r') as fp:
+  with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
     j = json.load(fp)
-  tests = list(crossrunner.collect_tests(j, server_match, client_match))
+  tests = collect_func(j, server_match, client_match)
+  if not tests:
+    print('No test found that matches the criteria', file=sys.stderr)
+    # print('  servers: %s' % server_match, file=sys.stderr)
+    # print('  clients: %s' % client_match, file=sys.stderr)
+    return False
+  if skip:
+    logger.debug('Skipping known failures')
+    known = crossrunner.load_known_failures(basedir)
+    tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))
+
+  dispatcher = crossrunner.TestDispatcher(TEST_DIR, basedir, jobs)
+  logger.debug('Executing %d tests' % len(tests))
+  try:
+    for r in [dispatcher.dispatch(test) for test in tests]:
+      r.wait()
+    logger.debug('Waiting for completion')
+    return dispatcher.wait()
+  except (KeyboardInterrupt, SystemExit):
+    logger.debug('Interrupted, shutting down')
+    dispatcher.terminate()
+    return False
+
+
+def run_cross_tests(server_match, client_match, jobs, skip_known_failures):
+  logger = multiprocessing.get_logger()
+  logger.debug('Collecting tests')
+  with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
+    j = json.load(fp)
+  tests = crossrunner.collect_cross_tests(j, server_match, client_match)
+  if not tests:
+    print('No test found that matches the criteria', file=sys.stderr)
+    print('  servers: %s' % server_match, file=sys.stderr)
+    print('  clients: %s' % client_match, file=sys.stderr)
+    return False
   if skip_known_failures:
+    logger.debug('Skipping known failures')
     known = crossrunner.load_known_failures(TEST_DIR)
     tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))
 
-  dispatcher = crossrunner.TestDispatcher(TEST_DIR, jobs)
+  dispatcher = crossrunner.TestDispatcher(TEST_DIR, TEST_DIR, jobs)
+  logger.debug('Executing %d tests' % len(tests))
+  try:
+    for r in [dispatcher.dispatch(test) for test in tests]:
+      r.wait()
+    logger.debug('Waiting for completion')
+    return dispatcher.wait()
+  except (KeyboardInterrupt, SystemExit):
+    logger.debug('Interrupted, shutting down')
+    dispatcher.terminate()
+    return False
+
+
+def run_feature_tests(server_match, feature_match, jobs, skip_known_failures):
+  basedir = path_join(TEST_DIR, 'features')
+  # TODO: reuse the generic run_tests() helper; collect_feature_tests needs both
+  # config files, so it does not fit the collect_func signature yet.
+  logger = multiprocessing.get_logger()
+  logger.debug('Collecting tests')
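+  # load both the top-level tests.json and the feature-specific one under test/features/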
+  with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
+    j = json.load(fp)
+  with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
+    j2 = json.load(fp)
+  tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match)
+  if not tests:
+    print('No test found that matches the criteria', file=sys.stderr)
+    print('  servers: %s' % server_match, file=sys.stderr)
+    print('  features: %s' % feature_match, file=sys.stderr)
+    return False
+  if skip_known_failures:
+    logger.debug('Skipping known failures')
+    known = crossrunner.load_known_failures(basedir)
+    tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))
+
+  dispatcher = crossrunner.TestDispatcher(TEST_DIR, basedir, jobs)
   logger.debug('Executing %d tests' % len(tests))
   try:
     for r in [dispatcher.dispatch(test) for test in tests]:
@@ -79,44 +142,47 @@
 
 
 def main(argv):
-  parser = optparse.OptionParser()
-  parser.add_option('--server', type='string', dest='servers', default='',
-                    help='list of servers to test separated by commas, eg:- --server=cpp,java')
-  parser.add_option('--client', type='string', dest='clients', default='',
-                    help='list of clients to test separated by commas, eg:- --client=cpp,java')
-  parser.add_option('-s', '--skip-known-failures', action='store_true', dest='skip_known_failures',
-                    help='do not execute tests that are known to fail')
-  parser.add_option('-j', '--jobs', type='int', dest='jobs',
-                    default=default_concurrenty(),
-                    help='number of concurrent test executions')
-  g = optparse.OptionGroup(parser, 'Advanced')
-  g.add_option('-v', '--verbose', action='store_const',
-               dest='log_level', const=logging.DEBUG, default=logging.WARNING,
-               help='show debug output for test runner')
-  g.add_option('-P', '--print-expected-failures', choices=['merge', 'overwrite'],
-               dest='print_failures', default=None,
-               help="generate expected failures based on last result and print to stdout")
-  g.add_option('-U', '--update-expected-failures', choices=['merge', 'overwrite'],
-               dest='update_failures', default=None,
-               help="generate expected failures based on last result and save to default file location")
-  g.add_option('--prepare', action='store_true',
-               dest='prepare',
-               help="try to prepare files needed for cross test (experimental)")
-  parser.add_option_group(g)
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--server', default=[], nargs='*',
+                      help='list of servers to test, e.g. --server cpp java (comma separated values also work)')
+  parser.add_argument('--client', default=[], nargs='*',
+                      help='list of clients to test, e.g. --client cpp java (comma separated values also work)')
+  parser.add_argument('-s', '--skip-known-failures', action='store_true', dest='skip_known_failures',
+                      help='do not execute tests that are known to fail')
+  parser.add_argument('-j', '--jobs', type=int,
+                      default=default_concurrenty(),
+                      help='number of concurrent test executions')
+  parser.add_argument('-F', '--features', nargs='*', default=None,
+                      help='run feature tests instead of cross-language tests; optional arguments select the features to run')
+
+  g = parser.add_argument_group(title='Advanced')
+  g.add_argument('-v', '--verbose', action='store_const',
+                 dest='log_level', const=logging.DEBUG, default=logging.WARNING,
+                 help='show debug output for test runner')
+  g.add_argument('-P', '--print-expected-failures', choices=['merge', 'overwrite'],
+                 dest='print_failures',
+                 help="generate expected failures based on last result and print to stdout")
+  g.add_argument('-U', '--update-expected-failures', choices=['merge', 'overwrite'],
+                 dest='update_failures',
+                 help="generate expected failures based on last result and save to default file location")
+  options = parser.parse_args(argv)
+
   logger = multiprocessing.log_to_stderr()
-  options, _ = parser.parse_args(argv)
-  server_match = options.servers.split(',') if options.servers else []
-  client_match = options.clients.split(',') if options.clients else []
   logger.setLevel(options.log_level)
 
-  if options.prepare:
-    res = prepare(server_match, client_match)
-  elif options.update_failures or options.print_failures:
+  # Allow multiple args separated with ',' for backward compatibility
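+  # e.g. '--server cpp,java' and '--server cpp java' both yield ['cpp', 'java']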
+  server_match = list(chain(*[x.split(',') for x in options.server]))
+  client_match = list(chain(*[x.split(',') for x in options.client]))
+
+  if options.update_failures or options.print_failures:
     res = crossrunner.generate_known_failures(
         TEST_DIR, options.update_failures == 'overwrite',
         options.update_failures, options.print_failures)
+  elif options.features is not None:
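+    # a bare '-F' (empty list) falls back to the match-all pattern '.*'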
+    features = options.features or ['.*']
+    res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures)
   else:
-    res = run_tests(server_match, client_match, options.jobs, options.skip_known_failures)
+    res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures)
   return 0 if res else 1
 
 if __name__ == '__main__':