haskell-te: 2a6fd7a832240a98dfb49e4a818d2f55f2fe8fac

from json         import loads, dumps
from os           import environ, getenv, getpgid, killpg, setsid
from parameters   import reps, sizes, timeout_secs
from signal       import SIGTERM
from subprocess32 import check_output, PIPE, Popen, TimeoutExpired
from sys          import exc_info
from tempfile     import mkdtemp
from threading    import Thread
from timeit       import default_timer

def time(f):
    '''Run function 'f' and return its result, and the time it took to run (in
    seconds).'''
    start  = default_timer()
    result = f()
    end    = default_timer()
    return (result, end - start)

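# Illustrative usage of 'time' (a sketch, not part of the original module):
#
#     >>> result, secs = time(lambda: 6 * 7)
#     >>> result
#     42
#     >>> secs >= 0
#     True
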
def pipe(cmd, stdin=None, timeout=None, env=None):
    '''Runs 'stdin' through 'cmd' and returns stdout, stderr and whether we
    timed out.'''

    # Extend the current environment, if requested
    extra_env = None
    if env:
        extra_env = environ.copy()
        extra_env.update(env)

    # setsid puts the subprocess in its own process group, rather than the group
    # containing this Python process
    proc = Popen(cmd, stdin=PIPE if stdin else None, stdout=PIPE, stderr=PIPE,
                 preexec_fn=setsid, env=extra_env)

    try:
        (stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': stdout, 'stderr': stderr, 'killed': False}
    except TimeoutExpired:
        # Kill the process group, which will include all children
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}

    proc.wait()  # Reaps zombies

    return result

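# Illustrative usage of 'pipe' (a sketch; 'tr' and 'sleep' are just example
# commands, not ones used by this module):
#
#     >>> out = pipe(['tr', 'a-z', 'A-Z'], stdin='hello')
#     >>> (out['stdout'], out['killed'])
#     ('HELLO', False)
#
#     >>> pipe(['sleep', '10'], timeout=1)['killed']
#     True
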
def timed_run(cmd, stdin, timeout=None, env=None):
    '''Run the given command+args, for at most 'timeout' seconds. Returns
    stdout, stderr, the wall-clock time taken and success/failure depending on
    whether it timed out.'''
    result, secs = time(lambda *_: pipe(cmd, stdin, timeout, env=env))

    return {
        'stdout'  : result['stdout'],
        'stderr'  : result['stderr'],
        'time'    : secs,
        'success' : not result['killed']
    }

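# Illustrative usage of 'timed_run' (a sketch; 'true' is just an example
# command):
#
#     >>> run = timed_run(['true'], '', timeout=10)
#     >>> (run['success'], run['stdout'])
#     (True, '')
#     >>> run['time'] < 10
#     True
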
def cached(cache, theory, rep, *path):
    '''Look up the given data from the cache. If this run failed, an exception
    is thrown (so we avoid looking up data that wasn't generated).'''
    result = cache[theory]['reps'][rep]
    if result['success']:
        for key in path:
            result = result[key]
        return result
    raise Exception('Repetition {0} of theory {1} failed'.format(rep, theory))

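# Illustrative usage of 'cached' (a sketch, using made-up data in the layout
# that 'generate_cache' below produces):
#
#     >>> fake = {'nat-simple': {'reps': {0: {'success': True, 'time': 1.2}}}}
#     >>> cached(fake, 'nat-simple', 0, 'time')
#     1.2
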
def sort(l):
    '''Built-in sort is in-place; this will return the sorted list too.'''
    l.sort()
    return l

def generate_cache(theories, f):
    '''Call the function 'f' for each repetition of each given theory, and
    return an accumulated dictionary of the results.'''
    cache = {}
    for theory in theories:
        cache[theory] = {'reps': {}}
        for rep in reps:
            data = {'rep': rep, 'timeout': timeout_secs}
            data.update(f(theory, rep))
            cache[theory]['reps'][rep] = data
    return cache

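# Illustrative usage of 'generate_cache' (a sketch; a real 'f' runs a theory
# exploration tool, here it just echoes its arguments):
#
#     >>> c = generate_cache(['nat-simple'], lambda t, r: {'theory': t})
#     >>> sorted(c['nat-simple']['reps'].keys()) == sorted(reps)
#     True
#     >>> c['nat-simple']['reps'][reps[0]]['theory']
#     'nat-simple'
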
def set_attributes(funcs, attrs):
    '''Set every attribute from the dictionary 'attrs' on each of the given
    functions.'''
    for f in funcs:
        for attr in attrs:
            setattr(f, attr, attrs[attr])

def tip_benchmarks():
    '''Build the dictionary of asv 'track_*' benchmark functions.'''
    benchmarks = {
        # Store the generated data in our results, so we can inspect it and
        # reproduce the executions/analysis.
        'track_data': lambda cache, _: cache,

        # Benchmark functions

        # All of the conjectures we wanted to find.
        'track_conjectures': lambda cache, rep, size:
            len(cached(cache, size, rep, 'wanted')),

        # All of the wanted conjectures which were equations. QuickSpec can only
        # find equations, so this is our theoretical maximum.
        'track_conjectured_equations': lambda cache, rep, size:
            sum(map(lambda c: len(c['equation']),
                    cached(cache, size, rep, 'wanted'))),

        # How many equations we found (in total).
        'track_equations': lambda cache, rep, size:
            len(loads(cached(cache, size, rep, 'stdout'))),

        # Proportion of found equations which were wanted.
        'track_precision': lambda cache, rep, size:
            cached(cache, size, rep, 'precision') or 0,

        # Proportion of wanted conjectures which were found.
        'track_recall': lambda cache, rep, size:
            cached(cache, size, rep, 'recall'),

        # Time taken to explore (excludes setup and analysis).
        'track_time': lambda cache, rep, size:
            cached(cache, size, rep, 'time')
    }

    # Tells asv how to run the benchmarks

    for name in benchmarks:
        benchmarks[name].func_name = name

    set_attributes(benchmarks.values(),
                   {
                       'repeat'      : 1,
                       'number'      : 1,
                       'params'      : reduce(lambda x, y: x + (y,),
                                              [reps, sizes],
                                              ()),
                       'param_names' : ['rep', 'size']
                   })

    # track_data isn't a "real" benchmark, so only do it once
    benchmarks['track_data'].repeat      = 1
    benchmarks['track_data'].number      = 1
    benchmarks['track_data'].params      = (["dummy"],)
    benchmarks['track_data'].param_names = ["dummy"]

    return benchmarks

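# The attributes above follow asv's parameterisation convention: 'params' is a
# tuple of value lists (one per axis) and 'param_names' labels those axes, so
# asv runs each 'track_*' function once per (rep, size) combination.  The
# reduce(...) call is just building that tuple; for example:
#
#     >>> reduce(lambda x, y: x + (y,), [[0, 1], [10, 20]], ())
#     ([0, 1], [10, 20])
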
def theories():
    '''The standalone theories we're benchmarking (nat-simple, etc.)'''
    return loads(getenv('qsStandalone')).keys()

tips = {
    'quickspecTip': loads(open(getenv('quickspecTip'), 'r').read())
}

def tip_cache(var_name):
    '''Running a TE tool is expensive, so we only want to run each sample once.
    By returning all of the results from setup_cache, each benchmark can pick
    out the values it cares about, without having to re-run anything.
    The returned value will appear as the first argument to each benchmark.'''
    def setup_cache():
        def gen(size, rep):
            cmds   = tips[var_name][str(size)][str(rep)]
            result = timed_run([cmds['runner']], '', timeout=timeout_secs)
            with open(cmds['sampleFile'], 'r') as sampleFile:
                result['sample'] = filter(None, sampleFile.read().split('\n'))

            # Run the analyser on the exploration output; if we timed out,
            # analyse an empty list of equations instead
            to_analyse = result['stdout'] if result['success'] else '[]'
            try:
                analysed = pipe([cmds['analyser']], to_analyse)['stdout']
                analysis = {'analysed': True}
            except:
                analysis = {'analysed':       False,
                            'analysis error': repr(exc_info())}

            if analysis['analysed']:
                try:
                    analysis = loads(analysed)
                    analysis['analysed'] = True
                except:
                    analysis = {'analysed':        False,
                                'analysis error':  repr(exc_info()),
                                'analysis stdout': analysed}
            return dict(result, **analysis)

        return generate_cache(sizes, gen)

    setup_cache.timeout = max(3600, timeout_secs * len(reps) * len(sizes))

    return setup_cache

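# Sketch of how an asv benchmark module might wire these together (whether the
# repository does exactly this is an assumption):
#
#     setup_cache = tip_cache('quickspecTip')
#     globals().update(tip_benchmarks())
#
# asv would then run setup_cache once, and pass the resulting dictionary as
# the 'cache' argument of every 'track_*' function.
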
def compose(f, g):
    '''Right-to-left function composition: compose(f, g)(x) == f(g(x)).'''
    return lambda x: f(g(x))
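
# Illustrative usage of 'compose' (a sketch):
#
#     >>> compose(len, str)(12345)
#     5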
