(root)/
Python-3.12.0/
Tools/
peg_generator/
scripts/
benchmark.py
#!/usr/bin/env python3
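"""Reproduce the various pegen benchmarks.

Depending on the chosen subcommand and target, the script parses (or parses
and compiles) either the synthetic ``data/xxl.py`` file or the standard
library, and reports wall-clock time and peak memory usage as measured with
``memory_profiler``.
"""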

import argparse
import ast
import os
import sys
from time import time

try:
    import memory_profiler
except ModuleNotFoundError:
    print(
        "Please run `make venv` to create a virtual environment and install"
        " all the dependencies, before running this script."
    )
    sys.exit(1)

# Make the current directory importable so that ``scripts.test_parse_directory``
# can be found; the script is expected to be run from Tools/peg_generator.
sys.path.insert(0, os.getcwd())
from scripts.test_parse_directory import parse_directory

argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)

subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser(
    "parse", help="Benchmark parsing and generating an ast.AST"
)


def benchmark(func):
    def wrapper(*args):
        # Time three runs of the function and report the average wall-clock time.
        times = list()
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        # memory_profiler samples memory usage while running the function one
        # more time; the maximum sample is the peak memory consumption.
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/3:.3f} seconds averaged over 3 runs")
        print(f"\tMemory: {max(memory)} MiB peak usage")
        return result

    return wrapper


@benchmark
def time_compile(source):
    return compile(source, "<string>", "exec")


@benchmark
def time_parse(source):
    return ast.parse(source)


def run_benchmark_xxl(subcommand, source):
    if subcommand == "compile":
        time_compile(source)
    elif subcommand == "parse":
        time_parse(source)


def run_benchmark_stdlib(subcommand):
    # parse_directory modes: 1 parses to an AST, 2 parses and compiles to
    # bytecode, matching the "parse" and "compile" subcommands.
    modes = {"compile": 2, "parse": 1}
    for _ in range(3):
        parse_directory(
            "../../Lib",  # the CPython Lib/ directory, relative to Tools/peg_generator
            verbose=False,
            excluded_files=[
                "*/bad*",
                "*/test/test_lib2to3/data/*",
            ],
            short=True,
            mode=modes[subcommand],
        )


def main():
    args = argparser.parse_args()
    subcommand = args.subcommand
    target = args.target

    if subcommand is None:
        argparser.error("A benchmark to run is required")

    if target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            source = f.read()
            run_benchmark_xxl(subcommand, source)
    elif target == "stdlib":
        run_benchmark_stdlib(subcommand)


if __name__ == "__main__":
    main()
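
# A minimal sketch of typical invocations, assuming the working directory is
# Tools/peg_generator and the virtual environment from ``make venv`` (which
# provides memory_profiler) is active:
#
#   python scripts/benchmark.py parse
#   python scripts/benchmark.py --target=stdlib compile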