Python-3.12.0/Tools/importbench/importbench.py
"""Benchmark some basic import use-cases.

This benchmark assumes it is run in a fresh interpreter, so no external
changes have been made to import-related attributes in sys.

"""
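# Typical invocations (the interpreter path and file names here are only
# illustrative; the flags are defined by the argument parser at the bottom of
# this file):
#
#   python Tools/importbench/importbench.py -w results.json
#   python Tools/importbench/importbench.py -r results.json
#
# Use -b to benchmark the built-in __import__ rather than importlib's, and
# --benchmark to run a single benchmark by name.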
from test.test_importlib import util
import decimal
from importlib.util import cache_from_source
import importlib
import importlib.machinery
import json
import os
import py_compile
import sys
import tabnanny
import timeit
import types


def bench(name, cleanup=lambda: None, *, seconds=1, repeat=3):
    """Benchmark importing *name*, yielding for each of *repeat* rounds the
    number of imports completed per second over roughly *seconds* seconds."""
    stmt = "__import__({!r})".format(name)
    timer = timeit.Timer(stmt)
    for _ in range(repeat):
        total_time = 0
        count = 0
        while total_time < seconds:
            try:
                total_time += timer.timeit(1)
            finally:
                cleanup()
            count += 1
        else:
            # One execution too far: the last run pushed total_time past the
            # budget, so don't count it.
            if total_time > seconds:
                count -= 1
        yield count // seconds

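# Each benchmark below is a generator that takes (seconds, repeat) and yields
# one imports-per-second figure per round; main() uses its docstring as the
# display name and as the key in the JSON results.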
def from_cache(seconds, repeat):
    """sys.modules"""
    name = '<benchmark import>'
    module = types.ModuleType(name)
    module.__file__ = '<test>'
    module.__package__ = ''
    with util.uncache(name):
        sys.modules[name] = module
        yield from bench(name, repeat=repeat, seconds=seconds)


def builtin_mod(seconds, repeat):
    """Built-in module"""
    name = 'errno'
    if name in sys.modules:
        del sys.modules[name]
    # Rely on the built-in importer already being on sys.meta_path.
    yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                     seconds=seconds)


def source_wo_bytecode(seconds, repeat):
    """Source w/o bytecode: small"""
    sys.dont_write_bytecode = True
    try:
        name = '__importlib_test_benchmark__'
        # Clears out sys.modules and puts an entry at the front of sys.path.
        with util.create_modules(name) as mapping:
            assert not os.path.exists(cache_from_source(mapping[name]))
            # Make sure a path-based finder and a source-only file finder are
            # in place; with dont_write_bytecode set, no bytecode is written.
            sys.meta_path.append(importlib.machinery.PathFinder)
            loader = (importlib.machinery.SourceFileLoader,
                      importlib.machinery.SOURCE_SUFFIXES)
            sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
            yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                             seconds=seconds)
    finally:
        sys.dont_write_bytecode = False


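# _wo_bytecode (like _writing_bytecode and _using_bytecode further below) is a
# factory: given an already-importable stdlib module such as tabnanny or
# decimal, it returns a benchmark whose docstring names that module.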
def _wo_bytecode(module):
    name = module.__name__
    def benchmark_wo_bytecode(seconds, repeat):
        """Source w/o bytecode: {}"""
        bytecode_path = cache_from_source(module.__file__)
        if os.path.exists(bytecode_path):
            os.unlink(bytecode_path)
        sys.dont_write_bytecode = True
        try:
            yield from bench(name, lambda: sys.modules.pop(name),
                             repeat=repeat, seconds=seconds)
        finally:
            sys.dont_write_bytecode = False

    benchmark_wo_bytecode.__doc__ = benchmark_wo_bytecode.__doc__.format(name)
    return benchmark_wo_bytecode

tabnanny_wo_bytecode = _wo_bytecode(tabnanny)
decimal_wo_bytecode = _wo_bytecode(decimal)


def source_writing_bytecode(seconds, repeat):
    """Source writing bytecode: small"""
    assert not sys.dont_write_bytecode
    name = '__importlib_test_benchmark__'
    with util.create_modules(name) as mapping:
        sys.meta_path.append(importlib.machinery.PathFinder)
        loader = (importlib.machinery.SourceFileLoader,
                  importlib.machinery.SOURCE_SUFFIXES)
        sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
        def cleanup():
            sys.modules.pop(name)
            os.unlink(cache_from_source(mapping[name]))
        for result in bench(name, cleanup, repeat=repeat, seconds=seconds):
            assert not os.path.exists(cache_from_source(mapping[name]))
            yield result


def _writing_bytecode(module):
    name = module.__name__
    def writing_bytecode_benchmark(seconds, repeat):
        """Source writing bytecode: {}"""
        assert not sys.dont_write_bytecode
        def cleanup():
            sys.modules.pop(name)
            os.unlink(cache_from_source(module.__file__))
        yield from bench(name, cleanup, repeat=repeat, seconds=seconds)

    writing_bytecode_benchmark.__doc__ = (
                                writing_bytecode_benchmark.__doc__.format(name))
    return writing_bytecode_benchmark

tabnanny_writing_bytecode = _writing_bytecode(tabnanny)
decimal_writing_bytecode = _writing_bytecode(decimal)


def source_using_bytecode(seconds, repeat):
    """Source w/ bytecode: small"""
    name = '__importlib_test_benchmark__'
    with util.create_modules(name) as mapping:
        sys.meta_path.append(importlib.machinery.PathFinder)
        loader = (importlib.machinery.SourceFileLoader,
                  importlib.machinery.SOURCE_SUFFIXES)
        sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
        py_compile.compile(mapping[name])
        assert os.path.exists(cache_from_source(mapping[name]))
        yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                         seconds=seconds)


def _using_bytecode(module):
    name = module.__name__
    def using_bytecode_benchmark(seconds, repeat):
        """Source w/ bytecode: {}"""
        py_compile.compile(module.__file__)
        yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                         seconds=seconds)

    using_bytecode_benchmark.__doc__ = (
                                using_bytecode_benchmark.__doc__.format(name))
    return using_bytecode_benchmark

tabnanny_using_bytecode = _using_bytecode(tabnanny)
decimal_using_bytecode = _using_bytecode(decimal)


def main(import_, options):
    if options.source_file:
        with options.source_file:
            prev_results = json.load(options.source_file)
    else:
        prev_results = {}
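    # Install the chosen __import__ so the "__import__(...)" statements timed
    # by bench() go through it.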
    __builtins__.__import__ = import_
    benchmarks = (from_cache, builtin_mod,
                  source_writing_bytecode,
                  source_wo_bytecode, source_using_bytecode,
                  tabnanny_writing_bytecode,
                  tabnanny_wo_bytecode, tabnanny_using_bytecode,
                  decimal_writing_bytecode,
                  decimal_wo_bytecode, decimal_using_bytecode,
                )
    if options.benchmark:
        for b in benchmarks:
            if b.__doc__ == options.benchmark:
                benchmarks = [b]
                break
        else:
            print('Unknown benchmark: {!r}'.format(options.benchmark),
                  file=sys.stderr)
            sys.exit(1)
    seconds = 1
    seconds_plural = 's' if seconds > 1 else ''
    repeat = 3
    header = ('Measuring imports/second over {} second{}, best out of {}\n'
              'Entire benchmark run should take about {} seconds\n'
              'Using {!r} as __import__\n')
    print(header.format(seconds, seconds_plural, repeat,
                        len(benchmarks) * seconds * repeat, __import__))
    new_results = {}
    for benchmark in benchmarks:
        print(benchmark.__doc__, "[", end=' ')
        sys.stdout.flush()
        results = []
        for result in benchmark(seconds=seconds, repeat=repeat):
            results.append(result)
            print(result, end=' ')
            sys.stdout.flush()
        assert not sys.dont_write_bytecode
        print("]", "best is", format(max(results), ',d'))
        new_results[benchmark.__doc__] = results
    if prev_results:
        print('\n\nComparing new vs. old\n')
        for benchmark in benchmarks:
            benchmark_name = benchmark.__doc__
            old_result = max(prev_results[benchmark_name])
            new_result = max(new_results[benchmark_name])
            result = '{:,d} vs. {:,d} ({:%})'.format(new_result,
                                                     old_result,
                                                     new_result / old_result)
            print(benchmark_name, ':', result)
    if options.dest_file:
        with options.dest_file:
            json.dump(new_results, options.dest_file, indent=2)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--builtin', dest='builtin', action='store_true',
                        default=False, help="use the built-in __import__")
    parser.add_argument('-r', '--read', dest='source_file',
                        type=argparse.FileType('r'),
                        help='file to read benchmark data from to compare '
                             'against')
    parser.add_argument('-w', '--write', dest='dest_file',
                        type=argparse.FileType('w'),
                        help='file to write benchmark data to')
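    # --benchmark is matched against a benchmark function's docstring,
    # e.g. "sys.modules" or "Source w/ bytecode: decimal".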
    parser.add_argument('--benchmark', dest='benchmark',
                        help='specific benchmark to run')
    options = parser.parse_args()
    import_ = __import__
    if not options.builtin:
        import_ = importlib.__import__

    main(import_, options)