python (3.12.0)

(root)/
lib/
python3.12/
test/
test_regrtest.py
       1  """
       2  Tests of regrtest.py.
       3  
       4  Note: test_regrtest cannot be run twice in parallel.
       5  """
       6  
       7  import contextlib
       8  import dataclasses
       9  import glob
      10  import io
      11  import locale
      12  import os.path
      13  import platform
      14  import re
      15  import subprocess
      16  import sys
      17  import sysconfig
      18  import tempfile
      19  import textwrap
      20  import unittest
      21  from test import libregrtest
      22  from test import support
      23  from test.support import os_helper, TestStats
      24  from test.libregrtest import utils, setup
      25  from test.libregrtest.runtest import normalize_test_name
      26  
if not support.has_subprocess_support:
    raise unittest.SkipTest("test module requires subprocess")

# Root of the Python source tree: two levels up from Lib/test/.
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
# Matches the "HH:MM:SS " (optionally "load avg: X.XX ") prefix that
# regrtest writes in front of each progress line.
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'

# Exit codes that the regrtest child processes are expected to return;
# the tests below compare subprocess return codes against these values.
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
EXITCODE_RERUN_FAIL = 5
EXITCODE_INTERRUPTED = 130

# Body of a generated test module that interrupts itself with SIGINT.
# NOTE(review): the ImportError fallback looks unreachable — if raise_signal
# were missing, the "from signal import ..." line above the try block would
# already have failed; confirm before relying on the os.kill() branch.
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT, raise_signal
    try:
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)
      48  
      49  
class ParseArgsTestCase(unittest.TestCase):
    """
    Test regrtest's argument parsing, function _parse_args().
    """

    def checkError(self, args, msg):
        # Helper: parsing *args* must abort with SystemExit and write a
        # message containing *msg* to stderr.
        with support.captured_stderr() as err, self.assertRaises(SystemExit):
            libregrtest._parse_args(args)
        self.assertIn(msg, err.getvalue())

    def test_help(self):
        for opt in '-h', '--help':
            with self.subTest(opt=opt):
                # --help prints usage to stdout and exits.
                with support.captured_stdout() as out, \
                     self.assertRaises(SystemExit):
                    libregrtest._parse_args([opt])
                self.assertIn('Run Python regression tests.', out.getvalue())

    def test_timeout(self):
        ns = libregrtest._parse_args(['--timeout', '4.2'])
        self.assertEqual(ns.timeout, 4.2)
        self.checkError(['--timeout'], 'expected one argument')
        self.checkError(['--timeout', 'foo'], 'invalid float value')

    def test_wait(self):
        ns = libregrtest._parse_args(['--wait'])
        self.assertTrue(ns.wait)

    def test_worker_args(self):
        # --worker-args is kept as an opaque string; it is not parsed here.
        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
        self.assertEqual(ns.worker_args, '[[], {}]')
        self.checkError(['--worker-args'], 'expected one argument')

    def test_start(self):
        for opt in '-S', '--start':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.start, 'foo')
                self.checkError([opt], 'expected one argument')

    def test_verbose(self):
        # -v is cumulative: each repetition increments ns.verbose.
        ns = libregrtest._parse_args(['-v'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['-vvv'])
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args(['--verbose'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['--verbose'] * 3)
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args([])
        self.assertEqual(ns.verbose, 0)

    def test_rerun(self):
        for opt in '-w', '--rerun', '--verbose2':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.rerun)

    def test_verbose3(self):
        for opt in '-W', '--verbose3':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose3)

    def test_quiet(self):
        for opt in '-q', '--quiet':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.quiet)
                self.assertEqual(ns.verbose, 0)

    def test_slowest(self):
        for opt in '-o', '--slowest':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.print_slow)

    def test_header(self):
        ns = libregrtest._parse_args(['--header'])
        self.assertTrue(ns.header)

        # --verbose implies the header.
        ns = libregrtest._parse_args(['--verbose'])
        self.assertTrue(ns.header)

    def test_randomize(self):
        for opt in '-r', '--randomize':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.randomize)

    def test_randseed(self):
        # --randseed implies --randomize.
        ns = libregrtest._parse_args(['--randseed', '12345'])
        self.assertEqual(ns.random_seed, 12345)
        self.assertTrue(ns.randomize)
        self.checkError(['--randseed'], 'expected one argument')
        self.checkError(['--randseed', 'foo'], 'invalid int value')

    def test_fromfile(self):
        for opt in '-f', '--fromfile':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.fromfile, 'foo')
                self.checkError([opt], 'expected one argument')
                # --fromfile is incompatible with --single.
                self.checkError([opt, 'foo', '-s'], "don't go together")

    def test_exclude(self):
        for opt in '-x', '--exclude':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.exclude)

    def test_single(self):
        for opt in '-s', '--single':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.single)
                # --single is incompatible with --fromfile.
                self.checkError([opt, '-f', 'foo'], "don't go together")

    def test_ignore(self):
        for opt in '-i', '--ignore':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.ignore_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        # --ignorefile reads one ignore pattern per line from a file.
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--ignorefile', filename])
        self.assertEqual(ns.ignore_tests,
                         ['matchfile1', 'matchfile2'])

    def test_match(self):
        for opt in '-m', '--match':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.match_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        # Repeated -m options accumulate.
        ns = libregrtest._parse_args(['-m', 'pattern1',
                                      '-m', 'pattern2'])
        self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])

        # --matchfile patterns (one per line) are appended after -m patterns.
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--matchfile', filename])
        self.assertEqual(ns.match_tests,
                         ['match', 'matchfile1', 'matchfile2'])

    def test_failfast(self):
        # -G/--failfast is only valid together with -v or -W.
        for opt in '-G', '--failfast':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '-v'])
                self.assertTrue(ns.failfast)
                ns = libregrtest._parse_args([opt, '-W'])
                self.assertTrue(ns.failfast)
                self.checkError([opt], '-G/--failfast needs either -v or -W')

    def test_use(self):
        for opt in '-u', '--use':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'gui,network'])
                self.assertEqual(ns.use_resources, ['gui', 'network'])

                # 'none' clears the resources listed before it.
                ns = libregrtest._parse_args([opt, 'gui,none,network'])
                self.assertEqual(ns.use_resources, ['network'])

                # '-gui' removes a resource from 'all'.
                expected = list(libregrtest.ALL_RESOURCES)
                expected.remove('gui')
                ns = libregrtest._parse_args([opt, 'all,-gui'])
                self.assertEqual(ns.use_resources, expected)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid resource')

                # all + a resource not part of "all"
                ns = libregrtest._parse_args([opt, 'all,tzdata'])
                self.assertEqual(ns.use_resources,
                                 list(libregrtest.ALL_RESOURCES) + ['tzdata'])

                # test another resource which is not part of "all"
                ns = libregrtest._parse_args([opt, 'extralargefile'])
                self.assertEqual(ns.use_resources, ['extralargefile'])

    def test_memlimit(self):
        ns = None
        for opt in '-M', '--memlimit':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '4G'])
                self.assertEqual(ns.memlimit, '4G')
                self.checkError([opt], 'expected one argument')

    def test_testdir(self):
        # A relative --testdir is resolved against the saved working dir.
        ns = libregrtest._parse_args(['--testdir', 'foo'])
        self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
        self.checkError(['--testdir'], 'expected one argument')

    def test_runleaks(self):
        for opt in '-L', '--runleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.runleaks)

    def test_huntrleaks(self):
        # Value format is "warmups:runs:filename"; defaults are
        # 5 warmup runs, 4 tracked runs, file 'reflog.txt'.
        for opt in '-R', '--huntrleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, ':'])
                self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:'])
                self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, ':3'])
                self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
                self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, '6'],
                                'needs 2 or 3 colon-separated arguments')
                self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
                self.checkError([opt, '6:foo'], 'invalid huntrleaks value')

    def test_multiprocess(self):
        for opt in '-j', '--multiprocess':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '2'])
                self.assertEqual(ns.use_mp, 2)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')
                # -j is incompatible with -T/--coverage.
                self.checkError([opt, '2', '-T'], "don't go together")
                self.checkError([opt, '0', '-T'], "don't go together")

    def test_coverage(self):
        for opt in '-T', '--coverage':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.trace)

    def test_coverdir(self):
        # A relative --coverdir is resolved against the saved working dir.
        for opt in '-D', '--coverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.coverdir,
                                 os.path.join(os_helper.SAVEDCWD, 'foo'))
                self.checkError([opt], 'expected one argument')

    def test_nocoverdir(self):
        for opt in '-N', '--nocoverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertIsNone(ns.coverdir)

    def test_threshold(self):
        for opt in '-t', '--threshold':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '1000'])
                self.assertEqual(ns.threshold, 1000)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')

    def test_nowindows(self):
        # -n still parses but emits a deprecation warning on stderr.
        for opt in '-n', '--nowindows':
            with self.subTest(opt=opt):
                with contextlib.redirect_stderr(io.StringIO()) as stderr:
                    ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.nowindows)
                err = stderr.getvalue()
                self.assertIn('the --nowindows (-n) option is deprecated', err)

    def test_forever(self):
        for opt in '-F', '--forever':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.forever)

    def test_unrecognized_argument(self):
        self.checkError(['--xxx'], 'usage:')

    def test_long_option__partial(self):
        # argparse accepts unambiguous option prefixes.
        ns = libregrtest._parse_args(['--qui'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)

    def test_two_options(self):
        ns = libregrtest._parse_args(['--quiet', '--exclude'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertTrue(ns.exclude)

    def test_option_with_empty_string_value(self):
        ns = libregrtest._parse_args(['--start', ''])
        self.assertEqual(ns.start, '')

    def test_arg(self):
        ns = libregrtest._parse_args(['foo'])
        self.assertEqual(ns.args, ['foo'])

    def test_option_and_arg(self):
        ns = libregrtest._parse_args(['--quiet', 'foo'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertEqual(ns.args, ['foo'])

    def test_arg_option_arg(self):
        # Positional arguments may be interleaved with options.
        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
        self.assertEqual(ns.verbose, 1)
        self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])

    def test_unknown_option(self):
        self.checkError(['--unknown-option'],
                        'unrecognized arguments: --unknown-option')
     366  
     367  
@dataclasses.dataclass(slots=True)
class Rerun:
    """Expected outcome of a regrtest re-run, consumed by
    check_executed_tests() to validate the re-run report lines."""
    # Name of the test that is re-run.
    name: str
    # Pattern shown in the "(matching: ...)" suffix, or None for no pattern.
    match: str | None
    # Whether the re-run is expected to succeed.
    success: bool
     373  
     374  
class BaseTestCase(unittest.TestCase):
    """Shared infrastructure: create throwaway test files, run regrtest in
    a subprocess, and parse/validate its textual report."""

    # Counter used to give each generated noop test a unique name; shared
    # across instances, which is why test_regrtest cannot run in parallel.
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        # Directory containing this file (Lib/test).
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        # Fresh temporary directory for the generated test files.
        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(os_helper.rmtree, self.tmptestdir)

    def create_test(self, name=None, code=None):
        """Write a test module into tmptestdir and return its module name.

        Defaults: an auto-numbered name and a body with one empty test.
        """
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        if code is None:
            code = textwrap.dedent("""
                    import unittest

                    class Tests(unittest.TestCase):
                        def test_empty_test(self):
                            pass
                """)

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(os_helper.unlink, path)
        # Use 'x' mode to ensure that we do not override existing tests
        try:
            with open(path, 'x', encoding='utf-8') as fp:
                fp.write(code)
        except PermissionError as exc:
            # Outside a source build the test directory may be read-only.
            if not sysconfig.is_python_build():
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        return name

    def regex_search(self, regex, output):
        """Return the first MULTILINE match of *regex* in *output*;
        fail the test if there is none."""
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex, full=False):
        """Assert that some line of *output* starts with *regex*
        (with full=True, the whole line must match)."""
        if full:
            regex += '\n'
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegex(output, regex)

    def parse_executed_tests(self, output):
        """Return the names of executed tests, in order, extracted from
        the "[ 1/10] test_name" progress lines."""
        regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % (LOG_PREFIX, self.TESTNAME_REGEX))
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             env_changed=(), omitted=(),
                             rerun=None, run_no_tests=(),
                             resource_denied=(),
                             randomize=False, interrupted=False,
                             fail_env_changed=False,
                             *, stats, forever=False, filtered=False):
        """Validate regrtest's full report in *output*: executed test list,
        per-category summary lines, totals and the final "Result:" line.

        *tests* and the category arguments accept a string or a sequence;
        *stats* accepts an int (tests run) or a TestStats; *rerun* is a
        Rerun instance describing one expected re-run.
        """
        # Normalize single-name strings into lists.
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(resource_denied, str):
            resource_denied = [resource_denied]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(env_changed, str):
            env_changed = [env_changed]
        if isinstance(omitted, str):
            omitted = [omitted]
        if isinstance(run_no_tests, str):
            run_no_tests = [run_no_tests]
        if isinstance(stats, int):
            stats = TestStats(stats)

        # A rerun implies that the rerun test failed on its first run.
        rerun_failed = []
        if rerun is not None:
            failed = [rerun.name]
            if not rerun.success:
                rerun_failed.append(rerun.name)

        executed = self.parse_executed_tests(output)
        total_tests = list(tests)
        if rerun is not None:
            total_tests.append(rerun.name)
        if randomize:
            # Order is unpredictable with -r; compare as sets.
            self.assertEqual(set(executed), set(total_tests), output)
        else:
            self.assertEqual(executed, total_tests, output)

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            # Build a regex for "<count> test(s) <category>:\n    <names>".
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if resource_denied:
            regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if env_changed:
            regex = list_regex('%s test%s altered the execution environment',
                               env_changed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        if rerun is not None:
            regex = list_regex('%s re-run test%s', [rerun.name])
            self.check_line(output, regex)
            regex = LOG_PREFIX + fr"Re-running 1 failed tests in verbose mode"
            self.check_line(output, regex)
            regex = fr"Re-running {rerun.name} in verbose mode"
            if rerun.match:
                regex = fr"{regex} \(matching: {rerun.match}\)"
            self.check_line(output, regex)

        if run_no_tests:
            regex = list_regex('%s test%s run no tests', run_no_tests)
            self.check_line(output, regex)

        # Tests that passed = everything not in another category.
        good = (len(tests) - len(skipped) - len(resource_denied) - len(failed)
                - len(omitted) - len(env_changed) - len(run_no_tests))
        if good:
            regex = r'%s test%s OK\.' % (good, plural(good))
            if not skipped and not failed and (rerun is None or rerun.success) and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex, full=True)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

        # Total tests
        text = f'run={stats.tests_run:,}'
        if filtered:
            text = fr'{text} \(filtered\)'
        parts = [text]
        if stats.failures:
            parts.append(f'failures={stats.failures:,}')
        if stats.skipped:
            parts.append(f'skipped={stats.skipped:,}')
        line = fr'Total tests: {" ".join(parts)}'
        self.check_line(output, line, full=True)

        # Total test files
        run = len(total_tests) - len(resource_denied)
        if rerun is not None:
            total_failed = len(rerun_failed)
            total_rerun = 1
        else:
            total_failed = len(failed)
            total_rerun = 0
        if interrupted:
            run = 0
        text = f'run={run}'
        if not forever:
            text = f'{text}/{len(tests)}'
        if filtered:
            text = fr'{text} \(filtered\)'
        report = [text]
        for name, ntest in (
            ('failed', total_failed),
            ('env_changed', len(env_changed)),
            ('skipped', len(skipped)),
            ('resource_denied', len(resource_denied)),
            ('rerun', total_rerun),
            ('run_no_tests', len(run_no_tests)),
        ):
            if ntest:
                report.append(f'{name}={ntest}')
        line = fr'Total test files: {" ".join(report)}'
        self.check_line(output, line, full=True)

        # Result
        state = []
        if failed:
            state.append('FAILURE')
        elif fail_env_changed and env_changed:
            state.append('ENV CHANGED')
        if interrupted:
            state.append('INTERRUPTED')
        if not any((good, failed, interrupted, skipped,
                    env_changed, fail_env_changed)):
            state.append("NO TESTS RAN")
        elif not state:
            state.append('SUCCESS')
        state = ', '.join(state)
        if rerun is not None:
            new_state = 'SUCCESS' if rerun.success else 'FAILURE'
            state = 'FAILURE then ' + new_state
        self.check_line(output, f'Result: {state}', full=True)

    def parse_random_seed(self, output):
        """Extract and return the random seed announced in *output*."""
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

    def run_command(self, args, input=None, exitcode=0, **kw):
        """Run *args* as a subprocess and return the CompletedProcess;
        fail the test (with captured output) unless it exits with *exitcode*.

        stderr is merged into stdout unless the caller overrides it.
        """
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.STDOUT
        proc = subprocess.run(args,
                              universal_newlines=True,
                              input=input,
                              stdout=subprocess.PIPE,
                              **kw)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s, but exit code %s expected!\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, exitcode, proc.stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % proc.stderr)
            self.fail(msg)
        return proc

    def run_python(self, args, **kw):
        """Run this Python (-X faulthandler, isolated mode) with *args*
        and return its stdout."""
        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
        proc = self.run_command(args, **kw)
        return proc.stdout
     627  
     628  
     629  class ESC[4;38;5;81mCheckActualTests(ESC[4;38;5;149mBaseTestCase):
     630      def test_finds_expected_number_of_tests(self):
     631          """
     632          Check that regrtest appears to find the expected set of tests.
     633          """
     634          args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
     635          output = self.run_python(args)
     636          rough_number_of_tests_found = len(output.splitlines())
     637          actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
     638                                               'test*.py')
     639          rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
     640          # We're not trying to duplicate test finding logic in here,
     641          # just give a rough estimate of how many there should be and
     642          # be near that.  This is a regression test to prevent mishaps
     643          # such as https://bugs.python.org/issue37667 in the future.
     644          # If you need to change the values in here during some
     645          # mythical future test suite reorganization, don't go
     646          # overboard with logic and keep that goal in mind.
     647          self.assertGreater(rough_number_of_tests_found,
     648                             rough_counted_test_py_files*9//10,
     649                             msg='Unexpectedly low number of tests found in:\n'
     650                             f'{", ".join(output.splitlines())}')
     651  
     652  
     653  class ESC[4;38;5;81mProgramsTestCase(ESC[4;38;5;149mBaseTestCase):
     654      """
     655      Test various ways to run the Python test suite. Use options close
     656      to options used on the buildbot.
     657      """
     658  
     659      NTEST = 4
     660  
     661      def setUp(self):
     662          super().setUp()
     663  
     664          # Create NTEST tests doing nothing
     665          self.tests = [self.create_test() for index in range(self.NTEST)]
     666  
     667          self.python_args = ['-Wd', '-E', '-bb']
     668          self.regrtest_args = ['-uall', '-rwW',
     669                                '--testdir=%s' % self.tmptestdir]
     670          self.regrtest_args.extend(('--timeout', '3600', '-j4'))
     671          if sys.platform == 'win32':
     672              self.regrtest_args.append('-n')
     673  
     674      def check_output(self, output):
     675          self.parse_random_seed(output)
     676          self.check_executed_tests(output, self.tests,
     677                                    randomize=True, stats=len(self.tests))
     678  
     679      def run_tests(self, args):
     680          output = self.run_python(args)
     681          self.check_output(output)
     682  
     683      def test_script_regrtest(self):
     684          # Lib/test/regrtest.py
     685          script = os.path.join(self.testdir, 'regrtest.py')
     686  
     687          args = [*self.python_args, script, *self.regrtest_args, *self.tests]
     688          self.run_tests(args)
     689  
     690      def test_module_test(self):
     691          # -m test
     692          args = [*self.python_args, '-m', 'test',
     693                  *self.regrtest_args, *self.tests]
     694          self.run_tests(args)
     695  
     696      def test_module_regrtest(self):
     697          # -m test.regrtest
     698          args = [*self.python_args, '-m', 'test.regrtest',
     699                  *self.regrtest_args, *self.tests]
     700          self.run_tests(args)
     701  
     702      def test_module_autotest(self):
     703          # -m test.autotest
     704          args = [*self.python_args, '-m', 'test.autotest',
     705                  *self.regrtest_args, *self.tests]
     706          self.run_tests(args)
     707  
     708      def test_module_from_test_autotest(self):
     709          # from test import autotest
     710          code = 'from test import autotest'
     711          args = [*self.python_args, '-c', code,
     712                  *self.regrtest_args, *self.tests]
     713          self.run_tests(args)
     714  
     715      def test_script_autotest(self):
     716          # Lib/test/autotest.py
     717          script = os.path.join(self.testdir, 'autotest.py')
     718          args = [*self.python_args, script, *self.regrtest_args, *self.tests]
     719          self.run_tests(args)
     720  
     721      @unittest.skipUnless(sysconfig.is_python_build(),
     722                           'run_tests.py script is not installed')
     723      def test_tools_script_run_tests(self):
     724          # Tools/scripts/run_tests.py
     725          script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
     726          args = [script, *self.regrtest_args, *self.tests]
     727          self.run_tests(args)
     728  
     729      def run_batch(self, *args):
     730          proc = self.run_command(args)
     731          self.check_output(proc.stdout)
     732  
     733      @unittest.skipUnless(sysconfig.is_python_build(),
     734                           'test.bat script is not installed')
     735      @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
     736      def test_tools_buildbot_test(self):
     737          # Tools\buildbot\test.bat
     738          script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
     739          test_args = ['--testdir=%s' % self.tmptestdir]
     740          if platform.machine() == 'ARM64':
     741              test_args.append('-arm64') # ARM 64-bit build
     742          elif platform.machine() == 'ARM':
     743              test_args.append('-arm32')   # 32-bit ARM build
     744          elif platform.architecture()[0] == '64bit':
     745              test_args.append('-x64')   # 64-bit build
     746          if not support.Py_DEBUG:
     747              test_args.append('+d')     # Release build, use python.exe
     748          self.run_batch(script, *test_args, *self.tests)
     749  
     750      @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
     751      def test_pcbuild_rt(self):
     752          # PCbuild\rt.bat
     753          script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
     754          if not os.path.isfile(script):
     755              self.skipTest(f'File "{script}" does not exist')
     756          rt_args = ["-q"]             # Quick, don't run tests twice
     757          if platform.machine() == 'ARM64':
     758              rt_args.append('-arm64') # ARM 64-bit build
     759          elif platform.machine() == 'ARM':
     760              rt_args.append('-arm32')   # 32-bit ARM build
     761          elif platform.architecture()[0] == '64bit':
     762              rt_args.append('-x64')   # 64-bit build
     763          if support.Py_DEBUG:
     764              rt_args.append('-d')     # Debug build, use python_d.exe
     765          self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
     766  
     767  
     768  class ESC[4;38;5;81mArgsTestCase(ESC[4;38;5;149mBaseTestCase):
     769      """
     770      Test arguments of the Python test suite.
     771      """
     772  
     773      def run_tests(self, *testargs, **kw):
     774          cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
     775          return self.run_python(cmdargs, **kw)
     776  
     777      def test_success(self):
     778          code = textwrap.dedent("""
     779              import unittest
     780  
     781              class PassingTests(unittest.TestCase):
     782                  def test_test1(self):
     783                      pass
     784  
     785                  def test_test2(self):
     786                      pass
     787  
     788                  def test_test3(self):
     789                      pass
     790          """)
     791          tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)]
     792  
     793          output = self.run_tests(*tests)
     794          self.check_executed_tests(output, tests,
     795                                    stats=3 * len(tests))
     796  
     797      def test_skip(self):
     798          code = textwrap.dedent("""
     799              import unittest
     800              raise unittest.SkipTest("nope")
     801          """)
     802          test_ok = self.create_test('ok')
     803          test_skip = self.create_test('skip', code=code)
     804          tests = [test_ok, test_skip]
     805  
     806          output = self.run_tests(*tests)
     807          self.check_executed_tests(output, tests,
     808                                    skipped=[test_skip],
     809                                    stats=1)
     810  
     811      def test_failing_test(self):
     812          # test a failing test
     813          code = textwrap.dedent("""
     814              import unittest
     815  
     816              class FailingTest(unittest.TestCase):
     817                  def test_failing(self):
     818                      self.fail("bug")
     819          """)
     820          test_ok = self.create_test('ok')
     821          test_failing = self.create_test('failing', code=code)
     822          tests = [test_ok, test_failing]
     823  
     824          output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
     825          self.check_executed_tests(output, tests, failed=test_failing,
     826                                    stats=TestStats(2, 1))
     827  
     828      def test_resources(self):
     829          # test -u command line option
     830          tests = {}
     831          for resource in ('audio', 'network'):
     832              code = textwrap.dedent("""
     833                          from test import support; support.requires(%r)
     834                          import unittest
     835                          class PassingTest(unittest.TestCase):
     836                              def test_pass(self):
     837                                  pass
     838                      """ % resource)
     839  
     840              tests[resource] = self.create_test(resource, code)
     841          test_names = sorted(tests.values())
     842  
     843          # -u all: 2 resources enabled
     844          output = self.run_tests('-u', 'all', *test_names)
     845          self.check_executed_tests(output, test_names, stats=2)
     846  
     847          # -u audio: 1 resource enabled
     848          output = self.run_tests('-uaudio', *test_names)
     849          self.check_executed_tests(output, test_names,
     850                                    resource_denied=tests['network'],
     851                                    stats=1)
     852  
     853          # no option: 0 resources enabled
     854          output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN)
     855          self.check_executed_tests(output, test_names,
     856                                    resource_denied=test_names,
     857                                    stats=0)
     858  
     859      def test_random(self):
     860          # test -r and --randseed command line option
     861          code = textwrap.dedent("""
     862              import random
     863              print("TESTRANDOM: %s" % random.randint(1, 1000))
     864          """)
     865          test = self.create_test('random', code)
     866  
     867          # first run to get the output with the random seed
     868          output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN)
     869          randseed = self.parse_random_seed(output)
     870          match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
     871          test_random = int(match.group(1))
     872  
     873          # try to reproduce with the random seed
     874          output = self.run_tests('-r', '--randseed=%s' % randseed, test,
     875                                  exitcode=EXITCODE_NO_TESTS_RAN)
     876          randseed2 = self.parse_random_seed(output)
     877          self.assertEqual(randseed2, randseed)
     878  
     879          match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
     880          test_random2 = int(match.group(1))
     881          self.assertEqual(test_random2, test_random)
     882  
    def test_fromfile(self):
        """Check --fromfile: the tests to run are read from a file, one per
        line, in any of the line formats that regrtest itself prints."""
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
        with open(filename, "w") as fp:
            previous = None
            for index, name in enumerate(tests, 1):
                line = ("00:00:%02i [%s/%s] %s"
                        % (index, index, len(tests), name))
                if previous:
                    # only lines after the first carry the trailing
                    # "-- <previous test> took N sec" suffix
                    line += " -- %s took 0 sec" % previous
                print(line, file=fp)
                previous = name

        output = self.run_tests('--fromfile', filename)
        stats = len(tests)
        self.check_executed_tests(output, tests, stats=stats)

        # test format '[2/7] test_opcodes'
        with open(filename, "w") as fp:
            for index, name in enumerate(tests, 1):
                print("[%s/%s] %s" % (index, len(tests), name), file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)

        # test format 'test_opcodes'
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)

        # test format 'Lib/test/test_opcodes.py'
        with open(filename, "w") as fp:
            for name in tests:
                print('Lib/test/%s.py' % name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)
     931  
     932      def test_interrupted(self):
     933          code = TEST_INTERRUPTED
     934          test = self.create_test('sigint', code=code)
     935          output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
     936          self.check_executed_tests(output, test, omitted=test,
     937                                    interrupted=True, stats=0)
     938  
     939      def test_slowest(self):
     940          # test --slowest
     941          tests = [self.create_test() for index in range(3)]
     942          output = self.run_tests("--slowest", *tests)
     943          self.check_executed_tests(output, tests, stats=len(tests))
     944          regex = ('10 slowest tests:\n'
     945                   '(?:- %s: .*\n){%s}'
     946                   % (self.TESTNAME_REGEX, len(tests)))
     947          self.check_line(output, regex)
     948  
     949      def test_slowest_interrupted(self):
     950          # Issue #25373: test --slowest with an interrupted test
     951          code = TEST_INTERRUPTED
     952          test = self.create_test("sigint", code=code)
     953  
     954          for multiprocessing in (False, True):
     955              with self.subTest(multiprocessing=multiprocessing):
     956                  if multiprocessing:
     957                      args = ("--slowest", "-j2", test)
     958                  else:
     959                      args = ("--slowest", test)
     960                  output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
     961                  self.check_executed_tests(output, test,
     962                                            omitted=test, interrupted=True,
     963                                            stats=0)
     964  
     965                  regex = ('10 slowest tests:\n')
     966                  self.check_line(output, regex)
     967  
     968      def test_coverage(self):
     969          # test --coverage
     970          test = self.create_test('coverage')
     971          output = self.run_tests("--coverage", test)
     972          self.check_executed_tests(output, [test], stats=1)
     973          regex = (r'lines +cov% +module +\(path\)\n'
     974                   r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
     975          self.check_line(output, regex)
     976  
     977      def test_wait(self):
     978          # test --wait
     979          test = self.create_test('wait')
     980          output = self.run_tests("--wait", test, input='key')
     981          self.check_line(output, 'Press any key to continue')
     982  
     983      def test_forever(self):
     984          # test --forever
     985          code = textwrap.dedent("""
     986              import builtins
     987              import unittest
     988  
     989              class ForeverTester(unittest.TestCase):
     990                  def test_run(self):
     991                      # Store the state in the builtins module, because the test
     992                      # module is reload at each run
     993                      if 'RUN' in builtins.__dict__:
     994                          builtins.__dict__['RUN'] += 1
     995                          if builtins.__dict__['RUN'] >= 3:
     996                              self.fail("fail at the 3rd runs")
     997                      else:
     998                          builtins.__dict__['RUN'] = 1
     999          """)
    1000          test = self.create_test('forever', code=code)
    1001  
    1002          # --forever
    1003          output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
    1004          self.check_executed_tests(output, [test]*3, failed=test,
    1005                                    stats=TestStats(3, 1),
    1006                                    forever=True)
    1007  
    1008          # --forever --rerun
    1009          output = self.run_tests('--forever', '--rerun', test, exitcode=0)
    1010          self.check_executed_tests(output, [test]*3,
    1011                                    rerun=Rerun(test,
    1012                                                match='test_run',
    1013                                                success=True),
    1014                                    stats=TestStats(4, 1),
    1015                                    forever=True)
    1016  
    def check_leak(self, code, what):
        """Run *code* under --huntrleaks and check that a leak of *what*
        ('references', 'file descriptors', ...) is reported on both the
        standard output and the reflog file."""
        test = self.create_test('huntrleaks', code=code)

        filename = 'reflog.txt'
        self.addCleanup(os_helper.unlink, filename)
        output = self.run_tests('--huntrleaks', '3:3:', test,
                                exitcode=EXITCODE_BAD_TEST,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [test], failed=test, stats=1)

        self.check_line(output,
                        re.escape('beginning 6 repetitions\n123456\n......\n'))

        leak_line = f'{test} leaked [1, 1, 1] {what}, sum=3\n'
        self.assertIn(leak_line, output)

        # The same message must have been appended to the reflog file.
        with open(filename) as fp:
            self.assertIn(leak_line, fp.read())
    1036  
    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
    def test_huntrleaks(self):
        # --huntrleaks must detect a reference leak (a list that grows
        # by one object per run).
        code = textwrap.dedent("""
            import unittest

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())
        """)
        self.check_leak(code, 'references')
    1050  
    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
    def test_huntrleaks_fd_leak(self):
        # --huntrleaks must also detect file descriptor leaks.
        code = textwrap.dedent("""
            import os
            import unittest

            class FDLeakTest(unittest.TestCase):
                def test_leak(self):
                    fd = os.open(__file__, os.O_RDONLY)
                    # bug: never close the file descriptor
        """)
        self.check_leak(code, 'file descriptors')
    1064  
    def test_list_tests(self):
        # --list-tests must print exactly the given test names, in order.
        tests = [self.create_test() for _ in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(), tests)
    1071  
    def test_list_cases(self):
        # test --list-cases
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Without a filter, both test cases are listed.
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(),
                         [f'{testname}.Tests.test_method1',
                          f'{testname}.Tests.test_method2'])

        # With "-m test_method1", only the matching case is listed.
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(),
                         [f'{testname}.Tests.test_method1'])
    1097  
    @support.cpython_only
    def test_crashed(self):
        # A test crashing the worker process (SIGSEGV) must be reported
        # as failed with EXITCODE_BAD_TEST.
        code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)

        output = self.run_tests("-j2", crash_test, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [crash_test], failed=crash_test,
                                  randomize=True, stats=0)
    1108  
    def parse_methods(self, output):
        """Return the names of test methods reported as passing ("ok")
        in verbose regrtest *output*, in order of appearance."""
        return re.findall("^(test[^ ]+).*ok$", output, flags=re.MULTILINE)
    1112  
    def test_ignorefile(self):
        """Check --ignorefile: test methods listed in the file (either by
        bare method name or by full dotted identifier) must be skipped,
        while the remaining methods still run."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Write the ignore list, one entry per line.
        # (Fix: removed the unused `all_methods` local; unlike
        # test_matchfile, this test never compared against it.)
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        ignored = [
            # only ignore the method name
            'test_method1',
            # ignore the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in ignored:
                print(name, file=fp)

        output = self.run_tests("-v", "--ignorefile", filename, testname)
        methods = self.parse_methods(output)
        # Only the methods NOT listed in the ignore file must have run.
        self.assertEqual(methods, ['test_method2', 'test_method4'])
    1148  
    def test_matchfile(self):
        # Check --matchfile: only the methods listed in the file run.
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        self.assertEqual(self.parse_methods(output), all_methods)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        wanted = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            fp.write('\n'.join(wanted) + '\n')

        output = self.run_tests("-v", "--matchfile", filename, testname)
        self.assertEqual(self.parse_methods(output),
                         ['test_method1', 'test_method3'])
    1189  
    def test_env_changed(self):
        # A test that alters the environment (creates a stray file) must
        # be flagged as "env changed".
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_env_changed(self):
                    open("env_changed", "w").close()
        """)
        testname = self.create_test(code=code)

        # Without --fail-env-changed, the run still exits successfully.
        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname],
                                  env_changed=testname, stats=1)

        # With --fail-env-changed, the run exits with EXITCODE_ENV_CHANGED.
        output = self.run_tests("--fail-env-changed", testname,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [testname], env_changed=testname,
                                  fail_env_changed=True, stats=1)
    1210  
    def test_rerun_fail(self):
        # FAILURE then FAILURE: the rerun also fails.
        test_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_always(self):
                    # test that always fails
                    self.fail("bug")
        """)
        testname = self.create_test(code=test_code)

        output = self.run_tests("--rerun", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              match="test_fail_always",
                                              success=False),
                                  stats=TestStats(3, 2))
    1232  
    def test_rerun_success(self):
        """Check --rerun when the retried test passes on the second run
        (FAILURE then SUCCESS), with and without --fail-rerun."""
        # FAILURE then SUCCESS
        marker_filename = os.path.abspath("regrtest_marker_filename")
        self.addCleanup(os_helper.unlink, marker_filename)
        self.assertFalse(os.path.exists(marker_filename))

        # The generated test fails only while the marker file is absent:
        # the first run creates it and fails, the rerun then succeeds.
        code = textwrap.dedent(f"""
            import os.path
            import unittest

            marker_filename = {marker_filename!r}

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_once(self):
                    if not os.path.exists(marker_filename):
                        open(marker_filename, "w").close()
                        self.fail("bug")
        """)
        testname = self.create_test(code=code)

        # FAILURE then SUCCESS => exit code 0
        output = self.run_tests("--rerun", testname, exitcode=0)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              match="test_fail_once",
                                              success=True),
                                  stats=TestStats(3, 1))
        os_helper.unlink(marker_filename)

        # with --fail-rerun, exit code EXITCODE_RERUN_FAIL
        # on "FAILURE then SUCCESS" state.
        output = self.run_tests("--rerun", "--fail-rerun", testname,
                                exitcode=EXITCODE_RERUN_FAIL)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              match="test_fail_once",
                                              success=True),
                                  stats=TestStats(3, 1))
        os_helper.unlink(marker_filename)
    1275  
    def test_rerun_setup_class_hook_failure(self):
        # FAILURE then FAILURE: setUpClass raises on both runs.
        failing_code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def setUpClass(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=failing_code)

        output = self.run_tests("--rerun", testname,
                                exitcode=EXITCODE_BAD_TEST)
        expected_rerun = Rerun(testname, match="ExampleTests", success=False)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=expected_rerun,
                                  stats=0)
    1298  
    def test_rerun_teardown_class_hook_failure(self):
        # FAILURE then FAILURE: tearDownClass raises on both runs.
        failing_code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def tearDownClass(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=failing_code)

        output = self.run_tests("--rerun", testname,
                                exitcode=EXITCODE_BAD_TEST)
        expected_rerun = Rerun(testname, match="ExampleTests", success=False)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=expected_rerun,
                                  stats=2)
    1321  
    def test_rerun_setup_module_hook_failure(self):
        # FAILURE then FAILURE: setUpModule raises on both runs.
        failing_code = textwrap.dedent("""
            import unittest

            def setUpModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        testname = self.create_test(code=failing_code)

        output = self.run_tests("--rerun", testname,
                                exitcode=EXITCODE_BAD_TEST)
        expected_rerun = Rerun(testname, match=None, success=False)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=expected_rerun,
                                  stats=0)
    1343  
    def test_rerun_teardown_module_hook_failure(self):
        # A failing tearDownModule() hook fails both runs; unlike the
        # setUpModule case the test body does execute each time (stats=2),
        # but there is still no method/class to match (match=None).
        code = textwrap.dedent("""
            import unittest

            def tearDownModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--rerun", test_name, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [test_name],
                                  stats=2,
                                  failed=[test_name],
                                  rerun=Rerun(test_name,
                                              success=False,
                                              match=None))
    1365  
    def test_rerun_setup_hook_failure(self):
        # A failing per-test setUp() hook fails the first run and the
        # rerun; the failing test method name is used for the rerun match.
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def setUp(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--rerun", test_name, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, test_name,
                                  stats=2,
                                  failed=[test_name],
                                  rerun=Rerun(test_name,
                                              success=False,
                                              match="test_success"))
    1387  
    def test_rerun_teardown_hook_failure(self):
        # A failing per-test tearDown() hook fails the first run and the
        # rerun; the test method name is used for the rerun match.
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def tearDown(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--rerun", test_name, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, test_name,
                                  stats=2,
                                  failed=[test_name],
                                  rerun=Rerun(test_name,
                                              success=False,
                                              match="test_success"))
    1409  
    def test_rerun_async_setup_hook_failure(self):
        # A failing asyncSetUp() hook on an IsolatedAsyncioTestCase fails
        # the first run and the rerun.
        # NOTE(review): unlike the sibling hook tests, no failed=[...] is
        # asserted here — presumably deliberate; confirm against
        # check_executed_tests() defaults.
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncSetUp(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--rerun", test_name, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, test_name,
                                  stats=2,
                                  rerun=Rerun(test_name,
                                              success=False,
                                              match="test_success"))
    1430  
    def test_rerun_async_teardown_hook_failure(self):
        # A failing asyncTearDown() hook on an IsolatedAsyncioTestCase
        # fails the first run and the rerun; matched by method name.
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncTearDown(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--rerun", test_name, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, test_name,
                                  stats=2,
                                  failed=[test_name],
                                  rerun=Rerun(test_name,
                                              success=False,
                                              match="test_success"))
    1452  
    def test_no_tests_ran(self):
        # Filtering with -m for a method name that matches nothing must
        # report the test as "run no tests" and exit with
        # EXITCODE_NO_TESTS_RAN.
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests(test_name, "-m", "nosuchtest",
                                exitcode=EXITCODE_NO_TESTS_RAN)
        self.check_executed_tests(output, [test_name],
                                  filtered=True, stats=0,
                                  run_no_tests=test_name)
    1468  
    def test_no_tests_ran_skip(self):
        # A test that skips itself still counts as having run: the exit
        # code is the default success and stats show one run, one skip.
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_skipped(self):
                    self.skipTest("because")
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests(test_name)
        self.check_executed_tests(output, [test_name],
                                  stats=TestStats(1, skipped=1))
    1482  
    def test_no_tests_ran_multiple_tests_nonexistent(self):
        # When the -m filter matches nothing in any of several test
        # files, every file is reported under "run no tests".
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        test_name = self.create_test(code=code)
        test_name2 = self.create_test(code=code)

        output = self.run_tests(test_name, test_name2, "-m", "nosuchtest",
                                exitcode=EXITCODE_NO_TESTS_RAN)
        self.check_executed_tests(output, [test_name, test_name2],
                                  filtered=True, stats=0,
                                  run_no_tests=[test_name, test_name2])
    1499  
    def test_no_test_ran_some_test_exist_some_not(self):
        # With several -m patterns, only files matching none of them end
        # up in "run no tests"; since at least one test ran, the overall
        # exit code is 0.
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        test_name = self.create_test(code=code)
        other_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_other_bug(self):
                    pass
        """)
        test_name2 = self.create_test(code=other_code)

        output = self.run_tests(test_name, test_name2, "-m", "nosuchtest",
                                "-m", "test_other_bug", exitcode=0)
        self.check_executed_tests(output, [test_name, test_name2],
                                  filtered=True, stats=1,
                                  run_no_tests=[test_name])
    1523  
    @support.cpython_only
    def test_uncollectable(self):
        # A test leaving behind an uncollectable reference cycle must be
        # reported as an environment change under --fail-env-changed.
        code = textwrap.dedent(r"""
            import _testcapi
            import gc
            import unittest

            @_testcapi.with_tp_del
            class Garbage:
                def __tp_del__(self):
                    pass

            class Tests(unittest.TestCase):
                def test_garbage(self):
                    # create an uncollectable object
                    obj = Garbage()
                    obj.ref_cycle = obj
                    obj = None
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", test_name,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [test_name],
                                  fail_env_changed=True, stats=1,
                                  env_changed=[test_name])
    1551  
    def test_multiprocessing_timeout(self):
        # A test sleeping far longer than --timeout must be killed and
        # reported as a timed-out failure by the multiprocessing (-jN)
        # runner.
        code = textwrap.dedent(r"""
            import time
            import unittest
            try:
                import faulthandler
            except ImportError:
                faulthandler = None

            class Tests(unittest.TestCase):
                # test hangs and so should be stopped by the timeout
                def test_sleep(self):
                    # we want to test regrtest multiprocessing timeout,
                    # not faulthandler timeout
                    if faulthandler is not None:
                        faulthandler.cancel_dump_traceback_later()

                    time.sleep(60 * 5)
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-j2", "--timeout=1.0", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  failed=testname, stats=0)
        # The test name is interpolated into a regex pattern: escape it
        # so it is always matched literally.
        self.assertRegex(output,
                         re.compile('%s timed out' % re.escape(testname),
                                    re.MULTILINE))
    1579  
    def test_unraisable_exc(self):
        # An unraisable exception (raised here from a weakref callback)
        # must be caught by --fail-env-changed and its traceback printed
        # even while sys.stderr is redirected.
        code = textwrap.dedent(r"""
            import unittest
            import weakref
            from test.support import captured_stderr

            class MyObject:
                pass

            def weakref_callback(obj):
                raise Exception("weakref callback bug")

            class Tests(unittest.TestCase):
                def test_unraisable_exc(self):
                    obj = MyObject()
                    ref = weakref.ref(obj, weakref_callback)
                    with captured_stderr() as stderr:
                        # call weakref_callback() which logs
                        # an unraisable exception
                        obj = None
                    self.assertEqual(stderr.getvalue(), '')
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", test_name,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [test_name],
                                  fail_env_changed=True, stats=1,
                                  env_changed=[test_name])
        self.assertIn("Warning -- Unraisable exception", output)
        self.assertIn("Exception: weakref callback bug", output)
    1614  
    def test_threading_excepthook(self):
        # An uncaught exception escaping a worker thread must be caught
        # by --fail-env-changed and displayed even while sys.stderr is
        # redirected.
        code = textwrap.dedent(r"""
            import threading
            import unittest
            from test.support import captured_stderr

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_threading_excepthook(self):
                    with captured_stderr() as stderr:
                        thread = threading.Thread(target=func_bug)
                        thread.start()
                        thread.join()
                    self.assertEqual(stderr.getvalue(), '')
        """)
        test_name = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", test_name,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [test_name],
                                  fail_env_changed=True, stats=1,
                                  env_changed=[test_name])
        self.assertIn("Warning -- Uncaught thread exception", output)
        self.assertIn("Exception: bug in thread", output)
    1647  
    def test_print_warning(self):
        # bpo-45410: stdout output and support.print_warning() messages
        # must come out in emission order under both -v and -W.
        code = textwrap.dedent(r"""
            import sys
            import unittest
            from test import support

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_print_warning(self):
                    print("msg1: stdout")
                    support.print_warning("msg2: print_warning")
                    # Fail with ENV CHANGED to see print_warning() log
                    support.environment_altered = True
        """)
        test_name = self.create_test(code=code)

        # Expected interleaving in the output:
        #   test_print_warning (...) ... msg1: stdout
        #   Warning -- msg2: print_warning
        #   ok
        regex = (r"test_print_warning.*msg1: stdout\n"
                 r"Warning -- msg2: print_warning\n"
                 r"ok\n")
        for option in ("-v", "-W"):
            with self.subTest(option=option):
                cmd = ["--fail-env-changed", option, test_name]
                output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
                self.check_executed_tests(output, [test_name],
                                          fail_env_changed=True, stats=1,
                                          env_changed=[test_name])
                self.assertRegex(output, regex)
    1688  
    def test_unicode_guard_env(self):
        # The regrtest setup exports a guard environment variable; if its
        # value is pure ASCII the user overrode it (e.g. to dodge Unicode
        # problems), so skip rather than fail.
        guard = os.environ.get(setup.UNICODE_GUARD_ENV)
        self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
        if guard.isascii():
            self.skipTest("Modified guard")
    1696  
    def test_cleanup(self):
        # "python -m test --cleanup" must remove leftover test_python_*
        # files and directories from the temporary directory.
        dirname = os.path.join(self.tmptestdir, "test_python_123")
        os.mkdir(dirname)
        filename = os.path.join(self.tmptestdir, "test_python_456")
        # Create an empty file; use a context manager instead of
        # open(...).close() so the handle is released deterministically.
        with open(filename, "wb"):
            pass
        names = [dirname, filename]

        cmdargs = ['-m', 'test',
                   '--tempdir=%s' % self.tmptestdir,
                   '--cleanup']
        self.run_python(cmdargs)

        for name in names:
            self.assertFalse(os.path.exists(name), name)
    1711  
    @unittest.skipIf(support.is_wasi,
                     'checking temp files is not implemented on WASI')
    def test_leak_tmp_file(self):
        # Each test that leaves a file behind in the temporary directory
        # must be flagged as an environment change, including under -jN.
        code = textwrap.dedent(r"""
            import os.path
            import tempfile
            import unittest

            class FileTests(unittest.TestCase):
                def test_leak_tmp_file(self):
                    filename = os.path.join(tempfile.gettempdir(), 'mytmpfile')
                    with open(filename, "wb") as fp:
                        fp.write(b'content')
        """)
        test_names = [self.create_test(code=code) for _ in range(3)]

        output = self.run_tests("--fail-env-changed", "-v", "-j2", *test_names,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, test_names,
                                  randomize=True,
                                  fail_env_changed=True,
                                  stats=len(test_names),
                                  env_changed=test_names)
        for test_name in test_names:
            self.assertIn(f"Warning -- {test_name} leaked temporary "
                          f"files (1): mytmpfile",
                          output)
    1739  
    def test_mp_decode_error(self):
        # gh-101634: If a worker stdout cannot be decoded, report a failed test
        # and a non-zero exit code.
        if sys.platform == 'win32':
            encoding = locale.getencoding()
        else:
            encoding = sys.stdout.encoding
            if encoding is None:
                encoding = sys.__stdout__.encoding
                if encoding is None:
                    # Plain string literal: the f-prefix here had no
                    # placeholders (flake8 F541), so it was dropped.
                    self.skipTest("cannot get regrtest worker encoding")

        # Bytes that must not be decodable with the worker's encoding;
        # otherwise this test cannot trigger the decode error.
        nonascii = b"byte:\xa0\xa9\xff\n"
        try:
            nonascii.decode(encoding)
        except UnicodeDecodeError:
            pass
        else:
            self.skipTest(f"{encoding} can decode non-ASCII bytes {nonascii!a}")

        code = textwrap.dedent(fr"""
            import sys
            # bytes which cannot be decoded from UTF-8
            nonascii = {nonascii!a}
            sys.stdout.buffer.write(nonascii)
            sys.stdout.buffer.flush()
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  failed=[testname],
                                  randomize=True,
                                  stats=0)
    1775  
    def test_doctest(self):
        # A doctest suite with one passing and two failing examples must
        # be reported as a failed test with matching statistics.
        # NOTE: the literal below contains no replacement fields, so the
        # redundant f-prefix (flake8 F541) was dropped; it stays raw.
        code = textwrap.dedent(r'''
            import doctest
            import sys
            from test import support

            def my_function():
                """
                Pass:

                >>> 1 + 1
                2

                Failure:

                >>> 2 + 3
                23
                >>> 1 + 1
                11

                Skipped test (ignored):

                >>> id(1.0)  # doctest: +SKIP
                7948648
                """

            def load_tests(loader, tests, pattern):
                tests.addTest(doctest.DocTestSuite())
                return tests
        ''')
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  failed=[testname],
                                  randomize=True,
                                  stats=TestStats(1, 1, 0))
    1814  
    1815  
    1816  class ESC[4;38;5;81mTestUtils(ESC[4;38;5;149munittestESC[4;38;5;149m.ESC[4;38;5;149mTestCase):
    1817      def test_format_duration(self):
    1818          self.assertEqual(utils.format_duration(0),
    1819                           '0 ms')
    1820          self.assertEqual(utils.format_duration(1e-9),
    1821                           '1 ms')
    1822          self.assertEqual(utils.format_duration(10e-3),
    1823                           '10 ms')
    1824          self.assertEqual(utils.format_duration(1.5),
    1825                           '1.5 sec')
    1826          self.assertEqual(utils.format_duration(1),
    1827                           '1.0 sec')
    1828          self.assertEqual(utils.format_duration(2 * 60),
    1829                           '2 min')
    1830          self.assertEqual(utils.format_duration(2 * 60 + 1),
    1831                           '2 min 1 sec')
    1832          self.assertEqual(utils.format_duration(3 * 3600),
    1833                           '3 hour')
    1834          self.assertEqual(utils.format_duration(3 * 3600  + 2 * 60 + 1),
    1835                           '3 hour 2 min')
    1836          self.assertEqual(utils.format_duration(3 * 3600 + 1),
    1837                           '3 hour 1 sec')
    1838  
    1839      def test_normalize_test_name(self):
    1840          normalize = normalize_test_name
    1841          self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'),
    1842                           'test_access')
    1843          self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True),
    1844                           'ChownFileTests')
    1845          self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True),
    1846                           'test_success')
    1847          self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
    1848          self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))
    1849  
    1850  
if __name__ == '__main__':
    # Run the regrtest self-tests when this module is executed directly.
    unittest.main()