(root)/
Python-3.11.7/
Lib/
test/
test_regrtest.py
       1  """
       2  Tests of regrtest.py.
       3  
       4  Note: test_regrtest cannot be run twice in parallel.
       5  """
       6  
       7  import contextlib
       8  import dataclasses
       9  import glob
      10  import io
      11  import locale
      12  import os.path
      13  import platform
      14  import random
      15  import re
      16  import shlex
      17  import signal
      18  import subprocess
      19  import sys
      20  import sysconfig
      21  import tempfile
      22  import textwrap
      23  import unittest
      24  from test import support
      25  from test.support import os_helper
      26  from test.libregrtest import cmdline
      27  from test.libregrtest import main
      28  from test.libregrtest import setup
      29  from test.libregrtest import utils
      30  from test.libregrtest.filter import set_match_tests, match_test
      31  from test.libregrtest.result import TestStats
      32  from test.libregrtest.utils import normalize_test_name
      33  
      34  if not support.has_subprocess_support:
      35      raise unittest.SkipTest("test module requires subprocess")
      36  
# Root of the Python source tree: two levels up from Lib/test/.
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
# Matches the "HH:MM:SS " (optionally "load avg: X.YZ ") prefix that
# regrtest writes in front of its log lines.
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'

# Process exit codes reported by regrtest for the various outcomes.
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
EXITCODE_RERUN_FAIL = 5
EXITCODE_INTERRUPTED = 130  # conventional 128 + SIGINT

# Body of a generated test that interrupts the test run with SIGINT,
# simulating the user pressing Ctrl+C.
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT, raise_signal
    try:
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)
      55  
      56  
      57  class ESC[4;38;5;81mParseArgsTestCase(ESC[4;38;5;149munittestESC[4;38;5;149m.ESC[4;38;5;149mTestCase):
      58      """
      59      Test regrtest's argument parsing, function _parse_args().
      60      """
      61  
      62      @staticmethod
      63      def parse_args(args):
      64          return cmdline._parse_args(args)
      65  
      66      def checkError(self, args, msg):
      67          with support.captured_stderr() as err, self.assertRaises(SystemExit):
      68              self.parse_args(args)
      69          self.assertIn(msg, err.getvalue())
      70  
      71      def test_help(self):
      72          for opt in '-h', '--help':
      73              with self.subTest(opt=opt):
      74                  with support.captured_stdout() as out, \
      75                       self.assertRaises(SystemExit):
      76                      self.parse_args([opt])
      77                  self.assertIn('Run Python regression tests.', out.getvalue())
      78  
      79      def test_timeout(self):
      80          ns = self.parse_args(['--timeout', '4.2'])
      81          self.assertEqual(ns.timeout, 4.2)
      82  
      83          # negative, zero and empty string are treated as "no timeout"
      84          for value in ('-1', '0', ''):
      85              with self.subTest(value=value):
      86                  ns = self.parse_args([f'--timeout={value}'])
      87                  self.assertEqual(ns.timeout, None)
      88  
      89          self.checkError(['--timeout'], 'expected one argument')
      90          self.checkError(['--timeout', 'foo'], 'invalid timeout value:')
      91  
      92      def test_wait(self):
      93          ns = self.parse_args(['--wait'])
      94          self.assertTrue(ns.wait)
      95  
      96      def test_start(self):
      97          for opt in '-S', '--start':
      98              with self.subTest(opt=opt):
      99                  ns = self.parse_args([opt, 'foo'])
     100                  self.assertEqual(ns.start, 'foo')
     101                  self.checkError([opt], 'expected one argument')
     102  
     103      def test_verbose(self):
     104          ns = self.parse_args(['-v'])
     105          self.assertEqual(ns.verbose, 1)
     106          ns = self.parse_args(['-vvv'])
     107          self.assertEqual(ns.verbose, 3)
     108          ns = self.parse_args(['--verbose'])
     109          self.assertEqual(ns.verbose, 1)
     110          ns = self.parse_args(['--verbose'] * 3)
     111          self.assertEqual(ns.verbose, 3)
     112          ns = self.parse_args([])
     113          self.assertEqual(ns.verbose, 0)
     114  
     115      def test_rerun(self):
     116          for opt in '-w', '--rerun', '--verbose2':
     117              with self.subTest(opt=opt):
     118                  ns = self.parse_args([opt])
     119                  self.assertTrue(ns.rerun)
     120  
     121      def test_verbose3(self):
     122          for opt in '-W', '--verbose3':
     123              with self.subTest(opt=opt):
     124                  ns = self.parse_args([opt])
     125                  self.assertTrue(ns.verbose3)
     126  
     127      def test_quiet(self):
     128          for opt in '-q', '--quiet':
     129              with self.subTest(opt=opt):
     130                  ns = self.parse_args([opt])
     131                  self.assertTrue(ns.quiet)
     132                  self.assertEqual(ns.verbose, 0)
     133  
     134      def test_slowest(self):
     135          for opt in '-o', '--slowest':
     136              with self.subTest(opt=opt):
     137                  ns = self.parse_args([opt])
     138                  self.assertTrue(ns.print_slow)
     139  
     140      def test_header(self):
     141          ns = self.parse_args(['--header'])
     142          self.assertTrue(ns.header)
     143  
     144          ns = self.parse_args(['--verbose'])
     145          self.assertTrue(ns.header)
     146  
     147      def test_randomize(self):
     148          for opt in ('-r', '--randomize'):
     149              with self.subTest(opt=opt):
     150                  ns = self.parse_args([opt])
     151                  self.assertTrue(ns.randomize)
     152  
     153          with os_helper.EnvironmentVarGuard() as env:
     154              # with SOURCE_DATE_EPOCH
     155              env['SOURCE_DATE_EPOCH'] = '1697839080'
     156              ns = self.parse_args(['--randomize'])
     157              regrtest = main.Regrtest(ns)
     158              self.assertFalse(regrtest.randomize)
     159              self.assertIsInstance(regrtest.random_seed, str)
     160              self.assertEqual(regrtest.random_seed, '1697839080')
     161  
     162              # without SOURCE_DATE_EPOCH
     163              del env['SOURCE_DATE_EPOCH']
     164              ns = self.parse_args(['--randomize'])
     165              regrtest = main.Regrtest(ns)
     166              self.assertTrue(regrtest.randomize)
     167              self.assertIsInstance(regrtest.random_seed, int)
     168  
     169      def test_randseed(self):
     170          ns = self.parse_args(['--randseed', '12345'])
     171          self.assertEqual(ns.random_seed, 12345)
     172          self.assertTrue(ns.randomize)
     173          self.checkError(['--randseed'], 'expected one argument')
     174          self.checkError(['--randseed', 'foo'], 'invalid int value')
     175  
     176      def test_fromfile(self):
     177          for opt in '-f', '--fromfile':
     178              with self.subTest(opt=opt):
     179                  ns = self.parse_args([opt, 'foo'])
     180                  self.assertEqual(ns.fromfile, 'foo')
     181                  self.checkError([opt], 'expected one argument')
     182                  self.checkError([opt, 'foo', '-s'], "don't go together")
     183  
     184      def test_exclude(self):
     185          for opt in '-x', '--exclude':
     186              with self.subTest(opt=opt):
     187                  ns = self.parse_args([opt])
     188                  self.assertTrue(ns.exclude)
     189  
     190      def test_single(self):
     191          for opt in '-s', '--single':
     192              with self.subTest(opt=opt):
     193                  ns = self.parse_args([opt])
     194                  self.assertTrue(ns.single)
     195                  self.checkError([opt, '-f', 'foo'], "don't go together")
     196  
     197      def test_match(self):
     198          for opt in '-m', '--match':
     199              with self.subTest(opt=opt):
     200                  ns = self.parse_args([opt, 'pattern'])
     201                  self.assertEqual(ns.match_tests, [('pattern', True)])
     202                  self.checkError([opt], 'expected one argument')
     203  
     204          for opt in '-i', '--ignore':
     205              with self.subTest(opt=opt):
     206                  ns = self.parse_args([opt, 'pattern'])
     207                  self.assertEqual(ns.match_tests, [('pattern', False)])
     208                  self.checkError([opt], 'expected one argument')
     209  
     210          ns = self.parse_args(['-m', 'pattern1', '-m', 'pattern2'])
     211          self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', True)])
     212  
     213          ns = self.parse_args(['-m', 'pattern1', '-i', 'pattern2'])
     214          self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', False)])
     215  
     216          ns = self.parse_args(['-i', 'pattern1', '-m', 'pattern2'])
     217          self.assertEqual(ns.match_tests, [('pattern1', False), ('pattern2', True)])
     218  
     219          self.addCleanup(os_helper.unlink, os_helper.TESTFN)
     220          with open(os_helper.TESTFN, "w") as fp:
     221              print('matchfile1', file=fp)
     222              print('matchfile2', file=fp)
     223  
     224          filename = os.path.abspath(os_helper.TESTFN)
     225          ns = self.parse_args(['-m', 'match', '--matchfile', filename])
     226          self.assertEqual(ns.match_tests,
     227                           [('match', True), ('matchfile1', True), ('matchfile2', True)])
     228  
     229          ns = self.parse_args(['-i', 'match', '--ignorefile', filename])
     230          self.assertEqual(ns.match_tests,
     231                           [('match', False), ('matchfile1', False), ('matchfile2', False)])
     232  
     233      def test_failfast(self):
     234          for opt in '-G', '--failfast':
     235              with self.subTest(opt=opt):
     236                  ns = self.parse_args([opt, '-v'])
     237                  self.assertTrue(ns.failfast)
     238                  ns = self.parse_args([opt, '-W'])
     239                  self.assertTrue(ns.failfast)
     240                  self.checkError([opt], '-G/--failfast needs either -v or -W')
     241  
     242      def test_use(self):
     243          for opt in '-u', '--use':
     244              with self.subTest(opt=opt):
     245                  ns = self.parse_args([opt, 'gui,network'])
     246                  self.assertEqual(ns.use_resources, ['gui', 'network'])
     247  
     248                  ns = self.parse_args([opt, 'gui,none,network'])
     249                  self.assertEqual(ns.use_resources, ['network'])
     250  
     251                  expected = list(cmdline.ALL_RESOURCES)
     252                  expected.remove('gui')
     253                  ns = self.parse_args([opt, 'all,-gui'])
     254                  self.assertEqual(ns.use_resources, expected)
     255                  self.checkError([opt], 'expected one argument')
     256                  self.checkError([opt, 'foo'], 'invalid resource')
     257  
     258                  # all + a resource not part of "all"
     259                  ns = self.parse_args([opt, 'all,tzdata'])
     260                  self.assertEqual(ns.use_resources,
     261                                   list(cmdline.ALL_RESOURCES) + ['tzdata'])
     262  
     263                  # test another resource which is not part of "all"
     264                  ns = self.parse_args([opt, 'extralargefile'])
     265                  self.assertEqual(ns.use_resources, ['extralargefile'])
     266  
     267      def test_memlimit(self):
     268          for opt in '-M', '--memlimit':
     269              with self.subTest(opt=opt):
     270                  ns = self.parse_args([opt, '4G'])
     271                  self.assertEqual(ns.memlimit, '4G')
     272                  self.checkError([opt], 'expected one argument')
     273  
     274      def test_testdir(self):
     275          ns = self.parse_args(['--testdir', 'foo'])
     276          self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
     277          self.checkError(['--testdir'], 'expected one argument')
     278  
     279      def test_runleaks(self):
     280          for opt in '-L', '--runleaks':
     281              with self.subTest(opt=opt):
     282                  ns = self.parse_args([opt])
     283                  self.assertTrue(ns.runleaks)
     284  
     285      def test_huntrleaks(self):
     286          for opt in '-R', '--huntrleaks':
     287              with self.subTest(opt=opt):
     288                  ns = self.parse_args([opt, ':'])
     289                  self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
     290                  ns = self.parse_args([opt, '6:'])
     291                  self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
     292                  ns = self.parse_args([opt, ':3'])
     293                  self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
     294                  ns = self.parse_args([opt, '6:3:leaks.log'])
     295                  self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
     296                  self.checkError([opt], 'expected one argument')
     297                  self.checkError([opt, '6'],
     298                                  'needs 2 or 3 colon-separated arguments')
     299                  self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
     300                  self.checkError([opt, '6:foo'], 'invalid huntrleaks value')
     301  
     302      def test_multiprocess(self):
     303          for opt in '-j', '--multiprocess':
     304              with self.subTest(opt=opt):
     305                  ns = self.parse_args([opt, '2'])
     306                  self.assertEqual(ns.use_mp, 2)
     307                  self.checkError([opt], 'expected one argument')
     308                  self.checkError([opt, 'foo'], 'invalid int value')
     309                  self.checkError([opt, '2', '-T'], "don't go together")
     310                  self.checkError([opt, '0', '-T'], "don't go together")
     311  
     312      def test_coverage(self):
     313          for opt in '-T', '--coverage':
     314              with self.subTest(opt=opt):
     315                  ns = self.parse_args([opt])
     316                  self.assertTrue(ns.trace)
     317  
     318      def test_coverdir(self):
     319          for opt in '-D', '--coverdir':
     320              with self.subTest(opt=opt):
     321                  ns = self.parse_args([opt, 'foo'])
     322                  self.assertEqual(ns.coverdir,
     323                                   os.path.join(os_helper.SAVEDCWD, 'foo'))
     324                  self.checkError([opt], 'expected one argument')
     325  
     326      def test_nocoverdir(self):
     327          for opt in '-N', '--nocoverdir':
     328              with self.subTest(opt=opt):
     329                  ns = self.parse_args([opt])
     330                  self.assertIsNone(ns.coverdir)
     331  
     332      def test_threshold(self):
     333          for opt in '-t', '--threshold':
     334              with self.subTest(opt=opt):
     335                  ns = self.parse_args([opt, '1000'])
     336                  self.assertEqual(ns.threshold, 1000)
     337                  self.checkError([opt], 'expected one argument')
     338                  self.checkError([opt, 'foo'], 'invalid int value')
     339  
     340      def test_nowindows(self):
     341          for opt in '-n', '--nowindows':
     342              with self.subTest(opt=opt):
     343                  with contextlib.redirect_stderr(io.StringIO()) as stderr:
     344                      ns = self.parse_args([opt])
     345                  self.assertTrue(ns.nowindows)
     346                  err = stderr.getvalue()
     347                  self.assertIn('the --nowindows (-n) option is deprecated', err)
     348  
     349      def test_forever(self):
     350          for opt in '-F', '--forever':
     351              with self.subTest(opt=opt):
     352                  ns = self.parse_args([opt])
     353                  self.assertTrue(ns.forever)
     354  
     355      def test_unrecognized_argument(self):
     356          self.checkError(['--xxx'], 'usage:')
     357  
     358      def test_long_option__partial(self):
     359          ns = self.parse_args(['--qui'])
     360          self.assertTrue(ns.quiet)
     361          self.assertEqual(ns.verbose, 0)
     362  
     363      def test_two_options(self):
     364          ns = self.parse_args(['--quiet', '--exclude'])
     365          self.assertTrue(ns.quiet)
     366          self.assertEqual(ns.verbose, 0)
     367          self.assertTrue(ns.exclude)
     368  
     369      def test_option_with_empty_string_value(self):
     370          ns = self.parse_args(['--start', ''])
     371          self.assertEqual(ns.start, '')
     372  
     373      def test_arg(self):
     374          ns = self.parse_args(['foo'])
     375          self.assertEqual(ns.args, ['foo'])
     376  
     377      def test_option_and_arg(self):
     378          ns = self.parse_args(['--quiet', 'foo'])
     379          self.assertTrue(ns.quiet)
     380          self.assertEqual(ns.verbose, 0)
     381          self.assertEqual(ns.args, ['foo'])
     382  
     383      def test_arg_option_arg(self):
     384          ns = self.parse_args(['test_unaryop', '-v', 'test_binop'])
     385          self.assertEqual(ns.verbose, 1)
     386          self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
     387  
     388      def test_unknown_option(self):
     389          self.checkError(['--unknown-option'],
     390                          'unrecognized arguments: --unknown-option')
     391  
     392      def check_ci_mode(self, args, use_resources, rerun=True):
     393          ns = cmdline._parse_args(args)
     394  
     395          # Check Regrtest attributes which are more reliable than Namespace
     396          # which has an unclear API
     397          with os_helper.EnvironmentVarGuard() as env:
     398              # Ignore SOURCE_DATE_EPOCH env var if it's set
     399              if 'SOURCE_DATE_EPOCH' in env:
     400                  del env['SOURCE_DATE_EPOCH']
     401  
     402              regrtest = main.Regrtest(ns)
     403  
     404          self.assertEqual(regrtest.num_workers, -1)
     405          self.assertEqual(regrtest.want_rerun, rerun)
     406          self.assertTrue(regrtest.randomize)
     407          self.assertIsInstance(regrtest.random_seed, int)
     408          self.assertTrue(regrtest.fail_env_changed)
     409          self.assertTrue(regrtest.fail_rerun)
     410          self.assertTrue(regrtest.print_slowest)
     411          self.assertTrue(regrtest.output_on_failure)
     412          self.assertEqual(sorted(regrtest.use_resources), sorted(use_resources))
     413          return regrtest
     414  
     415      def test_fast_ci(self):
     416          args = ['--fast-ci']
     417          use_resources = sorted(cmdline.ALL_RESOURCES)
     418          use_resources.remove('cpu')
     419          regrtest = self.check_ci_mode(args, use_resources)
     420          self.assertEqual(regrtest.timeout, 10 * 60)
     421  
     422      def test_fast_ci_python_cmd(self):
     423          args = ['--fast-ci', '--python', 'python -X dev']
     424          use_resources = sorted(cmdline.ALL_RESOURCES)
     425          use_resources.remove('cpu')
     426          regrtest = self.check_ci_mode(args, use_resources, rerun=False)
     427          self.assertEqual(regrtest.timeout, 10 * 60)
     428          self.assertEqual(regrtest.python_cmd, ('python', '-X', 'dev'))
     429  
     430      def test_fast_ci_resource(self):
     431          # it should be possible to override resources individually
     432          args = ['--fast-ci', '-u-network']
     433          use_resources = sorted(cmdline.ALL_RESOURCES)
     434          use_resources.remove('cpu')
     435          use_resources.remove('network')
     436          self.check_ci_mode(args, use_resources)
     437  
     438      def test_slow_ci(self):
     439          args = ['--slow-ci']
     440          use_resources = sorted(cmdline.ALL_RESOURCES)
     441          regrtest = self.check_ci_mode(args, use_resources)
     442          self.assertEqual(regrtest.timeout, 20 * 60)
     443  
     444      def test_dont_add_python_opts(self):
     445          args = ['--dont-add-python-opts']
     446          ns = cmdline._parse_args(args)
     447          self.assertFalse(ns._add_python_opts)
     448  
     449  
@dataclasses.dataclass(slots=True)
class Rerun:
    """Describe one expected test re-run in regrtest output."""
    # Name of the test module that is re-run.
    name: str
    # Pattern shown as "(matching: ...)" in the re-run log line, or None.
    match: str | None
    # True if the re-run is expected to succeed.
    success: bool
     455  
     456  
class BaseTestCase(unittest.TestCase):
    # Counter used to generate unique test module names; shared by all
    # instances via the class so repeated create_test() calls never collide.
    TEST_UNIQUE_ID = 1
    # Prefix of every generated test module name.
    TESTNAME_PREFIX = 'test_regrtest_'
    # Regex matching a test module name in regrtest output.
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        # Directory containing this test file.
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        # Fresh scratch directory for generated test modules,
        # removed automatically when the test finishes.
        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(os_helper.rmtree, self.tmptestdir)
     467  
    def create_test(self, name=None, code=None):
        """Write a test module into self.tmptestdir and return its name.

        If *name* is omitted, generate a unique one from the class-wide
        counter; if *code* is None, write a trivial test case containing a
        single empty test.  Skip the current test if the file cannot be
        written, except in a Python source build where that is an error.
        """
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            # Bump the shared counter so the next generated name differs.
            BaseTestCase.TEST_UNIQUE_ID += 1

        if code is None:
            code = textwrap.dedent("""
                    import unittest

                    class Tests(unittest.TestCase):
                        def test_empty_test(self):
                            pass
                """)

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(os_helper.unlink, path)
        # Use 'x' mode to ensure that we do not override existing tests
        try:
            with open(path, 'x', encoding='utf-8') as fp:
                fp.write(code)
        except PermissionError as exc:
            if not sysconfig.is_python_build():
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        return name
     497  
     498      def regex_search(self, regex, output):
     499          match = re.search(regex, output, re.MULTILINE)
     500          if not match:
     501              self.fail("%r not found in %r" % (regex, output))
     502          return match
     503  
     504      def check_line(self, output, pattern, full=False, regex=True):
     505          if not regex:
     506              pattern = re.escape(pattern)
     507          if full:
     508              pattern += '\n'
     509          regex = re.compile(r'^' + pattern, re.MULTILINE)
     510          self.assertRegex(output, regex)
     511  
     512      def parse_executed_tests(self, output):
     513          regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
     514                   % (LOG_PREFIX, self.TESTNAME_REGEX))
     515          parser = re.finditer(regex, output, re.MULTILINE)
     516          return list(match.group(1) for match in parser)
     517  
    def check_executed_tests(self, output, tests, *, stats,
                             skipped=(), failed=(),
                             env_changed=(), omitted=(),
                             rerun=None, run_no_tests=(),
                             resource_denied=(),
                             randomize=False, parallel=False, interrupted=False,
                             fail_env_changed=False,
                             forever=False, filtered=False):
        """Check regrtest *output* against the expected outcome.

        *tests* lists the tests expected to run; the keyword arguments
        describe the expected per-category results, the expected overall
        statistics (*stats*, an int or TestStats) and the run mode.
        Verifies the executed-test log lines, the per-category summary
        lists, the "Total tests" / "Total test files" lines and the final
        "Result:" line.
        """
        # Accept a single test name anywhere a list is expected.
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(resource_denied, str):
            resource_denied = [resource_denied]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(env_changed, str):
            env_changed = [env_changed]
        if isinstance(omitted, str):
            omitted = [omitted]
        if isinstance(run_no_tests, str):
            run_no_tests = [run_no_tests]
        if isinstance(stats, int):
            stats = TestStats(stats)
        # A parallel run implies a randomized execution order.
        if parallel:
            randomize = True

        # A re-run implies the first run failed on rerun.name; the re-run
        # itself only counts as failed when it did not succeed.
        rerun_failed = []
        if rerun is not None and not env_changed:
            failed = [rerun.name]
            if not rerun.success:
                rerun_failed.append(rerun.name)

        executed = self.parse_executed_tests(output)
        total_tests = list(tests)
        if rerun is not None:
            total_tests.append(rerun.name)
        # With a randomized order only the set of tests can be compared.
        if randomize:
            self.assertEqual(set(executed), set(total_tests), output)
        else:
            self.assertEqual(executed, total_tests, output)

        def plural(count):
            # 's' suffix for counts != 1, matching regrtest's wording.
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            # Build the regex for a "<count> test(s) <category>:" summary
            # followed by the sorted test names on the next line.
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if resource_denied:
            regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if env_changed:
            regex = list_regex(r'%s test%s altered the execution environment '
                               r'\(env changed\)',
                               env_changed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        if rerun is not None:
            regex = list_regex('%s re-run test%s', [rerun.name])
            self.check_line(output, regex)
            regex = LOG_PREFIX + r"Re-running 1 failed tests in verbose mode"
            self.check_line(output, regex)
            regex = fr"Re-running {rerun.name} in verbose mode"
            if rerun.match:
                regex = fr"{regex} \(matching: {rerun.match}\)"
            self.check_line(output, regex)

        if run_no_tests:
            regex = list_regex('%s test%s run no tests', run_no_tests)
            self.check_line(output, regex)

        # Number of tests expected to have passed outright.
        good = (len(tests) - len(skipped) - len(resource_denied) - len(failed)
                - len(omitted) - len(env_changed) - len(run_no_tests))
        if good:
            regex = r'%s test%s OK\.' % (good, plural(good))
            if not skipped and not failed and (rerun is None or rerun.success) and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex, full=True)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

        # Total tests
        text = f'run={stats.tests_run:,}'
        if filtered:
            text = fr'{text} \(filtered\)'
        parts = [text]
        if stats.failures:
            parts.append(f'failures={stats.failures:,}')
        if stats.skipped:
            parts.append(f'skipped={stats.skipped:,}')
        line = fr'Total tests: {" ".join(parts)}'
        self.check_line(output, line, full=True)

        # Total test files
        run = len(total_tests) - len(resource_denied)
        if rerun is not None:
            total_failed = len(rerun_failed)
            total_rerun = 1
        else:
            total_failed = len(failed)
            total_rerun = 0
        if interrupted:
            run = 0
        text = f'run={run}'
        if not forever:
            text = f'{text}/{len(tests)}'
        if filtered:
            text = fr'{text} \(filtered\)'
        report = [text]
        # Only non-zero categories appear in the report line.
        for name, ntest in (
            ('failed', total_failed),
            ('env_changed', len(env_changed)),
            ('skipped', len(skipped)),
            ('resource_denied', len(resource_denied)),
            ('rerun', total_rerun),
            ('run_no_tests', len(run_no_tests)),
        ):
            if ntest:
                report.append(f'{name}={ntest}')
        line = fr'Total test files: {" ".join(report)}'
        self.check_line(output, line, full=True)

        # Result
        state = []
        if failed:
            state.append('FAILURE')
        elif fail_env_changed and env_changed:
            state.append('ENV CHANGED')
        if interrupted:
            state.append('INTERRUPTED')
        if not any((good, failed, interrupted, skipped,
                    env_changed, fail_env_changed)):
            state.append("NO TESTS RAN")
        elif not state:
            state.append('SUCCESS')
        state = ', '.join(state)
        # After a re-run, regrtest reports "<first> then <second>".
        if rerun is not None:
            new_state = 'SUCCESS' if rerun.success else 'FAILURE'
            state = f'{state} then {new_state}'
        self.check_line(output, f'Result: {state}', full=True)
     676  
     677      def parse_random_seed(self, output: str) -> str:
     678          match = self.regex_search(r'Using random seed: (.*)', output)
     679          return match.group(1)
     680  
     681      def run_command(self, args, input=None, exitcode=0, **kw):
     682          if not input:
     683              input = ''
     684          if 'stderr' not in kw:
     685              kw['stderr'] = subprocess.STDOUT
     686  
     687          env = kw.pop('env', None)
     688          if env is None:
     689              env = dict(os.environ)
     690              env.pop('SOURCE_DATE_EPOCH', None)
     691  
     692          proc = subprocess.run(args,
     693                                text=True,
     694                                input=input,
     695                                stdout=subprocess.PIPE,
     696                                env=env,
     697                                **kw)
     698          if proc.returncode != exitcode:
     699              msg = ("Command %s failed with exit code %s, but exit code %s expected!\n"
     700                     "\n"
     701                     "stdout:\n"
     702                     "---\n"
     703                     "%s\n"
     704                     "---\n"
     705                     % (str(args), proc.returncode, exitcode, proc.stdout))
     706              if proc.stderr:
     707                  msg += ("\n"
     708                          "stderr:\n"
     709                          "---\n"
     710                          "%s"
     711                          "---\n"
     712                          % proc.stderr)
     713              self.fail(msg)
     714          return proc
     715  
     716      def run_python(self, args, **kw):
     717          extraargs = []
     718          if 'uops' in sys._xoptions:
     719              # Pass -X uops along
     720              extraargs.extend(['-X', 'uops'])
     721          args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args]
     722          proc = self.run_command(args, **kw)
     723          return proc.stdout
     724  
     725  
     726  class ESC[4;38;5;81mCheckActualTests(ESC[4;38;5;149mBaseTestCase):
     727      def test_finds_expected_number_of_tests(self):
     728          """
     729          Check that regrtest appears to find the expected set of tests.
     730          """
     731          args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
     732          output = self.run_python(args)
     733          rough_number_of_tests_found = len(output.splitlines())
     734          actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
     735                                               'test*.py')
     736          rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
     737          # We're not trying to duplicate test finding logic in here,
     738          # just give a rough estimate of how many there should be and
     739          # be near that.  This is a regression test to prevent mishaps
     740          # such as https://bugs.python.org/issue37667 in the future.
     741          # If you need to change the values in here during some
     742          # mythical future test suite reorganization, don't go
     743          # overboard with logic and keep that goal in mind.
     744          self.assertGreater(rough_number_of_tests_found,
     745                             rough_counted_test_py_files*9//10,
     746                             msg='Unexpectedly low number of tests found in:\n'
     747                             f'{", ".join(output.splitlines())}')
     748  
     749  
     750  class ESC[4;38;5;81mProgramsTestCase(ESC[4;38;5;149mBaseTestCase):
     751      """
     752      Test various ways to run the Python test suite. Use options close
     753      to options used on the buildbot.
     754      """
     755  
     756      NTEST = 4
     757  
     758      def setUp(self):
     759          super().setUp()
     760  
     761          # Create NTEST tests doing nothing
     762          self.tests = [self.create_test() for index in range(self.NTEST)]
     763  
     764          self.python_args = ['-Wd', '-E', '-bb']
     765          self.regrtest_args = ['-uall', '-rwW',
     766                                '--testdir=%s' % self.tmptestdir]
     767          self.regrtest_args.extend(('--timeout', '3600', '-j4'))
     768          if sys.platform == 'win32':
     769              self.regrtest_args.append('-n')
     770  
     771      def check_output(self, output):
     772          randseed = self.parse_random_seed(output)
     773          self.assertTrue(randseed.isdigit(), randseed)
     774  
     775          self.check_executed_tests(output, self.tests,
     776                                    randomize=True, stats=len(self.tests))
     777  
     778      def run_tests(self, args, env=None):
     779          output = self.run_python(args, env=env)
     780          self.check_output(output)
     781  
     782      def test_script_regrtest(self):
     783          # Lib/test/regrtest.py
     784          script = os.path.join(self.testdir, 'regrtest.py')
     785  
     786          args = [*self.python_args, script, *self.regrtest_args, *self.tests]
     787          self.run_tests(args)
     788  
     789      def test_module_test(self):
     790          # -m test
     791          args = [*self.python_args, '-m', 'test',
     792                  *self.regrtest_args, *self.tests]
     793          self.run_tests(args)
     794  
     795      def test_module_regrtest(self):
     796          # -m test.regrtest
     797          args = [*self.python_args, '-m', 'test.regrtest',
     798                  *self.regrtest_args, *self.tests]
     799          self.run_tests(args)
     800  
     801      def test_module_autotest(self):
     802          # -m test.autotest
     803          args = [*self.python_args, '-m', 'test.autotest',
     804                  *self.regrtest_args, *self.tests]
     805          self.run_tests(args)
     806  
     807      def test_module_from_test_autotest(self):
     808          # from test import autotest
     809          code = 'from test import autotest'
     810          args = [*self.python_args, '-c', code,
     811                  *self.regrtest_args, *self.tests]
     812          self.run_tests(args)
     813  
     814      def test_script_autotest(self):
     815          # Lib/test/autotest.py
     816          script = os.path.join(self.testdir, 'autotest.py')
     817          args = [*self.python_args, script, *self.regrtest_args, *self.tests]
     818          self.run_tests(args)
     819  
     820      def run_batch(self, *args):
     821          proc = self.run_command(args)
     822          self.check_output(proc.stdout)
     823  
     824      @unittest.skipUnless(sysconfig.is_python_build(),
     825                           'test.bat script is not installed')
     826      @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
     827      def test_tools_buildbot_test(self):
     828          # Tools\buildbot\test.bat
     829          script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
     830          test_args = ['--testdir=%s' % self.tmptestdir]
     831          if platform.machine() == 'ARM64':
     832              test_args.append('-arm64') # ARM 64-bit build
     833          elif platform.machine() == 'ARM':
     834              test_args.append('-arm32')   # 32-bit ARM build
     835          elif platform.architecture()[0] == '64bit':
     836              test_args.append('-x64')   # 64-bit build
     837          if not support.Py_DEBUG:
     838              test_args.append('+d')     # Release build, use python.exe
     839          self.run_batch(script, *test_args, *self.tests)
     840  
     841      @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
     842      def test_pcbuild_rt(self):
     843          # PCbuild\rt.bat
     844          script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
     845          if not os.path.isfile(script):
     846              self.skipTest(f'File "{script}" does not exist')
     847          rt_args = ["-q"]             # Quick, don't run tests twice
     848          if platform.machine() == 'ARM64':
     849              rt_args.append('-arm64') # ARM 64-bit build
     850          elif platform.machine() == 'ARM':
     851              rt_args.append('-arm32')   # 32-bit ARM build
     852          elif platform.architecture()[0] == '64bit':
     853              rt_args.append('-x64')   # 64-bit build
     854          if support.Py_DEBUG:
     855              rt_args.append('-d')     # Debug build, use python_d.exe
     856          self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
     857  
     858  
     859  class ESC[4;38;5;81mArgsTestCase(ESC[4;38;5;149mBaseTestCase):
     860      """
     861      Test arguments of the Python test suite.
     862      """
     863  
     864      def run_tests(self, *testargs, **kw):
     865          cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
     866          return self.run_python(cmdargs, **kw)
     867  
     868      def test_success(self):
     869          code = textwrap.dedent("""
     870              import unittest
     871  
     872              class PassingTests(unittest.TestCase):
     873                  def test_test1(self):
     874                      pass
     875  
     876                  def test_test2(self):
     877                      pass
     878  
     879                  def test_test3(self):
     880                      pass
     881          """)
     882          tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)]
     883  
     884          output = self.run_tests(*tests)
     885          self.check_executed_tests(output, tests,
     886                                    stats=3 * len(tests))
     887  
     888      def test_skip(self):
     889          code = textwrap.dedent("""
     890              import unittest
     891              raise unittest.SkipTest("nope")
     892          """)
     893          test_ok = self.create_test('ok')
     894          test_skip = self.create_test('skip', code=code)
     895          tests = [test_ok, test_skip]
     896  
     897          output = self.run_tests(*tests)
     898          self.check_executed_tests(output, tests,
     899                                    skipped=[test_skip],
     900                                    stats=1)
     901  
     902      def test_failing_test(self):
     903          # test a failing test
     904          code = textwrap.dedent("""
     905              import unittest
     906  
     907              class FailingTest(unittest.TestCase):
     908                  def test_failing(self):
     909                      self.fail("bug")
     910          """)
     911          test_ok = self.create_test('ok')
     912          test_failing = self.create_test('failing', code=code)
     913          tests = [test_ok, test_failing]
     914  
     915          output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
     916          self.check_executed_tests(output, tests, failed=test_failing,
     917                                    stats=TestStats(2, 1))
     918  
     919      def test_resources(self):
     920          # test -u command line option
     921          tests = {}
     922          for resource in ('audio', 'network'):
     923              code = textwrap.dedent("""
     924                          from test import support; support.requires(%r)
     925                          import unittest
     926                          class PassingTest(unittest.TestCase):
     927                              def test_pass(self):
     928                                  pass
     929                      """ % resource)
     930  
     931              tests[resource] = self.create_test(resource, code)
     932          test_names = sorted(tests.values())
     933  
     934          # -u all: 2 resources enabled
     935          output = self.run_tests('-u', 'all', *test_names)
     936          self.check_executed_tests(output, test_names, stats=2)
     937  
     938          # -u audio: 1 resource enabled
     939          output = self.run_tests('-uaudio', *test_names)
     940          self.check_executed_tests(output, test_names,
     941                                    resource_denied=tests['network'],
     942                                    stats=1)
     943  
     944          # no option: 0 resources enabled
     945          output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN)
     946          self.check_executed_tests(output, test_names,
     947                                    resource_denied=test_names,
     948                                    stats=0)
     949  
    def test_random(self):
        # test -r and --randseed command line option
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the random seed
        output = self.run_tests('-r', f'--randseed={randseed}', test,
                                exitcode=EXITCODE_NO_TESTS_RAN)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        # same seed => the test module must print the same random value
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

        # check that random.seed is used by default
        output = self.run_tests(test, exitcode=EXITCODE_NO_TESTS_RAN)
        randseed = self.parse_random_seed(output)
        self.assertTrue(randseed.isdigit(), randseed)

        # check SOURCE_DATE_EPOCH (integer): the env var pins the seed,
        # so the child's random output is fully deterministic
        timestamp = '1697839080'
        env = dict(os.environ, SOURCE_DATE_EPOCH=timestamp)
        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
                                env=env)
        randseed = self.parse_random_seed(output)
        self.assertEqual(randseed, timestamp)
        self.check_line(output, 'TESTRANDOM: 520')

        # check SOURCE_DATE_EPOCH (string): non-numeric values are accepted
        env = dict(os.environ, SOURCE_DATE_EPOCH='XYZ')
        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
                                env=env)
        randseed = self.parse_random_seed(output)
        self.assertEqual(randseed, 'XYZ')
        self.check_line(output, 'TESTRANDOM: 22')

        # check SOURCE_DATE_EPOCH (empty string): ignore the env var
        env = dict(os.environ, SOURCE_DATE_EPOCH='')
        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
                                env=env)
        randseed = self.parse_random_seed(output)
        self.assertTrue(randseed.isdigit(), randseed)
    1002  
    def test_fromfile(self):
        """--fromfile must accept test lists in several line formats."""
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
        with open(filename, "w") as fp:
            previous = None
            for index, name in enumerate(tests, 1):
                line = ("00:00:%02i [%s/%s] %s"
                        % (index, index, len(tests), name))
                if previous:
                    # mimic regrtest's "previous test took N sec" suffix
                    line += " -- %s took 0 sec" % previous
                print(line, file=fp)
                previous = name

        output = self.run_tests('--fromfile', filename)
        stats = len(tests)
        self.check_executed_tests(output, tests, stats=stats)

        # test format '[2/7] test_opcodes'
        with open(filename, "w") as fp:
            for index, name in enumerate(tests, 1):
                print("[%s/%s] %s" % (index, len(tests), name), file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)

        # test format 'test_opcodes': bare test names
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)

        # test format 'Lib/test/test_opcodes.py': relative file paths
        with open(filename, "w") as fp:
            for name in tests:
                print('Lib/test/%s.py' % name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests, stats=stats)
    1051  
    def test_interrupted(self):
        """A test raising SIGINT must stop the run as INTERRUPTED."""
        testname = self.create_test('sigint', code=TEST_INTERRUPTED)
        output = self.run_tests(testname, exitcode=EXITCODE_INTERRUPTED)
        self.check_executed_tests(output, testname, omitted=testname,
                                  interrupted=True, stats=0)
    1058  
    def test_slowest(self):
        """--slowest must print a "10 slowest tests" section."""
        tests = [self.create_test() for _ in range(3)]
        output = self.run_tests("--slowest", *tests)
        self.check_executed_tests(output, tests, stats=len(tests))
        # One "- name: duration" line per executed test.
        pattern = ('10 slowest tests:\n'
                   '(?:- %s: .*\n){%s}'
                   % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, pattern)
    1068  
    def test_slowest_interrupted(self):
        # Issue #25373: test --slowest with an interrupted test
        testname = self.create_test("sigint", code=TEST_INTERRUPTED)

        for multiprocessing in (False, True):
            with self.subTest(multiprocessing=multiprocessing):
                args = ["--slowest"]
                if multiprocessing:
                    args.append("-j2")
                args.append(testname)
                output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
                self.check_executed_tests(output, testname,
                                          omitted=testname, interrupted=True,
                                          stats=0)

                # The section header is printed even when interrupted.
                self.check_line(output, '10 slowest tests:\n')
    1087  
    def test_coverage(self):
        """--coverage must print a per-module line-coverage table."""
        testname = self.create_test('coverage')
        output = self.run_tests("--coverage", testname)
        self.check_executed_tests(output, [testname], stats=1)
        table_regex = (r'lines +cov% +module +\(path\)\n'
                       r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
        self.check_line(output, table_regex)
    1096  
    def test_wait(self):
        """--wait must prompt for a key press before running the tests."""
        testname = self.create_test('wait')
        output = self.run_tests("--wait", testname, input='key')
        self.check_line(output, 'Press any key to continue')
    1102  
    def test_forever(self):
        """--forever repeats the test until it fails (here: 3rd iteration)."""
        code = textwrap.dedent("""
            import builtins
            import unittest

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the builtins module, because the test
                    # module is reload at each run
                    if 'RUN' in builtins.__dict__:
                        builtins.__dict__['RUN'] += 1
                        if builtins.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd runs")
                    else:
                        builtins.__dict__['RUN'] = 1
        """)
        testname = self.create_test('forever', code=code)

        # --forever: three iterations, the last one fails
        output = self.run_tests('--forever', testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname] * 3, failed=testname,
                                  stats=TestStats(3, 1),
                                  forever=True)

        # --forever --rerun: the failure is retried and the rerun succeeds
        output = self.run_tests('--forever', '--rerun', testname, exitcode=0)
        self.check_executed_tests(output, [testname] * 3,
                                  rerun=Rerun(testname,
                                              match='test_run',
                                              success=True),
                                  stats=TestStats(4, 1),
                                  forever=True)
    1136  
    def check_leak(self, code, what, *, run_workers=False):
        """Run *code* under --huntrleaks and verify the leak is reported.

        *what* names the leaked resource as printed by regrtest
        (e.g. 'references' or 'file descriptors').
        """
        testname = self.create_test('huntrleaks', code=code)

        filename = 'reflog.txt'
        self.addCleanup(os_helper.unlink, filename)
        args = ['--huntrleaks', '3:3:']
        if run_workers:
            args.append('-j1')
        args.append(testname)
        output = self.run_tests(*args,
                                exitcode=EXITCODE_BAD_TEST,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [testname], failed=testname, stats=1)

        line = 'beginning 6 repetitions\n123456\n......\n'
        self.check_line(output, re.escape(line))

        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (testname, what)
        self.assertIn(line2, output)

        # The leak summary must also be written to the reflog file.
        with open(filename) as fp:
            self.assertIn(line2, fp.read())
    1160  
    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
    def check_huntrleaks(self, *, run_workers: bool):
        # test --huntrleaks
        code = textwrap.dedent("""
            import unittest

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())
        """)
        # The module-level list keeps one new reference per run: a ref leak.
        self.check_leak(code, 'references', run_workers=run_workers)
    1174  
    def test_huntrleaks(self):
        # --huntrleaks run directly in the main process
        self.check_huntrleaks(run_workers=False)
    1177  
    def test_huntrleaks_mp(self):
        # --huntrleaks combined with -j1 (worker process)
        self.check_huntrleaks(run_workers=True)
    1180  
    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
    def test_huntrleaks_fd_leak(self):
        # test --huntrleaks for file descriptor leak
        code = textwrap.dedent("""
            import os
            import unittest

            class FDLeakTest(unittest.TestCase):
                def test_leak(self):
                    fd = os.open(__file__, os.O_RDONLY)
                    # bug: never close the file descriptor
        """)
        self.check_leak(code, 'file descriptors')
    1194  
    def test_list_tests(self):
        """--list-tests prints exactly one test name per line, in order."""
        tests = [self.create_test() for _ in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(), tests)
    1201  
    def test_list_cases(self):
        """--list-cases prints fully qualified test case identifiers."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Test --list-cases
        expected = ['%s.Tests.test_method1' % testname,
                    '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), expected)

        # Test --list-cases with --match
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(),
                         ['%s.Tests.test_method1' % testname])
    1227  
    @support.cpython_only
    def test_crashed(self):
        """A crashing (segfaulting) test must be reported as failed."""
        # Any code which causes a crash
        code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)

        output = self.run_tests("-j2", crash_test, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [crash_test], failed=crash_test,
                                  parallel=True, stats=0)
    1238  
    def parse_methods(self, output):
        """Return test method names reported as "ok" in verbose output."""
        # With a single capturing group, findall() returns the group values.
        return re.findall("^(test[^ ]+).*ok$", output, flags=re.MULTILINE)
    1242  
    def test_ignorefile(self):
        """--ignorefile must skip the test methods listed in a file."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        ignored = [
            # only ignore the method name
            'test_method1',
            # ignore the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            fp.writelines(f'{name}\n' for name in ignored)

        output = self.run_tests("-v", "--ignorefile", filename, testname)
        # Only the two non-ignored methods must have run.
        self.assertEqual(self.parse_methods(output),
                         ['test_method2', 'test_method4'])
    1276  
    def test_matchfile(self):
        """--matchfile must restrict the run to the methods listed in a file."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        self.assertEqual(self.parse_methods(output), all_methods)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        wanted = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            fp.writelines(f'{name}\n' for name in wanted)

        output = self.run_tests("-v", "--matchfile", filename, testname)
        self.assertEqual(self.parse_methods(output),
                         ['test_method1', 'test_method3'])
    1317  
    def test_env_changed(self):
        """A test altering the environment (creating a file) is flagged."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_env_changed(self):
                    open("env_changed", "w").close()
        """)
        testname = self.create_test(code=code)

        # by default, an altered environment is reported but not fatal
        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname],
                                  env_changed=testname, stats=1)

        # --fail-env-changed turns it into a failure exit code
        output = self.run_tests("--fail-env-changed", testname,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [testname], env_changed=testname,
                                  fail_env_changed=True, stats=1)

        # with --rerun, the test is run again and succeeds
        output = self.run_tests("--rerun", testname)
        self.check_executed_tests(output, [testname],
                                  env_changed=testname,
                                  rerun=Rerun(testname,
                                              match=None,
                                              success=True),
                                  stats=2)
    1347  
    def test_rerun_fail(self):
        """A test failing on both the first run and the rerun must fail."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_always(self):
                    # test that always fails
                    self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              "test_fail_always",
                                              success=False),
                                  stats=TestStats(3, 2))
    1369  
    def test_rerun_success(self):
        """A test failing once then passing on its rerun.

        Plain --rerun exits with 0; with --fail-rerun the
        "FAILURE then SUCCESS" state exits with EXITCODE_RERUN_FAIL.
        The sample test fails until a marker file exists, which it
        creates on its first (failing) run.
        """
        # FAILURE then SUCCESS
        marker_filename = os.path.abspath("regrtest_marker_filename")
        self.addCleanup(os_helper.unlink, marker_filename)
        self.assertFalse(os.path.exists(marker_filename))

        code = textwrap.dedent(f"""
            import os.path
            import unittest

            marker_filename = {marker_filename!r}

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_once(self):
                    if not os.path.exists(marker_filename):
                        open(marker_filename, "w").close()
                        self.fail("bug")
        """)
        testname = self.create_test(code=code)

        # FAILURE then SUCCESS => exit code 0
        output = self.run_tests("--rerun", testname, exitcode=0)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              match="test_fail_once",
                                              success=True),
                                  stats=TestStats(3, 1))
        os_helper.unlink(marker_filename)

        # with --fail-rerun, exit code EXITCODE_RERUN_FAIL
        # on "FAILURE then SUCCESS" state.
        output = self.run_tests("--rerun", "--fail-rerun", testname,
                                exitcode=EXITCODE_RERUN_FAIL)
        self.check_executed_tests(output, [testname],
                                  rerun=Rerun(testname,
                                              match="test_fail_once",
                                              success=True),
                                  stats=TestStats(3, 1))
        os_helper.unlink(marker_filename)
    1412  
    def test_rerun_setup_class_hook_failure(self):
        """Rerun a test whose setUpClass() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def setUpClass(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match="ExampleTests",
                                              success=False),
                                  stats=0)
    1435  
    def test_rerun_teardown_class_hook_failure(self):
        """Rerun a test whose tearDownClass() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def tearDownClass(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match="ExampleTests",
                                              success=False),
                                  stats=2)
    1458  
    def test_rerun_setup_module_hook_failure(self):
        """Rerun a test whose setUpModule() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            def setUpModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match=None,
                                              success=False),
                                  stats=0)
    1480  
    def test_rerun_teardown_module_hook_failure(self):
        """Rerun a test whose tearDownModule() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            def tearDownModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match=None,
                                              success=False),
                                  stats=2)
    1502  
    def test_rerun_setup_hook_failure(self):
        """Rerun a test whose setUp() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def setUp(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match="test_success",
                                              success=False),
                                  stats=2)
    1524  
    def test_rerun_teardown_hook_failure(self):
        """Rerun a test whose tearDown() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def tearDown(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match="test_success",
                                              success=False),
                                  stats=2)
    1546  
    def test_rerun_async_setup_hook_failure(self):
        """Rerun a test whose asyncSetUp() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncSetUp(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  rerun=Rerun(testname,
                                              match="test_success",
                                              success=False),
                                  stats=2)
    1567  
    def test_rerun_async_teardown_hook_failure(self):
        """Rerun a test whose asyncTearDown() hook always fails."""
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncTearDown(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun=Rerun(testname,
                                              match="test_success",
                                              success=False),
                                  stats=2)
    1589  
    def test_no_tests_ran(self):
        """-m matching no test must exit with EXITCODE_NO_TESTS_RAN."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, "-m", "nosuchtest",
                                exitcode=EXITCODE_NO_TESTS_RAN)
        self.check_executed_tests(output, [testname],
                                  run_no_tests=testname,
                                  stats=0, filtered=True)
    1605  
    def test_no_tests_ran_skip(self):
        """A test file where every test is skipped still counts as run."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_skipped(self):
                    self.skipTest("because")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname],
                                  stats=TestStats(1, skipped=1))
    1619  
    def test_no_tests_ran_multiple_tests_nonexistent(self):
        """-m matching no test in any of several files: no tests ran."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        testname2 = self.create_test(code=code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                exitcode=EXITCODE_NO_TESTS_RAN)
        self.check_executed_tests(output, [testname, testname2],
                                  run_no_tests=[testname, testname2],
                                  stats=0, filtered=True)
    1636  
    def test_no_test_ran_some_test_exist_some_not(self):
        """Multiple -m patterns succeed if at least one test matches."""
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        other_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_other_bug(self):
                    pass
        """)
        testname2 = self.create_test(code=other_code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                "-m", "test_other_bug", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  run_no_tests=[testname],
                                  stats=1, filtered=True)
    1660  
    @support.cpython_only
    def test_uncollectable(self):
        """An uncollectable object must be reported as an env change.

        Uses _testcapi.with_tp_del to create an object that the garbage
        collector cannot collect (legacy tp_del slot).
        """
        code = textwrap.dedent(r"""
            import _testcapi
            import gc
            import unittest

            @_testcapi.with_tp_del
            class Garbage:
                def __tp_del__(self):
                    pass

            class Tests(unittest.TestCase):
                def test_garbage(self):
                    # create an uncollectable object
                    obj = Garbage()
                    obj.ref_cycle = obj
                    obj = None
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", testname,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True,
                                  stats=1)
    1688  
    def test_multiprocessing_timeout(self):
        """A hanging test run under -j2 must be stopped by --timeout."""
        code = textwrap.dedent(r"""
            import time
            import unittest
            try:
                import faulthandler
            except ImportError:
                faulthandler = None

            class Tests(unittest.TestCase):
                # test hangs and so should be stopped by the timeout
                def test_sleep(self):
                    # we want to test regrtest multiprocessing timeout,
                    # not faulthandler timeout
                    if faulthandler is not None:
                        faulthandler.cancel_dump_traceback_later()

                    time.sleep(60 * 5)
        """)
        testname = self.create_test(code=code)

        stdout = self.run_tests("-j2", "--timeout=1.0", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(stdout, [testname],
                                  failed=testname, stats=0)
        # regrtest must log that the test file timed out
        timed_out_regex = re.compile(f'{testname} timed out', re.MULTILINE)
        self.assertRegex(stdout, timed_out_regex)
    1716  
    def test_unraisable_exc(self):
        """--fail-env-changed must catch an unraisable exception.

        The exception must be displayed even if sys.stderr is redirected.
        The sample test triggers it through a weakref callback that raises.
        """
        code = textwrap.dedent(r"""
            import unittest
            import weakref
            from test.support import captured_stderr

            class MyObject:
                pass

            def weakref_callback(obj):
                raise Exception("weakref callback bug")

            class Tests(unittest.TestCase):
                def test_unraisable_exc(self):
                    obj = MyObject()
                    ref = weakref.ref(obj, weakref_callback)
                    with captured_stderr() as stderr:
                        # call weakref_callback() which logs
                        # an unraisable exception
                        obj = None
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True,
                                  stats=1)
        self.assertIn("Warning -- Unraisable exception", output)
        self.assertIn("Exception: weakref callback bug", output)
    1751  
    def test_threading_excepthook(self):
        """--fail-env-changed must catch an uncaught thread exception.

        The exception must be displayed even if sys.stderr is redirected.
        """
        code = textwrap.dedent(r"""
            import threading
            import unittest
            from test.support import captured_stderr

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_threading_excepthook(self):
                    with captured_stderr() as stderr:
                        thread = threading.Thread(target=func_bug)
                        thread.start()
                        thread.join()
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True,
                                  stats=1)
        self.assertIn("Warning -- Uncaught thread exception", output)
        self.assertIn("Exception: bug in thread", output)
    1784  
    def test_print_warning(self):
        """bpo-45410: stdout/print_warning() interleaving.

        The order of messages must be preserved when -W and
        support.print_warning() are used.
        """
        code = textwrap.dedent(r"""
            import sys
            import unittest
            from test import support

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_print_warning(self):
                    print("msg1: stdout")
                    support.print_warning("msg2: print_warning")
                    # Fail with ENV CHANGED to see print_warning() log
                    support.environment_altered = True
        """)
        testname = self.create_test(code=code)

        # Expected output shape:
        #
        #   test_threading_excepthook (test.test_x.Tests) ... msg1: stdout
        #   Warning -- msg2: print_warning
        #   ok
        expected_regex = (r"test_print_warning.*msg1: stdout\n"
                          r"Warning -- msg2: print_warning\n"
                          r"ok\n")
        for opt in ("-v", "-W"):
            with self.subTest(option=opt):
                stdout = self.run_tests("--fail-env-changed", opt, testname,
                                        exitcode=EXITCODE_ENV_CHANGED)
                self.check_executed_tests(stdout, [testname],
                                          env_changed=[testname],
                                          fail_env_changed=True,
                                          stats=1)
                self.assertRegex(stdout, expected_regex)
    1825  
    def test_unicode_guard_env(self):
        """The Unicode guard env var must be set and contain non-ASCII."""
        guard_value = os.environ.get(setup.UNICODE_GUARD_ENV)
        self.assertIsNotNone(guard_value, f"{setup.UNICODE_GUARD_ENV} not set")
        if guard_value.isascii():
            # Skip to signify that the env var value was changed by the user;
            # possibly to something ASCII to work around Unicode issues.
            self.skipTest("Modified guard")
    1833  
    def test_cleanup(self):
        """--cleanup must remove leftover test_python_* files and dirs."""
        leftover_dir = os.path.join(self.tmptestdir, "test_python_123")
        os.mkdir(leftover_dir)
        leftover_file = os.path.join(self.tmptestdir, "test_python_456")
        open(leftover_file, "wb").close()

        self.run_python(['-m', 'test',
                         f'--tempdir={self.tmptestdir}',
                         '--cleanup'])

        # both leftovers must be gone after --cleanup
        for leftover in (leftover_dir, leftover_file):
            self.assertFalse(os.path.exists(leftover), leftover)
    1848  
    @unittest.skipIf(support.is_wasi,
                     'checking temp files is not implemented on WASI')
    def test_leak_tmp_file(self):
        """A leaked temporary file must be reported as an env change.

        Three identical test files each leave 'mytmpfile' behind; the
        leak must be attributed to each of them in the -j2 run.
        """
        code = textwrap.dedent(r"""
            import os.path
            import tempfile
            import unittest

            class FileTests(unittest.TestCase):
                def test_leak_tmp_file(self):
                    filename = os.path.join(tempfile.gettempdir(), 'mytmpfile')
                    with open(filename, "wb") as fp:
                        fp.write(b'content')
        """)
        testnames = [self.create_test(code=code) for _ in range(3)]

        output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames,
                                exitcode=EXITCODE_ENV_CHANGED)
        self.check_executed_tests(output, testnames,
                                  env_changed=testnames,
                                  fail_env_changed=True,
                                  parallel=True,
                                  stats=len(testnames))
        for testname in testnames:
            self.assertIn(f"Warning -- {testname} leaked temporary "
                          f"files (1): mytmpfile",
                          output)
    1876  
    def test_worker_decode_error(self):
        """gh-109425: worker stdout is decoded with "backslashreplace".

        Feed a worker output which cannot be decoded with the parent's
        stdout encoding and check that the backslash-escaped form shows
        up in regrtest's output.
        """
        # Determine the encoding regrtest uses to decode worker output.
        if sys.platform == 'win32':
            encoding = locale.getencoding()
        else:
            encoding = sys.stdout.encoding
            if encoding is None:
                encoding = sys.__stdout__.encoding
                if encoding is None:
                    self.skipTest("cannot get regrtest worker encoding")

        nonascii = bytes(ch for ch in range(128, 256))
        corrupted_output = b"nonascii:%s\n" % (nonascii,)
        # gh-108989: On Windows, assertion errors are written in UTF-16: when
        # decoded each letter is followed by a NUL character.
        assertion_failed = 'Assertion failed: tstate_is_alive(tstate)\n'
        corrupted_output += assertion_failed.encode('utf-16-le')
        # The test only makes sense if the bytes really are undecodable.
        try:
            corrupted_output.decode(encoding)
        except UnicodeDecodeError:
            pass
        else:
            self.skipTest(f"{encoding} can decode non-ASCII bytes")

        expected_line = corrupted_output.decode(encoding, 'backslashreplace')

        code = textwrap.dedent(fr"""
            import sys
            import unittest

            class Tests(unittest.TestCase):
                def test_pass(self):
                    pass

            # bytes which cannot be decoded from UTF-8
            corrupted_output = {corrupted_output!a}
            sys.stdout.buffer.write(corrupted_output)
            sys.stdout.buffer.flush()
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname)
        self.check_executed_tests(output, [testname],
                                  parallel=True,
                                  stats=1)
        self.check_line(output, expected_line, regex=False)
    1923  
    def test_doctest(self):
        """Doctest failures loaded via load_tests() must fail the run.

        The sample doctest has one passing, two failing and one skipped
        example; the run must exit with EXITCODE_BAD_TEST.
        """
        code = textwrap.dedent(r'''
            import doctest
            import sys
            from test import support

            def my_function():
                """
                Pass:

                >>> 1 + 1
                2

                Failure:

                >>> 2 + 3
                23
                >>> 1 + 1
                11

                Skipped test (ignored):

                >>> id(1.0)  # doctest: +SKIP
                7948648
                """

            def load_tests(loader, tests, pattern):
                tests.addTest(doctest.DocTestSuite())
                return tests
        ''')
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
                                exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, [testname],
                                  failed=[testname],
                                  parallel=True,
                                  stats=TestStats(1, 1, 0))
    1962  
    def _check_random_seed(self, run_workers: bool):
        """Check that --randomize seeds each test file identically.

        gh-109276: When -r/--randomize is used, random.seed() is called
        with the same random seed before running each test file.
        """
        code = textwrap.dedent(r'''
            import random
            import unittest

            class RandomSeedTest(unittest.TestCase):
                def test_randint(self):
                    numbers = [random.randint(0, 1000) for _ in range(10)]
                    print(f"Random numbers: {numbers}")
        ''')
        tests = [self.create_test(name=f'test_random{i}', code=code)
                 for i in range(1, 3+1)]

        seed = 856_656_202
        args = ["--randomize", f"--randseed={seed}"]
        if run_workers:
            # spawn one worker process per test file
            args.append(f'-j{len(tests)}')
        args.extend(tests)
        output = self.run_tests(*args)

        # Assume that nothing consumes entropy between libregrtest's
        # setup_tests() call to random.seed() and RandomSeedTest calling
        # random.randint().
        random.seed(seed)
        expected_numbers = [random.randint(0, 1000) for _ in range(10)]
        expected_line = f"Random numbers: {expected_numbers}"

        found = re.findall(r'^Random numbers: .*$', output,
                           flags=re.MULTILINE)
        self.assertEqual(found, [expected_line] * len(tests))
    1996  
    def test_random_seed(self):
        """Randomized run without worker processes."""
        self._check_random_seed(False)
    1999  
    def test_random_seed_workers(self):
        """Randomized run with one worker process per test file."""
        self._check_random_seed(True)
    2002  
    def test_python_command(self):
        """--python must run workers with the given Python command line."""
        code = textwrap.dedent(r"""
            import sys
            import unittest

            class WorkerTests(unittest.TestCase):
                def test_dev_mode(self):
                    self.assertTrue(sys.flags.dev_mode)
        """)
        tests = [self.create_test(code=code) for _ in range(3)]

        # Custom Python command: "python -X dev".
        # test.libregrtest.cmdline uses shlex.split() to parse the Python
        # command line string, so join it into a single string here.
        python_cmd = shlex.join([sys.executable, '-X', 'dev'])

        stdout = self.run_tests("--python", python_cmd, "-j0", *tests)
        self.check_executed_tests(stdout, tests,
                                  stats=len(tests), parallel=True)
    2023  
    def test_unload_tests(self):
        """Unloading test modules must not break tests that import from
        other tests.

        The execution order matters: test_regrtest_a and test_regrtest_c,
        which run before and after test_regrtest_b, both import a submodule
        of the test_regrtest_b package and use it in testing, while
        test_regrtest_b itself never imports that submodule.

        Previously test_regrtest_c failed because test_regrtest_b.util was
        left in sys.modules after test_regrtest_a (making the import
        statement a no-op), while a fresh test_regrtest_b module without
        the util attribute was imported for test_regrtest_b.
        """
        data_dir = os.path.join(os.path.dirname(__file__),
                                'regrtestdata', 'import_from_tests')
        test_names = [f'test_regrtest_{suffix}' for suffix in 'abc']
        output = self.run_python(['-Wd', '-E', '-bb', '-m', 'test',
                                  f'--testdir={data_dir}', *test_names])
        self.check_executed_tests(output, test_names, stats=3)
    2042  
    def check_add_python_opts(self, option):
        """Check that *option* (--fast-ci or --slow-ci) adds the
        "-u -W default -bb -E" Python options to worker processes.

        Runs the test suite in a subprocess so the exact command line is
        controlled, and asserts that the run succeeds.
        """
        # The embedded worker test inspects the interpreter configuration
        # to verify that each option was actually applied.
        code = textwrap.dedent(r"""
            import sys
            import unittest
            from test import support
            try:
                from _testinternalcapi import get_config
            except ImportError:
                get_config = None

            # WASI/WASM buildbots don't use -E option
            use_environment = (support.is_emscripten or support.is_wasi)

            class WorkerTests(unittest.TestCase):
                # Bug fix: the condition was inverted ("get_config is None"),
                # which silently skipped this test on every build that has
                # _testinternalcapi, and would have crashed (None is not
                # callable) on builds without it.
                @unittest.skipUnless(get_config is not None, 'need get_config()')
                def test_config(self):
                    config = get_config()['config']
                    # -u option
                    self.assertEqual(config['buffered_stdio'], 0)
                    # -W default option.
                    # Bug fix: assertTrue(x, y) treats y as the failure
                    # message, not an expected value; check membership.
                    self.assertIn('default', config['warnoptions'])
                    # -bb option
                    self.assertEqual(config['bytes_warning'], 2)
                    # -E option (skipped on WASI/WASM, see use_environment).
                    # Bug fix: was assertTrue(config['use_environment'], ...),
                    # which fails whenever the environment is ignored.
                    self.assertEqual(bool(config['use_environment']),
                                     use_environment)

                def test_python_opts(self):
                    # -u option: unbuffered stdio
                    self.assertTrue(sys.__stdout__.write_through)
                    self.assertTrue(sys.__stderr__.write_through)

                    # -W default option.
                    # Bug fix: assertTrue(x, y) treats y as the failure
                    # message, not an expected value; check membership.
                    self.assertIn('default', sys.warnoptions)

                    # -bb option
                    self.assertEqual(sys.flags.bytes_warning, 2)

                    # -E option
                    self.assertEqual(not sys.flags.ignore_environment,
                                     use_environment)
        """)
        testname = self.create_test(code=code)

        # Use subprocess directly to control the exact command line
        cmd = [sys.executable,
               "-m", "test", option,
               f'--testdir={self.tmptestdir}',
               testname]
        proc = subprocess.run(cmd,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              text=True)
        self.assertEqual(proc.returncode, 0, proc)
    2097  
    def test_add_python_opts(self):
        """Exercise check_add_python_opts() with both CI options."""
        for ci_option in ("--fast-ci", "--slow-ci"):
            with self.subTest(opt=ci_option):
                self.check_add_python_opts(ci_option)
    2102  
    # gh-76319: Raising SIGSEGV on Android may not cause a crash.
    @unittest.skipIf(support.is_android,
                     'raising SIGSEGV on Android is unreliable')
    def test_worker_output_on_failure(self):
        """The stdout captured from a crashing worker process must be
        reported in the regrtest output."""
        try:
            from faulthandler import _sigsegv
        except ImportError:
            self.skipTest("need faulthandler._sigsegv")

        crash_code = textwrap.dedent(r"""
            import faulthandler
            import unittest
            from test import support

            class CrashTests(unittest.TestCase):
                def test_crash(self):
                    print("just before crash!", flush=True)

                    with support.SuppressCrashReport():
                        faulthandler._sigsegv(True)
        """)
        test_name = self.create_test(code=crash_code)

        # The sanitizers must not handle SIGSEGV themselves
        # (ex: for test_enable_fd())
        env = os.environ.copy()
        support.set_sanitizer_env_var(env, 'handle_segv=0')

        output = self.run_tests("-j1", test_name,
                                exitcode=EXITCODE_BAD_TEST,
                                env=env)
        self.check_executed_tests(output, test_name,
                                  failed=[test_name],
                                  stats=0, parallel=True)
        if not support.MS_WINDOWS:
            # On POSIX, a fatal signal is reported as a negative exit code.
            expected_exitcode = -int(signal.SIGSEGV)
            self.assertIn(f"Exit code {expected_exitcode} (SIGSEGV)", output)
        self.check_line(output, "just before crash!", full=True, regex=False)
    2141  
    def test_verbose3(self):
        """--verbose3 must hide the output of passing tests."""
        code = textwrap.dedent(r"""
            import unittest
            from test import support

            class VerboseTests(unittest.TestCase):
                def test_pass(self):
                    print("SPAM SPAM SPAM")
        """)
        test_name = self.create_test(code=code)

        # Sequential run: the print() of the passing test is suppressed.
        output = self.run_tests("--verbose3", test_name)
        self.check_executed_tests(output, test_name, stats=1)
        self.assertNotIn('SPAM SPAM SPAM', output)

        # The -R (refleak) option requires a debug build of Python.
        if support.Py_DEBUG:
            # Check for reference leaks while running in parallel.
            output = self.run_tests("-R", "3:3", "-j1", "--verbose3",
                                    test_name)
            self.check_executed_tests(output, test_name, stats=1,
                                      parallel=True)
            self.assertNotIn('SPAM SPAM SPAM', output)
    2164  
    2165  
class TestUtils(unittest.TestCase):
    """Unit tests for helpers in test.libregrtest.utils and for the
    test-matching machinery of test.libregrtest.filter."""

    def test_format_duration(self):
        # format_duration() renders a duration in seconds as a short
        # human-readable string, keeping at most two units of precision.
        self.assertEqual(utils.format_duration(0),
                         '0 ms')
        # Sub-millisecond durations are rounded up to 1 ms.
        self.assertEqual(utils.format_duration(1e-9),
                         '1 ms')
        self.assertEqual(utils.format_duration(10e-3),
                         '10 ms')
        self.assertEqual(utils.format_duration(1.5),
                         '1.5 sec')
        self.assertEqual(utils.format_duration(1),
                         '1.0 sec')
        self.assertEqual(utils.format_duration(2 * 60),
                         '2 min')
        self.assertEqual(utils.format_duration(2 * 60 + 1),
                         '2 min 1 sec')
        self.assertEqual(utils.format_duration(3 * 3600),
                         '3 hour')
        # Only the two most significant units are kept: the seconds are
        # dropped when both hours and minutes are present.
        self.assertEqual(utils.format_duration(3 * 3600  + 2 * 60 + 1),
                         '3 hour 2 min')
        self.assertEqual(utils.format_duration(3 * 3600 + 1),
                         '3 hour 1 sec')

    def test_normalize_test_name(self):
        # normalize_test_name() extracts the short method name from a
        # unittest test id such as "test_x (pkg.mod.Class.test_x)".
        normalize = normalize_test_name
        self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'),
                         'test_access')
        # For errors raised in class-level fixtures (setUpClass), the class
        # name is returned instead of a method name.
        self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True),
                         'ChownFileTests')
        self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True),
                         'test_success')
        # Module-level fixture errors cannot be mapped to a test name:
        # None is returned.
        self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
        self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))

    def test_get_signal_name(self):
        # get_signal_name() maps a process exit code to a signal name:
        # negative values are POSIX signal numbers; the large positive
        # values are Windows NTSTATUS codes.
        for exitcode, expected in (
            (-int(signal.SIGINT), 'SIGINT'),
            (-int(signal.SIGSEGV), 'SIGSEGV'),
            (3221225477, "STATUS_ACCESS_VIOLATION"),
            (0xC00000FD, "STATUS_STACK_OVERFLOW"),
        ):
            self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode)

    def test_format_resources(self):
        # format_resources() summarizes the enabled resource set for the
        # run header, collapsing the full set to "all".
        format_resources = utils.format_resources
        ALL_RESOURCES = utils.ALL_RESOURCES
        self.assertEqual(
            format_resources(("network",)),
            'resources (1): network')
        self.assertEqual(
            format_resources(("audio", "decimal", "network")),
            'resources (3): audio,decimal,network')
        self.assertEqual(
            format_resources(ALL_RESOURCES),
            'resources: all')
        # One resource missing from the full set is shown as "all,-name".
        self.assertEqual(
            format_resources(tuple(name for name in ALL_RESOURCES
                                   if name != "cpu")),
            'resources: all,-cpu')
        # An extra resource on top of the full set is shown as "all,name".
        self.assertEqual(
            format_resources((*ALL_RESOURCES, "tzdata")),
            'resources: all,tzdata')

    def test_match_test(self):
        # Minimal stand-in for a unittest TestCase: match_test() only
        # needs an object with an id() method.
        class Test:
            def __init__(self, test_id):
                self.test_id = test_id

            def id(self):
                return self.test_id

        test_access = Test('test.test_os.FileTests.test_access')
        test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir')
        test_copy = Test('test.test_shutil.TestCopy.test_copy')

        # NOTE: set_match_tests() mutates global matcher state, so the
        # order of the calls below matters; each set_match_tests() call
        # replaces the previous patterns entirely.

        # Test acceptance
        with support.swap_attr(support, '_test_matchers', ()):
            # match all
            set_match_tests([])
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))

            # match all using None
            set_match_tests(None)
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))

            # match the full test identifier
            set_match_tests([(test_access.id(), True)])
            self.assertTrue(match_test(test_access))
            self.assertFalse(match_test(test_chdir))

            # match the module name
            set_match_tests([('test_os', True)])
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))
            self.assertFalse(match_test(test_copy))

            # Test '*' pattern
            set_match_tests([('test_*', True)])
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))

            # Test case sensitivity
            set_match_tests([('filetests', True)])
            self.assertFalse(match_test(test_access))
            set_match_tests([('FileTests', True)])
            self.assertTrue(match_test(test_access))

            # Test pattern containing '.' and a '*' metacharacter
            set_match_tests([('*test_os.*.test_*', True)])
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))
            self.assertFalse(match_test(test_copy))

            # Multiple patterns
            set_match_tests([(test_access.id(), True), (test_chdir.id(), True)])
            self.assertTrue(match_test(test_access))
            self.assertTrue(match_test(test_chdir))
            self.assertFalse(match_test(test_copy))

            set_match_tests([('test_access', True), ('DONTMATCH', True)])
            self.assertTrue(match_test(test_access))
            self.assertFalse(match_test(test_chdir))

        # Test rejection: the False flag inverts each pattern, so matching
        # tests are excluded and non-matching tests are kept.
        with support.swap_attr(support, '_test_matchers', ()):
            # match the full test identifier
            set_match_tests([(test_access.id(), False)])
            self.assertFalse(match_test(test_access))
            self.assertTrue(match_test(test_chdir))

            # match the module name
            set_match_tests([('test_os', False)])
            self.assertFalse(match_test(test_access))
            self.assertFalse(match_test(test_chdir))
            self.assertTrue(match_test(test_copy))

            # Test '*' pattern
            set_match_tests([('test_*', False)])
            self.assertFalse(match_test(test_access))
            self.assertFalse(match_test(test_chdir))

            # Test case sensitivity
            set_match_tests([('filetests', False)])
            self.assertTrue(match_test(test_access))
            set_match_tests([('FileTests', False)])
            self.assertFalse(match_test(test_access))

            # Test pattern containing '.' and a '*' metacharacter
            set_match_tests([('*test_os.*.test_*', False)])
            self.assertFalse(match_test(test_access))
            self.assertFalse(match_test(test_chdir))
            self.assertTrue(match_test(test_copy))

            # Multiple patterns
            set_match_tests([(test_access.id(), False), (test_chdir.id(), False)])
            self.assertFalse(match_test(test_access))
            self.assertFalse(match_test(test_chdir))
            self.assertTrue(match_test(test_copy))

            set_match_tests([('test_access', False), ('DONTMATCH', False)])
            self.assertFalse(match_test(test_access))
            self.assertTrue(match_test(test_chdir))

        # Test mixed filters: acceptance and rejection patterns combined.
        with support.swap_attr(support, '_test_matchers', ()):
            set_match_tests([('*test_os', False), ('test_access', True)])
            self.assertTrue(match_test(test_access))
            self.assertFalse(match_test(test_chdir))
            self.assertTrue(match_test(test_copy))

            set_match_tests([('*test_os', True), ('test_access', False)])
            self.assertFalse(match_test(test_access))
            self.assertTrue(match_test(test_chdir))
            self.assertFalse(match_test(test_copy))
    2342  
    2343  
if __name__ == '__main__':
    # Allow running this test file directly: python test_regrtest.py
    unittest.main()