(root)/
Python-3.11.7/
Lib/
test/
libregrtest/
results.py
       1  import sys
       2  
       3  from .runtests import RunTests
       4  from .result import State, TestResult, TestStats
       5  from .utils import (
       6      StrPath, TestName, TestTuple, TestList, FilterDict,
       7      printlist, count, format_duration)
       8  
       9  
# Python uses exit code 1 when an exception is not caught
# argparse.ArgumentParser.error() uses exit code 2
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
EXITCODE_RERUN_FAIL = 5
EXITCODE_INTERRUPTED = 130   # 128 + signal.SIGINT=2
      17  
      18  
      19  class ESC[4;38;5;81mTestResults:
      20      def __init__(self):
      21          self.bad: TestList = []
      22          self.good: TestList = []
      23          self.rerun_bad: TestList = []
      24          self.skipped: TestList = []
      25          self.resource_denied: TestList = []
      26          self.env_changed: TestList = []
      27          self.run_no_tests: TestList = []
      28          self.rerun: TestList = []
      29          self.rerun_results: list[TestResult] = []
      30  
      31          self.interrupted: bool = False
      32          self.worker_bug: bool = False
      33          self.test_times: list[tuple[float, TestName]] = []
      34          self.stats = TestStats()
      35          # used by --junit-xml
      36          self.testsuite_xml: list = []
      37  
      38      def is_all_good(self):
      39          return (not self.bad
      40                  and not self.skipped
      41                  and not self.interrupted
      42                  and not self.worker_bug)
      43  
      44      def get_executed(self):
      45          return (set(self.good) | set(self.bad) | set(self.skipped)
      46                  | set(self.resource_denied) | set(self.env_changed)
      47                  | set(self.run_no_tests))
      48  
      49      def no_tests_run(self):
      50          return not any((self.good, self.bad, self.skipped, self.interrupted,
      51                          self.env_changed))
      52  
      53      def get_state(self, fail_env_changed):
      54          state = []
      55          if self.bad:
      56              state.append("FAILURE")
      57          elif fail_env_changed and self.env_changed:
      58              state.append("ENV CHANGED")
      59          elif self.no_tests_run():
      60              state.append("NO TESTS RAN")
      61  
      62          if self.interrupted:
      63              state.append("INTERRUPTED")
      64          if self.worker_bug:
      65              state.append("WORKER BUG")
      66          if not state:
      67              state.append("SUCCESS")
      68  
      69          return ', '.join(state)
      70  
      71      def get_exitcode(self, fail_env_changed, fail_rerun):
      72          exitcode = 0
      73          if self.bad:
      74              exitcode = EXITCODE_BAD_TEST
      75          elif self.interrupted:
      76              exitcode = EXITCODE_INTERRUPTED
      77          elif fail_env_changed and self.env_changed:
      78              exitcode = EXITCODE_ENV_CHANGED
      79          elif self.no_tests_run():
      80              exitcode = EXITCODE_NO_TESTS_RAN
      81          elif fail_rerun and self.rerun:
      82              exitcode = EXITCODE_RERUN_FAIL
      83          elif self.worker_bug:
      84              exitcode = EXITCODE_BAD_TEST
      85          return exitcode
      86  
      87      def accumulate_result(self, result: TestResult, runtests: RunTests):
      88          test_name = result.test_name
      89          rerun = runtests.rerun
      90          fail_env_changed = runtests.fail_env_changed
      91  
      92          match result.state:
      93              case State.PASSED:
      94                  self.good.append(test_name)
      95              case State.ENV_CHANGED:
      96                  self.env_changed.append(test_name)
      97                  self.rerun_results.append(result)
      98              case State.SKIPPED:
      99                  self.skipped.append(test_name)
     100              case State.RESOURCE_DENIED:
     101                  self.resource_denied.append(test_name)
     102              case State.INTERRUPTED:
     103                  self.interrupted = True
     104              case State.DID_NOT_RUN:
     105                  self.run_no_tests.append(test_name)
     106              case _:
     107                  if result.is_failed(fail_env_changed):
     108                      self.bad.append(test_name)
     109                      self.rerun_results.append(result)
     110                  else:
     111                      raise ValueError(f"invalid test state: {result.state!r}")
     112  
     113          if result.state == State.WORKER_BUG:
     114              self.worker_bug = True
     115  
     116          if result.has_meaningful_duration() and not rerun:
     117              if result.duration is None:
     118                  raise ValueError("result.duration is None")
     119              self.test_times.append((result.duration, test_name))
     120          if result.stats is not None:
     121              self.stats.accumulate(result.stats)
     122          if rerun:
     123              self.rerun.append(test_name)
     124  
     125          xml_data = result.xml_data
     126          if xml_data:
     127              self.add_junit(xml_data)
     128  
     129      def need_rerun(self):
     130          return bool(self.rerun_results)
     131  
     132      def prepare_rerun(self) -> tuple[TestTuple, FilterDict]:
     133          tests: TestList = []
     134          match_tests_dict = {}
     135          for result in self.rerun_results:
     136              tests.append(result.test_name)
     137  
     138              match_tests = result.get_rerun_match_tests()
     139              # ignore empty match list
     140              if match_tests:
     141                  match_tests_dict[result.test_name] = match_tests
     142  
     143          # Clear previously failed tests
     144          self.rerun_bad.extend(self.bad)
     145          self.bad.clear()
     146          self.env_changed.clear()
     147          self.rerun_results.clear()
     148  
     149          return (tuple(tests), match_tests_dict)
     150  
     151      def add_junit(self, xml_data: list[str]):
     152          import xml.etree.ElementTree as ET
     153          for e in xml_data:
     154              try:
     155                  self.testsuite_xml.append(ET.fromstring(e))
     156              except ET.ParseError:
     157                  print(xml_data, file=sys.__stderr__)
     158                  raise
     159  
     160      def write_junit(self, filename: StrPath):
     161          if not self.testsuite_xml:
     162              # Don't create empty XML file
     163              return
     164  
     165          import xml.etree.ElementTree as ET
     166          root = ET.Element("testsuites")
     167  
     168          # Manually count the totals for the overall summary
     169          totals = {'tests': 0, 'errors': 0, 'failures': 0}
     170          for suite in self.testsuite_xml:
     171              root.append(suite)
     172              for k in totals:
     173                  try:
     174                      totals[k] += int(suite.get(k, 0))
     175                  except ValueError:
     176                      pass
     177  
     178          for k, v in totals.items():
     179              root.set(k, str(v))
     180  
     181          with open(filename, 'wb') as f:
     182              for s in ET.tostringlist(root):
     183                  f.write(s)
     184  
     185      def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool):
     186          if print_slowest:
     187              self.test_times.sort(reverse=True)
     188              print()
     189              print("10 slowest tests:")
     190              for test_time, test in self.test_times[:10]:
     191                  print("- %s: %s" % (test, format_duration(test_time)))
     192  
     193          all_tests = []
     194          omitted = set(tests) - self.get_executed()
     195  
     196          # less important
     197          all_tests.append((omitted, "test", "{} omitted:"))
     198          if not quiet:
     199              all_tests.append((self.skipped, "test", "{} skipped:"))
     200              all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
     201          all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
     202  
     203          # more important
     204          all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
     205          all_tests.append((self.rerun, "re-run test", "{}:"))
     206          all_tests.append((self.bad, "test", "{} failed:"))
     207  
     208          for tests_list, count_text, title_format in all_tests:
     209              if tests_list:
     210                  print()
     211                  count_text = count(len(tests_list), count_text)
     212                  print(title_format.format(count_text))
     213                  printlist(tests_list)
     214  
     215          if self.good and not quiet:
     216              print()
     217              text = count(len(self.good), "test")
     218              text = f"{text} OK."
     219              if (self.is_all_good() and len(self.good) > 1):
     220                  text = f"All {text}"
     221              print(text)
     222  
     223          if self.interrupted:
     224              print()
     225              print("Test suite interrupted by signal SIGINT.")
     226  
     227      def display_summary(self, first_runtests: RunTests, filtered: bool):
     228          # Total tests
     229          stats = self.stats
     230          text = f'run={stats.tests_run:,}'
     231          if filtered:
     232              text = f"{text} (filtered)"
     233          report = [text]
     234          if stats.failures:
     235              report.append(f'failures={stats.failures:,}')
     236          if stats.skipped:
     237              report.append(f'skipped={stats.skipped:,}')
     238          print(f"Total tests: {' '.join(report)}")
     239  
     240          # Total test files
     241          all_tests = [self.good, self.bad, self.rerun,
     242                       self.skipped,
     243                       self.env_changed, self.run_no_tests]
     244          run = sum(map(len, all_tests))
     245          text = f'run={run}'
     246          if not first_runtests.forever:
     247              ntest = len(first_runtests.tests)
     248              text = f"{text}/{ntest}"
     249          if filtered:
     250              text = f"{text} (filtered)"
     251          report = [text]
     252          for name, tests in (
     253              ('failed', self.bad),
     254              ('env_changed', self.env_changed),
     255              ('skipped', self.skipped),
     256              ('resource_denied', self.resource_denied),
     257              ('rerun', self.rerun),
     258              ('run_no_tests', self.run_no_tests),
     259          ):
     260              if tests:
     261                  report.append(f'{name}={len(tests)}')
     262          print(f"Total test files: {' '.join(report)}")