diff --git a/pkgs/sagemath-repl/MANIFEST.in b/pkgs/sagemath-repl/MANIFEST.in index ba331ec2931..0d9a0289491 100644 --- a/pkgs/sagemath-repl/MANIFEST.in +++ b/pkgs/sagemath-repl/MANIFEST.in @@ -7,6 +7,12 @@ include sage/misc/sagedoc.py include sage/misc/sage_input.py include sage/misc/sage_eval.py +# expect_objects from sage.interfaces.quit is needed to compute the +# CPU time used by each doctest. We include sage.interfaces.cleaner +# because it is conditionally imported by sage.interfaces.quit. +include sage/interfaces/quit.py +include sage/interfaces/cleaner.py + include VERSION.txt global-exclude all__*.py diff --git a/pkgs/sagemath-repl/pyproject.toml.m4 b/pkgs/sagemath-repl/pyproject.toml.m4 index 459a599f1fb..f0ed5af473c 100644 --- a/pkgs/sagemath-repl/pyproject.toml.m4 +++ b/pkgs/sagemath-repl/pyproject.toml.m4 @@ -42,6 +42,7 @@ py-modules = [ ] packages = [ "sage.doctest", + "sage.interfaces", "sage.repl", "sage.repl.display", "sage.repl.ipython_kernel", diff --git a/src/bin/sage b/src/bin/sage index df4ff198426..d102056b6c6 100755 --- a/src/bin/sage +++ b/src/bin/sage @@ -428,7 +428,7 @@ usage_advanced() { echo " --initial -- only show the first failure per block" echo " --debug -- drop into PDB after an unexpected error" echo " --failed -- only test files that failed last test" - echo " --warn-long [timeout] -- warning if doctest is slow" + echo " --warn-long [timeout] -- warn if tests take too much CPU time" echo " --only-errors -- only output failures, not successes" echo " --gc=GC -- control garbarge collection (ALWAYS:" echo " collect garbage before every test; NEVER:" diff --git a/src/doc/en/developer/doctesting.rst b/src/doc/en/developer/doctesting.rst index 75cc659934f..71145b8082a 100644 --- a/src/doc/en/developer/doctesting.rst +++ b/src/doc/en/developer/doctesting.rst @@ -745,10 +745,10 @@ In order to run the long tests as well, do the following:: cpu time: 25.2 seconds cumulative wall time: 34.7 seconds -To find tests that take longer than the allowed time use the -``--warn-long`` flag. Without any options it will cause tests to -print a warning if they take longer than 1.0 second. Note that this is -a warning, not an error:: +To find tests that take longer than a specified amount of CPU time, +use the ``--warn-long`` flag. Without any options, it will cause a +warning to be printed if any tests take longer than one +cpu-second. Note that this is a warning, not an error:: [roed@localhost sage]$ ./sage -t --warn-long src/sage/rings/factorint.pyx Running doctests with ID 2012-07-14-03-27-03-2c952ac1. 
@@ -758,22 +758,22 @@ a warning, not an error:: File "src/sage/rings/factorint.pyx", line 125, in sage.rings.factorint.base_exponent Failed example: base_exponent(-4) - Test ran for 4.09 s + Test ran for 4.09 cpu seconds ********************************************************************** File "src/sage/rings/factorint.pyx", line 153, in sage.rings.factorint.factor_aurifeuillian Failed example: fa(2^6+1) - Test ran for 2.22 s + Test ran for 2.22 cpu seconds ********************************************************************** File "src/sage/rings/factorint.pyx", line 155, in sage.rings.factorint.factor_aurifeuillian Failed example: fa(2^58+1) - Test ran for 2.22 s + Test ran for 2.22 cpu seconds ********************************************************************** File "src/sage/rings/factorint.pyx", line 163, in sage.rings.factorint.factor_aurifeuillian Failed example: fa(2^4+1) - Test ran for 2.25 s + Test ran for 2.25 cpu seconds ********************************************************************** ---------------------------------------------------------------------- All tests passed! @@ -792,12 +792,12 @@ You can also pass in an explicit amount of time:: File "tests.py", line 240, in sage.rings.tests.test_random_elements Failed example: sage.rings.tests.test_random_elements(trials=1000) # long time (5 seconds) - Test ran for 13.36 s + Test ran for 13.36 cpu seconds ********************************************************************** File "tests.py", line 283, in sage.rings.tests.test_random_arith Failed example: sage.rings.tests.test_random_arith(trials=1000) # long time (5 seconds?) - Test ran for 12.42 s + Test ran for 12.42 cpu seconds ********************************************************************** ---------------------------------------------------------------------- All tests passed! diff --git a/src/sage/doctest/__main__.py b/src/sage/doctest/__main__.py index 9c1c7931325..1af1b22ce1d 100644 --- a/src/sage/doctest/__main__.py +++ b/src/sage/doctest/__main__.py @@ -64,7 +64,7 @@ def _make_parser(): help="run as many doctests as possible in about 300 seconds (or the number of seconds given as an optional argument)") parser.add_argument("--warn-long", dest="warn_long", nargs='?', type=float, default=-1.0, const=1.0, metavar="SECONDS", - help="warn if tests take more time than SECONDS") + help="warn if tests take more CPU time than SECONDS") # By default, include all tests marked 'sagemath_doc_html' -- see # https://github.com/sagemath/sage/issues/25345 and # https://github.com/sagemath/sage/issues/26110: diff --git a/src/sage/doctest/control.py b/src/sage/doctest/control.py index e6fcc29a61b..ac5cfe04958 100644 --- a/src/sage/doctest/control.py +++ b/src/sage/doctest/control.py @@ -588,36 +588,38 @@ def __del__(self): def _init_warn_long(self): """ - Pick a suitable default for the ``--warn-long`` option if not specified. + Pick a suitable default for the ``--warn-long`` option if not + specified. It is desirable to have all tests (even ``# long`` ones) finish in less than about 5 seconds. Longer tests typically don't add coverage, they just make testing slow. - The default used here is 60 seconds on a modern computer. It - should eventually be lowered to 5 seconds, but its best to - boil the frog slowly. + The default used here is 5 seconds, unless `--long` was used, + in which case it is 30 seconds. - The stored timings are used to adjust this limit according to - the machine running the tests. 
+ TESTS: - EXAMPLES:: + Ensure that the user's command-line options are not changed:: - sage: from sage.doctest.control import DocTestDefaults, DocTestController + sage: from sage.doctest.control import (DocTestDefaults, + ....: DocTestController) sage: DC = DocTestController(DocTestDefaults(), []) sage: DC.options.warn_long = 5.0 sage: DC._init_warn_long() - sage: DC.options.warn_long # existing command-line options are not changed + sage: DC.options.warn_long 5.00000000000000 """ # default is -1.0 if self.options.warn_long >= 0: # Specified on the command line return - try: - self.options.warn_long = 60.0 * self.second_on_modern_computer() - except RuntimeError as err: - if not sage.doctest.DOCTEST_MODE: - print(err) # No usable timing information + + # The developer's guide says that even a "long time" test + # should ideally complete in under five seconds, so we're + # being rather generous here. + self.options.warn_long = 5.0 + if self.options.long: + self.options.warn_long = 30.0 def second_on_modern_computer(self): """ @@ -637,6 +639,9 @@ def second_on_modern_computer(self): sage: DC = DocTestController(DocTestDefaults(), []) sage: DC.second_on_modern_computer() # not tested """ + from sage.misc.superseded import deprecation + deprecation(32981, "this method is no longer used by the sage library and will eventually be removed") + if len(self.stats) == 0: raise RuntimeError('no stored timings available') success = [] @@ -1152,7 +1157,7 @@ def run_doctests(self): sage: DC.run_doctests() Doctesting 1 file. sage -t .../sage/rings/homset.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1229,7 +1234,7 @@ def cleanup(self, final=True): Running doctests with ID ... Doctesting 1 file. sage -t .../rings/all.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1433,7 +1438,7 @@ def run(self): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/sets/non_negative_integers.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1457,7 +1462,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [0 tests, ... s] + [0 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1483,7 +1488,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [4 tests, ... s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1501,7 +1506,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [4 tests, ... s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1519,7 +1524,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [4 tests, ... 
s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1627,7 +1632,7 @@ def run_doctests(module, options=None): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/rings/all.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- diff --git a/src/sage/doctest/forker.py b/src/sage/doctest/forker.py index dfe57f59b0b..9d86c314cb1 100644 --- a/src/sage/doctest/forker.py +++ b/src/sage/doctest/forker.py @@ -709,27 +709,19 @@ def compiler(example): elif self.options.gc < 0: gc.disable() + from cysignals.signals import SignalError try: # Don't blink! This is where the user's code gets run. self.compile_and_execute(example, compiler, test.globs) - except SystemExit: + except (SignalError, SystemExit): + # Tests can be killed by signals in unexpected places. raise except BaseException: exception = sys.exc_info() - # On Python 2, the exception lives in sys.exc_info() as - # long we are in the same stack frame. To ensure that - # sig_occurred() works correctly, we need to clear the - # exception. This is not an issue on Python 3, where the - # exception is cleared as soon as we are outside of the - # "except" clause. - try: - sys.exc_clear() - except AttributeError: - pass # Python 3 finally: if self.debugger is not None: self.debugger.set_continue() # ==== Example Finished ==== - check_starttime = walltime() + check_timer = Timer().start() got = self._fakeout.getvalue() outcome = FAILURE # guilty until proved innocent or insane @@ -803,22 +795,22 @@ def compiler(example): f"and it succeeded (raised an exception as expected).") outcome = SUCCESS - check_duration = walltime(check_starttime) - self.total_walltime += example.walltime + check_duration + check_timer.stop() + self.total_walltime += example.walltime + check_timer.walltime # Report the outcome. if example.warnings: for warning in example.warnings: out(self._failure_header(test, example, f'Warning: {warning}')) if outcome is SUCCESS: - if self.options.warn_long > 0 and example.walltime + check_duration > self.options.warn_long: + if self.options.warn_long > 0 and example.cputime + check_timer.cputime > self.options.warn_long: self.report_overtime(out, test, example, got, - check_duration=check_duration) + check_timer=check_timer) elif example.warnings: pass elif not quiet: self.report_success(out, test, example, got, - check_duration=check_duration) + check_timer=check_timer) elif probed_tags: pass elif outcome is FAILURE: @@ -1326,7 +1318,7 @@ def report_start(self, out, test, example): start_txt += 'Expecting nothing\n' out(start_txt) - def report_success(self, out, test, example, got, *, check_duration=0): + def report_success(self, out, test, example, got, *, check_timer=None): """ Called when an example succeeds. 
@@ -1340,8 +1332,9 @@ def report_success(self, out, test, example, got, *, check_duration=0): - ``got`` -- string; the result of running ``example`` - - ``check_duration`` -- number (default: ``0``); time spent for checking - the test output + - ``check_timer`` -- a :class:`sage.doctest.util.Timer` (default: + ``None``) that measures the time spent checking whether or not + the output was correct OUTPUT: prints a report to ``out``; if in debugging mode, starts an IPython prompt at the point of the failure @@ -1352,20 +1345,27 @@ def report_success(self, out, test, example, got, *, check_duration=0): sage: from sage.doctest.forker import SageDocTestRunner sage: from sage.doctest.sources import FileDocTestSource sage: from sage.doctest.control import DocTestDefaults; DD = DocTestDefaults() - sage: from sage.misc.timing import walltime + sage: from sage.doctest.util import Timer sage: import doctest, sys, os sage: DTR = SageDocTestRunner(SageOutputChecker(), verbose=True, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS) sage: filename = sage.doctest.forker.__file__ sage: FDS = FileDocTestSource(filename, DD) sage: doctests, extras = FDS.create_doctests(globals()) sage: ex = doctests[0].examples[0] - sage: ex.walltime = 0.0r - sage: DTR.report_success(sys.stdout.write, doctests[0], ex, '1764') - ok [0.00 s] + sage: ex.cputime = 1.01 + sage: ex.walltime = 1.12 + sage: check = Timer() + sage: check.cputime = 2.14 + sage: check.walltime = 2.71 + sage: DTR.report_success(sys.stdout.write, doctests[0], ex, '1764', + ....: check_timer=check) + ok [3.83s wall] """ - # We completely replace doctest.DocTestRunner.report_success so that we can include time taken for the test + # We completely replace doctest.DocTestRunner.report_success + # so that we can include time taken for the test if self._verbose: - out("ok [%.2f s]\n" % (example.walltime + check_duration)) + out("ok [%.2fs wall]\n" % + (example.walltime + check_timer.walltime)) def report_failure(self, out, test, example, got, globs): r""" @@ -1495,7 +1495,7 @@ def report_failure(self, out, test, example, got, globs): self._fakeout.start_spoofing() return returnval - def report_overtime(self, out, test, example, got, *, check_duration=0): + def report_overtime(self, out, test, example, got, *, check_timer=None): r""" Called when the ``warn_long`` option flag is set and a doctest runs longer than the specified time. 
@@ -1510,8 +1510,9 @@ def report_overtime(self, out, test, example, got, *, check_duration=0): - ``got`` -- string; the result of running ``example`` - - ``check_duration`` -- number (default: ``0``); time spent for checking - the test output + - ``check_timer`` -- a :class:`sage.doctest.util.Timer` (default: + ``None``) that measures the time spent checking whether or not + the output was correct OUTPUT: prints a report to ``out`` @@ -1521,24 +1522,32 @@ def report_overtime(self, out, test, example, got, *, check_duration=0): sage: from sage.doctest.forker import SageDocTestRunner sage: from sage.doctest.sources import FileDocTestSource sage: from sage.doctest.control import DocTestDefaults; DD = DocTestDefaults() - sage: from sage.misc.timing import walltime + sage: from sage.doctest.util import Timer sage: import doctest, sys, os sage: DTR = SageDocTestRunner(SageOutputChecker(), verbose=True, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS) sage: filename = sage.doctest.forker.__file__ sage: FDS = FileDocTestSource(filename, DD) sage: doctests, extras = FDS.create_doctests(globals()) sage: ex = doctests[0].examples[0] - sage: ex.walltime = 1.23r - sage: DTR.report_overtime(sys.stdout.write, doctests[0], ex, 'BAD ANSWER\n', check_duration=2.34r) + sage: ex.cputime = 1.23 + sage: ex.walltime = 2.50 + sage: check = Timer() + sage: check.cputime = 2.34 + sage: check.walltime = 3.12 + sage: DTR.report_overtime(sys.stdout.write, doctests[0], ex, 'BAD ANSWER\n', check_timer=check) ********************************************************************** File ".../sage/doctest/forker.py", line 12, in sage.doctest.forker - Warning, slow doctest: + Warning: slow doctest: doctest_var = 42; doctest_var^2 - Test ran for 1.23 s, check ran for 2.34 s + Test ran for 1.23s cpu, 2.50s wall + Check ran for 2.34s cpu, 3.12s wall """ - out(self._failure_header(test, example, 'Warning, slow doctest:') + - ('Test ran for %.2f s, check ran for %.2f s\n' - % (example.walltime, check_duration))) + out(self._failure_header(test, example, 'Warning: slow doctest:') + + ('Test ran for %.2fs cpu, %.2fs wall\nCheck ran for %.2fs cpu, %.2fs wall\n' + % (example.cputime, + example.walltime, + check_timer.cputime, + check_timer.walltime))) def report_unexpected_exception(self, out, test, example, exc_info): r""" @@ -1741,9 +1750,9 @@ def serial_dispatch(self): sage: DC.timer = Timer().start() sage: DD.serial_dispatch() sage -t .../rings/homset.py - [... tests, ... s] + [... tests, ...s wall] sage -t .../rings/ideal.py - [... tests, ... s] + [... tests, ...s wall] """ for source in self.controller.sources: heading = self.controller.reporter.report_head(source) @@ -1787,9 +1796,9 @@ def parallel_dispatch(self): sage: DC.timer = Timer().start() sage: DD.parallel_dispatch() sage -t .../databases/cremona.py - [... tests, ... s] + [... tests, ...s wall] sage -t .../rings/big_oh.py - [... tests, ... s] + [... tests, ...s wall] If the ``exitfirst=True`` option is given, the results for a failing module will be immediately printed and any other ongoing tests @@ -1824,7 +1833,7 @@ def parallel_dispatch(self): ********************************************************************** 1 item had failures: 1 of 1 in ... - [1 test, 1 failure, ... s] + [1 test, 1 failure, ...s wall] Killing test ... """ opt = self.controller.options @@ -2118,9 +2127,9 @@ def dispatch(self): sage: DC.timer = Timer().start() sage: DD.dispatch() sage -t .../sage/modules/free_module_homspace.py - [... tests, ... s] + [... 
tests, ...s wall] sage -t .../sage/rings/big_oh.py - [... tests, ... s] + [... tests, ...s wall] """ if self.controller.options.serial: self.serial_dispatch() @@ -2170,7 +2179,7 @@ class should be accessed by the child process. sage: W.join() # Wait for worker to finish sage: result = W.result_queue.get() sage: reporter.report(FDS, False, W.exitcode, result, "") - [... tests, ... s] + [... tests, ...s wall] """ def __init__(self, source, options, funclist=[], baseline=None): """ @@ -2182,7 +2191,7 @@ def __init__(self, source, options, funclist=[], baseline=None): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/rings/big_oh.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -2229,7 +2238,7 @@ def run(self): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/symbolic/units.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- diff --git a/src/sage/doctest/reporting.py b/src/sage/doctest/reporting.py index d72fd18023b..e6bfd52bf33 100644 --- a/src/sage/doctest/reporting.py +++ b/src/sage/doctest/reporting.py @@ -403,7 +403,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 0 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Good tests") - [... tests, ... s] + [... tests, ...s wall] sage: DTR.stats {'sage.doctest.reporting': {'ntests': ..., 'walltime': ...}} @@ -414,7 +414,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Doctest output including the failure...") - [... tests, 1 failure, ... s] + [... tests, 1 failure, ...s wall] If the user has requested that we report on skipped doctests, we do so:: @@ -433,7 +433,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 5 magma tests not run 2 not tested tests not run 0 tests not run because we ran out of time - [... tests, ... s] + [... tests, ...s wall] Test an internal error in the reporter:: @@ -466,7 +466,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Failed test") - [... tests, 1 failure, ... s] + [... tests, 1 failure, ...s wall] """ log = self.controller.log process_name = 'process (pid={0})'.format(pid) if pid else 'process' @@ -625,7 +625,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): else: total = count_noun(ntests, "test") if not (self.controller.options.only_errors and not f): - log(" [%s, %s%.2f s]" % (total, "%s, " % (count_noun(f, "failure")) if f else "", wall)) + log(" [%s, %s%.2fs wall]" % (total, "%s, " % (count_noun(f, "failure")) if f else "", wall)) self.sources_completed += 1 @@ -680,13 +680,13 @@ def finalize(self): 0 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Good tests") - [... tests, ... s] + [... tests, ...s wall] sage: runner.failures = 1 sage: runner.update_results(D) 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Doctest output including the failure...") - [... tests, 1 failure, ... s] + [... 
tests, 1 failure, ...s wall] Now we can show the output of finalize:: diff --git a/src/sage/doctest/test.py b/src/sage/doctest/test.py index 2819b782a66..a6aa893bb22 100644 --- a/src/sage/doctest/test.py +++ b/src/sage/doctest/test.py @@ -29,7 +29,7 @@ Running doctests... Doctesting 1 file. sage -t --warn-long 0.0 --random-seed=0 longtime.rst - [0 tests, ...s] + [0 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -40,7 +40,7 @@ Running doctests... Doctesting 1 file. sage -t --long --warn-long 0.0 --random-seed=0 longtime.rst - [1 test, ...s] + [1 test, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -442,7 +442,7 @@ Running doctests... Doctesting 1 file... sage -t... 1second.rst... - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -471,7 +471,7 @@ 1 long test not run 1 not tested test not run 0 tests not run because we ran out of time - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -488,7 +488,7 @@ 2 tests not run due to known bugs 1 not tested test not run 0 tests not run because we ran out of time - [4 tests, ... s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -504,7 +504,7 @@ 1 not tested test not run 2 sage tests not run 0 tests not run because we ran out of time - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -533,7 +533,7 @@ Running doctests... Doctesting 1 file. sage -t --warn-long 0.0 --random-seed=0 atexit.rst - [3 tests, ... s] + [3 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -564,7 +564,7 @@ ********************************************************************** 1 item had failures: 1 of 2 in sage.doctest.tests.random_seed - [1 test, 1 failure, ...s] + [1 test, 1 failure, ...s wall] ---------------------------------------------------------------------- sage -t --warn-long 0.0 --random-seed=0 random_seed.rst # 1 doctest failed ---------------------------------------------------------------------- @@ -575,7 +575,7 @@ Running doctests... Doctesting 1 file. sage -t --warn-long 0.0 --random-seed=1 random_seed.rst - [1 test, ...s] + [1 test, ...s wall] ---------------------------------------------------------------------- All tests passed! 
     ----------------------------------------------------------------------
diff --git a/src/sage/doctest/util.py b/src/sage/doctest/util.py
index 6bffb67156b..ed831598e65 100644
--- a/src/sage/doctest/util.py
+++ b/src/sage/doctest/util.py
@@ -23,8 +23,8 @@
 #                  https://www.gnu.org/licenses/
 # ****************************************************************************
 
-from sage.misc.timing import walltime, cputime
-
+from time import time as walltime
+from os import sysconf, times
 
 def count_noun(number, noun, plural=None, pad_number=False, pad_noun=False):
     """
@@ -104,6 +104,219 @@ class Timer:
         {}
         sage: TestSuite(Timer()).run()
     """
+
+    def _proc_stat_cpu_seconds(self, path):
+        r"""
+        Parse a "stat" file from the ``/proc`` filesystem to get
+        the cputime of a process.
+
+        This also includes the times for child processes, but only
+        those that have already terminated and for which ``wait()``
+        was called. It is important to note that pexpect processes DO
+        NOT fall into that category.
+
+        The document ``Documentation/filesystems/proc.rst`` within the
+        Linux kernel source tree defines a "stat" file.
+
+        INPUT:
+
+        - ``path`` -- string; the path to a "stat" file on the ``/proc``
+          filesystem, typically "/proc/<pid>/stat", from which we will
+          read cputime information
+
+        OUTPUT:
+
+        A nonnegative float representing the number of cpu-seconds
+        used by the process associated with ``path``. An ``OSError`` is
+        raised if anything goes wrong, which typically happens on
+        platforms that don't store this information under ``/proc``.
+
+        TESTS:
+
+        About all we can say for certain is that this will return a
+        nonnegative float or raise an ``OSError``::
+
+            sage: from sage.doctest.util import Timer
+            sage: cputime = float(0.0)
+            sage: path = "/proc/1/stat"
+            sage: try:
+            ....:     cputime = Timer()._proc_stat_cpu_seconds(path)
+            ....: except OSError:
+            ....:     pass
+            sage: cputime >= 0.0
+            True
+            sage: isinstance(cputime, float)
+            True
+
+        We can force an ``OSError`` with an invalid PID::
+
+            sage: from sage.doctest.util import Timer
+            sage: path = "/proc/-1/stat"
+            sage: cputime = Timer()._proc_stat_cpu_seconds(path)
+            Traceback (most recent call last):
+            ...
+            OSError: unable to access /proc/-1/stat
+
+        Or with an unparseable file (wrong number of fields, non-float
+        fields, et cetera)::
+
+            sage: from tempfile import NamedTemporaryFile
+            sage: from os import unlink
+            sage: from sage.doctest.util import Timer
+            sage: with NamedTemporaryFile(delete=False, mode="w") as f:
+            ....:     _ = f.write("1 2 3 4 5")
+            sage: cputime = Timer()._proc_stat_cpu_seconds(f.name)
+            Traceback (most recent call last):
+            ...
+            OSError: unable to parse ...
+            sage: unlink(f.name)
+            sage: with NamedTemporaryFile(delete=False, mode="w") as f:
+            ....:     _ = f.write("1 2 3 4 5 6 7 8 9 10 11 12 w x y z 17")
+            sage: cputime = Timer()._proc_stat_cpu_seconds(f.name)
+            Traceback (most recent call last):
+            ...
+            OSError: unable to parse ...
+            sage: unlink(f.name)
+
+        """
+        try:
+            with open(path, "r") as statfile:
+                stats = statfile.read().split()
+        except (FileNotFoundError, PermissionError) as e:
+            # FileNotFoundError: bad PID, or no /proc support
+            # PermissionError: can't read the stat file
+            raise OSError(f"unable to access {path}") from e
+
+        if len(stats) < 17:
+            raise OSError(f"unable to parse {path}")
+
+        try:
+            # These fields used to be documented in the proc(5) man
+            # page, but are now most easily found in the Linux kernel
+            # documentation (Documentation/filesystems/proc.rst). The
+            # intent is to sum the user- and kernel-mode "jiffies" for
+            # both the given process and its children.
+            cputicks = sum( float(s) for s in stats[13:17] )
+        except (ArithmeticError, TypeError, ValueError) as e:
+            # ArithmeticError: unexpected (non-numeric?) values in fields
+            # TypeError/ValueError: fields can't be converted to float
+            raise OSError(f"unable to parse {path}") from e
+
+        try:
+            hertz = sysconf("SC_CLK_TCK")
+        except (ValueError) as e:
+            # ValueError: SC_CLK_TCK doesn't exist
+            raise OSError("SC_CLK_TCK sysconf not found") from e
+
+        if hertz <= 0:
+            # The Python documentation for os.sysconf() says, "If the
+            # configuration value specified by name isn’t defined, -1
+            # is returned." Having tried this with a junk value, I
+            # don't believe it: I got a ValueError that was handled
+            # above. Nevertheless, we play it safe here and turn a -1
+            # into an OSError. We check for zero, too, because we're
+            # about to divide by it.
+            raise OSError("SC_CLK_TCK sysconf is nonpositive")
+
+        return (cputicks / hertz)
+
+    def _quick_cputime(self, expect_objects):
+        r"""
+        A fast replacement for ``sage.misc.timing.cputime``.
+
+        This is a "reliable" replacement (on Linux/BSD) that takes
+        subprocesses (particularly pexpect interfaces) into
+        account. The ``cputime()`` function from the ``misc`` module
+        can be passed ``subprocesses=True``, but this has a few
+        faults; mainly that it relies on each pexpect interface to
+        implement its own ``cputime()`` function. And most of our
+        pexpect interfaces either don't implement one, or implement
+        one in a way that requires the subprocess (being pexpected) to
+        be in perfect working condition -- that will often not be the
+        case at the end of a doctest line.
+
+        INPUT:
+
+        - ``expect_objects`` -- list; a list of weak references to
+          :class:`sage.interfaces.expect.Expect` instances whose CPU
+          times will be included in the total
+
+        OUTPUT:
+
+        A float measuring the cputime in seconds of the sage process
+        and all its subprocesses.
+
+        TESTS:
+
+        About all we can say for certain is that this will return a
+        nonnegative float::
+
+            sage: from sage.doctest.util import Timer
+            sage: from sage.interfaces.quit import expect_objects
+            sage: cputime = Timer()._quick_cputime(expect_objects)
+            sage: cputime >= 0.0
+            True
+            sage: isinstance(cputime, float)
+            True
+
+        If an error occurs in :meth:`_proc_stat_cpu_seconds`, this
+        function should still return a valid answer, albeit one that
+        is missing timing information for the PID that failed::
+
+            sage: class FakeExpect:
+            ....:     def __call__(self):
+            ....:         return self
+            ....:     def is_running(self):
+            ....:         return True
+            ....:     def pid(self):
+            ....:         return -1
+            sage: e = FakeExpect()
+            sage: from sage.doctest.util import Timer
+            sage: cputime = Timer()._quick_cputime([e])
+            sage: cputime >= 0.0
+            True
+            sage: isinstance(cputime, float)
+            True
+        """
+        # Start by using os.times() to get the cputime for sage itself
+        # and any subprocesses that have been wait()ed for and that
+        # have terminated.
+        cputime = sum( times()[:4] )
+
+        # Now try to get the times for any pexpect interfaces, since
+        # they do not fall into the category above.
+        for s in expect_objects:
+            S = s()
+            if S and S.is_running():
+                try:
+                    # This will fail anywhere but linux/BSD, but
+                    # there's no good cross-platform way to get the
+                    # cputimes from pexpect interfaces without totally
+                    # mucking up the doctests.
+                    path = f"/proc/{S.pid()}/stat"
+                    cputime += self._proc_stat_cpu_seconds(path)
+                except OSError:
+                    # If we're on macOS, we can fall back to using
+                    # psutil, but only if it's installed. It's usually
+                    # installed as a transitive dependency (ipython
+                    # needs it), but it isn't explicitly listed as
+                    # a dependency of sagelib.
+                    try:
+                        from psutil import (NoSuchProcess,
+                                            Process,
+                                            ZombieProcess)
+                        try:
+                            cputime += sum(Process(S.pid()).cpu_times()[0:2])
+                        except (ValueError, NoSuchProcess, ZombieProcess):
+                            # ValueError: invalid (e.g. negative) PID
+                            # NoSuchProcess: it's gone
+                            # ZombieProcess: PID refers to a zombie
+                            pass
+                    except ImportError:
+                        pass
+
+        return cputime
+
     def start(self):
         """
         Start the timer.
@@ -116,7 +329,8 @@ def start(self):
         sage: Timer().start()
         {'cputime': ..., 'walltime': ...}
         """
-        self.cputime = cputime()
+        from sage.interfaces.quit import expect_objects
+        self.cputime = self._quick_cputime(expect_objects)
         self.walltime = walltime()
         return self
 
@@ -134,8 +348,9 @@ def stop(self):
         sage: timer.stop()
         {'cputime': ..., 'walltime': ...}
         """
-        self.cputime = cputime(self.cputime)
-        self.walltime = walltime(self.walltime)
+        from sage.interfaces.quit import expect_objects
+        self.cputime = self._quick_cputime(expect_objects) - self.cputime
+        self.walltime = walltime() - self.walltime
         return self
 
     def annotate(self, object):
diff --git a/src/sage/interfaces/cleaner.py b/src/sage/interfaces/cleaner.py
index 54bfac779ac..f731205b652 100644
--- a/src/sage/interfaces/cleaner.py
+++ b/src/sage/interfaces/cleaner.py
@@ -1,3 +1,4 @@
+# sage_setup: distribution = sagemath-repl
 """
 Interface to the Sage cleaner
 
diff --git a/src/sage/interfaces/quit.py b/src/sage/interfaces/quit.py
index 9392ffe094f..c7c1643be83 100644
--- a/src/sage/interfaces/quit.py
+++ b/src/sage/interfaces/quit.py
@@ -1,3 +1,4 @@
+# sage_setup: distribution = sagemath-repl
 """
 Quitting interfaces
 """
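
For orientation only (this is not part of the patch): a minimal sketch of how the reworked ``Timer`` is meant to be used, assuming a working Sage installation so that ``sage.doctest.util`` is importable. ``start()`` snapshots the CPU and wall clocks, and ``stop()`` replaces those fields with the elapsed amounts, which is where the ``...s wall`` and per-test CPU figures reported above come from.

    # Illustrative sketch, not part of the patch; assumes Sage is installed.
    from sage.doctest.util import Timer

    timer = Timer().start()                     # record current CPU and wall clock
    total = sum(i * i for i in range(10**6))    # stand-in for running one doctest example
    timer.stop()                                # fields now hold elapsed times
    print("ran for %.2fs cpu, %.2fs wall" % (timer.cputime, timer.walltime))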