diff --git a/src/sage/doctest/control.py b/src/sage/doctest/control.py index 7bc371b0efa..89f7cd0ea2e 100644 --- a/src/sage/doctest/control.py +++ b/src/sage/doctest/control.py @@ -1154,7 +1154,7 @@ def run_doctests(self): sage: DC.run_doctests() Doctesting 1 file. sage -t .../sage/rings/homset.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1231,7 +1231,7 @@ def cleanup(self, final=True): Running doctests with ID ... Doctesting 1 file. sage -t .../rings/all.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1435,7 +1435,7 @@ def run(self): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/sets/non_negative_integers.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1459,7 +1459,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [0 tests, ... s] + [0 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1485,7 +1485,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [4 tests, ... s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1503,7 +1503,7 @@ def run(self): Features to be detected: ... Doctesting 1 file. sage -t ....py - [4 tests, ... 
s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -1629,7 +1629,7 @@ def run_doctests(module, options=None): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/rings/all.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- diff --git a/src/sage/doctest/forker.py b/src/sage/doctest/forker.py index 25a9dc729e3..7059a2224f3 100644 --- a/src/sage/doctest/forker.py +++ b/src/sage/doctest/forker.py @@ -1758,9 +1758,9 @@ def serial_dispatch(self): sage: DC.timer = Timer().start() sage: DD.serial_dispatch() sage -t .../rings/homset.py - [... tests, ... s] + [... tests, ...s wall] sage -t .../rings/ideal.py - [... tests, ... s] + [... tests, ...s wall] """ for source in self.controller.sources: heading = self.controller.reporter.report_head(source) @@ -1804,9 +1804,9 @@ def parallel_dispatch(self): sage: DC.timer = Timer().start() sage: DD.parallel_dispatch() sage -t .../databases/cremona.py - [... tests, ... s] + [... tests, ...s wall] sage -t .../rings/big_oh.py - [... tests, ... s] + [... tests, ...s wall] If the ``exitfirst=True`` option is given, the results for a failing module will be immediately printed and any other ongoing tests @@ -1841,7 +1841,7 @@ def parallel_dispatch(self): ********************************************************************** 1 item had failures: 1 of 1 in ... - [1 test, 1 failure, ... s] + [1 test, 1 failure, ...s wall] Killing test ... """ opt = self.controller.options @@ -2135,9 +2135,9 @@ def dispatch(self): sage: DC.timer = Timer().start() sage: DD.dispatch() sage -t .../sage/modules/free_module_homspace.py - [... tests, ... s] + [... tests, ...s wall] sage -t .../sage/rings/big_oh.py - [... tests, ... s] + [... 
tests, ...s wall] """ if self.controller.options.serial: self.serial_dispatch() @@ -2187,7 +2187,7 @@ class should be accessed by the child process. sage: W.join() # Wait for worker to finish sage: result = W.result_queue.get() sage: reporter.report(FDS, False, W.exitcode, result, "") - [... tests, ... s] + [... tests, ...s wall] """ def __init__(self, source, options, funclist=[], baseline=None): """ @@ -2199,7 +2199,7 @@ def __init__(self, source, options, funclist=[], baseline=None): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/rings/big_oh.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -2246,7 +2246,7 @@ def run(self): Running doctests with ID ... Doctesting 1 file. sage -t .../sage/symbolic/units.py - [... tests, ... s] + [... tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- diff --git a/src/sage/doctest/reporting.py b/src/sage/doctest/reporting.py index d72fd18023b..a005e0f2f1b 100644 --- a/src/sage/doctest/reporting.py +++ b/src/sage/doctest/reporting.py @@ -403,7 +403,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 0 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Good tests") - [... tests, ... s] + [... tests, ...s wall] sage: DTR.stats {'sage.doctest.reporting': {'ntests': ..., 'walltime': ...}} @@ -414,7 +414,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Doctest output including the failure...") - [... tests, 1 failure, ... s] + [... 
tests, 1 failure, ...s wall] If the user has requested that we report on skipped doctests, we do so:: @@ -433,7 +433,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 5 magma tests not run 2 not tested tests not run 0 tests not run because we ran out of time - [... tests, ... s] + [... tests, ...s wall] Test an internal error in the reporter:: @@ -466,7 +466,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Failed test") - [... tests, 1 failure, ... s] + [... tests, 1 failure, ...s wall] """ log = self.controller.log process_name = 'process (pid={0})'.format(pid) if pid else 'process' @@ -625,7 +625,7 @@ def report(self, source, timeout, return_code, results, output, pid=None): else: total = count_noun(ntests, "test") if not (self.controller.options.only_errors and not f): - log(" [%s, %s%.2f s]" % (total, "%s, " % (count_noun(f, "failure")) if f else "", wall)) + log(" [%s, %s%.2fs wall]" % (total, "%s, " % (count_noun(f, "failure")) if f else "", wall)) self.sources_completed += 1 @@ -680,13 +680,13 @@ def finalize(self): 0 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Good tests") - [... tests, ... s] + [... tests, ...s wall] sage: runner.failures = 1 sage: runner.update_results(D) 1 sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), ....: "Doctest output including the failure...") - [... tests, 1 failure, ... s] + [... tests, 1 failure, ...s wall] Now we can show the output of finalize:: diff --git a/src/sage/doctest/test.py b/src/sage/doctest/test.py index 2819b782a66..a6aa893bb22 100644 --- a/src/sage/doctest/test.py +++ b/src/sage/doctest/test.py @@ -29,7 +29,7 @@ Running doctests... Doctesting 1 file.
sage -t --warn-long 0.0 --random-seed=0 longtime.rst - [0 tests, ...s] + [0 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -40,7 +40,7 @@ Running doctests... Doctesting 1 file. sage -t --long --warn-long 0.0 --random-seed=0 longtime.rst - [1 test, ...s] + [1 test, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -442,7 +442,7 @@ Running doctests... Doctesting 1 file... sage -t... 1second.rst... - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -471,7 +471,7 @@ 1 long test not run 1 not tested test not run 0 tests not run because we ran out of time - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -488,7 +488,7 @@ 2 tests not run due to known bugs 1 not tested test not run 0 tests not run because we ran out of time - [4 tests, ... s] + [4 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -504,7 +504,7 @@ 1 not tested test not run 2 sage tests not run 0 tests not run because we ran out of time - [2 tests, ... s] + [2 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -533,7 +533,7 @@ Running doctests... Doctesting 1 file. sage -t --warn-long 0.0 --random-seed=0 atexit.rst - [3 tests, ... 
s] + [3 tests, ...s wall] ---------------------------------------------------------------------- All tests passed! ---------------------------------------------------------------------- @@ -564,7 +564,7 @@ ********************************************************************** 1 item had failures: 1 of 2 in sage.doctest.tests.random_seed - [1 test, 1 failure, ...s] + [1 test, 1 failure, ...s wall] ---------------------------------------------------------------------- sage -t --warn-long 0.0 --random-seed=0 random_seed.rst # 1 doctest failed ---------------------------------------------------------------------- @@ -575,7 +575,7 @@ Running doctests... Doctesting 1 file. sage -t --warn-long 0.0 --random-seed=1 random_seed.rst - [1 test, ...s] + [1 test, ...s wall] ---------------------------------------------------------------------- All tests passed! ----------------------------------------------------------------------