diff --git a/.coveragerc b/.coveragerc
index a974b527f2e9..6f8d8121fbfc 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -17,6 +17,7 @@ omit =
cms/djangoapps/contentstore/views/dev.py
cms/djangoapps/*/migrations/*
cms/djangoapps/*/features/*
+ cms/lib/*/migrations/*
lms/debug/*
lms/envs/*
lms/djangoapps/*/migrations/*
@@ -25,6 +26,7 @@ omit =
common/djangoapps/*/migrations/*
openedx/core/djangoapps/*/migrations/*
openedx/core/djangoapps/debug/*
+ openedx/features/*/migrations/*
concurrency=multiprocessing
diff --git a/.gitignore b/.gitignore
index 13e6d07f4f43..c3e36ba7963f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,6 +64,7 @@ conf/locale/messages.mo
.testids/
.noseids
nosetests.xml
+.cache/
.coverage
.coverage.*
coverage.xml
diff --git a/cms/conftest.py b/cms/conftest.py
new file mode 100644
index 000000000000..3b07f67be0f4
--- /dev/null
+++ b/cms/conftest.py
@@ -0,0 +1,44 @@
+"""
+Studio unit test configuration and fixtures.
+
+This module needs to exist because the pytest.ini in the cms package stops
+pytest from looking for the conftest.py module in the parent directory when
+only running cms tests.
+"""
+
+from __future__ import absolute_import, unicode_literals
+
+import importlib
+import os
+import contracts
+import pytest
+
+
+def pytest_configure(config):
+ """
+ Do core setup operations from manage.py before collecting tests.
+ """
+ if config.getoption('help'):
+ return
+ enable_contracts = os.environ.get('ENABLE_CONTRACTS', False)
+ if not enable_contracts:
+ contracts.disable_all()
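+    # Mirror manage.py: run the matching startup module so settings-dependent
+    # initialization happens before pytest collects any tests.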
+ settings_module = os.environ.get('DJANGO_SETTINGS_MODULE')
+ startup_module = 'cms.startup' if settings_module.startswith('cms') else 'lms.startup'
+ startup = importlib.import_module(startup_module)
+ startup.run()
+
+
+@pytest.fixture(autouse=True, scope='function')
+def _django_clear_site_cache():
+ """
+ pytest-django uses this fixture to automatically clear the Site object
+ cache by replacing it with a new dictionary. edx-django-sites-extensions
+ grabs the cache dictionary at startup, and uses that one for all lookups
+ from then on. Our CacheIsolationMixin class tries to clear the cache by
+ grabbing the current dictionary from the site models module and clearing
+ it. Long story short: if you use this all together, neither cache
+ clearing mechanism actually works. So override this fixture to not mess
+ with what has been working for us so far.
+ """
+ pass
diff --git a/cms/pytest.ini b/cms/pytest.ini
new file mode 100644
index 000000000000..c12268a41fb1
--- /dev/null
+++ b/cms/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+DJANGO_SETTINGS_MODULE = cms.envs.test
+addopts = --nomigrations --reuse-db --durations=20 -p no:randomly
+norecursedirs = envs
+python_classes =
+python_files = tests.py test_*.py *_tests.py
diff --git a/common/lib/conftest.py b/common/lib/conftest.py
new file mode 100644
index 000000000000..0295dabcc80c
--- /dev/null
+++ b/common/lib/conftest.py
@@ -0,0 +1,8 @@
+from django.conf import settings
+
+
+def pytest_configure():
+ """
+ Use Django's default settings for tests in common/lib.
+ """
+ settings.configure()
diff --git a/common/lib/pytest.ini b/common/lib/pytest.ini
new file mode 100644
index 000000000000..bcdab5935157
--- /dev/null
+++ b/common/lib/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+addopts = --nomigrations --reuse-db --durations=20
+norecursedirs = .cache
+python_classes =
+python_files = tests.py test_*.py tests_*.py *_tests.py __init__.py
diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py
index 9b036f23a407..42835c422233 100644
--- a/common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py
+++ b/common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py
@@ -13,12 +13,6 @@
import ddt
from contracts import contract
from nose.plugins.attrib import attr
-# For the cache tests to work, we need to be using the Django default
-# settings (not our usual cms or lms test settings) and they need to
-# be configured before importing from django.core.cache
-from django.conf import settings
-if not settings.configured:
- settings.configure()
from django.core.cache import caches, InvalidCacheBackendError
from openedx.core.lib import tempdir
diff --git a/common/test/pytest.ini b/common/test/pytest.ini
new file mode 100644
index 000000000000..d8172d2726fd
--- /dev/null
+++ b/common/test/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+addopts = -p no:randomly --durations=20
+norecursedirs = .cache
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 000000000000..02b08cb1419b
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,10 @@
+"""
+Default unit test configuration and fixtures.
+"""
+
+from __future__ import absolute_import, unicode_literals
+
+# Import hooks and fixture overrides from the cms package to
+# avoid duplicating the implementation
+
+from cms.conftest import _django_clear_site_cache, pytest_configure # pylint: disable=unused-import
diff --git a/docs/testing.rst b/docs/testing.rst
index a97c42d96018..c0c978c18a9f 100644
--- a/docs/testing.rst
+++ b/docs/testing.rst
@@ -130,14 +130,15 @@ however, run any acceptance tests.
Note -
`paver` is a scripting tool. To get information about various options, you can run this command.
+
::
- paver -h
+
+ paver -h
+
Running Python Unit tests
-------------------------
-We use `nose `__ through the
-`django-nose plugin `__ to run
-the test suite.
+We use `pytest `__ to run the test suite.
For example, this command runs all the python test scripts.
@@ -194,7 +195,7 @@ To run a single django test class use this command.
::
- paver test_system -t lms/djangoapps/courseware/tests/tests.py:ActivateLoginTest
+ paver test_system -t lms/djangoapps/courseware/tests/tests.py::ActivateLoginTest
When developing tests, it is often helpful to be able to really just run
one single test without the overhead of PIP installs, UX builds, etc. In
@@ -204,23 +205,23 @@ the time of this writing, the command is the following.
::
- python ./manage.py lms test --verbosity=1 lms/djangoapps/courseware/tests/test_courses.py --traceback --settings=test
+ pytest lms/djangoapps/courseware/tests/test_courses.py
To run a single test, format the command like this.
::
- paver test_system -t lms/djangoapps/courseware/tests/tests.py:ActivateLoginTest.test_activate_login
+ paver test_system -t lms/djangoapps/courseware/tests/tests.py::ActivateLoginTest::test_activate_login
-The ``lms`` suite of tests runs with randomized order, by default.
-You can override these by using ``--no-randomize`` to disable randomization.
+You can use ``--randomize`` to randomize the test case sequence. In the
+short term, this is likely to reveal bugs in our test setup and teardown;
+please fix (or at least file tickets for) any such issues you encounter.
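+
+For example, to run the lms unit tests in a random order::
+
+    paver test_system -s lms --randomize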
You can also enable test concurrency with the ``--processes=N`` flag (where ``N``
is the number of processes to run tests with, and ``-1`` means one process per
available core). Note, however, that when running concurrently, breakpoints may
-not work correctly, and you will not be able to run single test methods (only
-single test classes).
+not work correctly.
For example:
@@ -239,44 +240,44 @@ To re-run all failing django tests from lms or cms, use the
paver test_system -s lms --failed
paver test_system -s cms --failed
-There is also a ``--fail_fast``, ``-x`` option that will stop nosetests
+There is also a ``--exitfirst``, ``-x`` option that will stop pytest
after the first failure.
common/lib tests are tested with the ``test_lib`` task, which also
-accepts the ``--failed`` and ``--fail_fast`` options.
+accepts the ``--failed`` and ``--exitfirst`` options.
::
paver test_lib -l common/lib/calc
paver test_lib -l common/lib/xmodule --failed
-For example, this command runs a single nose test file.
+For example, this command runs a single python unit test file.
::
- nosetests common/lib/xmodule/xmodule/tests/test_stringify.py
+ pytest common/lib/xmodule/xmodule/tests/test_stringify.py
-This command runs a single nose test within a specified file.
+This command runs a single python unit test within a specified file.
::
- nosetests common/lib/xmodule/xmodule/tests/test_stringify.py:test_stringify
+ pytest common/lib/xmodule/xmodule/tests/test_stringify.py::test_stringify
-This is an example of how to run a single test and get stdout, with proper env config.
+This is an example of how to run a single test and get stdout shown immediately, with proper env config.
::
- python manage.py cms --settings test test contentstore.tests.test_import_nostatic -s
+ pytest cms/djangoapps/contentstore/tests/test_import.py -s
-These are examples of how to run a single test and get stdout and get coverage.
+These are examples of how to run a single test and get coverage.
::
- python -m coverage run which ./manage.py cms --settings test test --traceback --logging-clear-handlers --liveserver=localhost:8000-9000 contentstore.tests.test_import_nostatic -s # cms example
- python -m coverage run which ./manage.py lms --settings test test --traceback --logging-clear-handlers --liveserver=localhost:8000-9000 courseware.tests.test_module_render -s # lms example
+ pytest cms/djangoapps/contentstore/tests/test_import.py --cov # cms example
+ pytest lms/djangoapps/courseware/tests/test_module_render.py --cov # lms example
-Use this command to generate coverage report.
+Use this command to generate a coverage report.
::
@@ -297,31 +298,32 @@ you can run one of these commands.
::
paver test_system -s cms -t common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
- python -m coverage run `which ./manage.py` cms --settings test test --traceback common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
+ pytest common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
Very handy: if you pass the ``--pdb`` flag to a paver test function, or
uncomment the ``pdb=1`` line in ``setup.cfg``, the test runner
will drop you into pdb on error. This lets you go up and down the stack
and see what the values of the variables are. Check out `the pdb
-documentation `__
+documentation `__. Note that this
+only works if you aren't collecting coverage statistics (pdb and coverage.py
+use the same mechanism to trace code execution).
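+
+For example, to drop into pdb on failure while skipping coverage collection
+(``--no-cov`` is provided by the pytest-cov plugin)::
+
+    pytest lms/djangoapps/courseware/tests/test_courses.py --pdb --no-cov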
Use this command to put a temporary debugging breakpoint in a test.
If you check this in, your tests will hang on jenkins.
::
- from nose.tools import set_trace; set_trace()
-
+ import pdb; pdb.set_trace()
-Note: More on the ``--failed`` functionality
+Note: More on the ``--failed`` functionality:
* In order to use this, you must run the tests first. If you haven't already
run the tests, or if no tests failed in the previous run, then using the
``--failed`` switch will result in **all** of the tests being run. See more
- about this in the `nose documentation
- `__.
+ about this in the `pytest documentation
+ `__.
-* Note that ``paver test_python`` calls nosetests separately for cms and lms.
+* Note that ``paver test_python`` calls pytest separately for cms and lms.
This means that if tests failed only in lms on the previous run, then calling
``paver test_python --failed`` will run **all of the tests for cms** in
addition to the previously failing lms tests. If you want it to run only the
@@ -501,7 +503,7 @@ To run all the bok choy accessibility tests use this command.
paver test_a11y
-To run specific tests, use the ``-t`` flag to specify a nose-style test spec
+To run specific tests, use the ``-t`` flag to specify a pytest-style test spec
relative to the ``common/test/acceptance/tests`` directory. This is an example for it.
::
@@ -565,7 +567,7 @@ Note if setup has already been done, you can run::
You must run BOTH `--testsonly` and `--fasttest`.
3. When done, you can kill your servers in the first terminal/ssh session with
-Control-C. *Warning*: Only hit Control-C one time so the nose test framework can
+Control-C. *Warning*: Only hit Control-C one time so the pytest framework can
properly clean up.
Running Lettuce Acceptance Tests
@@ -644,7 +646,7 @@ Running Tests on Paver Scripts
To run tests on the scripts that power the various Paver commands, use the following command::
- nosetests pavelib
+ pytest pavelib
Testing internationalization with dummy translations
@@ -814,7 +816,7 @@ To view JavaScript code style quality run this command.
::
- paver run_eslint --limit=50000
+ paver run_eslint --limit=50000
diff --git a/lms/djangoapps/certificates/tests/test_api.py b/lms/djangoapps/certificates/tests/test_api.py
index e583fe105f87..308af1faa356 100644
--- a/lms/djangoapps/certificates/tests/test_api.py
+++ b/lms/djangoapps/certificates/tests/test_api.py
@@ -207,16 +207,17 @@ def test_with_downloadable_web_cert(self):
)
@ddt.data(
- (False, datetime.now(pytz.UTC) + timedelta(days=2), False),
- (False, datetime.now(pytz.UTC) - timedelta(days=2), True),
- (True, datetime.now(pytz.UTC) + timedelta(days=2), True)
+ (False, timedelta(days=2), False),
+ (False, -timedelta(days=2), True),
+ (True, timedelta(days=2), True)
)
@ddt.unpack
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
- def test_cert_api_return(self, self_paced, cert_avail_date, cert_downloadable_status):
+ def test_cert_api_return(self, self_paced, cert_avail_delta, cert_downloadable_status):
"""
Test 'downloadable status'
"""
+ cert_avail_date = datetime.now(pytz.UTC) + cert_avail_delta
self.course.self_paced = self_paced
self.course.certificate_available_date = cert_avail_date
self.course.save()
diff --git a/lms/djangoapps/certificates/tests/test_tasks.py b/lms/djangoapps/certificates/tests/test_tasks.py
index 2538cf97e0f7..ce9d4c61e9d9 100644
--- a/lms/djangoapps/certificates/tests/test_tasks.py
+++ b/lms/djangoapps/certificates/tests/test_tasks.py
@@ -1,9 +1,7 @@
-from unittest import TestCase
-
import ddt
+from django.test import TestCase
from mock import call, patch
from opaque_keys.edx.keys import CourseKey
-from nose.tools import assert_true
from lms.djangoapps.certificates.tasks import generate_certificate
from student.tests.factories import UserFactory
diff --git a/lms/djangoapps/courseware/field_overrides.py b/lms/djangoapps/courseware/field_overrides.py
index a14ad18fce66..5b580cd95a40 100644
--- a/lms/djangoapps/courseware/field_overrides.py
+++ b/lms/djangoapps/courseware/field_overrides.py
@@ -257,6 +257,7 @@ def default(self, block, name):
class OverrideModulestoreFieldData(OverrideFieldData):
"""Apply field data overrides at the modulestore level. No student context required."""
+ provider_classes = None
@classmethod
def wrap(cls, block, field_data): # pylint: disable=arguments-differ
diff --git a/lms/djangoapps/courseware/tests/test_module_render.py b/lms/djangoapps/courseware/tests/test_module_render.py
index 6ed47a59cdf4..43f2b57f2086 100644
--- a/lms/djangoapps/courseware/tests/test_module_render.py
+++ b/lms/djangoapps/courseware/tests/test_module_render.py
@@ -133,6 +133,7 @@ def setUp(self):
Set up the course and user context
"""
super(ModuleRenderTestCase, self).setUp()
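+        # Clear the cached override providers so they are rebuilt from this
+        # test's settings instead of leaking state from a previous test.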
+ OverrideFieldData.provider_classes = None
self.mock_user = UserFactory()
self.mock_user.id = 1
@@ -154,6 +155,10 @@ def setUp(self):
)
)
+ def tearDown(self):
+ OverrideFieldData.provider_classes = None
+ super(ModuleRenderTestCase, self).tearDown()
+
def test_get_module(self):
self.assertEqual(
None,
diff --git a/lms/djangoapps/django_comment_client/tests/test_utils.py b/lms/djangoapps/django_comment_client/tests/test_utils.py
index 1f19c613c223..1d2896368491 100644
--- a/lms/djangoapps/django_comment_client/tests/test_utils.py
+++ b/lms/djangoapps/django_comment_client/tests/test_utils.py
@@ -1707,28 +1707,24 @@ def setUp(self):
# cohorted_user (who is in the cohort but not the verified enrollment track),
# and plain_user (who is neither in the cohort nor the verified enrollment track)
self.group_moderator = UserFactory(username='group_moderator', email='group_moderator@edx.org')
- self.group_moderator.id = 1
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.group_moderator,
mode=verified_coursemode
)
self.verified_user = UserFactory(username='verified', email='verified@edx.org')
- self.verified_user.id = 2
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.verified_user,
mode=verified_coursemode
)
self.cohorted_user = UserFactory(username='cohort', email='cohort@edx.org')
- self.cohorted_user.id = 3
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.cohorted_user,
mode=audit_coursemode
)
self.plain_user = UserFactory(username='plain', email='plain@edx.org')
- self.plain_user.id = 4
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.plain_user,
@@ -1737,7 +1733,7 @@ def setUp(self):
CohortFactory(
course_id=self.course.id,
name='Test Cohort',
- users=[self.verified_user, self.cohorted_user]
+ users=[self.group_moderator, self.cohorted_user]
)
# Give group moderator permissions to group_moderator
diff --git a/lms/djangoapps/grades/tests/test_signals.py b/lms/djangoapps/grades/tests/test_signals.py
index ffc68776901b..63e2a790d753 100644
--- a/lms/djangoapps/grades/tests/test_signals.py
+++ b/lms/djangoapps/grades/tests/test_signals.py
@@ -260,6 +260,11 @@ def test_disconnect_manager_bad_arg(self):
@ddt.ddt
class RecalculateUserGradeSignalsTest(TestCase):
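+    # Look up signals by name so ddt builds stable, readable test ids from
+    # the string keys instead of from the signal objects' reprs.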
+ SIGNALS = {
+ 'COHORT_MEMBERSHIP_UPDATED': COHORT_MEMBERSHIP_UPDATED,
+ 'ENROLLMENT_TRACK_UPDATED': ENROLLMENT_TRACK_UPDATED,
+ }
+
def setUp(self):
super(RecalculateUserGradeSignalsTest, self).setUp()
self.user = UserFactory()
@@ -267,9 +272,10 @@ def setUp(self):
@patch('lms.djangoapps.grades.signals.handlers.CourseGradeFactory.update')
@patch('lms.djangoapps.grades.signals.handlers.CourseGradeFactory.read')
- @ddt.data(*itertools.product((COHORT_MEMBERSHIP_UPDATED, ENROLLMENT_TRACK_UPDATED), (True, False), (True, False)))
+ @ddt.data(*itertools.product(('COHORT_MEMBERSHIP_UPDATED', 'ENROLLMENT_TRACK_UPDATED'),
+ (True, False), (True, False)))
@ddt.unpack
- def test_recalculate_on_signal(self, signal, write_only_if_engaged, has_grade, read_mock, update_mock):
+ def test_recalculate_on_signal(self, signal_name, write_only_if_engaged, has_grade, read_mock, update_mock):
"""
Tests the grades handler for signals that trigger regrading.
The handler should call CourseGradeFactory.update() with the
@@ -279,6 +285,7 @@ def test_recalculate_on_signal(self, signal, write_only_if_engaged, has_grade, r
if not has_grade:
read_mock.return_value = None
with waffle().override(WRITE_ONLY_IF_ENGAGED, active=write_only_if_engaged):
+ signal = self.SIGNALS[signal_name]
signal.send(sender=None, user=self.user, course_key=self.course_key)
if not write_only_if_engaged and not has_grade:
diff --git a/openedx/core/djangoapps/certificates/tests/test_api.py b/openedx/core/djangoapps/certificates/tests/test_api.py
index c378be027b2c..03db4e1df201 100644
--- a/openedx/core/djangoapps/certificates/tests/test_api.py
+++ b/openedx/core/djangoapps/certificates/tests/test_api.py
@@ -1,11 +1,11 @@
from contextlib import contextmanager
from datetime import datetime, timedelta
import itertools
-from unittest import TestCase
import ddt
import pytz
import waffle
+from django.test import TestCase
from course_modes.models import CourseMode
from openedx.core.djangoapps.certificates import api
diff --git a/openedx/core/djangoapps/credit/tests/test_views.py b/openedx/core/djangoapps/credit/tests/test_views.py
index 72cb2a58c313..c6165da6a432 100644
--- a/openedx/core/djangoapps/credit/tests/test_views.py
+++ b/openedx/core/djangoapps/credit/tests/test_views.py
@@ -549,11 +549,15 @@ def test_post_with_invalid_signature(self):
self.assertEqual(response.status_code, 403)
@ddt.data(
- to_timestamp(datetime.datetime.now(pytz.UTC) - datetime.timedelta(0, 60 * 15 + 1)),
+ -datetime.timedelta(0, 60 * 15 + 1),
'invalid'
)
- def test_post_with_invalid_timestamp(self, timestamp):
+ def test_post_with_invalid_timestamp(self, timedelta):
""" Verify HTTP 400 is returned for requests with an invalid timestamp. """
+ if timedelta == 'invalid':
+ timestamp = timedelta
+ else:
+ timestamp = to_timestamp(datetime.datetime.now(pytz.UTC) + timedelta)
request_uuid = self._create_credit_request_and_get_uuid()
response = self._credit_provider_callback(request_uuid, 'approved', timestamp=timestamp)
self.assertEqual(response.status_code, 400)
diff --git a/openedx/core/djangoapps/schedules/management/commands/tests/test_send_recurring_nudge.py b/openedx/core/djangoapps/schedules/management/commands/tests/test_send_recurring_nudge.py
index 7c43f4b7698b..54d5aa082097 100644
--- a/openedx/core/djangoapps/schedules/management/commands/tests/test_send_recurring_nudge.py
+++ b/openedx/core/djangoapps/schedules/management/commands/tests/test_send_recurring_nudge.py
@@ -1,5 +1,6 @@
import datetime
import itertools
+from copy import deepcopy
from unittest import skipUnless
import attr
@@ -201,10 +202,8 @@ def test_multiple_enrollments(self, test_hour, messages_sent, mock_schedule_send
@ddt.data(*itertools.product((1, 10, 100), (3, 10)))
@ddt.unpack
- @override_settings()
def test_templates(self, message_count, day):
- settings.TEMPLATES[0]['OPTIONS']['string_if_invalid'] = "TEMPLATE WARNING - MISSING VARIABLE [%s]"
user = UserFactory.create()
schedules = [
ScheduleFactory.create(
@@ -226,20 +225,23 @@ def test_templates(self, message_count, day):
sent_messages = []
- with patch.object(tasks, '_recurring_nudge_schedule_send') as mock_schedule_send:
- mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args)
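+        # Deep-copy TEMPLATES before mutating it so the invalid-variable marker
+        # stays scoped to this test rather than bleeding into other tests.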
+ templates_override = deepcopy(settings.TEMPLATES)
+ templates_override[0]['OPTIONS']['string_if_invalid'] = "TEMPLATE WARNING - MISSING VARIABLE [%s]"
+ with self.settings(TEMPLATES=templates_override):
+ with patch.object(tasks, '_recurring_nudge_schedule_send') as mock_schedule_send:
+ mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args)
- with self.assertNumQueries(2):
- tasks.recurring_nudge_schedule_hour(
- self.site_config.site.id, day, test_time_str, [schedules[0].enrollment.course.org],
- )
+ with self.assertNumQueries(2):
+ tasks.recurring_nudge_schedule_hour(
+ self.site_config.site.id, day, test_time_str, [schedules[0].enrollment.course.org],
+ )
- self.assertEqual(len(sent_messages), 1)
+ self.assertEqual(len(sent_messages), 1)
- for args in sent_messages:
- tasks._recurring_nudge_schedule_send(*args)
+ for args in sent_messages:
+ tasks._recurring_nudge_schedule_send(*args)
- self.assertEqual(mock_channel.deliver.call_count, 1)
- for (_name, (_msg, email), _kwargs) in mock_channel.deliver.mock_calls:
- for template in attr.astuple(email):
- self.assertNotIn("TEMPLATE WARNING", template)
+ self.assertEqual(mock_channel.deliver.call_count, 1)
+ for (_name, (_msg, email), _kwargs) in mock_channel.deliver.mock_calls:
+ for template in attr.astuple(email):
+ self.assertNotIn("TEMPLATE WARNING", template)
diff --git a/pavelib/paver_tests/test_paver_bok_choy_cmds.py b/pavelib/paver_tests/test_paver_bok_choy_cmds.py
index c04534ad0a36..9a488cc97f86 100644
--- a/pavelib/paver_tests/test_paver_bok_choy_cmds.py
+++ b/pavelib/paver_tests/test_paver_bok_choy_cmds.py
@@ -47,7 +47,6 @@ def _expected_command(self, name, store=None, verify_xss=True):
"-m",
"pytest",
"{}/common/test/acceptance/{}".format(REPO_DIR, name),
- "--durations=20",
"--junitxml={}/reports/bok_choy{}/xunit.xml".format(REPO_DIR, shard_str),
"--verbose",
]
diff --git a/pavelib/tests.py b/pavelib/tests.py
index 772d4215f7cd..c0b06f0b5bfb 100644
--- a/pavelib/tests.py
+++ b/pavelib/tests.py
@@ -30,12 +30,16 @@
("test-id=", "t", "Test id"),
("fail-fast", "x", "Fail suite on first failed test"),
("fasttest", "a", "Run without collectstatic"),
+ make_option(
+ "--eval-attr", dest="eval_attr",
+ help="Only run tests matching given attribute expression."
+ ),
make_option(
'-c', '--cov-args', default='',
help='adds as args to coverage for the test run'
),
('skip-clean', 'C', 'skip cleaning repository before running tests'),
- ('processes=', 'p', 'number of processes to use running tests'),
+ make_option('-p', '--processes', dest='processes', default=0, help='number of processes to use running tests'),
make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'),
make_option('--no-randomize', action='store_false', dest='randomize', help="don't run the tests in a random order"),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
@@ -53,14 +57,6 @@
dest='disable_migrations',
help="Create tables by applying migrations."
),
- ("fail_fast", None, "deprecated in favor of fail-fast"),
- ("test_id=", None, "deprecated in favor of test-id"),
- ('cov_args=', None, 'deprecated in favor of cov-args'),
- make_option(
- "-e", "--extra_args", default="",
- help="deprecated, pass extra options directly in the paver commandline"
- ),
- ('skip_clean', None, 'deprecated in favor of skip-clean'),
], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
@PassthroughTask
@timed
@@ -119,14 +115,6 @@ def test_system(options, passthrough_options):
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
- ('cov_args=', None, 'deprecated in favor of cov-args'),
- make_option(
- '-e', '--extra_args', default='',
- help='deprecated, pass extra options directly in the paver commandline'
- ),
- ("fail_fast", None, "deprecated in favor of fail-fast"),
- ('skip_clean', None, 'deprecated in favor of skip-clean'),
- ("test_id=", None, "deprecated in favor of test-id"),
], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
@PassthroughTask
@timed
@@ -153,8 +141,9 @@ def test_lib(options, passthrough_options):
suites.LibTestSuite(
d,
passthrough_options=passthrough_options,
+ append_coverage=(i != 0),
**options.test_lib
- ) for d in Env.LIB_TEST_DIRS
+ ) for i, d in enumerate(Env.LIB_TEST_DIRS)
]
test_suite = suites.PythonTestSuite(
@@ -186,12 +175,6 @@ def test_lib(options, passthrough_options):
dest='disable_migrations',
help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
),
- ('cov_args=', None, 'deprecated in favor of cov-args'),
- make_option(
- '-e', '--extra_args', default='',
- help='deprecated, pass extra options directly in the paver commandline'
- ),
- ("fail_fast", None, "deprecated in favor of fail-fast"),
])
@PassthroughTask
@timed
@@ -220,11 +203,6 @@ def test_python(options, passthrough_options):
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
- ('cov_args=', None, 'deprecated in favor of cov-args'),
- make_option(
- '-e', '--extra_args', default='',
- help='deprecated, pass extra options directly in the paver commandline'
- ),
])
@PassthroughTask
@timed
@@ -249,7 +227,6 @@ def test(options, passthrough_options):
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
- ("compare_branch=", None, "deprecated in favor of compare-branch"),
])
@timed
def coverage():
@@ -287,7 +264,6 @@ def coverage():
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
- ("compare_branch=", None, "deprecated in favor of compare-branch"),
], share_with=['coverage'])
@timed
def diff_coverage(options):
diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py
index 5617a1290097..7a84881d4659 100644
--- a/pavelib/utils/test/suites/__init__.py
+++ b/pavelib/utils/test/suites/__init__.py
@@ -2,7 +2,7 @@
TestSuite class and subclasses
"""
from .suite import TestSuite
-from .nose_suite import NoseTestSuite, SystemTestSuite, LibTestSuite
+from .pytest_suite import PytestSuite, SystemTestSuite, LibTestSuite
from .python_suite import PythonTestSuite
from .js_suite import JsTestSuite
from .acceptance_suite import AcceptanceTestSuite
diff --git a/pavelib/utils/test/suites/bokchoy_suite.py b/pavelib/utils/test/suites/bokchoy_suite.py
index 453e4dc19f93..e04bcc0d1cc7 100644
--- a/pavelib/utils/test/suites/bokchoy_suite.py
+++ b/pavelib/utils/test/suites/bokchoy_suite.py
@@ -343,7 +343,6 @@ def cmd(self):
"-m",
"pytest",
test_spec,
- "--durations=20",
] + self.verbosity_processes_command
if self.extra_args:
cmd.append(self.extra_args)
diff --git a/pavelib/utils/test/suites/nose_suite.py b/pavelib/utils/test/suites/pytest_suite.py
similarity index 50%
rename from pavelib/utils/test/suites/nose_suite.py
rename to pavelib/utils/test/suites/pytest_suite.py
index 13f34b3600eb..2872605b1d65 100644
--- a/pavelib/utils/test/suites/nose_suite.py
+++ b/pavelib/utils/test/suites/pytest_suite.py
@@ -1,7 +1,8 @@
"""
-Classes used for defining and running nose test suites
+Classes used for defining and running pytest test suites
"""
import os
+from glob import glob
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites.suite import TestSuite
from pavelib.utils.envs import Env
@@ -14,13 +15,13 @@
__test__ = False # do not collect
-class NoseTestSuite(TestSuite):
+class PytestSuite(TestSuite):
"""
A subclass of TestSuite with extra methods that are specific
- to nose tests
+ to pytest tests
"""
def __init__(self, *args, **kwargs):
- super(NoseTestSuite, self).__init__(*args, **kwargs)
+ super(PytestSuite, self).__init__(*args, **kwargs)
self.failed_only = kwargs.get('failed_only', False)
self.fail_fast = kwargs.get('fail_fast', False)
self.run_under_coverage = kwargs.get('with_coverage', True)
@@ -34,22 +35,17 @@ def __init__(self, *args, **kwargs):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
self.report_dir = self.report_dir / shard_str
- self.test_id_dir = Env.TEST_DIR / self.root
- self.test_ids = self.test_id_dir / 'noseids'
- self.extra_args = kwargs.get('extra_args', '')
self.cov_args = kwargs.get('cov_args', '')
- self.use_ids = True
def __enter__(self):
- super(NoseTestSuite, self).__enter__()
+ super(PytestSuite, self).__enter__()
self.report_dir.makedirs_p()
- self.test_id_dir.makedirs_p()
def __exit__(self, exc_type, exc_value, traceback):
"""
Cleans mongo after the tests run.
"""
- super(NoseTestSuite, self).__exit__(exc_type, exc_value, traceback)
+ super(PytestSuite, self).__exit__(exc_type, exc_value, traceback)
test_utils.clean_mongo()
def _under_coverage_cmd(self, cmd):
@@ -59,24 +55,19 @@ def _under_coverage_cmd(self, cmd):
unaltered otherwise.
"""
if self.run_under_coverage:
- # We use "python -m coverage" so that the proper python
- # will run the importable coverage rather than the
- # coverage that OS path finds.
-
- if not cmd[0].endswith('.py'):
- cmd[0] = "`which {}`".format(cmd[0])
-
- cmd = [
- "python",
- "-m",
- "coverage",
- "run",
- self.cov_args,
- "--rcfile={}".format(Env.PYTHON_COVERAGERC),
- ] + cmd
+ cmd.append('--cov')
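+            # An empty --cov-report value suppresses the terminal report;
+            # coverage data is still written out for later report generation.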
+ cmd.append('--cov-report=')
return cmd
+ @staticmethod
+ def is_success(exit_code):
+ """
+ An exit code of zero means all tests passed, 5 means no tests were
+ found.
+ """
+ return exit_code in [0, 5]
+
@property
def test_options_flags(self):
"""
@@ -87,12 +78,12 @@ def test_options_flags(self):
# Handle "--failed" as a special case: we want to re-run only
# the tests that failed within our Django apps
- # This sets the --failed flag for the nosetests command, so this
- # functionality is the same as described in the nose documentation
+ # This sets the --last-failed flag for the pytest command, so this
+ # functionality is the same as described in the pytest documentation
if self.failed_only:
- opts.append("--failed")
+ opts.append("--last-failed")
- # This makes it so we use nose's fail-fast feature in two cases.
+ # This makes it so we use pytest's fail-fast feature in two cases.
# Case 1: --fail-fast is passed as an arg in the paver command
# Case 2: The environment variable TESTS_FAIL_FAST is set as True
env_fail_fast_set = (
@@ -100,20 +91,18 @@ def test_options_flags(self):
)
if self.fail_fast or env_fail_fast_set:
- opts.append("--stop")
-
- if self.use_ids:
- opts.append("--with-id")
+ opts.append("--exitfirst")
return opts
-class SystemTestSuite(NoseTestSuite):
+class SystemTestSuite(PytestSuite):
"""
- TestSuite for lms and cms nosetests
+ TestSuite for lms and cms python unit tests
"""
def __init__(self, *args, **kwargs):
super(SystemTestSuite, self).__init__(*args, **kwargs)
+ self.eval_attr = kwargs.get('eval_attr', None)
self.test_id = kwargs.get('test_id', self._default_test_id)
self.fasttest = kwargs.get('fasttest', False)
@@ -127,17 +116,6 @@ def __init__(self, *args, **kwargs):
self.processes = int(self.processes)
- if self.randomize is None:
- self.randomize = self.root == 'lms'
-
- if self.processes != 0 and self.verbosity > 1:
- print colorize(
- 'red',
- "The TestId module and multiprocessing module can't be run "
- "together in verbose mode. Disabling TestId for {} tests.".format(self.root)
- )
- self.use_ids = False
-
def __enter__(self):
super(SystemTestSuite, self).__enter__()
@@ -145,23 +123,29 @@ def __enter__(self):
def cmd(self):
cmd = [
- './manage.py', self.root, 'test',
- '--verbosity={}'.format(self.verbosity),
- self.test_id,
- ] + self.test_options_flags + [
- '--settings', self.settings,
- self.extra_args,
- '--xunitmp-file={}'.format(self.report_dir / "nosetests.xml"),
- '--with-database-isolation',
- ]
-
- if self.processes != 0:
- cmd.append('--processes={}'.format(self.processes))
-
- if self.randomize:
- cmd.append('--with-randomly')
+ 'pytest',
+ '--ds={}'.format('{}.envs.{}'.format(self.root, self.settings)),
+ '--junitxml={}'.format(self.report_dir / "nosetests.xml"),
+ ] + self.test_options_flags
+ if self.verbosity < 1:
+ cmd.append("--quiet")
+ elif self.verbosity > 1:
+ cmd.append("--verbose")
+
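+        # xdist's loadscope distribution keeps tests from the same module or
+        # class on a single worker, so scoped fixtures keep working.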
+ if self.processes == -1:
+ cmd.append('-n auto')
+ cmd.append('--dist=loadscope')
+ elif self.processes != 0:
+ cmd.append('-n {}'.format(self.processes))
+ cmd.append('--dist=loadscope')
+
+ if not self.randomize:
+ cmd.append('-p no:randomly')
+ if self.eval_attr:
+ cmd.append("-a '{}'".format(self.eval_attr))
cmd.extend(self.passthrough_options)
+ cmd.append(self.test_id)
return self._under_coverage_cmd(cmd)
@@ -174,47 +158,77 @@ def _default_test_id(self):
using a default test id.
"""
# We need to use $DIR/*, rather than just $DIR so that
- # django-nose will import them early in the test process,
+ # pytest will import them early in the test process,
# thereby making sure that we load any django models that are
# only defined in test files.
- default_test_id = (
- "{system}/djangoapps/*"
- " common/djangoapps/*"
- " openedx/core/djangoapps/*"
- " openedx/tests/*"
- " openedx/core/lib/*"
- )
-
+ default_test_globs = [
+ "{system}/djangoapps/*".format(system=self.root),
+ "common/djangoapps/*",
+ "openedx/core/djangoapps/*",
+ "openedx/tests/*",
+ "openedx/core/lib/*",
+ ]
if self.root in ('lms', 'cms'):
- default_test_id += " {system}/lib/*"
+ default_test_globs.append("{system}/lib/*".format(system=self.root))
if self.root == 'lms':
- default_test_id += " {system}/tests.py"
- default_test_id += " openedx/core/djangolib/*"
- default_test_id += " openedx/features"
-
- return default_test_id.format(system=self.root)
-
-
-class LibTestSuite(NoseTestSuite):
+ default_test_globs.append("{system}/tests.py".format(system=self.root))
+ default_test_globs.append("openedx/core/djangolib/*")
+ default_test_globs.append("openedx/features")
+
+ def included(path):
+ """
+ Should this path be included in the pytest arguments?
+ """
+ if path.endswith('__pycache__'):
+ return False
+ return path.endswith('.py') or os.path.isdir(path)
+
+ default_test_paths = []
+ for path_glob in default_test_globs:
+ if '*' in path_glob:
+ default_test_paths += [path for path in glob(path_glob) if included(path)]
+ else:
+ default_test_paths += [path_glob]
+ return ' '.join(default_test_paths)
+
+
+class LibTestSuite(PytestSuite):
"""
- TestSuite for edx-platform/common/lib nosetests
+ TestSuite for edx-platform/common/lib python unit tests
"""
def __init__(self, *args, **kwargs):
super(LibTestSuite, self).__init__(*args, **kwargs)
+ self.append_coverage = kwargs.get('append_coverage', False)
self.test_id = kwargs.get('test_id', self.root)
self.xunit_report = self.report_dir / "nosetests.xml"
@property
def cmd(self):
cmd = [
- "nosetests",
- "--id-file={}".format(self.test_ids),
- self.test_id,
- ] + self.test_options_flags + [
- "--xunit-file={}".format(self.xunit_report),
- self.extra_args,
- "--verbosity={}".format(self.verbosity),
- ] + self.passthrough_options
+ "pytest",
+ "-p",
+ "no:randomly",
+ "--junitxml=".format(self.xunit_report),
+ ] + self.passthrough_options + self.test_options_flags
+ if self.verbosity < 1:
+ cmd.append("--quiet")
+ elif self.verbosity > 1:
+ cmd.append("--verbose")
+ cmd.append(self.test_id)
return self._under_coverage_cmd(cmd)
+
+ def _under_coverage_cmd(self, cmd):
+ """
+ If self.run_under_coverage is True, it returns the arg 'cmd'
+ altered to be run under coverage. It returns the command
+ unaltered otherwise.
+ """
+ if self.run_under_coverage:
+ cmd.append('--cov')
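+            # --cov-append accumulates coverage across the sequentially-run
+            # common/lib suites instead of overwriting the data file each time.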
+ if self.append_coverage:
+ cmd.append('--cov-append')
+ cmd.append('--cov-report=')
+
+ return cmd
diff --git a/pavelib/utils/test/suites/python_suite.py b/pavelib/utils/test/suites/python_suite.py
index 3d918c5c974f..2fde58019e4d 100644
--- a/pavelib/utils/test/suites/python_suite.py
+++ b/pavelib/utils/test/suites/python_suite.py
@@ -5,7 +5,7 @@
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites.suite import TestSuite
-from pavelib.utils.test.suites.nose_suite import LibTestSuite, SystemTestSuite
+from pavelib.utils.test.suites.pytest_suite import LibTestSuite, SystemTestSuite
from pavelib.utils.envs import Env
__test__ = False # do not collect
diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py
index f89e526a9b34..bc4122805aaa 100644
--- a/pavelib/utils/test/suites/suite.py
+++ b/pavelib/utils/test/suites/suite.py
@@ -61,6 +61,14 @@ def cmd(self):
"""
return None
+ @staticmethod
+ def is_success(exit_code):
+ """
+ Determine if the given exit code represents a success of the test
+ suite. By default, only a zero counts as a success.
+ """
+ return exit_code == 0
+
def run_test(self):
"""
Runs a self.cmd in a subprocess and waits for it to finish.
@@ -88,7 +96,7 @@ def run_test(self):
try:
process = subprocess.Popen(cmd, **kwargs)
- return (process.wait() == 0)
+ return self.is_success(process.wait())
except KeyboardInterrupt:
kill_process(process)
sys.exit(1)
diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt
index 9f053a445fd8..5ec205361386 100644
--- a/requirements/edx/github.txt
+++ b/requirements/edx/github.txt
@@ -48,7 +48,7 @@
# Third-party:
git+https://github.com/jazzband/django-pipeline.git@d068a019169c9de5ee20ece041a6dea236422852#egg=django-pipeline==1.5.3
-git+https://github.com/edx/django-wiki.git@v0.0.11#egg=django-wiki==0.0.11
+git+https://github.com/edx/django-wiki.git@v0.0.14#egg=django-wiki==0.0.14
git+https://github.com/edx/django-openid-auth.git@0.14#egg=django-openid-auth==0.14
git+https://github.com/edx/MongoDBProxy.git@25b99097615bda06bd7cdfe5669ed80dc2a7fed0#egg=MongoDBProxy==0.1.0
git+https://github.com/edx/nltk.git@2.0.6#egg=nltk==2.0.6
diff --git a/requirements/edx/testing.txt b/requirements/edx/testing.txt
index 767d019b8bda..e33b13d5ca27 100644
--- a/requirements/edx/testing.txt
+++ b/requirements/edx/testing.txt
@@ -15,5 +15,8 @@ pysqlite==2.8.3
pytest==3.1.3
pytest-attrib==0.1.3
pytest-catchlog==1.2.2
+pytest-cov==2.5.1
pytest-django==3.1.2
-pytest-xdist==1.18.1
+pytest-forked==0.2
+pytest-randomly==1.2.1
+pytest-xdist==1.20.0
diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh
index 65535c3f9c28..702a4dd7336f 100755
--- a/scripts/generic-ci-tests.sh
+++ b/scripts/generic-ci-tests.sh
@@ -29,7 +29,7 @@ set -e
# `SHARD` is a number indicating which subset of the tests to build.
#
# For "bok-choy" and "lms-unit", the tests are put into shard groups
-# using the nose 'attr' decorator (e.g. "@attr(shard=1)"). Anything with
+# using the 'attr' decorator (e.g. "@attr(shard=1)"). Anything with
# the 'shard=n' attribute will run in the nth shard. If there isn't a
# shard explicitly assigned, the test will run in the last shard.
#
@@ -68,8 +68,9 @@ function emptyxunit {
END
}
-PAVER_ARGS="--cov-args='-p' --with-xunitmp -v"
+PAVER_ARGS="-v"
PARALLEL="--processes=-1"
+export SUBSET_JOB=$JOB_NAME
case "$TEST_SUITE" in
"quality")
@@ -108,10 +109,10 @@ case "$TEST_SUITE" in
paver test_system -s lms $PAVER_ARGS $PARALLEL 2> lms-tests.log
;;
[1-3])
- paver test_system -s lms --attr="shard=$SHARD" $PAVER_ARGS $PARALLEL 2> lms-tests.$SHARD.log
+ paver test_system -s lms --eval-attr="shard==$SHARD" $PAVER_ARGS $PARALLEL 2> lms-tests.$SHARD.log
;;
4|"noshard")
- paver test_system -s lms --attr='!shard' $PAVER_ARGS $PARALLEL 2> lms-tests.4.log
+ paver test_system -s lms --eval-attr='not shard' $PAVER_ARGS $PARALLEL 2> lms-tests.4.log
;;
*)
# If no shard is specified, rather than running all tests, create an empty xunit file. This is a
diff --git a/setup.cfg b/setup.cfg
index 9123e9f69a64..4d6f804c78d2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,7 +15,11 @@ process-timeout=300
#pdb=1
[tool:pytest]
-norecursedirs = .git conf node_modules test_root cms/envs lms/envs
+DJANGO_SETTINGS_MODULE = lms.envs.test
+addopts = --nomigrations --reuse-db --durations=20
+norecursedirs = .* *.egg build conf dist node_modules test_root cms/envs lms/envs
+python_classes =
+python_files = tests.py test_*.py tests_*.py *_tests.py __init__.py
[pep8]
# error codes: http://pep8.readthedocs.org/en/latest/intro.html#error-codes