[MRG] nosetests to pytests #310

Merged: 2 commits, Aug 17, 2017
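This PR migrates the test suite from nose-style assertion helpers (assert_equal, assert_not_equal, assert_false, assert_greater from sklearn.utils.testing) to bare assert statements, which pytest rewrites to report both operands on failure. It also splits parenthesized multi-name imports into one import per line. A minimal sketch of the core pattern (hypothetical values, not taken from this diff):

    # Before: nose-era helper (sklearn.utils.testing was later deprecated)
    from sklearn.utils.testing import assert_equal

    def test_old_style():
        assert_equal(1 + 1, 2)

    # After: a bare assert; pytest's assertion rewriting prints both
    # operands when it fails, so no helper function is needed
    def test_new_style():
        assert 1 + 1 == 2
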
4 changes: 2 additions & 2 deletions imblearn/combine/tests/test_smote_enn.py
@@ -6,8 +6,8 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex

 from imblearn.combine import SMOTEENN
 from imblearn.under_sampling import EditedNearestNeighbours
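Note that assert_allclose and assert_array_equal survive the conversion: a bare assert on a NumPy array comparison raises rather than tests. A standalone sketch of why (using numpy.testing directly; not from this diff):

    import numpy as np
    from numpy.testing import assert_array_equal

    a = np.array([1, 2, 3])
    b = np.array([1, 2, 3])

    # `assert a == b` would raise ValueError: the elementwise comparison
    # yields an array whose overall truth value is ambiguous.
    assert_array_equal(a, b)    # array-aware helper, kept by this PR
    assert (a == b).all()       # or reduce to a single bool first
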
4 changes: 2 additions & 2 deletions imblearn/combine/tests/test_smote_tomek.py
@@ -6,8 +6,8 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex

 from imblearn.combine import SMOTETomek
 from imblearn.over_sampling import SMOTE
12 changes: 6 additions & 6 deletions imblearn/datasets/tests/test_imbalance.py
@@ -11,8 +11,8 @@
 import numpy as np

 from sklearn.datasets import load_iris
-from sklearn.utils.testing import (assert_equal, assert_raises_regex,
-                                   assert_warns_message)
+from sklearn.utils.testing import assert_raises_regex
+from sklearn.utils.testing import assert_warns_message

 from imblearn.datasets import make_imbalance

@@ -46,17 +46,17 @@ def test_make_imbalance_float():
     X_, y_ = assert_warns_message(DeprecationWarning,
                                   "'ratio' being a float is deprecated",
                                   make_imbalance, X, Y, ratio=0.5, min_c_=1)
-    assert_equal(Counter(y_), {0: 50, 1: 25, 2: 50})
+    assert Counter(y_) == {0: 50, 1: 25, 2: 50}
     # resample without using min_c_
     X_, y_ = make_imbalance(X_, y_, ratio=0.25, min_c_=None)
-    assert_equal(Counter(y_), {0: 50, 1: 12, 2: 50})
+    assert Counter(y_) == {0: 50, 1: 12, 2: 50}


 def test_make_imbalance_dict():
     ratio = {0: 10, 1: 20, 2: 30}
     X_, y_ = make_imbalance(X, Y, ratio=ratio)
-    assert_equal(Counter(y_), ratio)
+    assert Counter(y_) == ratio

     ratio = {0: 10, 1: 20}
     X_, y_ = make_imbalance(X, Y, ratio=ratio)
-    assert_equal(Counter(y_), {0: 10, 1: 20, 2: 50})
+    assert Counter(y_) == {0: 10, 1: 20, 2: 50}
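Counter subclasses dict, so comparing it to a plain dict with == is well-defined, and a failing bare assert shows both mappings. A standalone sketch (illustrative labels only):

    from collections import Counter

    def test_class_counts_sketch():
        y_resampled = [0] * 50 + [1] * 12 + [2] * 50
        # on failure pytest prints both sides of the comparison
        assert Counter(y_resampled) == {0: 50, 1: 12, 2: 50}
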
20 changes: 10 additions & 10 deletions imblearn/datasets/tests/test_zenodo.py
@@ -7,8 +7,8 @@
 # License: MIT

 from imblearn.datasets import fetch_datasets
-from sklearn.utils.testing import (assert_equal, assert_allclose,
-                                   assert_raises_regex, SkipTest)
+from sklearn.utils.testing import SkipTest, assert_allclose
+from sklearn.utils.testing import assert_raises_regex

 DATASET_SHAPE = {'ecoli': (336, 7),
                  'optical_digits': (5620, 64),
@@ -54,12 +54,12 @@ def test_fetch():
     for k in DATASET_SHAPE.keys():

         X1, X2 = datasets1[k].data, datasets2[k].data
-        assert_equal(DATASET_SHAPE[k], X1.shape)
-        assert_equal(X1.shape, X2.shape)
+        assert DATASET_SHAPE[k] == X1.shape
+        assert X1.shape == X2.shape

         y1, y2 = datasets1[k].target, datasets2[k].target
-        assert_equal((X1.shape[0],), y1.shape)
-        assert_equal((X1.shape[0],), y2.shape)
+        assert (X1.shape[0],) == y1.shape
+        assert (X1.shape[0],) == y2.shape


 def test_fetch_filter():
@@ -73,14 +73,14 @@ def test_fetch_filter():
                               random_state=37)

     X1, X2 = datasets1['ecoli'].data, datasets2['ecoli'].data
-    assert_equal(DATASET_SHAPE['ecoli'], X1.shape)
-    assert_equal(X1.shape, X2.shape)
+    assert DATASET_SHAPE['ecoli'] == X1.shape
+    assert X1.shape == X2.shape

     assert_allclose(X1.sum(), X2.sum())

     y1, y2 = datasets1['ecoli'].target, datasets2['ecoli'].target
-    assert_equal((X1.shape[0],), y1.shape)
-    assert_equal((X1.shape[0],), y2.shape)
+    assert (X1.shape[0],) == y1.shape
+    assert (X1.shape[0],) == y2.shape


 def test_fetch_error():
4 changes: 2 additions & 2 deletions imblearn/ensemble/tests/test_balance_cascade.py
@@ -6,8 +6,8 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import (assert_array_equal, assert_raises,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_array_equal, assert_raises
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.ensemble import RandomForestClassifier

 from imblearn.ensemble import BalanceCascade
10 changes: 5 additions & 5 deletions imblearn/ensemble/tests/test_easy_ensemble.py
@@ -6,7 +6,7 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import assert_array_equal, assert_equal
+from sklearn.utils.testing import assert_array_equal

 from imblearn.ensemble import EasyEnsemble

@@ -25,10 +25,10 @@ def test_ee_init():
     ratio = 1.
     ee = EasyEnsemble(ratio=ratio, random_state=RND_SEED)

-    assert_equal(ee.ratio, ratio)
-    assert_equal(ee.replacement, False)
-    assert_equal(ee.n_subsets, 10)
-    assert_equal(ee.random_state, RND_SEED)
+    assert ee.ratio == ratio
+    assert ee.replacement is False
+    assert ee.n_subsets == 10
+    assert ee.random_state == RND_SEED


 def test_fit_sample_auto():
56 changes: 28 additions & 28 deletions imblearn/metrics/tests/test_classification.py
@@ -16,15 +16,14 @@
 from sklearn.preprocessing import label_binarize
 from sklearn.utils.fixes import np_version
 from sklearn.utils.validation import check_random_state
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_no_warnings, assert_equal,
-                                   assert_raises, assert_warns_message,
-                                   ignore_warnings, assert_not_equal,
-                                   assert_raise_message)
-from sklearn.metrics import (accuracy_score, average_precision_score,
-                             brier_score_loss, cohen_kappa_score,
-                             jaccard_similarity_score, precision_score,
-                             recall_score, roc_auc_score)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_no_warnings, assert_raises
+from sklearn.utils.testing import assert_warns_message, ignore_warnings
+from sklearn.utils.testing import assert_raise_message
+from sklearn.metrics import accuracy_score, average_precision_score
+from sklearn.metrics import brier_score_loss, cohen_kappa_score
+from sklearn.metrics import jaccard_similarity_score, precision_score
+from sklearn.metrics import recall_score, roc_auc_score

 from imblearn.metrics import sensitivity_specificity_support
 from imblearn.metrics import sensitivity_score
@@ -33,6 +32,8 @@
 from imblearn.metrics import make_index_balanced_accuracy
 from imblearn.metrics import classification_report_imbalanced

+from pytest import approx
+
 RND_SEED = 42
 R_TOL = 1e-2

@@ -113,11 +114,11 @@ def test_sensitivity_specificity_score_binary():

 def test_sensitivity_specificity_f_binary_single_class():
     # Such a case may occur with non-stratified cross-validation
-    assert_equal(1., sensitivity_score([1, 1], [1, 1]))
-    assert_equal(0., specificity_score([1, 1], [1, 1]))
+    assert sensitivity_score([1, 1], [1, 1]) == 1.
+    assert specificity_score([1, 1], [1, 1]) == 0.

-    assert_equal(0., sensitivity_score([-1, -1], [-1, -1]))
-    assert_equal(0., specificity_score([-1, -1], [-1, -1]))
+    assert sensitivity_score([-1, -1], [-1, -1]) == 0.
+    assert specificity_score([-1, -1], [-1, -1]) == 0.


 @ignore_warnings
@@ -166,9 +167,8 @@ def test_sensitivity_specificity_ignored_labels():
                     rtol=R_TOL)

     # ensure the above were meaningful tests:
-    for average in ['macro', 'weighted', 'micro']:
-        assert_not_equal(
-            specificity_13(average=average), specificity_all(average=average))
+    for each in ['macro', 'weighted', 'micro']:
+        assert specificity_13(average=each) != specificity_all(average=each)


 def test_sensitivity_specificity_error_multilabels():
@@ -333,15 +333,15 @@ def test_classification_report_imbalanced_multiclass():
         y_pred,
         labels=np.arange(len(iris.target_names)),
         target_names=iris.target_names)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
     # print classification report with label detection
     expected_report = ('pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 '
                       '0.86 0.74 24 1 0.33 0.10 0.86 0.15 0.44 0.19 31 2 '
                       '0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 '
                       '0.53 0.80 0.47 0.62 0.41 75')

     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report


 def test_classification_report_imbalanced_multiclass_with_digits():
@@ -361,14 +361,14 @@ def test_classification_report_imbalanced_multiclass_with_digits():
         labels=np.arange(len(iris.target_names)),
         target_names=iris.target_names,
         digits=5)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
     # print classification report with label detection
     expected_report = ('pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 '
                       '0.86 0.74 24 1 0.33 0.10 0.86 0.15 0.44 0.19 31 2 '
                       '0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 '
                       '0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report


 def test_classification_report_imbalanced_multiclass_with_string_label():
@@ -382,15 +382,15 @@ def test_classification_report_imbalanced_multiclass_with_string_label():
                       '0.19 31 red 0.42 0.90 0.55 0.57 0.63 0.37 20 '
                       'avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report

     expected_report = ('pre rec spe f1 geo iba sup a 0.83 0.79 0.92 0.81 '
                       '0.86 0.74 24 b 0.33 0.10 0.86 0.15 0.44 0.19 31 '
                       'c 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total '
                       '0.51 0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(
         y_true, y_pred, target_names=["a", "b", "c"])
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report


 def test_classification_report_imbalanced_multiclass_with_unicode_label():
@@ -411,7 +411,7 @@ def test_classification_report_imbalanced_multiclass_with_unicode_label():
                              classification_report_imbalanced, y_true, y_pred)
     else:
         report = classification_report_imbalanced(y_true, y_pred)
-        assert_equal(_format_report(report), expected_report)
+        assert _format_report(report) == expected_report


 def test_classification_report_imbalanced_multiclass_with_long_string_label():
@@ -427,7 +427,7 @@ def test_classification_report_imbalanced_multiclass_with_long_string_label():
                       '0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75')

     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report


 def test_iba_sklearn_metrics():
@@ -436,22 +436,22 @@ def test_iba_sklearn_metrics():
     acc = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         accuracy_score)
     score = acc(y_true, y_pred)
-    assert_equal(score, 0.54756)
+    assert score == approx(0.54756)

     jss = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         jaccard_similarity_score)
     score = jss(y_true, y_pred)
-    assert_equal(score, 0.54756)
+    assert score == approx(0.54756)

     pre = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         precision_score)
     score = pre(y_true, y_pred)
-    assert_equal(score, 0.65025)
+    assert score == approx(0.65025)

     rec = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         recall_score)
     score = rec(y_true, y_pred)
-    assert_equal(score, 0.41616000000000009)
+    assert score == approx(0.41616000000000009)


 def test_iba_error_y_score_prob():
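The exact float checks (assert_equal(score, 0.54756)) become score == approx(0.54756); pytest.approx compares within a relative tolerance (1e-6 by default) instead of bit-for-bit equality. A standalone sketch:

    from pytest import approx

    # approx absorbs float round-off that exact equality would reject
    assert 0.1 + 0.2 == approx(0.3)
    # the tolerance can also be set explicitly
    assert 0.54756 == approx(0.5476, rel=1e-4)
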
8 changes: 4 additions & 4 deletions imblearn/over_sampling/tests/test_adasyn.py
@@ -6,8 +6,8 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_equal, assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.neighbors import NearestNeighbors

 from imblearn.over_sampling import ADASYN
@@ -30,13 +30,13 @@
 def test_ada_init():
     ratio = 'auto'
     ada = ADASYN(ratio=ratio, random_state=RND_SEED)
-    assert_equal(ada.random_state, RND_SEED)
+    assert ada.random_state == RND_SEED


 def test_ada_fit():
     ada = ADASYN(random_state=RND_SEED)
     ada.fit(X, Y)
-    assert_equal(ada.ratio_, {0: 4, 1: 0})
+    assert ada.ratio_ == {0: 4, 1: 0}


 def test_ada_fit_sample():
10 changes: 5 additions & 5 deletions imblearn/over_sampling/tests/test_random_over_sampler.py
@@ -8,7 +8,7 @@
 from collections import Counter

 import numpy as np
-from sklearn.utils.testing import assert_array_equal, assert_equal
+from sklearn.utils.testing import assert_array_equal

 from imblearn.over_sampling import RandomOverSampler

@@ -24,7 +24,7 @@
 def test_ros_init():
     ratio = 'auto'
     ros = RandomOverSampler(ratio=ratio, random_state=RND_SEED)
-    assert_equal(ros.random_state, RND_SEED)
+    assert ros.random_state == RND_SEED


 def test_ros_fit_sample():
@@ -75,6 +75,6 @@ def test_multiclass_fit_sample():
     ros = RandomOverSampler(random_state=RND_SEED)
     X_resampled, y_resampled = ros.fit_sample(X, y)
     count_y_res = Counter(y_resampled)
-    assert_equal(count_y_res[0], 5)
-    assert_equal(count_y_res[1], 5)
-    assert_equal(count_y_res[2], 5)
+    assert count_y_res[0] == 5
+    assert count_y_res[1] == 5
+    assert count_y_res[2] == 5
4 changes: 2 additions & 2 deletions imblearn/over_sampling/tests/test_smote.py
@@ -6,8 +6,8 @@
 from __future__ import print_function

 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.neighbors import NearestNeighbors
 from sklearn.svm import SVC

6 changes: 2 additions & 4 deletions imblearn/tests/test_common.py
@@ -3,8 +3,6 @@
 # Christos Aridas
 # License: MIT

-from sklearn.utils.testing import assert_greater
-from sklearn.utils.testing import assert_false
 from sklearn.utils.testing import _named_check

 from imblearn.utils.estimator_checks import check_estimator, _yield_all_checks
@@ -16,12 +14,12 @@ def test_all_estimator_no_base_class():
     for name, Estimator in all_estimators():
         msg = ("Base estimators such as {0} should not be included"
                " in all_estimators").format(name)
-        assert_false(name.lower().startswith('base'), msg=msg)
+        assert not name.lower().startswith('base'), msg


 def test_all_estimators():
     estimators = all_estimators(include_meta_estimators=True)
-    assert_greater(len(estimators), 0)
+    assert len(estimators) > 0
     for name, Estimator in estimators:
         # some can just not be sensibly default constructed
         yield (_named_check(check_estimator, name),
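assert_false(expr, msg=msg) maps onto the two-expression form assert not expr, msg, where the message is reported only on failure. (test_all_estimators remains a yield-based generator test, which pytest still collected at the time; yield tests were later deprecated and removed in pytest 4.) A minimal sketch of the message form:

    def test_no_base_estimators_sketch():
        name = "SMOTE"   # hypothetical estimator name
        msg = ("Base estimators such as {0} should not be included"
               " in all_estimators").format(name)
        # the expression after the comma becomes the failure message
        assert not name.lower().startswith('base'), msg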