
# Multi Evaluation Tool

Example usage:

```python
import numpy as np

from evaluate.metrics import r2, sensitivity, specificity
from evaluate.schemas.evaluators import Truth
from evaluate.schemas.metrics import EvaluationMetric
from evaluate import Evaluator


def custom_comp(y_pred: np.ndarray, y_true: np.ndarray) -> np.float64:
    """Example custom metric: difference between the mean prediction and the mean truth."""
    return y_pred.mean() - y_true.mean()


my_custom_metric = EvaluationMetric(
    name="my custom metric",
    model_fn=custom_comp,
    allowed_models=["classifier"],
)

evaluator = Evaluator(
    full_df=df,               # full dataset, including the slice column(s)
    test_df=Xte,              # held-out test features
    truths=[Truth(name='test_actuals', y_true=yte)],  # ground-truth labels for the test set
    estimator=sklearn_model,  # a fitted scikit-learn estimator
    evaluation_metrics=[r2, sensitivity, specificity, my_custom_metric],
    slices=['country'],       # columns to segment the evaluation by
)

evaluator.evaluate()
```
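
The example assumes a few objects already exist in scope: `df` (the full dataset), `Xte`/`yte` (a held-out test split), and `sklearn_model` (a fitted estimator). A minimal, purely illustrative setup for those names might look like the sketch below; the data and model choice are placeholders, not part of this library:

```python
# Hypothetical setup for the example above; names, shapes, and values are illustrative only.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# df: full dataset with features, a label column, and the 'country' slice column
df = pd.DataFrame({
    "feature_a": [0.1, 0.4, 0.8, 0.3, 0.9, 0.2],
    "feature_b": [1.0, 0.5, 0.2, 0.7, 0.1, 0.6],
    "country": ["US", "DE", "US", "DE", "US", "DE"],
    "label": [0, 1, 1, 0, 1, 0],
})

X = df[["feature_a", "feature_b"]]
y = df["label"]

# Xte/yte: the held-out portion of a train/test split
Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.5, random_state=0)

# sklearn_model: any fitted scikit-learn classifier
sklearn_model = RandomForestClassifier(random_state=0).fit(Xtr, ytr)
```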