import numpy as np

from evaluate import Evaluator
from evaluate.metrics import r2, sensitivity, specificity
from evaluate.schemas.evaluators import Truth
from evaluate.schemas.metrics import EvaluationMetric


def custom_comp(y_pred: np.ndarray, y_true: np.ndarray) -> np.float64:
    # Difference between the mean prediction and the mean actual value.
    return y_pred.mean() - y_true.mean()


# Wrap the function in an EvaluationMetric so the Evaluator can run it
# alongside the built-in metrics.
my_custom_metric = EvaluationMetric(
    name="my custom metric",
    model_fn=custom_comp,
    allowed_models=["classifier"],
)

evaluator = Evaluator(
    full_df=df,                # full dataset, assumed to include the 'country' slice column
    test_df=Xte,               # held-out feature frame
    truths=[Truth(name="test_actuals", y_true=yte)],
    estimator=sklearn_model,   # a fitted scikit-learn estimator
    evaluation_metrics=[r2, sensitivity, specificity, my_custom_metric],
    slices=["country"],
)
evaluator.evaluate()
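The snippet above references df, Xte, yte, and sklearn_model without defining them. Below is a minimal sketch of how those inputs might be prepared, assuming a pandas DataFrame with a 'country' column and a scikit-learn classifier; the column names and toy data are illustrative, not part of this package.

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Illustrative dataset: two features, a binary target, and a 'country'
# column used as the slice dimension.
df = pd.DataFrame({
    "feature_a": [0.1, 0.4, 0.35, 0.8, 0.6, 0.2],
    "feature_b": [1.0, 0.9, 0.2, 0.4, 0.7, 0.3],
    "country": ["US", "US", "DE", "DE", "FR", "FR"],
    "target": [0, 1, 0, 1, 1, 0],
})

X = df[["feature_a", "feature_b"]]
y = df["target"]

# Hold out a test split; Xte and yte feed test_df and the Truth object above.
Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.33, random_state=0)

# Any fitted scikit-learn estimator can serve as `estimator`.
sklearn_model = RandomForestClassifier(random_state=0).fit(Xtr, ytr)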