Skip to content

Commit

Permalink
Merge pull request #7 from andrewtavis/add-testing
Browse files Browse the repository at this point in the history
Tests for evaluation metrics
  • Loading branch information
andrewtavis authored Feb 25, 2021
2 parents 3ecca13 + f0d2bf0 commit 7b6773f
Show file tree
Hide file tree
Showing 2 changed files with 76 additions and 46 deletions.
5 changes: 5 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,11 @@ def df_vis_eval_proba(request):
return request.param


@pytest.fixture(params=[model_eval_dict_proba])
def model_evaluation_dict_proba(request):
    # Parametrized fixture: yields the module-level `model_eval_dict_proba`
    # evaluation dict so tests receive it via pytest's `request.param`.
    return request.param


# Iterated pred models
tm = two_model.TwoModel(
treatment_model=RandomForestRegressor(random_state=42),
Expand Down
117 changes: 71 additions & 46 deletions tests/test_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,61 +12,86 @@

np.random.seed(42)

# Identifiers of the uplift models exercised by the plotting tests in this file.
models = ["two_model", "interaction_term"]

def test_plot_cum_effect(monkeypatch):
    """Placeholder for cumulative-effect plotting (no assertions yet).

    The commented-out `evaluation.get_cum_effect(...)` draft that used to sit
    here has been removed: commented-out code should be deleted, not kept.
    """
    # Neutralize plt.show so nothing would open a window if plotting is added.
    monkeypatch.setattr(plt, "show", lambda: None)
    assert True


def test_plot_cum_effect(monkeypatch, df_vis_eval_proba):
    """Smoke-test cumulative-effect plotting on the probability eval DataFrame.

    The rendered diff interleaved deleted stub tests with this added test,
    leaving a dangling `def` header; this is the reconstructed coherent test.
    Passes if `evaluation.plot_cum_effect` runs without raising.
    """
    # Suppress interactive display so the test runs headlessly.
    monkeypatch.setattr(plt, "show", lambda: None)
    evaluation.plot_cum_effect(
        df=df_vis_eval_proba,
        n=20,
        models=models,
        percent_of_pop=False,
        outcome_col="y_test",
        treatment_col="w_test",
        random_seed=42,
        figsize=(10, 5),
        fontsize=20,
        axis=None,
        legend_metrics=False,
    )


def test_plot_cum_gain(monkeypatch, df_vis_eval_proba):
    """Smoke-test cumulative-gain plotting on the probability eval DataFrame.

    Reconstructed from the diff rendering, which left a stray `assert True`
    and a dangling old `def` between this header and its call body.
    Passes if `evaluation.plot_cum_gain` runs without raising.
    """
    # Suppress interactive display so the test runs headlessly.
    monkeypatch.setattr(plt, "show", lambda: None)
    evaluation.plot_cum_gain(
        df=df_vis_eval_proba,
        n=100,
        models=models,
        percent_of_pop=True,
        outcome_col="y_test",
        treatment_col="w_test",
        normalize=True,
        random_seed=42,
        figsize=None,
        fontsize=20,
        axis=None,
        legend_metrics=True,
    )


def test_plot_qini(monkeypatch, df_vis_eval_proba):
    """Smoke-test Qini-curve plotting; passes if no exception is raised."""
    # Neutralize plt.show so no figure window blocks the headless run.
    monkeypatch.setattr(plt, "show", lambda: None)
    qini_kwargs = {
        "df": df_vis_eval_proba,
        "n": 100,
        "models": models,
        "percent_of_pop": True,
        "outcome_col": "y_test",
        "treatment_col": "w_test",
        "normalize": True,
        "random_seed": 42,
        "figsize": None,
        "fontsize": 20,
        "axis": None,
        "legend_metrics": True,
    }
    evaluation.plot_qini(**qini_kwargs)


def test_plot_batch_responses(monkeypatch, df_vis_eval_proba):
    """Smoke-test batch-response plotting; passes if no exception is raised.

    The leftover `assert True` from the replaced stub has been removed —
    it was dead code sitting before the real plotting call.
    """
    # Suppress interactive display so the test runs headlessly.
    monkeypatch.setattr(plt, "show", lambda: None)
    evaluation.plot_batch_responses(
        df=df_vis_eval_proba,
        n=10,
        models=models,
        outcome_col="y_test",
        treatment_col="w_test",
        normalize=False,
        figsize=None,
        fontsize=15,
        axis=None,
    )


def test_signal_to_noise(y_split_proba, w_split_proba):
    """signal_to_noise should return a scalar float for the proba fixtures."""
    # Fixed local-name typo (`sn_ration` -> `sn_ratio`) and replaced the
    # non-idiomatic `type(x) == T` chain with a single isinstance check.
    sn_ratio = evaluation.signal_to_noise(y=y_split_proba, w=w_split_proba)
    assert isinstance(sn_ratio, (float, np.float64))


def test_iterate_model_pred():
    # TODO: placeholder — iterated model prediction is not yet covered;
    # the `tm` iterated-pred model built in conftest.py is the intended input.
    assert True


def test_pred_proba_eval_table(model_evaluation_dict_proba):
    """Smoke-test eval_table on the evaluation dict, with and without variances.

    The superseded `assert True` stub of the same name (a leftover of the
    rendered diff) has been dropped; passes if both calls run without raising.
    """
    # Variance columns on, with variance annotations.
    evaluation.eval_table(
        model_evaluation_dict_proba, variances=True, annotate_vars=True
    )
    # Variance columns off.
    evaluation.eval_table(
        model_evaluation_dict_proba, variances=False, annotate_vars=False
    )

0 comments on commit 7b6773f

Please sign in to comment.