run precommit
LeoGrin committed Jan 21, 2025
1 parent 287b33e commit 991cd0b
Showing 5 changed files with 34 additions and 34 deletions.
2 changes: 1 addition & 1 deletion src/tabpfn/classifier.py
@@ -33,13 +33,13 @@
determine_precision,
initialize_tabpfn_model,
)
+ from tabpfn.config import ModelInterfaceConfig
from tabpfn.constants import (
PROBABILITY_EPSILON_ROUND_ZERO,
SKLEARN_16_DECIMAL_PRECISION,
XType,
YType,
)
- from tabpfn.config import ModelInterfaceConfig
from tabpfn.preprocessing import (
ClassifierEnsembleConfig,
EnsembleConfig,
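Context for the classifier.py hunk: this looks like pure import ordering from a pre-commit import sorter (ruff's isort rules are a common choice, though the hook configuration is not part of this commit). First-party imports are alphabetized, and tabpfn.config sorts ahead of tabpfn.constants, so the ModelInterfaceConfig import moves up. A minimal sketch of the resulting order, with the constants list shortened for brevity:

# Alphabetical within the first-party block: config precedes constants.
from tabpfn.config import ModelInterfaceConfig
from tabpfn.constants import XType, YType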
2 changes: 1 addition & 1 deletion src/tabpfn/preprocessing.py
@@ -153,7 +153,7 @@ def to_dict(self) -> dict:
}

@classmethod
- def from_dict(cls, config_dict: dict) -> "PreprocessorConfig":
+ def from_dict(cls, config_dict: dict) -> PreprocessorConfig:
"""Create a config from a dictionary.
Args:
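Context for the preprocessing.py hunk: dropping the quotes around the return type only works if the module defers annotation evaluation, which suggests from __future__ import annotations sits at the top of the file (not visible in this hunk). A minimal, self-contained sketch of the pattern, not the actual tabpfn class and with an illustrative subset of fields:

from __future__ import annotations  # annotations are stored as strings, not evaluated

from dataclasses import asdict, dataclass


@dataclass
class PreprocessorConfig:
    # Illustrative fields only; the real class defines more options.
    name: str
    append_original: bool = True

    def to_dict(self) -> dict:
        return asdict(self)

    @classmethod
    def from_dict(cls, config_dict: dict) -> PreprocessorConfig:
        # No string forward reference needed: the class name is never looked up
        # while the class body is still being built.
        return cls(**config_dict)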
8 changes: 4 additions & 4 deletions src/tabpfn/regressor.py
@@ -37,10 +37,6 @@
determine_precision,
initialize_tabpfn_model,
)
- from tabpfn.constants import (
- XType,
- YType,
- )
from tabpfn.config import ModelInterfaceConfig
from tabpfn.model.bar_distribution import FullSupportBarDistribution
from tabpfn.model.preprocessing import (
@@ -70,6 +66,10 @@
from sklearn.pipeline import Pipeline
from torch.types import _dtype

+ from tabpfn.constants import (
+ XType,
+ YType,
+ )
from tabpfn.inference import (
InferenceEngine,
)
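Context for the regressor.py hunk: the XType/YType import is not removed but relocated into what appears to be the if TYPE_CHECKING: block, alongside the typing-only Pipeline and _dtype imports. With postponed annotation evaluation, names that appear only in annotations never have to exist at runtime, so guarding the import keeps runtime imports minimal while type checkers still resolve the names. A minimal sketch of the pattern, using a hypothetical fit function purely for illustration:

from __future__ import annotations  # keep annotations as unevaluated strings

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only static type checkers (mypy, pyright) resolve this import.
    from tabpfn.constants import XType, YType


def fit(X: XType, y: YType) -> None:
    """Annotations may reference XType and YType even though the import is guarded."""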
23 changes: 12 additions & 11 deletions tests/test_classifier_interface.py
@@ -171,49 +171,50 @@ def test_classifier_in_pipeline(X_y: tuple[np.ndarray, np.ndarray]) -> None:
rtol=0.1,
), "Class probabilities are not properly balanced in pipeline"


def test_dict_vs_object_preprocessor_config(X_y: tuple[np.ndarray, np.ndarray]) -> None:
"""Test that dict configs behave identically to PreprocessorConfig objects."""
X, y = X_y

# Define same config as both dict and object
dict_config = {
"name": "quantile_uni_coarse",
"append_original": False, # changed from default
"append_original": False, # changed from default
"categorical_name": "ordinal_very_common_categories_shuffled",
"global_transformer_name": "svd",
"subsample_features": -1,
}

object_config = PreprocessorConfig(
name="quantile_uni_coarse",
- append_original=False, # changed from default
+ append_original=False,  # changed from default
categorical_name="ordinal_very_common_categories_shuffled",
global_transformer_name="svd",
subsample_features=-1,
)

# Create two models with same random state
model_dict = TabPFNClassifier(
inference_config={"PREPROCESS_TRANSFORMS": [dict_config]},
n_estimators=2,
- random_state=42
+ random_state=42,
)

model_obj = TabPFNClassifier(
inference_config={"PREPROCESS_TRANSFORMS": [object_config]},
n_estimators=2,
- random_state=42
+ random_state=42,
)

# Fit both models
model_dict.fit(X, y)
model_obj.fit(X, y)

# Compare predictions
pred_dict = model_dict.predict(X)
pred_obj = model_obj.predict(X)
np.testing.assert_array_equal(pred_dict, pred_obj)

# Compare probabilities
prob_dict = model_dict.predict_proba(X)
prob_obj = model_obj.predict_proba(X)
33 changes: 16 additions & 17 deletions tests/test_regressor_interface.py
@@ -157,64 +157,63 @@ def test_regressor_in_pipeline(X_y: tuple[np.ndarray, np.ndarray]) -> None:
X.shape[0],
), "Quantile predictions shape is incorrect"


def test_dict_vs_object_preprocessor_config(X_y: tuple[np.ndarray, np.ndarray]) -> None:
"""Test that dict configs behave identically to PreprocessorConfig objects."""
X, y = X_y

# Define same config as both dict and object
dict_config = {
"name": "quantile_uni",
"append_original": False, # changed from default
"append_original": False, # changed from default
"categorical_name": "ordinal_very_common_categories_shuffled",
"global_transformer_name": "svd",
"subsample_features": -1,
}

object_config = PreprocessorConfig(
name="quantile_uni",
- append_original=False, # changed from default
+ append_original=False,  # changed from default
categorical_name="ordinal_very_common_categories_shuffled",
global_transformer_name="svd",
subsample_features=-1,
)

# Create two models with same random state
model_dict = TabPFNRegressor(
inference_config={"PREPROCESS_TRANSFORMS": [dict_config]},
n_estimators=2,
- random_state=42
+ random_state=42,
)

model_obj = TabPFNRegressor(
inference_config={"PREPROCESS_TRANSFORMS": [object_config]},
n_estimators=2,
- random_state=42
+ random_state=42,
)

# Fit both models
model_dict.fit(X, y)
model_obj.fit(X, y)

# Compare predictions for different output types
for output_type in ["mean", "median", "mode"]:
pred_dict = model_dict.predict(X, output_type=output_type)
pred_obj = model_obj.predict(X, output_type=output_type)
np.testing.assert_array_almost_equal(
- pred_dict,
+ pred_dict,
pred_obj,
err_msg=f"Predictions differ for output_type={output_type}"
err_msg=f"Predictions differ for output_type={output_type}",
)

# Compare quantile predictions
quantiles = [0.1, 0.5, 0.9]
quant_dict = model_dict.predict(X, output_type="quantiles", quantiles=quantiles)
quant_obj = model_obj.predict(X, output_type="quantiles", quantiles=quantiles)

for q_dict, q_obj in zip(quant_dict, quant_obj):
np.testing.assert_array_almost_equal(
q_dict,
q_obj,
err_msg=f"Quantile predictions differ"
err_msg="Quantile predictions differ",
)

