Skip to content

Commit 6107e58

Browse files
authored
Pydantic Fix: Change deprecated function (#614)
1 parent abb79a9 commit 6107e58

File tree

5 files changed

+17
-17
lines changed

5 files changed

+17
-17
lines changed

uptrain/framework/base.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ def serialize(self, fpath: str | None = None):
         if fpath is None:
             fpath = os.path.join(self.logs_folder, "settings.json")
         with open(fpath, "w") as f:
-            jsondump(self.dict(), f)
+            jsondump(self.model_dump(), f)

     @classmethod
     def deserialize(cls, fpath: str):

uptrain/framework/evalllm.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,7 @@ def evaluate(
                 )

                 if isinstance(m, ParametricEval):
-                    dictm = m.dict()
+                    dictm = m.model_dump()
                     dictm.update({"scenario_description": this_scenario_description})
                     ser_checks.append({"check_name": m.__class__.__name__, **dictm})
                 elif isinstance(m, Evals):
@@ -323,7 +323,7 @@ def evaluate(
                 "data": results,
                 "checks": checks,
                 "metadata": metadata,
-                "schema_dict": schema.dict(),
+                "schema_dict": schema.model_dump(),
                 "project": project_name,
             },
         )
@@ -347,8 +347,8 @@ def evaluate_on_server(self, data, ser_checks, schema):
                     data=data[i : i + BATCH_SIZE],
                     checks=ser_checks,
                     metadata={
-                        "schema": schema.dict(),
-                        "uptrain_settings": self.settings.dict(),
+                        "schema": schema.model_dump(),
+                        "uptrain_settings": self.settings.model_dump(),
                     },
                 )
                 break

uptrain/framework/remote.py

+9-9
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ def add_checkset(self, name: str, checkset: CheckSet, settings: Settings):
         url = f"{self.base_url}/checkset"
         response = self.client.post(
             url,
-            json={"name": name, "config": checkset.dict(), "settings": settings.dict()},
+            json={"name": name, "config": checkset.dict(), "settings": settings.model_dump()},
         )
         return raise_or_return(response)

@@ -210,7 +210,7 @@ def add_experiment(
             json={
                 "name": name,
                 "config": modified_checkset.dict(),
-                "settings": settings.dict(),
+                "settings": settings.model_dump(),
             },
         )
         return raise_or_return(response)
@@ -422,10 +422,10 @@ def evaluate(
         results = []

         if params is not None:
-            params["uptrain_settings"] = self.settings.dict()
+            params["uptrain_settings"] = self.settings.model_dump()
         else:
             params = {}
-            params["uptrain_settings"] = self.settings.dict()
+            params["uptrain_settings"] = self.settings.model_dump()

         NUM_TRIES = 3
         for i in range(0, len(full_dataset), 100):
@@ -528,9 +528,9 @@ def perform_root_cause_analysis(
                 "rca_templates": ser_templates,
                 "metadata": {
                     "project": project_name,
-                    "schema": schema.dict(),
+                    "schema": schema.model_dump(),
                     **metadata,
-                    "uptrain_settings": self.settings.dict(),
+                    "uptrain_settings": self.settings.model_dump(),
                 },
             },
         )
@@ -645,7 +645,7 @@ def log_and_evaluate(
             req_attrs.update([schema.question])

             if isinstance(m, ParametricEval):
-                dictm = m.dict()
+                dictm = m.model_dump()
                 dictm.update({"scenario_description": scenario_description})
                 ser_checks.append({"check_name": m.__class__.__name__, **dictm})
             elif isinstance(m, Evals):
@@ -676,9 +676,9 @@ def log_and_evaluate(
                 "checks": ser_checks,
                 "metadata": {
                     "project": project_name,
-                    "schema": schema.dict(),
+                    "schema": schema.model_dump(),
                     **metadata,
-                    "uptrain_settings": self.settings.dict(),
+                    "uptrain_settings": self.settings.model_dump(),
                 },
             },
         )

uptrain/operators/drift.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -143,9 +143,9 @@ def _check_params(cls, values):

     def setup(self):
         if self.algorithm == "DDM":
-            self._algo_obj = drift.binary.DDM(**self.params.dict())  # type: ignore
+            self._algo_obj = drift.binary.DDM(**self.params.model_dump())  # type: ignore
         elif self.algorithm == "ADWIN":
-            self._algo_obj = drift.ADWIN(**self.params.dict())  # type: ignore
+            self._algo_obj = drift.ADWIN(**self.params.model_dump())  # type: ignore
         self._counter = 0
         self._avg_accuracy = 0.0
         self._cuml_accuracy = 0.0

uptrain/utilities/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def to_py_types(obj: t.Any) -> t.Any:
             "params": obj.dict(include=set(obj.__fields__)),
         }
     elif isinstance(obj, BaseModel):
-        return obj.dict()
+        return obj.model_dump()

     # for numpy types
     if isinstance(obj, np.integer):

0 commit comments

Comments (0)