Skip to content

Commit 92bb0bf

Browse files
chore: Replace Union with | in type annotations (#281)
* Replace `Union` with `|`
* Update polaris/evaluate/metrics/docking_metrics.py (co-authored-by: Honoré Hounwanou <[email protected]>)
* Update polaris/evaluate/metrics/docking_metrics.py (co-authored-by: Honoré Hounwanou <[email protected]>)
* Update polaris/utils/types.py (co-authored-by: Honoré Hounwanou <[email protected]>)
* Update polaris/utils/types.py (co-authored-by: Honoré Hounwanou <[email protected]>)
* Remove other deprecated typing imports: `Dict`, `List`, `Tuple`

Co-authored-by: Honoré Hounwanou <[email protected]>
1 parent 435188a commit 92bb0bf

File tree

8 files changed

+17
-20
lines changed

8 files changed

+17
-20
lines changed

polaris/dataset/_dataset.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from hashlib import md5
33
from os import PathLike
44
from pathlib import Path
5-
from typing import Any, ClassVar, List, Literal
5+
from typing import Any, ClassVar, Literal
66

77
import fsspec
88
import numpy as np
@@ -148,7 +148,7 @@ def load_zarr_root_from_hub(self):
148148

149149
@computed_field
150150
@property
151-
def zarr_md5sum_manifest(self) -> List[ZarrFileChecksum]:
151+
def zarr_md5sum_manifest(self) -> list[ZarrFileChecksum]:
152152
"""
153153
The Zarr Checksum manifest stores the checksums of all files in a Zarr archive.
154154
If the dataset doesn't use Zarr, this will simply return an empty list.

polaris/dataset/_subset.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from copy import deepcopy
2-
from typing import Callable, Iterable, List, Literal, Sequence
2+
from typing import Callable, Iterable, Literal, Sequence
33

44
import numpy as np
55
import pandas as pd
@@ -239,7 +239,7 @@ def extend_inputs(self, input_cols: Iterable[str] | str) -> Self:
239239
copy.input_cols = list(set(self.input_cols + input_cols))
240240
return copy
241241

242-
def filter_targets(self, target_cols: List[str] | str) -> Self:
242+
def filter_targets(self, target_cols: list[str] | str) -> Self:
243243
"""
244244
Filter the subset to only include the specified target columns.
245245

polaris/dataset/converters/_base.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,13 @@
11
import abc
2-
from typing import Dict, Tuple, TypeAlias, Union
2+
from typing import TypeAlias
33

44
import pandas as pd
55

66
from polaris.dataset import ColumnAnnotation
77
from polaris.dataset._adapters import Adapter
88
from polaris.dataset._dataset import _INDEX_SEP
99

10-
FactoryProduct: TypeAlias = Tuple[pd.DataFrame, Dict[str, ColumnAnnotation], Dict[str, Adapter]]
10+
FactoryProduct: TypeAlias = tuple[pd.DataFrame, dict[str, ColumnAnnotation], dict[str, Adapter]]
1111

1212

1313
class Converter(abc.ABC):
@@ -17,7 +17,7 @@ def convert(self, path: str, append: bool = False) -> FactoryProduct:
1717
raise NotImplementedError
1818

1919
@staticmethod
20-
def get_pointer(column: str, index: Union[int, slice]) -> str:
20+
def get_pointer(column: str, index: int | slice) -> str:
2121
"""
2222
Creates a pointer.
2323

polaris/dataset/converters/_pdb.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from pathlib import Path
2-
from typing import TYPE_CHECKING, Optional, Sequence, Union
2+
from typing import TYPE_CHECKING, Optional, Sequence
33

44
import fastpdb
55
import numpy as np
@@ -156,7 +156,7 @@ def _load_pdb(self, path: str, pdb_pointer=None) -> dict:
156156
return pdb_dict
157157

158158
def _convert_pdb(
159-
self, path: str, factory: "DatasetFactory", pdb_pointer: Union[str, int], append: bool = False
159+
self, path: str, factory: "DatasetFactory", pdb_pointer: str | int, append: bool = False
160160
) -> FactoryProduct:
161161
"""
162162
Convert a single pdb to zarr file

polaris/dataset/zarr/_checksum.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@
3838
from functools import total_ordering
3939
from json import dumps
4040
from pathlib import Path
41-
from typing import List, Tuple
4241

4342
import fsspec
4443
import zarr
@@ -52,7 +51,7 @@
5251
ZARR_DIGEST_PATTERN = "([0-9a-f]{32})-([0-9]+)-([0-9]+)"
5352

5453

55-
def compute_zarr_checksum(zarr_root_path: str) -> Tuple["_ZarrDirectoryDigest", List["ZarrFileChecksum"]]:
54+
def compute_zarr_checksum(zarr_root_path: str) -> tuple["_ZarrDirectoryDigest", list["ZarrFileChecksum"]]:
5655
r"""
5756
Implements an algorithm to compute the Zarr checksum.
5857

polaris/evaluate/metrics/docking_metrics.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
# This script includes docking related evaluation metrics.
22

3-
from typing import Union, List
43

54
import numpy as np
65
from rdkit.Chem.rdMolAlign import CalcRMS
@@ -36,7 +35,7 @@ def _rmsd(mol_probe: dm.Mol, mol_ref: dm.Mol) -> float:
3635
)
3736

3837

39-
def rmsd_coverage(y_pred: Union[str, List[dm.Mol]], y_true: Union[str, list[dm.Mol]], max_rsmd: float = 2):
38+
def rmsd_coverage(y_pred: str | list[dm.Mol], y_true: str | list[dm.Mol], max_rsmd: float = 2):
4039
"""
4140
Calculate the coverage of molecules with an RMSD less than a threshold (2 Å by default) compared to the reference molecule conformer.
4241

polaris/hub/settings.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
from typing import Union
21
from urllib.parse import urljoin
32

43
from pydantic import ValidationInfo, field_validator
@@ -57,7 +56,7 @@ class PolarisHubSettings(BaseSettings):
5756
client_id: str = "agQP2xVM6JqMHvGc"
5857

5958
# Networking settings
60-
ca_bundle: Union[str, bool, None] = None
59+
ca_bundle: str | bool | None = None
6160
default_timeout: TimeoutTypes = (10, 200)
6261

6362
@field_validator("api_url", mode="before")

polaris/utils/types.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from enum import Enum
2-
from typing import Annotated, Any, Literal, Optional, Tuple, Union
2+
from typing import Annotated, Any, Literal, Optional
33

44
import numpy as np
55
from pydantic import (
@@ -19,7 +19,7 @@
1919
A split is defined by a sequence of integers.
2020
"""
2121

22-
SplitType: TypeAlias = tuple[SplitIndicesType, Union[SplitIndicesType, dict[str, SplitIndicesType]]]
22+
SplitType: TypeAlias = tuple[SplitIndicesType, SplitIndicesType | dict[str, SplitIndicesType]]
2323
"""
2424
A split is a pair of which the first item is always assumed to be the train set.
2525
The second item can either be a single test set or a dictionary with multiple, named test sets.
@@ -47,7 +47,7 @@
4747
that looks like {"test_set_name": {"target_name": np.ndarray}}.
4848
"""
4949

50-
DatapointPartType = Union[Any, tuple[Any], dict[str, Any]]
50+
DatapointPartType = Any | tuple[Any] | dict[str, Any]
5151
DatapointType: TypeAlias = tuple[DatapointPartType, DatapointPartType]
5252
"""
5353
A datapoint has:
@@ -109,7 +109,7 @@
109109
Type to specify access to a dataset, benchmark or result in the Hub.
110110
"""
111111

112-
TimeoutTypes = Union[Tuple[int, int], Literal["timeout", "never"]]
112+
TimeoutTypes = tuple[int, int] | Literal["timeout", "never"]
113113
"""
114114
Timeout types for specifying maximum wait times.
115115
"""
@@ -150,7 +150,7 @@
150150
- A single row, e.g. dataset[0]
151151
- Specify a specific value, e.g. dataset[0, "col1"]
152152
153-
There are more exciting options we could implement, such as slicing,
153+
There are more exciting options we could implement, such as slicing,
154154
but this gets complex.
155155
"""
156156

0 commit comments

Comments (0)