diff --git a/poetry.lock b/poetry.lock
index cf938da..4ad3e94 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1569,13 +1569,13 @@ secure-local-storage = ["keyring (>=23.1.0,<26.0.0)"]
 
 [[package]]
 name = "snowflake-sqlalchemy"
-version = "1.7.1"
+version = "1.7.2"
 description = "Snowflake SQLAlchemy Dialect"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "snowflake_sqlalchemy-1.7.1-py3-none-any.whl", hash = "sha256:eecb63e6830e7fec2a0fc5c583c0e9903fe1b2ea40bcac974e03932cd24662f2"},
-    {file = "snowflake_sqlalchemy-1.7.1.tar.gz", hash = "sha256:a06b78d8b83ca74318e6fbb2982b9fbd9ce99e202f502c1f6af7ba69d05da1f5"},
+    {file = "snowflake_sqlalchemy-1.7.2-py3-none-any.whl", hash = "sha256:db4e5350e469adbbda034d6bd1c948c5a3e88994405483ee9a76caf18cbe9958"},
+    {file = "snowflake_sqlalchemy-1.7.2.tar.gz", hash = "sha256:083f9113ce5b7e9fb21ca6d748aee210117f6f2bd767f08415471796fc42ad37"},
 ]
 
 [package.dependencies]
@@ -1583,7 +1583,7 @@ snowflake-connector-python = "<4.0.0"
 sqlalchemy = ">=1.4.19"
 
 [package.extras]
-development = ["mock", "numpy", "pre-commit", "pytest", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytz", "syrupy (==4.6.1)"]
+development = ["mock", "numpy", "pre-commit", "pytest", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytz", "setuptools", "syrupy (==4.6.1)"]
 pandas = ["snowflake-connector-python[pandas]"]
 
 [[package]]
@@ -1793,4 +1793,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.9"
-content-hash = "215535b0edfaa15ea1f32ca9e5b58e6a1af91c42b69446a5b18ffc05162d30c2"
+content-hash = "1202589a9b885e75fbeb08d81d070dd565906c5f1c0d0b45912b48895b57bf10"
diff --git a/pyproject.toml b/pyproject.toml
index 3783efa..ae62307 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,7 +17,7 @@ packages = [
 
 [tool.poetry.dependencies]
 python = ">=3.9"
 cryptography = ">=40"
-snowflake-sqlalchemy = "==1.7.1"
+snowflake-sqlalchemy = "==1.7.2"
 snowflake-connector-python = { version = "<4.0.0", extras = ["secure-local-storage"] }
 sqlalchemy = "~=2.0.31"
@@ -28,6 +28,7 @@ version = "~=0.43.0"
 coverage = ">=7.2.7"
 pytest = ">=7.4.3"
 pytest-xdist = ">=3.3.1"
+requests = ">=2.32.3"
 
 [tool.poetry.group.dev.dependencies.singer-sdk]
 version="~=0.43.0"
diff --git a/tests/core.py b/tests/core.py
index 79dc433..4324793 100644
--- a/tests/core.py
+++ b/tests/core.py
@@ -5,6 +5,7 @@
 import pytest
 import snowflake.sqlalchemy.custom_types as sct
 import sqlalchemy as sa
+from requests.structures import CaseInsensitiveDict
 from singer_sdk.testing.suites import TestSuite
 from singer_sdk.testing.target_tests import (
     TargetArrayData,
@@ -44,17 +45,19 @@ def validate(self) -> None:
             assert row[1] == '[\n "apple",\n "orange",\n "pear"\n]'
 
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sa.DECIMAL,
-            "fruits": sct.VARIANT,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sa.DECIMAL,
+                "fruits": sct.VARIANT,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -68,30 +71,32 @@ def validate(self) -> None:
         connector = self.target.default_sink_class.connector_class(self.target.config)
         table = f"{self.target.config['database']}.{self.target.config['default_target_schema']}.ForecastingTypeToCategory".upper()  # noqa: E501
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sa.VARCHAR,
-            "isdeleted": sa.types.BOOLEAN,
-            "createddate": sct.TIMESTAMP_NTZ,
-            "createdbyid": sct.STRING,
-            "lastmodifieddate": sct.TIMESTAMP_NTZ,
-            "lastmodifiedbyid": sct.STRING,
-            "systemmodstamp": sct.TIMESTAMP_NTZ,
-            "forecastingtypeid": sct.STRING,
-            "forecastingitemcategory": sct.STRING,
-            "displayposition": sct.NUMBER,
-            "isadjustable": sa.types.BOOLEAN,
-            "isowneradjustable": sa.types.BOOLEAN,
-            "age": sct.NUMBER,
-            "newcamelcasedattribute": sct.STRING,
-            "_attribute_startswith_underscore": sct.STRING,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sa.VARCHAR,
+                "isdeleted": sa.types.BOOLEAN,
+                "createddate": sct.TIMESTAMP_NTZ,
+                "createdbyid": sct.STRING,
+                "lastmodifieddate": sct.TIMESTAMP_NTZ,
+                "lastmodifiedbyid": sct.STRING,
+                "systemmodstamp": sct.TIMESTAMP_NTZ,
+                "forecastingtypeid": sct.STRING,
+                "forecastingitemcategory": sct.STRING,
+                "displayposition": sct.NUMBER,
+                "isadjustable": sa.types.BOOLEAN,
+                "isowneradjustable": sa.types.BOOLEAN,
+                "age": sct.NUMBER,
+                "newcamelcasedattribute": sct.STRING,
+                "_attribute_startswith_underscore": sct.STRING,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -120,17 +125,19 @@ def validate(self) -> None:
             assert expected_value.get(row[0]) == row[1]
 
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sct.NUMBER,
-            "metric": sct.NUMBER,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sct.NUMBER,
+                "metric": sct.NUMBER,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -154,17 +161,19 @@ def validate(self) -> None:
         )
 
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sct.STRING,
-            "clientname": sct.STRING,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sct.STRING,
+                "clientname": sct.STRING,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -251,16 +260,18 @@ def validate(self) -> None:
             assert len(row) == 1, f"Row has unexpected length {len(row)}"
 
         table_schema = connector.get_table(table)
-        expected_types = {
-            "object_store": sct.VARIANT,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "object_store": sct.VARIANT,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -287,21 +298,23 @@ def validate(self) -> None:
             assert len(row) == 7, f"Row has unexpected length {len(row)}"
 
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sct.NUMBER,
-            "a1": sct.DOUBLE,
-            "a2": sct.STRING,
-            "a3": sa.types.BOOLEAN,
-            "a4": sct.VARIANT,
-            "a5": sct.VARIANT,
-            "a6": sct.NUMBER,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sct.NUMBER,
+                "a1": sct.DOUBLE,
+                "a2": sct.STRING,
+                "a3": sa.types.BOOLEAN,
+                "a4": sct.VARIANT,
+                "a5": sct.VARIANT,
+                "a6": sct.NUMBER,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(
@@ -523,19 +536,21 @@ def validate(self) -> None:
         connector = self.target.default_sink_class.connector_class(self.target.config)
         table = f"{self.target.config['database']}.{self.target.config['default_target_schema']}.{self.name}".upper()
         table_schema = connector.get_table(table)
-        expected_types = {
-            "id": sct.NUMBER,
-            "col_max_length_str": sct.STRING,
-            "col_multiple_of": sct.DOUBLE,
-            "col_multiple_of_int": sct.DOUBLE,
-            "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_batched_at": sct.TIMESTAMP_NTZ,
-            "_sdc_received_at": sct.TIMESTAMP_NTZ,
-            "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
-            "_sdc_sync_started_at": sct.NUMBER,
-            "_sdc_table_version": sct.NUMBER,
-            "_sdc_sequence": sct.NUMBER,
-        }
+        expected_types = CaseInsensitiveDict(
+            {
+                "id": sct.NUMBER,
+                "col_max_length_str": sct.STRING,
+                "col_multiple_of": sct.DOUBLE,
+                "col_multiple_of_int": sct.DOUBLE,
+                "_sdc_extracted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_batched_at": sct.TIMESTAMP_NTZ,
+                "_sdc_received_at": sct.TIMESTAMP_NTZ,
+                "_sdc_deleted_at": sct.TIMESTAMP_NTZ,
+                "_sdc_sync_started_at": sct.NUMBER,
+                "_sdc_table_version": sct.NUMBER,
+                "_sdc_sequence": sct.NUMBER,
+            },
+        )
         for column in table_schema.columns:
             assert column.name in expected_types, f"Column {column.name} not found in expected types"
             assert isinstance(