From 2f370fa418b14cbd5c055d7192f0a9ab8c85ad87 Mon Sep 17 00:00:00 2001 From: Louis Pieterse Date: Thu, 5 Sep 2024 15:19:23 +0100 Subject: [PATCH] pin pipelinewise-tap-zendesk to 1.2.1 (#1182) --- CHANGELOG.md | 4 + docs/connectors/taps.rst | 76 --- docs/connectors/taps/google_analytics.rst | 108 ---- docs/connectors/taps/oracle.rst | 215 ------- docs/connectors/taps/salesforce.rst | 105 ---- docs/connectors/taps/shopify.rst | 138 ----- docs/connectors/taps/twilio.rst | 105 ---- docs/connectors/taps/zuora.rst | 105 ---- docs/index.rst | 7 +- .../installation_guide/creating_pipelines.rst | 6 +- docs/installation_guide/installation.rst | 14 - docs/project/licenses.rst | 14 - docs/user_guide/yaml_config.rst | 10 +- setup.py | 2 +- singer-connectors/tap-zendesk/.gitignore | 101 ---- singer-connectors/tap-zendesk/.pylintrc | 552 ------------------ singer-connectors/tap-zendesk/Makefile | 13 - singer-connectors/tap-zendesk/README.md | 15 +- singer-connectors/tap-zendesk/setup.py | 10 +- .../tap-zendesk/tap_zendesk/__init__.py | 36 +- .../tap-zendesk/tap_zendesk/sync.py | 22 +- .../tap-zendesk/{tests => test}/__init__.py | 0 .../{tests => test}/helper/zenpymock.py | 0 .../{tests => test}/test_catalog.json | 0 .../{tests => test}/test_do_sync.py | 5 +- .../tap-zendesk/{tests => test}/test_init.py | 0 .../{tests => test}/test_state.json | 0 27 files changed, 49 insertions(+), 1614 deletions(-) delete mode 100644 docs/connectors/taps/google_analytics.rst delete mode 100644 docs/connectors/taps/oracle.rst delete mode 100644 docs/connectors/taps/salesforce.rst delete mode 100644 docs/connectors/taps/shopify.rst delete mode 100644 docs/connectors/taps/twilio.rst delete mode 100644 docs/connectors/taps/zuora.rst delete mode 100644 singer-connectors/tap-zendesk/.gitignore delete mode 100644 singer-connectors/tap-zendesk/.pylintrc delete mode 100644 singer-connectors/tap-zendesk/Makefile rename singer-connectors/tap-zendesk/{tests => test}/__init__.py (100%) rename singer-connectors/tap-zendesk/{tests => test}/helper/zenpymock.py (100%) rename singer-connectors/tap-zendesk/{tests => test}/test_catalog.json (100%) rename singer-connectors/tap-zendesk/{tests => test}/test_do_sync.py (97%) rename singer-connectors/tap-zendesk/{tests => test}/test_init.py (100%) rename singer-connectors/tap-zendesk/{tests => test}/test_state.json (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0b467e26..7238ad18e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +0.65.2 (2024-09-05) +------------------- +- Pin tap-zendesk to v1.2.1 + 0.65.0 (2024-08-27) ------------------- - Remove FastSync for target Redshift diff --git a/docs/connectors/taps.rst b/docs/connectors/taps.rst index 6538c5e14..d04806722 100644 --- a/docs/connectors/taps.rst +++ b/docs/connectors/taps.rst @@ -48,26 +48,6 @@ PipelineWise can replicate data from the following data sources: :ref:`tap-mongodb` -.. container:: tile-wrapper - - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/snowflake-logo.png - :target: taps/snowflake.html - - :ref:`tap-snowflake` - - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/oracle-logo.png - :target: taps/oracle.html - - :ref:`tap-oracle` - .. container:: tile-wrapper .. container:: tile @@ -89,17 +69,6 @@ PipelineWise can replicate data from the following data sources: :ref:`tap-kafka` -.. container:: tile-wrapper - - .. container:: tile - - .. container:: img-hover-zoom - - .. 
image:: ../img/salesforce-logo.png - :target: taps/salesforce.html - - :ref:`tap-salesforce` - .. container:: tile .. container:: img-hover-zoom @@ -120,35 +89,6 @@ PipelineWise can replicate data from the following data sources: :ref:`tap-jira` - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/twilio-logo.png - :target: taps/twilio.html - - :ref:`tap-twilio` - -.. container:: tile-wrapper - - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/zuora-logo.png - :target: taps/zuora.html - - :ref:`tap-zuora` - - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/google-analytics-logo.png - :target: taps/google_analytics.html - - :ref:`tap-google-analytics` - .. container:: tile-wrapper .. container:: tile @@ -160,16 +100,6 @@ PipelineWise can replicate data from the following data sources: :ref:`tap-github` - .. container:: tile - - .. container:: img-hover-zoom - - .. image:: ../img/shopify-logo.png - :target: taps/shopify.html - - :ref:`tap-shopify` - - .. container:: tile-wrapper .. container:: tile @@ -199,18 +129,12 @@ Configuring taps taps/mysql taps/postgres - taps/oracle taps/s3_csv taps/kafka taps/snowflake taps/mongodb - taps/salesforce taps/zendesk taps/jira - taps/zuora - taps/google_analytics taps/github - taps/shopify taps/slack taps/mixpanel - taps/twilio diff --git a/docs/connectors/taps/google_analytics.rst b/docs/connectors/taps/google_analytics.rst deleted file mode 100644 index ebe0773b5..000000000 --- a/docs/connectors/taps/google_analytics.rst +++ /dev/null @@ -1,108 +0,0 @@ - -.. _tap-google-analytics: - -Tap Google Analytics --------------------- - - -Configuring what to replicate -''''''''''''''''''''''''''''' - -PipelineWise configures every tap with a common structured YAML file format. -A sample YAML for Google Analytics replication can be generated into a project directory by -following the steps in the :ref:`generating_pipelines` section. - -Authorization Methods -''''''''''''''''''''' - -``tap-google-analytics`` supports two different ways of authorization: - - - **Service account based authorization**, where an administrator manually creates a service account with the appropriate permissions to view the account, property, and view you wish to fetch data from - - **OAuth** ``access_token`` **based authorization**, where this tap gets called with a valid ``access_token`` and ``refresh_token`` produced by an OAuth flow conducted in a different system. - -If you're setting up ``tap-google-analytics`` for your own organization and only plan to extract from a handful of different views in the same limited set of properties, Service Account based authorization is the simplest. When you create a service account Google gives you a json file with that service account's credentials called the ``client_secrets.json``, and that's all you need to pass to this tap, and you only have to do it once, so this is the recommended way of configuring ``tap-google-analytics``. - -If you're building something where a wide variety of users need to be able to give access to their Google Analytics, ``tap-google-analytics`` can use an ``access_token`` granted by those users to authorize it's requests to Google. This ``access_token`` is produced by a normal Google OAuth flow, but this flow is outside the scope of ``tap-google-analytics``. 
This is useful if you're integrating ``tap-google-analytics`` with another system, like Stitch Data might do to allow users to configure their extracts themselves without manual config setup. This tap expects an ``access_token``, ``refresh_token``, ``client_id`` and ``client_secret`` to be passed to it in order to authenticate as the user who granted the token and then access their data. - -.. warning:: - - This tap does not currently use any ``STATE`` information for incrementally extracting data. This is currently mitigated by allowing for chunked runs using ``[start_date, end_date]``, but we should definitely add support for using ``STATE`` messages. - - The difficulty on that front is on dynamically deciding which attributes to use for capturing state for ad-hoc reports that do not include the `ga:date` dimension or other combinations of Time Dimensions. - -Example YAML for ``tap-google-analytics``: - -.. code-block:: yaml - - --- - - # ------------------------------------------------------------------------------ - # General Properties - # ------------------------------------------------------------------------------ - id: "google_analytics_sample" # Unique identifier of the tap - name: "Google Analytics" # Name of the tap - type: "tap-google-analytics" # !! THIS SHOULD NOT CHANGE !! - owner: "somebody@foo.com" # Data owner to contact - #send_alert: False # Optional: Disable all configured alerts on this tap - #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel - - - # ------------------------------------------------------------------------------ - # Source (Tap) - Google Analytics connection details - # ------------------------------------------------------------------------------ - db_conn: - view_id: "" - start_date: "2010-01-01" # specifies the date at which the tap will begin pulling data - - # OAuth authentication - oauth_credentials: - client_id: "" - client_secret: "" # Plain string or vault encrypted - access_token: "" # Plain string or vault encrypted - refresh_token: "" # Plain string or vault encrypted - - # Service account based authorization - # key_file_location: "full-path-to-client_secrets.json" - - - # ------------------------------------------------------------------------------ - # Destination (Target) - Target properties - # Connection details should be in the relevant target YAML file - # ------------------------------------------------------------------------------ - target: "snowflake" # ID of the target connector where the data will be loaded - batch_size_rows: 20000 # Batch size for the stream to optimise load performance - stream_buffer_size: 0 # In-memory buffer size (MB) between taps and targets for asynchronous data pipes - default_target_schema: "google-analytic" # Target schema where the data will be loaded - #default_target_schema_select_permission: # Optional: Grant SELECT on schema and tables that created - # - grp_power - #batch_wait_limit_seconds: 3600 # Optional: Maximum time to wait for `batch_size_rows`. Available only for snowflake target. - - # Options only for Snowflake target - #archive_load_files: False # Optional: when enabled, the files loaded to Snowflake will also be stored in `archive_load_files_s3_bucket` - #archive_load_files_s3_prefix: "archive" # Optional: When `archive_load_files` is enabled, the archived files will be placed in the archive S3 bucket under this prefix. 
-    #archive_load_files_s3_bucket: ""           # Optional: When `archive_load_files` is enabled, the archived files will be placed in this bucket. (Default: the value of `s3_bucket` in target snowflake YAML)
-
-
-    # ------------------------------------------------------------------------------
-    # Source to target Schema mapping
-    # ------------------------------------------------------------------------------
-    schemas:
-
-      - source_schema: "google-analytics"       # This is mandatory, but can be anything in this tap type
-        target_schema: "google-analytics"       # Target schema in the destination Data Warehouse
-        #target_schema_select_permissions:      # Optional: Grant SELECT on schema and tables that created
-        #  - grp_stats
-
-        # List of Google Analytics tables to replicate into destination Data Warehouse
-        # List of available tables at https://github.com/transferwise/pipelinewise-tap-google-analytics/blob/master/tap_google_analytics/defaults/default_report_definition.json
-        tables:
-
-          # Tables replicated incrementally
-          - table_name: "website_overview"
-          - table_name: "traffic_sources"
-          - table_name: "monthly_active_users"
-
-          # OPTIONAL: Load time transformations - you can add it to any table
-          #transformations:
-          #  - column: "some_column_to_transform" # Column to transform
-          #    type: "SET-NULL"                   # Transformation type
diff --git a/docs/connectors/taps/oracle.rst b/docs/connectors/taps/oracle.rst
deleted file mode 100644
index ae102265e..000000000
--- a/docs/connectors/taps/oracle.rst
+++ /dev/null
@@ -1,215 +0,0 @@
-
-.. _tap-oracle:
-
-Tap Oracle
-----------
-
-.. warning::
-
-    `Oracle Instant Client `_ is
-    required to use Tap Oracle. If PipelineWise is :ref:`running_in_docker` then no further
-    action is needed because the **PipelineWise Docker Image includes Oracle Instant Client**
-    automatically.
-
-    If PipelineWise is :ref:`building_from_source` then you have to
-    **install Oracle Instant Client manually** on your machine.
-
-
-Oracle setup requirements
-'''''''''''''''''''''''''
-
-**Step 1. Create a PipelineWise database user**
-
-You'll create a dedicated database user for PipelineWise. Create a new user and grant the required permissions
-on the database, schema and tables that you want to replicate:
-
- * ``CREATE USER pipelinewise IDENTIFIED BY <password>``
- * ``GRANT CONNECT TO pipelinewise``
- * ``GRANT CREATE SESSION TO pipelinewise``
- * ``GRANT UNLIMITED TABLESPACE TO pipelinewise``
- * ``GRANT USAGE ON SCHEMA <schema_name> TO pipelinewise``
- * ``GRANT SELECT ON <schema_name>.<table_name> TO pipelinewise`` (Repeat this grant on every table that you want to replicate)
-
-
-**Step 2: Check if you have all the required credentials for replicating data from Oracle**
-
-You will need access to the ``V$DATABASE`` and ``V_$THREAD`` performance views.
-These are required to verify configuration settings while setting up your Oracle database and to
-retrieve the database's Oracle System ID.
-
-
-**Step 3: Configure Log-based Incremental Replication with LogMiner**
-
-.. note::
-
-    This step is only required if you use the :ref:`log_based` replication method.
-
-**Step 3.1: Verify the database's current archiving mode**
-
-To check the database's current mode, run:
-
-.. code-block:: bash
-
-    SELECT LOG_MODE FROM V$DATABASE
-
-
-If the result is ``ARCHIVELOG``, archiving is enabled and no further action is required. Skip to Step 3.3 to configure RMAN backups.
-
-
-**Step 3.2: Enable ARCHIVELOG mode**
-
-1. Shut down the database instance. The database and any associated instances must be shut down before the database's archiving mode can be changed.
-
-.. 
code-block:: bash - - SQL> SHUTDOWN IMMEDIATE - SQL> STARTUP MOUNT - SQL> ALTER DATABASE ARCHIVELOG - SQL> ALTER DATABASE OPEN - -**Step 3.3: Set retention period by RMAN** - -.. code-block:: bash - - RMAN> CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 3 DAYS; - - -**Note**: To ensure that archive log files don’t consume all of your available disk space, -you should also set the ``DB_RECOVERY_FILE_DEST_SIZE`` parameter to a value that agrees with -your available disk quota. Refer to `Oracle's documentation `_ -for more info about this parameter. - - -**Step 3.4: Enable supplemental logging** - -.. code-block:: bash - - SQL> ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS - - -**Note**: Alternatively to enable supplemental logging at the table level, run -``ALTER TABLE . ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS`` -for every table you want to replicate. - - -Verify that supplemental logging was successfully enabled by running the following query: - -.. code-block:: bash - - SELECT SUPPLEMENTAL_LOG_DATA_MIN FROM V$DATABASE - - -If the returned value is ``YES`` or ``IMPLICIT``, supplemental logging is enabled. - -.. warning:: - - If you want to use Log-based Incremental Replication, you’ll also need to - **grant additional permissions** to the ``pipelinewise`` user: - - * ``GRANT EXECUTE_CATALOG_ROLE TO PIPELINEWISE`` - - * ``GRANT SELECT ANY TRANSACTION TO PIPELINEWISE`` - - * ``GRANT SELECT ANY DICTIONARY TO PIPELINEWISE`` - - * ``GRANT EXECUTE ON DBMS_LOGMNR TO PIPELINEWISE`` - - * ``GRANT EXECUTE ON DBMS_LOGMNR_D TO PIPELINEWISE`` - - * ``GRANT SELECT ON SYS.V_$DATABASE TO PIPELINEWISE`` - - * ``GRANT SELECT ON SYS.V_$ARCHIVED_LOG TO PIPELINEWISE`` - - * ``GRANT SELECT ON SYS.V_$LOGMNR_CONTENTS TO PIPELINEWISE`` - - **If you’re using version 12 of Oracle**, you’ll also need to grant the - ``LOGMINING`` privilege to the PipelineWise user: - - * ``GRANT LOGMINING TO PIPELINEWISE`` - - -Configuring what to replicate -''''''''''''''''''''''''''''' - -PipelineWise configures every tap with a common structured YAML file format. -A sample YAML for Oracle replication can be generated into a project directory by -following the steps in the :ref:`generating_pipelines` section. - -Example YAML for ``tap-oracle``: - -.. code-block:: yaml - - --- - - # ------------------------------------------------------------------------------ - # General Properties - # ------------------------------------------------------------------------------ - id: "oracle_sample" # Unique identifier of the tap - name: "Sample Oracle Database" # Name of the tap - type: "tap-oracle" # !! THIS SHOULD NOT CHANGE !! 
- owner: "somebody@foo.com" # Data owner to contact - #send_alert: False # Optional: Disable all configured alerts on this tap - #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel - - - # ------------------------------------------------------------------------------ - # Source (Tap) - Oracle connection details - # ------------------------------------------------------------------------------ - db_conn: - sid: "" # Oracle SID - host: "" # Oracle host - port: 1521 # Oracle port - user: "" # Oracle user - password: "" # Plain string or vault encrypted - #filter_schemas: "SCHEMA1,SCHEMA2" # Optional: Scan only the required schemas - # to improve the performance of - # data extraction - - - # ------------------------------------------------------------------------------ - # Destination (Target) - Target properties - # Connection details should be in the relevant target YAML file - # ------------------------------------------------------------------------------ - target: "snowflake" # ID of the target connector where the data will be loaded - batch_size_rows: 20000 # Batch size for the stream to optimise load performance - stream_buffer_size: 0 # In-memory buffer size (MB) between taps and targets for asynchronous data pipes - #batch_wait_limit_seconds: 3600 # Optional: Maximum time to wait for `batch_size_rows`. Available only for snowflake target. - - # Options only for Snowflake target - #archive_load_files: False # Optional: when enabled, the files loaded to Snowflake will also be stored in `archive_load_files_s3_bucket` - #archive_load_files_s3_prefix: "archive" # Optional: When `archive_load_files` is enabled, the archived files will be placed in the archive S3 bucket under this prefix. - #archive_load_files_s3_bucket: "" # Optional: When `archive_load_files` is enabled, the archived files will be placed in this bucket. (Default: the value of `s3_bucket` in target snowflake YAML) - - - # ------------------------------------------------------------------------------ - # Source to target Schema mapping - # ------------------------------------------------------------------------------ - schemas: - - - source_schema: "SCHEMA1" # Source schema in Oracle with tables - target_schema: "repl_oracle" # Target schema in the destination Data Warehouse - target_schema_select_permissions: # Optional: Grant SELECT on schema and tables that created - - grp_stats - - # List of tables to replicate from Oracle to destination Data Warehouse - # - # Please check the Replication Strategies section in the documentation to understand the differences. - # For LOG_BASED replication method you might need to adjust the source Oracle database. - tables: - - table_name: "TABLE_ONE" - replication_method: "INCREMENTAL" # One of INCREMENTAL, LOG_BASED and FULL_TABLE - replication_key: "LAST_UPDATE" # Important: Incremental load always needs replication key - - # OPTIONAL: Load time transformations - #transformations: - # - column: "last_name" # Column to transform - # type: "SET-NULL" # Transformation type - - # You can add as many tables as you need... - - table_name: "TABLE_TWO" - replication_method: "LOG_BASED" # Important! Log based must be enabled in Oracle - - # You can add as many schemas as you need... 
-      # Uncomment this if you want to replicate tables from multiple schemas
-      #- source_schema: "another_schema_in_oracle"
-      #  target_schema: "another
diff --git a/docs/connectors/taps/salesforce.rst b/docs/connectors/taps/salesforce.rst
deleted file mode 100644
index eb53901f6..000000000
--- a/docs/connectors/taps/salesforce.rst
+++ /dev/null
@@ -1,105 +0,0 @@
-
-.. _tap-salesforce:
-
-Tap Salesforce
---------------
-
-Connecting to Salesforce
-''''''''''''''''''''''''
-
-.. warning::
-
-    This section of the documentation is work in progress.
-
-
-Configuring what to replicate
-'''''''''''''''''''''''''''''
-
-PipelineWise configures every tap with a common structured YAML file format.
-A sample YAML for Salesforce replication can be generated into a project directory by
-following the steps in the :ref:`generating_pipelines` section.
-
-Example YAML for tap-salesforce:
-
-.. code-block:: yaml
-
-    ---
-
-    # ------------------------------------------------------------------------------
-    # General Properties
-    # ------------------------------------------------------------------------------
-    id: "salesforce"                     # Unique identifier of the tap
-    name: "Sample data on Salesforce"    # Name of the tap
-    type: "tap-salesforce"               # !! THIS SHOULD NOT CHANGE !!
-    owner: "somebody@foo.com"            # Data owner to contact
-    #send_alert: False                   # Optional: Disable all configured alerts on this tap
-    #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel
-
-
-    # ------------------------------------------------------------------------------
-    # Source (Tap) - Salesforce connection details
-    #
-    # The client_id and client_secret keys are your OAuth Salesforce App secrets.
-    # The refresh_token is a secret created during the OAuth flow. For more info on
-    # the Salesforce OAuth flow, visit the Salesforce documentation at
-    # https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/intro_understanding_web_server_oauth_flow.htm
-    #
-    # The api_type to use when extracting data from Salesforce. This can be BULK or REST.
-    # Further details about API types at https://www.stitchdata.com/docs/integrations/saas/salesforce#bulk-vs-rest-api
-    # ------------------------------------------------------------------------------
-    db_conn:
-      client_id: ""                      # Salesforce Client ID
-      client_secret: ""                  # Salesforce Client Secret
-      refresh_token: ""                  # Salesforce refresh token, created during the OAuth flow. Plain string or vault encrypted
-
-
-    # ------------------------------------------------------------------------------
-    # Source to target Schema mapping
-    # ------------------------------------------------------------------------------
-    schemas:
-      - source_schema: "salesforce"      # This is mandatory, but can be anything in this tap type
-        target_schema: "salesforce"      # Target schema in the destination Data Warehouse
-
-        # List of Salesforce tables to replicate into destination Data Warehouse
-        # Tap-Salesforce will use the best incremental strategies automatically to replicate data
-        #
-        # Tap-Salesforce currently supports the replication of the majority of Salesforce objects,
-        # with the exception of those listed in the Unsupported Objects row of this table at
-        # https://www.stitchdata.com/docs/integrations/saas/salesforce#bulk-vs-rest-api
-        #
-        #
-        # This section will only cover a few of the most popular tables the Salesforce integration offers.
- # See the Salesforce Object Reference guide for info on objects not listed here, including the - # fields available in each object at https://resources.docs.salesforce.com/sfdc/pdf/object_reference.pdf - tables: - - table_name: "Account" - - table_name: "Contact" - - table_name: "Lead" - - table_name: "Opportunity" - - table_name: "User" - - # OPTIONAL: Load time transformations - #transformations: - # - column: "last_name" # Column to transform - # type: "SET-NULL" # Transformation type - diff --git a/docs/connectors/taps/shopify.rst b/docs/connectors/taps/shopify.rst deleted file mode 100644 index 5067ec775..000000000 --- a/docs/connectors/taps/shopify.rst +++ /dev/null @@ -1,138 +0,0 @@ - -.. _tap-shopify: - -Tap Shopify ------------ - -Configure access to your Shopify store -'''''''''''''''''''''''''''''''''''''' - -In order to extract your Shopify data, you will need: - -- Store Subdomain -- Private App API Password -- Start Date - -Store Subdomain -''''''''''''''' - -The store subdomain can be derived from your Shopify admin URL. - -If your admin URL starts with :code:`https://my-first-store.myshopify.com/`, your store subdomain is -:code:`my-first-store`. - - -Private App API Password -'''''''''''''''''''''''' - -You need to create a `Private App `_ -API password to extract data from your Shopify Shop: - -1. Log in to your Shopify store admin at :code:`https://.myshopify.com/admin` -2. Click "Apps" in the sidebar on the left -3. On the bottom of the page, click "Manage private apps" next to "Working with a developer on your shop?" -4. Click the "Create a new private app" button -5. Enter a "Private app name" of your choosing, e.g. "Singer" -6. Enter your email address under "Emergency developer email" -7. In the "Admin API" section, click "▼ Review disabled Admin API permissions" -8. Choose "Read access" rather than "No access" in the access level dropdowns for the following permissions: - - 1. Products, variants and collections - :code:`read_products, write_products` - 2. Orders, transactions and fulfillments - :code:`read_orders, write_orders` - 3. Customer details and customer groups - :code:`read_customers, write_customers` - -9. Click "Save" -10. In the modal that appears, click "I understand, create the app" - -Once the app has been created, you can locate the API password: - -1. In the "Admin API" section on the private app details page, find the "Password" field and click "Show" -2. The value that appears (starting with :code:`shppa_`) is your API password. - -Start Date -'''''''''' - -This property determines how much historical data will be extracted. - - -Configuring what to extract -''''''''''''''''''''''''''' - -PipelineWise configures every tap with a common structured YAML file format. -A sample YAML for Jira replication can be generated into a project directory by -following the steps in the :ref:`generating_pipelines` section. - -Example YAML for ``tap-shopify``: - -.. code-block:: yaml - - --- - - # ------------------------------------------------------------------------------ - # General Properties - # ------------------------------------------------------------------------------ - id: "shopify" # Unique identifier of the tap - name: "Shopify" # Name of the tap - type: "tap-shopify" # !! THIS SHOULD NOT CHANGE !! 
-    owner: "somebody@foo.com"            # Data owner to contact
-    #send_alert: False                   # Optional: Disable all configured alerts on this tap
-    #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel
-
-
-    # ------------------------------------------------------------------------------
-    # Source (Tap) - Shopify connection details
-    # ------------------------------------------------------------------------------
-    db_conn:
-      shop: ""                           # Shopify Store Subdomain
-      api_key: ""                        # Shopify Private App API Password
-      start_date: "2019-01-01"           # Sync data from this date onwards
-
-
-    # ------------------------------------------------------------------------------
-    # Destination (Target) - Target properties
-    # Connection details should be in the relevant target YAML file
-    # ------------------------------------------------------------------------------
-    target: "snowflake"                  # ID of the target connector where the data will be loaded
-    batch_size_rows: 20000               # Batch size for the stream to optimise load performance
-    stream_buffer_size: 0                # In-memory buffer size (MB) between taps and targets for asynchronous data pipes
-    default_target_schema: "shopify"     # Target schema where the data will be loaded
-    #batch_wait_limit_seconds: 3600      # Optional: Maximum time to wait for `batch_size_rows`. Available only for snowflake target.
-
-    # Options only for Snowflake target
-    #archive_load_files: False           # Optional: when enabled, the files loaded to Snowflake will also be stored in `archive_load_files_s3_bucket`
-    #archive_load_files_s3_prefix: "archive" # Optional: When `archive_load_files` is enabled, the archived files will be placed in the archive S3 bucket under this prefix.
-    #archive_load_files_s3_bucket: ""    # Optional: When `archive_load_files` is enabled, the archived files will be placed in this bucket. (Default: the value of `s3_bucket` in target snowflake YAML)
-
-
-    # ------------------------------------------------------------------------------
-    # Source to target Schema mapping
-    # ------------------------------------------------------------------------------
-    schemas:
-
-      - source_schema: "shopify"         # This is mandatory, but can be anything in this tap type
-        target_schema: "shopify"         # Target schema in the destination Data Warehouse
-        #target_schema_select_permissions: # Optional: Grant SELECT on schema and tables that created
-        #  - grp_stats
-
-        # List of Shopify tables to load into destination Data Warehouse
-        # Tap-Shopify will use the best incremental strategies automatically to replicate data
-        tables:
-          # Supported tables
-          - table_name: "orders"
-          - table_name: "customers"
-          - table_name: "products"
-          - table_name: "transactions"
-
-
-          # Additional supported tables
-          #- table_name: "custom_collections"
-          #- table_name: "abandoned_checkouts"
-          #- table_name: "metafields"
-          #- table_name: "order_refunds"
-          #- table_name: "collects"
-
-
-          # OPTIONAL: Load time transformations - you can add it to any table
-          #transformations:
-          #  - column: "some_column_to_transform" # Column to transform
-          #    type: "SET-NULL"                   # Transformation type
diff --git a/docs/connectors/taps/twilio.rst b/docs/connectors/taps/twilio.rst
deleted file mode 100644
index 146e16ee2..000000000
--- a/docs/connectors/taps/twilio.rst
+++ /dev/null
@@ -1,105 +0,0 @@
-
-.. _tap-twilio:
-
-Tap Twilio
-----------
-
-
-Configuring what to replicate
-'''''''''''''''''''''''''''''
-
-PipelineWise configures every tap with a common structured YAML file format.
-A sample YAML for Twilio replication can be generated into a project directory by -following the steps in the :ref:`generating_pipelines` section. - -Example YAML for tap-twilio: - -.. code-block:: yaml - - --- - - # ------------------------------------------------------------------------------ - # General Properties - # ------------------------------------------------------------------------------ - id: "twilio" # Unique identifier of the tap - name: "Twilio" # Name of the tap - type: "tap-twilio" # !! THIS SHOULD NOT CHANGE !! - owner: "somebody@foo.com" # Data owner to contact - #send_alert: False # Optional: Disable all configured alerts on this tap - #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel - - - # ------------------------------------------------------------------------------ - # Source (Tap) - Twilio connection details - # ------------------------------------------------------------------------------ - db_conn: - account_sid: # Twilio Account SID - auth_token: # Twilio Auth token - start_date: "2021-02-01T00:00:00Z" # The default value to use if no bookmark exists for an endpoint. ISO-8601 datetime formatted string - user_agent: "someone@transferwise.com" # Optional: Process and email for API logging purposes. - - - # ------------------------------------------------------------------------------ - # Destination (Target) - Target properties - # Connection details should be in the relevant target YAML file - # ------------------------------------------------------------------------------ - target: "snowflake" # ID of the target connector where the data will be loaded - batch_size_rows: 20000 # Batch size for the stream to optimise load performance - stream_buffer_size: 0 # In-memory buffer size (MB) between taps and targets for asynchronous data pipes - default_target_schema: "twilio" # Target schema where the data will be loaded - #default_target_schema_select_permission: # Optional: Grant SELECT on schema and tables that created - # - grp_power - #batch_wait_limit_seconds: 3600 # Optional: Maximum time to wait for `batch_size_rows`. Available only for snowflake target. - - # Options only for Snowflake target - #archive_load_files: False # Optional: when enabled, the files loaded to Snowflake will also be stored in `archive_load_files_s3_bucket` - #archive_load_files_s3_prefix: "archive" # Optional: When `archive_load_files` is enabled, the archived files will be placed in the archive S3 bucket under this prefix. - #archive_load_files_s3_bucket: "" # Optional: When `archive_load_files` is enabled, the archived files will be placed in this bucket. 
(Default: the value of `s3_bucket` in target snowflake YAML) - - - # ------------------------------------------------------------------------------ - # Source to target Schema mapping - # ------------------------------------------------------------------------------ - schemas: - - - source_schema: "twilio" # This is mandatory, but can be anything in this tap type - target_schema: "twilio" # Target schema in the destination Data Warehouse - target_schema_select_permissions: # Optional: Grant SELECT on schema and tables that created - - grp_stats - - # List of Twilio tables to load into destination Data Warehouse - # Tap-Twilio will use the best incremental strategies automatically to replicate data - tables: - # Incrementally loaded tables - # TaskRouter resources - - table_name: "workspaces" - - table_name: "activities" - - table_name: "events" - - table_name: "tasks" - - table_name: "task_channels" - - table_name: "task_queues" - - table_name: "workers" - - table_name: "workflows" - # Programmable Chat resources - - table_name: "services" - - table_name: "roles" - - table_name: "chat_channels" - - table_name: "users" - - - # Tables that cannot load incrementally and will use FULL_TABLE method - # TaskRouter resources - - table_name: "cumulative_statistics" - - table_name: "channels" - - # Programmable Chat resources - # These 2 resources are using FULL_TABLE method and can pull huge amount of data from the twilio api at every sync. - # Please use it with caution. - #- table_name: "members" - #- table_name: "chat_messages" - - - # OPTIONAL: Load time transformations - you can add it to any table - #transformations: - # - column: "some_column_to_transform" # Column to transform - # type: "SET-NULL" # Transformation type diff --git a/docs/connectors/taps/zuora.rst b/docs/connectors/taps/zuora.rst deleted file mode 100644 index 410fcb66e..000000000 --- a/docs/connectors/taps/zuora.rst +++ /dev/null @@ -1,105 +0,0 @@ - -.. _tap-zuora: - -Tap Zuora --------------- - -Connecting to Zuora -'''''''''''''''''''''''' - - -Configuring what to replicate -''''''''''''''''''''''''''''' - -PipelineWise configures every tap with a common structured YAML file format. -A sample YAML for Zuora replication can be generated into a project directory by -following the steps in the :ref:`generating_pipelines` section. - -Example YAML for tap-zuora: - -.. code-block:: yaml - - --- - - # ------------------------------------------------------------------------------ - # General Properties - # ------------------------------------------------------------------------------ - id: "zuora" # Unique identifier of the tap - name: "Sample data on Zuora" # Name of the tap - type: "tap-zuora" # !! THIS SHOULD NOT CHANGE !! - owner: "somebody@foo.com" # Data owner to contact - #send_alert: False # Optional: Disable all configured alerts on this tap - #slack_alert_channel: "#tap-channel" # Optional: Sending a copy of specific tap alerts to this slack channel - - - # ------------------------------------------------------------------------------ - # Source (Tap) - Zuora connection details - # - # The client_id and client_secret keys are your OAuth Salesforce App secrets. - # The refresh_token is a secret created during the OAuth flow. For more info on - # the Salesforce OAuth flow, visit the Salesforce documentation at - # https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/intro_understanding_web_server_oauth_flow.htm - # - # api_type to use extracting data from Zuora. This can be AQUA or REST. 
- # Further details about API types at https://www.stitchdata.com/docs/integrations/saas/zuora#rest-vs-aqua-api - # ------------------------------------------------------------------------------ - db_conn: - username: "" # Zuora username - password: "" # Zuora password - partner_id: "" # In case of using the AQUA api, a partner id is required - start_date: "2019-01-01T00:00:00Z" # Bound on api queries when searching for records - api_type: "AQUA" # Zuora API Type: AQUA or REST - sandbox: "true" # Determines which api location to call - european: "true" # Determines which api location to call - - - - # ------------------------------------------------------------------------------ - # Destination (Target) - Target properties - # Connection details should be in the relevant target YAML file - # ------------------------------------------------------------------------------ - target: "snowflake" # ID of the target connector where the data will be loaded - batch_size_rows: 20000 # Batch size for the stream to optimise load performance - stream_buffer_size: 0 # In-memory buffer size (MB) between taps and targets for asynchronous data pipes - default_target_schema: "zuora" # Target schema where the data will be loaded - default_target_schema_select_permission: # Optional: Grant SELECT on schema and tables that created - - grp_power - #batch_wait_limit_seconds: 3600 # Optional: Maximum time to wait for `batch_size_rows`. Available only for snowflake target. - - # Options only for Snowflake target - #archive_load_files: False # Optional: when enabled, the files loaded to Snowflake will also be stored in `archive_load_files_s3_bucket` - #archive_load_files_s3_prefix: "archive" # Optional: When `archive_load_files` is enabled, the archived files will be placed in the archive S3 bucket under this prefix. - #archive_load_files_s3_bucket: "" # Optional: When `archive_load_files` is enabled, the archived files will be placed in this bucket. 
(Default: the value of `s3_bucket` in target snowflake YAML)
-
-
-    # ------------------------------------------------------------------------------
-    # Source to target Schema mapping
-    # ------------------------------------------------------------------------------
-    schemas:
-      - source_schema: "zuora"           # This is mandatory, but can be anything in this tap type
-        target_schema: "zuora"           # Target schema in the destination Data Warehouse
-        default_target_schema_select_permission: # Optional: Grant SELECT on schema and tables that created
-          - grp_power
-
-
-        # List of Zuora tables to replicate into destination Data Warehouse
-        # Tap-Zuora will default to FULL_TABLE replication, but supports INCREMENTAL replication, which is recommended
-        #
-        # The available object types (and their replication keys to use, if supported) are listed at
-        # https://www.stitchdata.com/docs/integrations/saas/zuora#zuora-entity-relationships
-        #
-        # Unsupported objects are listed at
-        # https://www.stitchdata.com/docs/integrations/saas/zuora#unsupported-objects
-        #
-        tables:
-          - table_name: "Account"
-            replication_method: "INCREMENTAL"
-            replication_key: "updatedDate"
-          - table_name: "BillingRun"
-            replication_method: "FULL_TABLE"
-
-        # OPTIONAL: Load time transformations
-        #transformations:
-        #  - column: "last_name"          # Column to transform
-        #    type: "SET-NULL"             # Transformation type
-
diff --git a/docs/index.rst b/docs/index.rst
index b0445e2c6..448f413ce 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,10 +1,11 @@
 Notice
 ======
-To better serve Wise business and customer needs, PipelineWise is shrinking.
-Going forward many components of PipelineWise will be removed or incorporated in the main repo
-
+To better serve Wise business and customer needs, the PipelineWise codebase needs to shrink.
+We have made the difficult decision that, going forward, many components of PipelineWise will be removed or incorporated into the main repo.
 The last version before this decision is `v0.64.1 `_
+We thank everyone in the open-source community who, over the past six years, has helped make PipelineWise a robust product, replicating many terabytes of data daily across heterogeneous systems.
+
 .. image:: img/pipelinewise-with-text.png
    :width: 300
diff --git a/docs/installation_guide/creating_pipelines.rst b/docs/installation_guide/creating_pipelines.rst
index 3be49a159..679b50bbf 100644
--- a/docs/installation_guide/creating_pipelines.rst
+++ b/docs/installation_guide/creating_pipelines.rst
@@ -46,11 +46,9 @@ This will create a ``pipelinewise_samples`` directory with samples for each supported connector:
     ├── tap_mysql_mariadb.yml.sample
     ├── tap_postgres.yml.sample
     ├── tap_s3_csv.yml.sample
-    ├── tap_salesforce.yml.sample
     ├── tap_snowflake.yml.sample
     ├── tap_zendesk.yml.sample
     ├── target_postgres.yml.sample
-    ├── target_redshift.yml.sample
     ├── target_s3_csv.yml.sample
     └── target_snowflake.yml.sample
 
@@ -71,7 +69,7 @@ Once you configured the YAML files you can go to :ref:`import_project_from_yaml
 Environment variables in YAML config
 ------------------------------------
 
-It is possible to use environment variables in the YAML config files. 
+It is possible to use environment variables in the YAML config files.
 This feature is implemented using jinja templates and requires the following syntax to work:
 
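For illustration, a minimal sketch of that syntax, assuming the jinja context exposes an ``env_var`` lookup as described above; the variable name ``TAP_MYSQL_PASSWORD`` is a made-up example:

.. code-block:: yaml

    db_conn:
      user: "my_user"
      password: "{{ env_var['TAP_MYSQL_PASSWORD'] }}"   # Rendered from the runtime environment when the project is imported

.. 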
code-block:: bash @@ -156,7 +154,7 @@ You can edit it with the text editor of your choice: port: 10602 user: "my_user" password: "" # Plain string or Vault Encrypted password - + target: "snowflake_test" # Target ID, should match the id from target_snowflake.yml batch_size_rows: 100000 # Batch size for the stream to optimise load performance diff --git a/docs/installation_guide/installation.rst b/docs/installation_guide/installation.rst index 066bd8f89..57412905c 100644 --- a/docs/installation_guide/installation.rst +++ b/docs/installation_guide/installation.rst @@ -157,38 +157,24 @@ Here’s the list of the singer connectors and if they are installed by default +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-mysql | ./install --connectors=tap-mysql | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-oracle | ./install --connectors=tap-oracle | NO | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-postgres | ./install --connectors=tap-postgres | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-s3-csv | ./install --connectors=tap-s3-csv | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-salesforce | ./install --connectors=tap-salesforce | YES | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-snowflake | ./install --connectors=tap-snowflake | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-zendesk | ./install --connectors=tap-zendesk | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-zuora | ./install --connectors=tap-zuora | NO | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-google-analytics | ./install --connectors=tap-google-analytics | NO | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-github | ./install --connectors=tap-github | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-slack | ./install --connectors=tap-slack | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-shopify | ./install --connectors=tap-shopify | NO | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | tap-mixpanel | ./install --connectors=tap-mixpanel | YES | | 
+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| tap-twilio | ./install --connectors=tap-twilio | YES | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | target-postgres | ./install --connectors=target-postgres | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | target-s3-csv | ./install --connectors=target-s3-csv | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ -| target-redshift | ./install --connectors=target-redshift | YES | | -+----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | target-snowflake | ./install --connectors=target-snowflake | YES | | +----------------------------+---------------------------------------------+----------------------------------+---------------------------------------+ | transform-field | ./install --connectors=transform-field | YES | | diff --git a/docs/project/licenses.rst b/docs/project/licenses.rst index f1102a99a..75457fb16 100644 --- a/docs/project/licenses.rst +++ b/docs/project/licenses.rst @@ -24,8 +24,6 @@ Connectors and Licenses +---------------------------------------------+---------------------------------+ | :ref:`tap-github` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ -| :ref:`tap-google-analytics` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ | :ref:`tap-jira` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ | :ref:`tap-kafka` | AGPL Version 3 | @@ -36,30 +34,18 @@ Connectors and Licenses +---------------------------------------------+---------------------------------+ | :ref:`tap-mysql` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ -| :ref:`tap-oracle` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ | :ref:`tap-postgres` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ | :ref:`tap-s3-csv` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ -| :ref:`tap-salesforce` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ -| :ref:`tap-shopify` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ | :ref:`tap-slack` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ | :ref:`tap-snowflake` | Apache License Version 2.0 | +---------------------------------------------+---------------------------------+ -| :ref:`tap-twilio` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ | :ref:`tap-zendesk` | AGPL Version 3 | +---------------------------------------------+---------------------------------+ -| :ref:`tap-zuora` | AGPL Version 3 | -+---------------------------------------------+---------------------------------+ | :ref:`target-postgres` | Apache License Version 
2.0 | +---------------------------------------------+---------------------------------+ -| :ref:`target-redshift` | Apache License Version 2.0 | -+---------------------------------------------+---------------------------------+ | :ref:`target-s3-csv` | Apache License Version 2.0 | +---------------------------------------------+---------------------------------+ | :ref:`target-snowflake` | Apache License Version 2.0 | diff --git a/docs/user_guide/yaml_config.rst b/docs/user_guide/yaml_config.rst index d7517376d..a48487319 100644 --- a/docs/user_guide/yaml_config.rst +++ b/docs/user_guide/yaml_config.rst @@ -7,31 +7,23 @@ YAML configuration .. warning:: This section of the documentation is work in progress. - + Pipelines using YAML format for configurations. You can find some information and usage of these YAML files in the following sections: * :ref:`creating_pipelines` * :ref:`alerts` - * :ref:`tap-google-analytics` * :ref:`tap-jira` * :ref:`tap-kafka` * :ref:`tap-mongodb` * :ref:`tap-mysql` - * :ref:`tap-oracle` * :ref:`tap-postgres` * :ref:`tap-s3-csv` - * :ref:`tap-salesforce` * :ref:`tap-snowflake` * :ref:`tap-zendesk` - * :ref:`tap-zuora` * :ref:`tap-github` * :ref:`tap-slack` - * :ref:`tap-shopify` * :ref:`tap-mixpanel` - * :ref:`tap-twilio` * :ref:`target-postgres` - * :ref:`target-redshift` * :ref:`target-snowflake` * :ref:`target-s3-csv` - diff --git a/setup.py b/setup.py index fe823c944..92215dbb4 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ setup(name='pipelinewise', python_requires='==3.8.*', - version='0.65.1', + version='0.65.2', description='PipelineWise', long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', diff --git a/singer-connectors/tap-zendesk/.gitignore b/singer-connectors/tap-zendesk/.gitignore deleted file mode 100644 index 817e442da..000000000 --- a/singer-connectors/tap-zendesk/.gitignore +++ /dev/null @@ -1,101 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -env/ -virtualenvs/ -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -*.egg-info/ -.installed.cfg -*.egg - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*,cover -.hypothesis/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# IPython Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# dotenv -.env - -# virtualenv -venv/ -ENV/ - -# Spyder project settings -.spyderproject - -# Rope project settings -.ropeproject - -# Mac -._* -.DS_Store - -# Custom stuff -env.sh -config.json -.autoenv.zsh - -*~ \ No newline at end of file diff --git a/singer-connectors/tap-zendesk/.pylintrc b/singer-connectors/tap-zendesk/.pylintrc deleted file mode 100644 index 63e99a110..000000000 --- a/singer-connectors/tap-zendesk/.pylintrc +++ /dev/null @@ -1,552 +0,0 @@ -# Based on Apache 2.0 licensed code from https://github.com/ClusterHQ/flocker - -[MASTER] - -# Specify a configuration file. 
-#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -# init-hook= - -# Add files or directories to the blacklist. They should be base names, not paths. -ignore= - -# Pickle collected data for later comparisons. -persistent=no - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -# DO NOT CHANGE THIS VALUES >1 HIDE RESULTS!!!!! -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist=ujson - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -disable=wrong-import-order, - broad-except, - missing-module-docstring, - - -enable=import-error, - import-self, - reimported, - wildcard-import, - misplaced-future, - deprecated-module, - unpacking-non-sequence, - invalid-all-object, - undefined-all-variable, - used-before-assignment, - cell-var-from-loop, - global-variable-undefined, - redefine-in-handler, - unused-import, - unused-wildcard-import, - global-variable-not-assigned, - undefined-loop-variable, - global-statement, - global-at-module-level, - bad-open-mode, - redundant-unittest-assert, - boolean-datetime - deprecated-method, - anomalous-unicode-escape-in-string, - anomalous-backslash-in-string, - not-in-loop, - continue-in-finally, - abstract-class-instantiated, - star-needs-assignment-target, - duplicate-argument-name, - return-in-init, - too-many-star-expressions, - nonlocal-and-global, - return-outside-function, - return-arg-in-generator, - invalid-star-assignment-target, - bad-reversed-sequence, - nonexistent-operator, - yield-outside-function, - init-is-generator, - nonlocal-without-binding, - lost-exception, - assert-on-tuple, - dangerous-default-value, - duplicate-key, - useless-else-on-loop - expression-not-assigned, - confusing-with-statement, - unnecessary-lambda, - pointless-statement, - pointless-string-statement, - unnecessary-pass, - unreachable, - eval-used, - exec-used, - using-constant-test, - bad-super-call, - missing-super-argument, - slots-on-old-class, - super-on-old-class, - property-on-old-class, - not-an-iterable, - not-a-mapping, - format-needs-mapping, - truncated-format-string, - missing-format-string-key, - mixed-format-string, - too-few-format-args, - bad-str-strip-call, - too-many-format-args, - bad-format-character, - format-combined-specification, - 
bad-format-string-key, - bad-format-string, - missing-format-attribute, - missing-format-argument-key, - unused-format-string-argument - unused-format-string-key, - invalid-format-index, - bad-indentation, - mixed-indentation, - unnecessary-semicolon, - lowercase-l-suffix, - invalid-encoded-data, - unpacking-in-except, - import-star-module-level, - long-suffix, - old-octal-literal, - old-ne-operator, - backtick, - old-raise-syntax, - metaclass-assignment, - next-method-called, - dict-iter-method, - dict-view-method, - indexing-exception, - raising-string, - using-cmp-argument, - cmp-method, - coerce-method, - delslice-method, - getslice-method, - hex-method, - nonzero-method, - t-method, - setslice-method, - old-division, - logging-format-truncated, - logging-too-few-args, - logging-too-many-args, - logging-unsupported-format, - logging-format-interpolation, - invalid-unary-operand-type, - unsupported-binary-operation, - not-callable, - redundant-keyword-arg, - assignment-from-no-return, - assignment-from-none, - not-context-manager, - repeated-keyword, - missing-kwoa, - no-value-for-parameter, - invalid-sequence-index, - invalid-slice-index, - unexpected-keyword-arg, - unsupported-membership-test, - unsubscriptable-object, - access-member-before-definition, - method-hidden, - assigning-non-slot, - duplicate-bases, - inconsistent-mro, - inherit-non-class, - invalid-slots, - invalid-slots-object, - no-method-argument, - no-self-argument, - unexpected-special-method-signature, - non-iterator-returned, - arguments-differ, - signature-differs, - bad-staticmethod-argument, - non-parent-init-called, - bad-except-order, - catching-non-exception, - bad-exception-context, - notimplemented-raised, - raising-bad-type, - raising-non-exception, - misplaced-bare-raise, - duplicate-except, - nonstandard-exception, - binary-op-exception, - bare-except, - not-async-context-manager, - yield-inside-async-function - -# Needs investigation: -# abstract-method (might be indicating a bug? probably not though) -# protected-access (requires some refactoring) -# attribute-defined-outside-init (requires some refactoring) -# super-init-not-called (requires some cleanup) - -# Things we'd like to enable someday: -# redefined-builtin (requires a bunch of work to clean up our code first) -# redefined-outer-name (requires a bunch of work to clean up our code first) -# undefined-variable (re-enable when pylint fixes https://github.com/PyCQA/pylint/issues/760) -# no-name-in-module (giving us spurious warnings https://github.com/PyCQA/pylint/issues/73) -# unused-argument (need to clean up or code a lot, e.g. prefix unused_?) -# function-redefined (@overload causes lots of spurious warnings) -# too-many-function-args (@overload causes spurious warnings... I think) -# parameter-unpacking (needed for eventual Python 3 compat) -# print-statement (needed for eventual Python 3 compat) -# filter-builtin-not-iterating (Python 3) -# map-builtin-not-iterating (Python 3) -# range-builtin-not-iterating (Python 3) -# zip-builtin-not-iterating (Python 3) -# many others relevant to Python 3 -# unused-variable (a little work to cleanup, is all) - -# ... -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=parseable - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. 
Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-files-output=no
-
-# Tells whether to display a full report or only the messages
-reports=no
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-max-line-length=120
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,dict-separator
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-
-[TYPECHECK]
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis. It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
-
-# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set). This supports can work
-# with qualified names.
-ignored-classes=
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-
-[VARIABLES]
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_$|dummy
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,_cb
-
-
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-# Ignore comments when computing similarities.
-ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[BASIC] - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,input - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,40}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. 
-docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception \ No newline at end of file diff --git a/singer-connectors/tap-zendesk/Makefile b/singer-connectors/tap-zendesk/Makefile deleted file mode 100644 index 93539c352..000000000 --- a/singer-connectors/tap-zendesk/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -venv: - python3 -m venv venv ;\ - . ./venv/bin/activate ;\ - pip install --upgrade pip setuptools wheel ;\ - pip install -e .[test] - -pylint: - . ./venv/bin/activate ;\ - pylint --rcfile .pylintrc tap_zendesk/ - -unit_test: - . ./venv/bin/activate ;\ - pytest -v tests --cov tap_zendesk --cov-fail-under=67 diff --git a/singer-connectors/tap-zendesk/README.md b/singer-connectors/tap-zendesk/README.md index 8da098fc9..b3c641524 100644 --- a/singer-connectors/tap-zendesk/README.md +++ b/singer-connectors/tap-zendesk/README.md @@ -86,19 +86,26 @@ The tap will write bookmarks to stdout which can be captured and passed as an op ### To run tests: -1. Install python test dependencies in a virtual env and run tests +1. Install python test dependencies in a virtual env and run nose unit and integration tests ``` -make venv + python3 -m venv venv + . venv/bin/activate + pip install --upgrade pip + pip install .[test] ``` 2. To run tests: ``` -make unit_test + nosetests test ``` ### To run pylint: 1. 
Install python dependencies and run python linter ``` -make venv pylint + python3 -m venv venv + . venv/bin/activate + pip install --upgrade pip + pip install .[test] + pylint --rcfile .pylintrc tap_zendesk ``` \ No newline at end of file diff --git a/singer-connectors/tap-zendesk/setup.py b/singer-connectors/tap-zendesk/setup.py index 207173e53..234236468 100644 --- a/singer-connectors/tap-zendesk/setup.py +++ b/singer-connectors/tap-zendesk/setup.py @@ -16,14 +16,14 @@ py_modules=['tap_zendesk'], install_requires=[ 'pipelinewise-singer-python==1.*', - 'zenpy==2.0.24', + 'zenpy==2.0.0', ], extras_require={ 'test': [ - 'ipdb==0.13.*', - 'pylint==2.9.*', - 'pytest==6.2.*', - 'pytest-cov==2.12.*', + 'ipdb', + 'pylint', + 'nose', + 'nose-watch', ] }, entry_points=''' diff --git a/singer-connectors/tap-zendesk/tap_zendesk/__init__.py b/singer-connectors/tap-zendesk/tap_zendesk/__init__.py index 23592adc3..4dc11fe37 100755 --- a/singer-connectors/tap-zendesk/tap_zendesk/__init__.py +++ b/singer-connectors/tap-zendesk/tap_zendesk/__init__.py @@ -3,13 +3,12 @@ import json import os import sys -import itertools -import requests -import singer from zenpy import Zenpy +import requests from requests import Session from requests.adapters import HTTPAdapter +import singer from singer import metadata, metrics as singer_metrics from tap_zendesk import metrics as zendesk_metrics from tap_zendesk.discover import discover_streams @@ -37,15 +36,11 @@ # patch Session.request to record HTTP request metrics request = Session.request - def request_metrics_patch(self, method, url, **kwargs): with singer_metrics.http_request_timer(None): return request(self, method, url, **kwargs) - Session.request = request_metrics_patch - - # end patch def do_discover(client): @@ -54,11 +49,9 @@ def do_discover(client): json.dump(catalog, sys.stdout, indent=2) LOGGER.info("Finished discover") - def stream_is_selected(mdata): return mdata.get((), {}).get('selected', False) - def get_selected_streams(catalog): selected_stream_names = [] for stream in catalog.streams: @@ -72,23 +65,21 @@ def get_selected_streams(catalog): 'tickets': ['ticket_audits', 'ticket_metrics', 'ticket_comments'] } - def get_sub_stream_names(): - """ - Get all sub_streams as one list - """ - return list(itertools.chain(*SUB_STREAMS.values())) - + sub_stream_names = [] + for parent_stream in SUB_STREAMS: + sub_stream_names.extend(SUB_STREAMS[parent_stream]) + return sub_stream_names class DependencyException(Exception): pass - def validate_dependencies(selected_stream_ids): errs = [] msg_tmpl = ("Unable to extract {0} data. 
" "To receive {0} data, you also need to select {1}.") - for parent_stream_name, sub_stream_names in SUB_STREAMS.items(): + for parent_stream_name in SUB_STREAMS: + sub_stream_names = SUB_STREAMS[parent_stream_name] for sub_stream_name in sub_stream_names: if sub_stream_name in selected_stream_ids and parent_stream_name not in selected_stream_ids: errs.append(msg_tmpl.format(sub_stream_name, parent_stream_name)) @@ -96,15 +87,14 @@ def validate_dependencies(selected_stream_ids): if errs: raise DependencyException(" ".join(errs)) - def populate_class_schemas(catalog, selected_stream_names): for stream in catalog.streams: if stream.tap_stream_id in selected_stream_names: STREAMS[stream.tap_stream_id].stream = stream - # pylint: disable=too-many-locals def do_sync(client, catalog, state, start_date): + selected_stream_names = get_selected_streams(catalog) validate_dependencies(selected_stream_names) populate_class_schemas(catalog, selected_stream_names) @@ -127,6 +117,7 @@ def do_sync(client, catalog, state, start_date): # else: # LOGGER.info("%s: Starting", stream_name) + key_properties = metadata.get(mdata, (), 'table-key-properties') singer.write_schema(stream_name, stream.schema.to_dict(), key_properties) @@ -155,7 +146,6 @@ def do_sync(client, catalog, state, start_date): LOGGER.info("Finished sync") zendesk_metrics.log_aggregate_rates() - def oauth_auth(args): if not set(OAUTH_CONFIG_KEYS).issubset(args.config.keys()): LOGGER.debug("OAuth authentication unavailable.") @@ -167,7 +157,6 @@ def oauth_auth(args): "oauth_token": args.config['access_token'], } - def api_token_auth(args): if not set(API_TOKEN_CONFIG_KEYS).issubset(args.config.keys()): LOGGER.debug("API Token authentication unavailable.") @@ -180,20 +169,17 @@ def api_token_auth(args): "token": args.config['api_token'] } - def convert_x_rate_limit_remaining_to_int(response, *args, **kwargs): if 'X-Rate-Limit-Remaining' in response.headers and isinstance(response.headers['X-Rate-Limit-Remaining'], str): response.headers['X-Rate-Limit-Remaining'] = int(response.headers['X-Rate-Limit-Remaining']) return response - def add_session_hooks(session): # This is due version conflict between singer-python and ZenPy # Link: https://github.com/singer-io/singer-python/issues/114 session.hooks['response'].append(convert_x_rate_limit_remaining_to_int) - def get_session(config): """ Add partner information to requests Session object if specified in the config. 
""" if not all(k in config for k in ["marketplace_name", @@ -217,7 +203,6 @@ def get_default_config(): return config - def get_internal_config(user_config, default_config): config = {} for key in default_config.keys(): @@ -225,7 +210,6 @@ def get_internal_config(user_config, default_config): return config - @singer.utils.handle_top_exception(LOGGER) def main(): default_config = get_default_config() diff --git a/singer-connectors/tap-zendesk/tap_zendesk/sync.py b/singer-connectors/tap-zendesk/tap_zendesk/sync.py index a03cd8514..ad97185b1 100644 --- a/singer-connectors/tap-zendesk/tap_zendesk/sync.py +++ b/singer-connectors/tap-zendesk/tap_zendesk/sync.py @@ -1,24 +1,21 @@ # pylint: disable=invalid-name,missing-function-docstring,missing-class-docstring import json -import singer - from zenpy.lib.api_objects import BaseObject from zenpy.lib.proxy import ProxyList -from singer import metrics +import singer +import singer.metrics as metrics from singer import metadata from singer import Transformer LOGGER = singer.get_logger('tap_zendesk') - def process_record(record): """ Serializes Zenpy's internal classes into Python objects via ZendeskEncoder. """ rec_str = json.dumps(record, cls=ZendeskEncoder) rec_dict = json.loads(rec_str) return rec_dict - def sync_stream(state, start_date, instance): stream = instance.stream @@ -44,23 +41,22 @@ def sync_stream(state, start_date, instance): singer.write_record(stream.tap_stream_id, rec) # NB: We will only write state at the end of a stream's sync: # We may find out that there exists a sync that takes too long and can never emit a bookmark - # but we don't know if we can guarantee the order of emitted records. + # but we don't know if we can guarentee the order of emitted records. if instance.replication_method == "INCREMENTAL": singer.write_state(state) return counter.value - class ZendeskEncoder(json.JSONEncoder): - def default(self, o): # pylint: disable=arguments-differ,method-hidden - if isinstance(o, BaseObject): - obj_dict = o.to_dict() + def default(self, obj): # pylint: disable=arguments-differ,method-hidden + if isinstance(obj, BaseObject): + obj_dict = obj.to_dict() for k, v in list(obj_dict.items()): # NB: This might fail if the object inside is callable if callable(v): obj_dict.pop(k) return obj_dict - if isinstance(o, ProxyList): - return o.copy() - return json.JSONEncoder.default(self, o) + if isinstance(obj, ProxyList): + return obj.copy() + return json.JSONEncoder.default(self, obj) diff --git a/singer-connectors/tap-zendesk/tests/__init__.py b/singer-connectors/tap-zendesk/test/__init__.py similarity index 100% rename from singer-connectors/tap-zendesk/tests/__init__.py rename to singer-connectors/tap-zendesk/test/__init__.py diff --git a/singer-connectors/tap-zendesk/tests/helper/zenpymock.py b/singer-connectors/tap-zendesk/test/helper/zenpymock.py similarity index 100% rename from singer-connectors/tap-zendesk/tests/helper/zenpymock.py rename to singer-connectors/tap-zendesk/test/helper/zenpymock.py diff --git a/singer-connectors/tap-zendesk/tests/test_catalog.json b/singer-connectors/tap-zendesk/test/test_catalog.json similarity index 100% rename from singer-connectors/tap-zendesk/tests/test_catalog.json rename to singer-connectors/tap-zendesk/test/test_catalog.json diff --git a/singer-connectors/tap-zendesk/tests/test_do_sync.py b/singer-connectors/tap-zendesk/test/test_do_sync.py similarity index 97% rename from singer-connectors/tap-zendesk/tests/test_do_sync.py rename to singer-connectors/tap-zendesk/test/test_do_sync.py index 
adbbc6a26..0afcd3b62 100644 --- a/singer-connectors/tap-zendesk/tests/test_do_sync.py +++ b/singer-connectors/tap-zendesk/test/test_do_sync.py @@ -10,7 +10,7 @@ from singer import Catalog from tap_zendesk import do_sync -from tests.helper.zenpymock import ZenpyMock +from test.helper.zenpymock import ZenpyMock DIR = os.path.dirname(__file__) @@ -26,8 +26,7 @@ def setUp(self): def test_network_failure(self): client = ZenpyMock(n_tickets=10000, p_sleep=0.01, p_failure=0.1, subdomain='xyz', oauth_token=123) - with self.assertRaises(RuntimeError): - do_sync(client, self.catalog, self.state, self.start_date) + self.failUnlessRaises(RuntimeError, do_sync, client, self.catalog, self.state, self.start_date) def test_data_consistency(self): client = ZenpyMock(n_tickets=1000, p_sleep=0.01, subdomain='xyz', oauth_token=123) diff --git a/singer-connectors/tap-zendesk/tests/test_init.py b/singer-connectors/tap-zendesk/test/test_init.py similarity index 100% rename from singer-connectors/tap-zendesk/tests/test_init.py rename to singer-connectors/tap-zendesk/test/test_init.py diff --git a/singer-connectors/tap-zendesk/tests/test_state.json b/singer-connectors/tap-zendesk/test/test_state.json similarity index 100% rename from singer-connectors/tap-zendesk/tests/test_state.json rename to singer-connectors/tap-zendesk/test/test_state.json
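
For context on the two code patterns this pin restores, here is a minimal, self-contained sketch. `FakeTicket` and `SketchEncoder` are hypothetical stand-ins (the real tap uses Zenpy's `BaseObject` and the `ZendeskEncoder` shown in the sync.py hunk above): the encoder converts known objects to plain dicts, drops any callable values, and defers everything else to the base class, while the final assertion checks that the explicit loop restored in `get_sub_stream_names` produces the same list as the `itertools.chain` one-liner it replaces.

```
import itertools
import json


class FakeTicket:
    """Hypothetical stand-in for a Zenpy BaseObject with a to_dict() method."""

    def __init__(self, ticket_id, subject):
        self.id = ticket_id
        self.subject = subject
        self.refresh = lambda: None  # callable attribute, dropped by the encoder

    def to_dict(self):
        return {'id': self.id, 'subject': self.subject, 'refresh': self.refresh}


class SketchEncoder(json.JSONEncoder):
    """Mirrors the restored ZendeskEncoder pattern."""

    def default(self, obj):
        if isinstance(obj, FakeTicket):
            obj_dict = obj.to_dict()
            # Drop values json cannot serialize, as the encoder above does
            for k, v in list(obj_dict.items()):
                if callable(v):
                    obj_dict.pop(k)
            return obj_dict
        return json.JSONEncoder.default(self, obj)


print(json.dumps(FakeTicket(1, 'hello'), cls=SketchEncoder))
# -> {"id": 1, "subject": "hello"}

# The loop restored in get_sub_stream_names() is equivalent to the
# itertools.chain one-liner it replaces:
SUB_STREAMS = {'tickets': ['ticket_audits', 'ticket_metrics', 'ticket_comments']}
sub_stream_names = []
for parent_stream in SUB_STREAMS:
    sub_stream_names.extend(SUB_STREAMS[parent_stream])
assert sub_stream_names == list(itertools.chain(*SUB_STREAMS.values()))
```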