diff --git a/.gitattributes b/.gitattributes
index 84ef90f32fb6b..dbdb53bb0d1b3 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,2 @@
-*.py diff=python
\ No newline at end of file
+*.py diff=python
+**/uv.lock linguist-generated
diff --git a/CHANGES.md b/CHANGES.md
index ff54736810ef7..911f8fe2c6e34 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,33 @@
# Changelog
+## 1.9.5 (core) / 0.25.5 (libraries)
+
+### New
+
+- The automatic run retry daemon has been updated so that there is a single source of truth for whether a run will be retried and whether the retry has been launched. Tags are now added to a run at failure time indicating whether it will be retried by the automatic retry system. Once the automatic retry has been launched, the run ID of the retry is added to the original run.
+- When canceling a backfill of a job, the backfill daemon will now cancel all runs launched by that backfill before marking the backfill as canceled.
+- Dagster execution info (tags such as `dagster/run-id`, `dagster/code-location`, `dagster/user`, and Dagster Cloud environment variables) typically attached to external resources are now available under `DagsterRun.dagster_execution_info`.
+- `SensorReturnTypesUnion` is now exported for typing the output of sensor functions.
+- [dagster-dbt] dbt seeds now get a valid code version (Thanks [@marijncv](https://github.com/marijncv)!).
+- Manual and automatic retries of runs launched by backfills that occur while the backfill is still in progress are now incorporated into the backfill's status.
+- Manual retries of runs launched by backfills are no longer considered part of the backfill if the backfill is complete when the retry is launched.
+- [dagster-fivetran] Fivetran assets can now be materialized using the `FivetranWorkspace.sync_and_poll(…)` method in the definition of a `@fivetran_assets` decorator.
+- [dagster-fivetran] `load_fivetran_asset_specs` has been updated to accept an instance of `DagsterFivetranTranslator` or a custom subclass.
+- [dagster-fivetran] The `fivetran_assets` decorator was added. It can be used with the `FivetranWorkspace` resource and `DagsterFivetranTranslator` translator to load Fivetran tables for a given connector as assets in Dagster. The `build_fivetran_assets_definitions` factory can be used to create assets for all the connectors in your Fivetran workspace.
+- [dagster-aws] `ECSPipesClient.run` now waits up to 70 days for task completion (waiter parameters are configurable) (Thanks [@jenkoian](https://github.com/jenkoian)!)
+- [dagster-dbt] Updated the dagster-dbt scaffold template to be compatible with uv (Thanks [@wingyplus](https://github.com/wingyplus)!).
+- [dagster-airbyte] A `load_airbyte_cloud_asset_specs` function has been added. It can be used with the `AirbyteCloudWorkspace` resource and `DagsterAirbyteTranslator` translator to load your Airbyte Cloud connection streams as external assets in Dagster.
+- [ui] Added an icon for the `icechunk` kind.
+- [ui] Improved the UI for manual sensor/schedule evaluation.
+
+### Bugfixes
+
+- Fixed a database locking bug for the `ConsolidatedSqliteEventLogStorage`, which is mostly used for tests.
+- [dagster-aws] Fixed a bug in the `ECSRunLauncher` that prevented it from accepting a user-provided task definition when `DAGSTER_CURRENT_IMAGE` was not set in the code location.
+- [ui] Fixed an issue that would sometimes cause the asset graph to fail to render on initial load.
+- [ui] Fixed the global auto-materialize tick timeline when paginating.
+
## 1.9.4 (core) / 0.25.4 (libraries)
### New
diff --git a/docs/content/_navigation.json b/docs/content/_navigation.json
index 1186bd69ff960..a784329e9fc60 100644
--- a/docs/content/_navigation.json
+++ b/docs/content/_navigation.json
@@ -1341,10 +1341,6 @@
{
"title": "Migrating from Airflow",
"path": "/guides/migrations/migrating-airflow-to-dagster"
- },
- {
- "title": "Observe your Airflow pipelines with Dagster",
- "path": "/guides/migrations/observe-your-airflow-pipelines-with-dagster"
}
]
},
diff --git a/docs/content/api/modules.json.gz b/docs/content/api/modules.json.gz
index babd11583e374..00caf49d509e0 100644
Binary files a/docs/content/api/modules.json.gz and b/docs/content/api/modules.json.gz differ
diff --git a/docs/content/api/searchindex.json.gz b/docs/content/api/searchindex.json.gz
index 5ca4d157c09eb..39b9eab48e8bb 100644
Binary files a/docs/content/api/searchindex.json.gz and b/docs/content/api/searchindex.json.gz differ
diff --git a/docs/content/api/sections.json.gz b/docs/content/api/sections.json.gz
index 1254495534a4f..c970656e88c06 100644
Binary files a/docs/content/api/sections.json.gz and b/docs/content/api/sections.json.gz differ
diff --git a/docs/content/concepts/metadata-tags/kind-tags.mdx b/docs/content/concepts/metadata-tags/kind-tags.mdx
index 3125a021b0c6e..d6bd0634af33b 100644
--- a/docs/content/concepts/metadata-tags/kind-tags.mdx
+++ b/docs/content/concepts/metadata-tags/kind-tags.mdx
@@ -124,6 +124,7 @@ Some kinds are given a branded icon in the UI. We currently support nearly 200 u
| `go` | |
| `google` | |
| `googlecloud` | |
+| `googledrive` | |
| `googlesheets` | |
| `graphql` | |
| `greatexpectations` | |
diff --git a/docs/content/dagster-plus/deployment/azure/acr-user-code.mdx b/docs/content/dagster-plus/deployment/azure/acr-user-code.mdx
index b9922423bce14..93d7390bb3890 100644
--- a/docs/content/dagster-plus/deployment/azure/acr-user-code.mdx
+++ b/docs/content/dagster-plus/deployment/azure/acr-user-code.mdx
@@ -89,18 +89,26 @@ First, we'll need to generate a service principal for GitHub Actions to use to a
az ad sp create-for-rbac --name "github-actions-acr" --role contributor --scopes /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.ContainerRegistry/registries/<registry_name>
```
-This command will output a JSON object with the service principal details. Make sure to save the `appId`, `password`, and `tenant` values - we'll use them in the next step.
+This command will output a JSON object with the service principal details. Make sure to save the `appId` and `password` values - we'll use them in the next step.
### Add secrets to your repository
We'll add the service principal details as secrets in our repository. Go to your repository in GitHub, and navigate to `Settings` -> `Secrets`. Add the following secrets:
+- `DAGSTER_CLOUD_API_TOKEN`: An agent token. For more details, see [Managing agent tokens](/dagster-plus/account/managing-user-agent-tokens#managing-agent-tokens).
- `AZURE_CLIENT_ID`: The `appId` from the service principal JSON object.
- `AZURE_CLIENT_SECRET`: The `password` from the service principal JSON object.
-### Update the workflow
+### Update the GitHub Actions workflow
-Finally, we'll update the workflow to use the service principal details. Open `.github/workflows/dagster-cloud-deploy.yml` in your repository, and uncomment the section on Azure Container Registry. It should look like this:
+For this step, open `.github/workflows/dagster-cloud-deploy.yml` in your repository with your preferred text editor to perform the changes below.
+
+In the `env` section of the workflow, update the following variables:
+
+- `DAGSTER_CLOUD_ORGANIZATION`: The name of your Dagster Cloud organization.
+- `IMAGE_REGISTRY`: The URL of your Azure Container Registry: `<registry_name>.azurecr.io`.
+
+We'll update the workflow to use the Azure Container Registry by uncommenting its section and providing the service principal details. It should look like this:
```yaml
# Azure Container Registry (ACR)
@@ -114,6 +122,34 @@ Finally, we'll update the workflow to use the service principal details. Open `.
password: ${{ secrets.AZURE_CLIENT_SECRET }}
```
+Finally, update the tags in the "Build and upload Docker image" step to match the full URL of your image in ACR:
+
+```yaml
+- name: Build and upload Docker image for "quickstart_etl"
+  if: steps.prerun.outputs.result != 'skip'
+  uses: docker/build-push-action@v4
+  with:
+    context: .
+    push: true
+    tags: ${{ env.IMAGE_REGISTRY }}/<image_name>:${{ env.IMAGE_TAG }}
+    cache-from: type=gha
+    cache-to: type=gha,mode=max
+```
+
+### Update the `dagster_cloud.yaml` build configuration to use the Azure Container Registry
+
+Edit the `dagster_cloud.yaml` file in the root of your repository. Update the `build` section to use the Azure Container Registry, and provide an image name specific to the code location. This must match the registry and image name used in the previous step.
+
+```yaml
+locations:
+  - location_name: quickstart_etl
+    code_source:
+      package_name: quickstart_etl.definitions
+    build:
+      directory: ./
+      registry: <registry_name>.azurecr.io/<image_name>
+```
+
### Push and run the workflow
Now, commit and push the changes to your repository. The GitHub Actions workflow should run automatically. You can check the status of the workflow in the `Actions` tab of your repository.
@@ -133,3 +169,7 @@ alt="Dagster+ code locations page showing the new code location"
width={1152}
height={320}
/>
+
+## Next steps
+
+Now that you have your code location deployed, you can follow the [compute logs setup guide](/dagster-plus/deployment/azure/blob-compute-logs) to set up logging in your AKS cluster.
diff --git a/docs/content/dagster-plus/deployment/azure/blob-compute-logs.mdx b/docs/content/dagster-plus/deployment/azure/blob-compute-logs.mdx
index 698d8def57482..98d80e90aac2f 100644
--- a/docs/content/dagster-plus/deployment/azure/blob-compute-logs.mdx
+++ b/docs/content/dagster-plus/deployment/azure/blob-compute-logs.mdx
@@ -25,14 +25,19 @@ First, we'll enable the cluster to use workload identity. This will allow the AK
az aks update --resource-group <resource_group> --name <cluster_name> --enable-workload-identity
```
-Then, we'll create a new managed identity for the AKS agent, and a new service account in our AKS cluster.
+Then, we'll create a new managed identity for the AKS agent.
```bash
az identity create --resource-group <resource_group> --name agent-identity
-kubectl create serviceaccount dagster-agent-service-account --namespace dagster-agent
```
-Now we need to federate the managed identity with the service account.
+We will need to find the name of the service account used by the Dagster+ Agent. If you used the [Dagster+ Helm chart](/dagster-plus/deployment/agents/kubernetes/configuring-running-kubernetes-agent), it should be `user-cloud-dagster-cloud-agent`. You can confirm by using this command:
+
+```bash
+kubectl get serviceaccount -n <dagster_agent_namespace>
+```
+
+Now we need to federate the managed identity with the service account used by the Dagster+ Agent.
```bash
az identity federated-credential create \
@@ -40,51 +45,63 @@ az identity federated-credential create \
--identity-name agent-identity \
--resource-group \
--issuer $(az aks show -g <resource_group> -n <cluster_name> --query "oidcIssuerProfile.issuerUrl" -otsv) \
- --subject system:serviceaccount:dagster-agent:dagster-agent-service-account
+ --subject system:serviceaccount:<dagster_agent_namespace>:<service_account_name>
```
-Finally, we'll edit our AKS agent deployment to use the new service account.
+You will need to obtain the client ID of this identity for the next few operations. Make sure to save this value:
```bash
-kubectl edit deployment -n dagster-agent
+az identity show -g <resource_group> -n agent-identity --query 'clientId' -otsv
```
-In the deployment manifest, add the following lines:
+Next, we'll grant the identity access to the storage account:
+
+```bash
+az role assignment create \
+ --assignee <client_id> \
+ --role "Storage Blob Data Contributor" \
+ --scope $(az storage account show -g <resource_group> -n <storage_account_name> --query 'id' -otsv)
+```
+
+You will need to add new annotations and labels in Kubernetes to enable the use of workload identities. If you're using the Dagster+ Helm chart, modify your `values.yaml` to add the following lines:
```yaml
-metadata:
-  ...
+serviceAccount:
+  annotations:
+    azure.workload.identity/client-id: "<client_id>"
+
+dagsterCloudAgent:
+  labels:
+    azure.workload.identity/use: "true"
+
+workspace:
   labels:
-    ...
     azure.workload.identity/use: "true"
-spec:
-  ...
-  template:
-    ...
-    spec:
-      ...
-      serviceAccountName: dagster-agent-sa
```
-If everything is set up correctly, you should be able to run the following command and see an access token returned:
+
+If you need to retrieve the values used by your Helm deployment, you can run:
+`helm get values user-cloud > values.yaml`.
+
+Finally, update your Helm release with the new values:
```bash
-kubectl exec -n dagster-agent -it -- bash
-# in the pod
-curl -H "Metadata:true" "http://169.254.169.254/metadata/identity/oauth2/token?resource=https://storage.azure.com/"
+helm upgrade user-cloud dagster-cloud/dagster-cloud-agent -n <dagster_agent_namespace> -f values.yaml
```
-## Step 2: Configure Dagster to use Azure Blob Storage
-
-Now, you need to update the helm values to use Azure Blob Storage for logs. You can do this by editing the `values.yaml` file for your user-cloud deployment.
-
-Pull down the current values for your deployment:
+If everything is set up correctly, you should be able to run the following command and see an access token returned:
```bash
-helm get values user-cloud > current-values.yaml
+kubectl exec -n <dagster_agent_namespace> -it <agent_pod_name> -- bash
+# in the pod
+apt update && apt install -y curl # install curl if missing, may vary depending on the base image
+curl -H "Metadata:true" "http://169.254.169.254/metadata/identity/oauth2/token?resource=https://storage.azure.com/&api-version=2018-02-01"
```
-Then, edit the `current-values.yaml` file to include the following lines:
+## Step 2: Configure Dagster to use Azure Blob Storage
+
+Once again, you need to update the Helm values to use Azure Blob Storage for logs. You can do this by editing the `values.yaml` file for your user-cloud deployment to include the following lines:
```yaml
computeLogs:
@@ -97,7 +114,7 @@ computeLogs:
container: mycontainer
default_azure_credential:
exclude_environment_credential: false
- prefix: dagster-logs-
+ prefix: dagster-logs
local_dir: "/tmp/cool"
upload_interval: 30
```
@@ -105,10 +122,14 @@ computeLogs:
Finally, update your deployment with the new values:
```bash
-helm upgrade user-cloud dagster-cloud/dagster-cloud-agent -n dagster-agent -f current-values.yaml
+helm upgrade user-cloud dagster-cloud/dagster-cloud-agent -n <dagster_agent_namespace> -f values.yaml
```
-## Step 3: Verify logs are being written to Azure Blob Storage
+## Step 3: Update your code location to enable the use of the `AzureBlobComputeLogManager`
+
+- Add `dagster-azure` to your `setup.py` file, as sketched below. This will allow you to import the `AzureBlobComputeLogManager` class.
+
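+A minimal sketch of that `setup.py` change, assuming a project generated from the quickstart template (the package name is a hypothetical stand-in for your own):
+
+```python
+from setuptools import find_packages, setup
+
+setup(
+    name="quickstart_etl",  # hypothetical: use your project's package name
+    packages=find_packages(exclude=["quickstart_etl_tests"]),
+    install_requires=[
+        "dagster",
+        "dagster-cloud",
+        "dagster-azure",  # provides AzureBlobComputeLogManager
+    ],
+)
+```
+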
+## Step 4: Verify logs are being written to Azure Blob Storage
It's time to kick off a run in Dagster to test your new configuration. If following along with the quickstart repo, you should be able to kick off a run of the `all_assets_job`, which will generate logs for you to test against. Otherwise, use any job that emits logs. When you go to the stdout/stderr window of the run page, you should see a log file that directs you to the Azure Blob Storage container.
diff --git a/docs/content/deployment/run-monitoring.mdx b/docs/content/deployment/run-monitoring.mdx
index 5bafc1acdd697..6673fed36651e 100644
--- a/docs/content/deployment/run-monitoring.mdx
+++ b/docs/content/deployment/run-monitoring.mdx
@@ -39,7 +39,7 @@ When Dagster terminates a run, the run moves into CANCELING status and sends a t
## General run timeouts
-After a run is marked as STARTED, it may hang indefinitely for various reasons (user API errors, network issues, etc.). You can configure a maximum runtime for every run in a deployment by setting the `run_monitoring.max_runtime_seconds` field in your dagster.yaml or (Dagster+ deployment settings)\[dagster-plus/managing-deployments/deployment-settings-reference] to the maximum runtime in seconds. If a run exceeds this timeout and run monitoring is enabled, it will be marked as failed. The `dagster/max_runtime` tag can also be used to set a timeout in seconds on a per-run basis.
+After a run is marked as STARTED, it may hang indefinitely for various reasons (user API errors, network issues, etc.). You can configure a maximum runtime for every run in a deployment by setting the `run_monitoring.max_runtime_seconds` field in your dagster.yaml or [Dagster+ deployment settings](/dagster-plus/managing-deployments/deployment-settings-reference) to the maximum runtime in seconds. If a run exceeds this timeout and run monitoring is enabled, it will be marked as failed. The `dagster/max_runtime` tag can also be used to set a timeout in seconds on a per-run basis.
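+
+As a sketch of the per-run variant, you might set the tag on a job definition (the job name and one-hour value are illustrative):
+
+```python
+from dagster import define_asset_job
+
+# Runs of this job are marked as failed if they exceed one hour.
+nightly_job = define_asset_job(
+    name="nightly_job",
+    tags={"dagster/max_runtime": 60 * 60},
+)
+```
+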
For example, to configure a maximum of 2 hours for every run in your deployment:
diff --git a/docs/content/guides/migrations.mdx b/docs/content/guides/migrations.mdx
index ece1d40fe8b3a..f968e1aa1dc9b 100644
--- a/docs/content/guides/migrations.mdx
+++ b/docs/content/guides/migrations.mdx
@@ -13,4 +13,3 @@ Explore your options for migrating from other platforms to Dagster.
Curious how you can migrate your Airflow pipelines to Dagster?
- Learn how to perform [a lift-and-shift migration of Airflow to Dagster](/guides/migrations/migrating-airflow-to-dagster)
-- Learn how to leverage the features of [Dagster and Airflow together using Dagster Pipes](/guides/migrations/observe-your-airflow-pipelines-with-dagster)
diff --git a/docs/content/guides/migrations/observe-your-airflow-pipelines-with-dagster.mdx b/docs/content/guides/migrations/observe-your-airflow-pipelines-with-dagster.mdx
deleted file mode 100644
index ed2f0fe3cbfc6..0000000000000
--- a/docs/content/guides/migrations/observe-your-airflow-pipelines-with-dagster.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: "Observe your Airflow pipelines with Dagster | Dagster Docs"
-description: "Learn how to leverage the features of Dagster and Airflow together."
----
-
-# Observe your Airflow pipelines with Dagster
-
-Dagster can act as a single entry point to all orchestration platforms in use at your organization. By injecting a small amount of code into your existing pipelines, you can report events to Dagster, where you can then visualize the full lineage of pipelines. This can be particularly useful if you have multiple Apache Airflow environments, and hope to build a catalog and observation platform through Dagster.
-
-## Emitting materialization events from Airflow to Dagster
-
-Imagine you have a large number of pipelines written in Apache Airflow and wish to introduce Dagster into your stack. By using custom Airflow operators, you can continue to run your existing pipelines while you work toward migrating them off Airflow, or while building new pipelines in Dagster that are tightly integrated with your legacy systems.
-
-To do this, we will define a `DagsterAssetOperator` operator downstream of your Airflow DAG to indicate that the pipeline's processing has concluded. The HTTP endpoint of the Dagster server, the `asset_key`, and additional metadata and descriptions are to be specified to inform Dagster of the materialization.
-
-```python
-from typing import Dict, Optional
-
-from airflow.models import BaseOperator
-from airflow.utils.decorators import apply_defaults
-import requests
-
-class DagsterAssetOperator(BaseOperator):
- @apply_defaults
- def __init__(
- self,
- dagster_webserver_host: str,
- dagster_webserver_port: str,
- asset_key: str,
- metadata: Optional[Dict] = None,
- description: Optional[str] = None,
- *args,
- **kwargs,
- ):
- super().__init__(*args, **kwargs)
- self.dagster_webserver_host = dagster_webserver_host
- self.dagster_webserver_port = dagster_webserver_port
- self.asset_key = asset_key
- self.metadata = metadata or {}
- self.description = description
-
- def execute(self, context):
- url = f"http://{dagster_webserver_host}:{dagster_webserver_port}/report_asset_materialization/{self.asset_key}"
- payload = {"metadata": self.metadata, "description": self.description}
- headers = {"Content-Type": "application/json"}
-
- response = requests.post(url, json=payload, headers=headers)
- response.raise_for_status()
-
- self.log.info(
- f"Reported asset materialization to Dagster. Response: {response.text}"
- )
-```
-
-Then, we can append this to our Airflow DAG to indicate that a pipeline has run successfully.
-
-```python
-import os
-
-dagster_webserver_host = os.environ.get("DAGSTER_WEBSERVER_HOST", "localhost")
-dagster_webserver_port = os.environ.get("DAGSTER_WEBSERVER_PORT", "3000")
-
-dagster_op = DagsterAssetOperator(
- task_id="report_dagster_asset_materialization",
- dagster_webserver_host=dagster_webserver_host,
- dagster_webserver_port=dagster_webserver_port,
- asset_key="example_external_airflow_asset",
- metadata={"airflow/tag": "example", "source": "external"},
-)
-```
-
-Once the events are emitted from Airflow, there are two options for scheduling Dagster materializations following the external Airflow materialization event: asset sensors and auto materialization policies.
-
-An external asset is created in Dagster, and an `asset_sensor` is used to identify the materialization events that are being sent from Airflow.
-
-```python
-from dagster import external_asset_from_spec
-
-example_external_airflow_asset = external_asset_from_spec(
- AssetSpec("example_external_airflow_asset",
- group_name="External")
-)
-```
-
-```python
-from dagster import (
- AssetKey,
- EventLogEntry,
- RunRequest,
- SensorEvaluationContext,
- asset_sensor
-)
-
-@asset_sensor(
- asset_key=AssetKey("example_external_airflow_asset"),
- job=example_external_airflow_asset_job
-)
-def example_external_airflow_asset_sensor(
- context: SensorEvaluationContext, asset_event: EventLogEntry
-):
- assert asset_event.dagster_event and asset_event.dagster_event.asset_key
- yield RunRequest(run_key=context.cursor)
-```
-
-Now, when a materialization event occurs on the external `example_external_airflow_asset` asset, the `example_external_airflow_asset_job` job will be triggered. Here, you can define logic that can build upon the DAG from your Airflow environment.
diff --git a/docs/content/integrations/airflow.mdx b/docs/content/integrations/airflow.mdx
index 41d0d0c53ab73..66a46614286fb 100644
--- a/docs/content/integrations/airflow.mdx
+++ b/docs/content/integrations/airflow.mdx
@@ -9,7 +9,6 @@ Migrating from Airflow to Dagster, or integrating Dagster into your existing wor
- [Learning Dagster from Airflow](/integrations/airflow/from-airflow-to-dagster) - a step-by-step tutorial of mapping concepts from Airflow to Dagster
- [Migrating from Airflow](/guides/migrations/migrating-airflow-to-dagster) - migration patterns for translating Airflow code to Dagster
-- [Observe your Airflow pipelines with Dagster](/guides/migrations/observe-your-airflow-pipelines-with-dagster) - See how Dagster can act as the observation layer over all pipelines in your organization
---
diff --git a/docs/content/integrations/fivetran/fivetran.mdx b/docs/content/integrations/fivetran/fivetran.mdx
index cd33c2331e76e..2c61ea9917b23 100644
--- a/docs/content/integrations/fivetran/fivetran.mdx
+++ b/docs/content/integrations/fivetran/fivetran.mdx
@@ -26,6 +26,14 @@ This guide provides instructions for using Dagster with Fivetran using the `dags
+## Set up your environment
+
+To get started, you'll need to install the `dagster` and `dagster-fivetran` Python packages:
+
+```bash
+pip install dagster dagster-fivetran
+```
+
## Represent Fivetran assets in the asset graph
To load Fivetran assets into the Dagster asset graph, you must first construct a resource, which allows Dagster to communicate with your Fivetran workspace. You'll need to supply your account ID, API key and API secret. See [Getting Started](https://fivetran.com/docs/rest-api/getting-started) in the Fivetran REST API documentation for more information on how to create your API key and API secret.
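+
+A minimal sketch of that resource, assuming your credentials live in environment variables named `FIVETRAN_ACCOUNT_ID`, `FIVETRAN_API_KEY`, and `FIVETRAN_API_SECRET`:
+
+```python
+from dagster import EnvVar
+from dagster_fivetran import FivetranWorkspace
+
+fivetran_workspace = FivetranWorkspace(
+    account_id=EnvVar("FIVETRAN_ACCOUNT_ID"),
+    api_key=EnvVar("FIVETRAN_API_KEY"),
+    api_secret=EnvVar("FIVETRAN_API_SECRET"),
+)
+```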
diff --git a/docs/dagster-university/next-env.d.ts b/docs/dagster-university/next-env.d.ts
index 4f11a03dc6cc3..a4a7b3f5cfa2f 100644
--- a/docs/dagster-university/next-env.d.ts
+++ b/docs/dagster-university/next-env.d.ts
@@ -2,4 +2,4 @@
///
// NOTE: This file should not be edited
-// see https://nextjs.org/docs/basic-features/typescript for more information.
+// see https://nextjs.org/docs/pages/building-your-application/configuring/typescript for more information.
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-2/requirements-and-installation.md b/docs/dagster-university/pages/dagster-essentials/lesson-2/requirements-and-installation.md
index 363718ca770fe..b074d2757ae49 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-2/requirements-and-installation.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-2/requirements-and-installation.md
@@ -9,11 +9,12 @@ lesson: '2'
To install Dagster, you’ll need:
- **To install Python**. Dagster supports Python 3.9 through 3.12.
-- **A package manager like pip or poetry**. If you need to install a package manager, refer to the following installation guides:
+- **A package manager like pip, Poetry, or uv**. If you need to install a package manager, refer to the following installation guides:
- [pip](https://pip.pypa.io/en/stable/installation/)
- [Poetry](https://python-poetry.org/docs/)
+ - [uv](https://docs.astral.sh/uv/getting-started/installation/)
-To check that Python and the pip or Poetry package manager are already installed in your environment, run:
+To check that Python and the package manager are already installed in your environment, run:
```shell
python --version
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-3/whats-an-asset.md b/docs/dagster-university/pages/dagster-essentials/lesson-3/whats-an-asset.md
index 7e3d27ebd2cee..99499c844f619 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-3/whats-an-asset.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-3/whats-an-asset.md
@@ -10,8 +10,8 @@ An asset is an object in persistent storage that captures some understanding of
- **A database table or view**, such as those in a Google BigQuery data warehouse
- **A file**, such as a file in your local machine or blob storage like Amazon S3
-- **A machine learning model**
-- **An asset from an integration,** like a dbt model or a Fivetran connector
+- **A machine learning model**, such as a TensorFlow or PyTorch model
+- **An asset from an integration,** such as a dbt model or a Fivetran connector
Assets aren’t limited to just the objects listed above - these are just some common examples.
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-taxi-zones-asset.md b/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-taxi-zones-asset.md
index 92eab5b00a8b5..69f9139b16c2b 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-taxi-zones-asset.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-taxi-zones-asset.md
@@ -30,7 +30,7 @@ The asset you built should look similar to the following code. Click **View answ
deps=["taxi_zones_file"]
)
def taxi_zones() -> None:
-    sql_query = f"""
+    query = f"""
create or replace table zones as (
select
LocationID as zone_id,
@@ -41,6 +41,13 @@ def taxi_zones() -> None:
);
"""
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
-    conn.execute(sql_query)
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
+    conn.execute(query)
```
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-trips-by-week-asset.md b/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-trips-by-week-asset.md
index 8067d27dd281a..b5c5e90b1bfad 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-trips-by-week-asset.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-4/coding-practice-trips-by-week-asset.md
@@ -62,12 +62,20 @@ from datetime import datetime, timedelta
from . import constants
import pandas as pd
+from dagster._utils.backoff import backoff
@asset(
deps=["taxi_trips"]
)
def trips_by_week() -> None:
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
current_date = datetime.strptime("2023-03-01", constants.DATE_FORMAT)
end_date = datetime.strptime("2023-04-01", constants.DATE_FORMAT)
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-4/loading-data-into-a-database.md b/docs/dagster-university/pages/dagster-essentials/lesson-4/loading-data-into-a-database.md
index 0bf9ced624484..f5bc537d9837b 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-4/loading-data-into-a-database.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-4/loading-data-into-a-database.md
@@ -13,6 +13,7 @@ Now that you have a query that produces an asset, let’s use Dagster to manage
```python
import duckdb
import os
+ from dagster._utils.backoff import backoff
```
2. Copy and paste the code below into the bottom of the `trips.py` file. Note how this code looks similar to the asset definition code for the `taxi_trips_file` and the `taxi_zones` assets:
@@ -25,7 +26,7 @@ Now that you have a query that produces an asset, let’s use Dagster to manage
"""
The raw taxi trips dataset, loaded into a DuckDB database
"""
-    sql_query = """
+    query = """
create or replace table trips as (
select
VendorID as vendor_id,
@@ -42,8 +43,15 @@ Now that you have a query that produces an asset, let’s use Dagster to manage
);
"""
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
-    conn.execute(sql_query)
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
+    conn.execute(query)
```
Let’s walk through what this code does:
@@ -52,13 +60,13 @@ Now that you have a query that produces an asset, let’s use Dagster to manage
2. The `taxi_trips_file` asset is defined as a dependency of `taxi_trips` through the `deps` argument.
- 3. Next, a variable named `sql_query` is created. This variable contains a SQL query that creates a table named `trips`, which sources its data from the `data/raw/taxi_trips_2023-03.parquet` file. This is the file created by the `taxi_trips_file` asset.
+ 3. Next, a variable named `query` is created. This variable contains a SQL query that creates a table named `trips`, which sources its data from the `data/raw/taxi_trips_2023-03.parquet` file. This is the file created by the `taxi_trips_file` asset.
- 4. A variable named `conn` is created, which defines the connection to the DuckDB database in the project. To do this, it uses the `.connect` method from the `duckdb` library, passing in the `DUCKDB_DATABASE` environment variable to tell DuckDB where the database is located.
+ 4. A variable named `conn` is created, which defines the connection to the DuckDB database in the project. To do this, we first wrap everything with the Dagster utility function `backoff`. Using the backoff function ensures that multiple assets can use DuckDB safely without locking resources. The backoff function takes in the function we want to call (in this case the `.connect` method from the `duckdb` library), any errors to retry on (`RuntimeError` and `duckdb.IOException`), the max number of retries, and finally, the arguments to supply to the `.connect` DuckDB method. Here we are passing in the `DUCKDB_DATABASE` environment variable to tell DuckDB where the database is located.
The `DUCKDB_DATABASE` environment variable, sourced from your project’s `.env` file, resolves to `data/staging/data.duckdb`. **Note**: We set up this file in Lesson 2 - refer to this lesson if you need a refresher. If this file isn’t set up correctly, the materialization will result in an error.
- 5. Finally, `conn` is paired with the DuckDB `execute` method, where our SQL query (`sql_query`) is passed in as an argument. This tells the asset that, when materializing, to connect to the DuckDB database and execute the query in `sql_query`.
+ 5. Finally, `conn` is paired with the DuckDB `execute` method, where our SQL query (`query`) is passed in as an argument. This tells the asset that, when materializing, to connect to the DuckDB database and execute the query in `query`.
3. Save the changes to the file.
@@ -98,9 +106,9 @@ This is because you’ve told Dagster that taxi_trips depends on the taxi_trips_
To confirm that the `taxi_trips` asset materialized properly, you can access the newly made `trips` table in DuckDB. In a new terminal session, open a Python REPL and run the following snippet:
```python
-> import duckdb
-> conn = duckdb.connect(database="data/staging/data.duckdb") # assumes you're writing to the same destination as specified in .env.example
-> conn.execute("select count(*) from trips").fetchall()
+import duckdb
+conn = duckdb.connect(database="data/staging/data.duckdb") # assumes you're writing to the same destination as specified in .env.example
+conn.execute("select count(*) from trips").fetchall()
```
The command should succeed and return a row count of the taxi trips that were ingested. When finished, make sure to stop the terminal process before continuing or you may encounter an error. Use `Control+C` or `Command+C` to stop the process.
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-6/setting-up-a-database-resource.md b/docs/dagster-university/pages/dagster-essentials/lesson-6/setting-up-a-database-resource.md
index 311748941755c..9dadbf149bfa1 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-6/setting-up-a-database-resource.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-6/setting-up-a-database-resource.md
@@ -14,7 +14,14 @@ Throughout this module, you’ve used DuckDB to store and transform your data. E
)
def taxi_trips() -> None:
...
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
...
```
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-6/using-resources-in-assets.md b/docs/dagster-university/pages/dagster-essentials/lesson-6/using-resources-in-assets.md
index 7e27a7e555ab1..6f059f62f2bdc 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-6/using-resources-in-assets.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-6/using-resources-in-assets.md
@@ -31,7 +31,7 @@ from dagster import asset
deps=["taxi_trips_file"],
)
def taxi_trips() -> None:
-    sql_query = """
+    query = """
create or replace table taxi_trips as (
select
VendorID as vendor_id,
@@ -48,8 +48,15 @@ def taxi_trips() -> None:
);
"""
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
-    conn.execute(sql_query)
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
+    conn.execute(query)
```
---
@@ -72,7 +79,7 @@ from dagster import asset
deps=["taxi_trips_file"],
)
def taxi_trips(database: DuckDBResource) -> None:
-    sql_query = """
+    query = """
create or replace table taxi_trips as (
select
VendorID as vendor_id,
@@ -90,7 +97,7 @@ def taxi_trips(database: DuckDBResource) -> None:
"""
with database.get_connection() as conn:
-        conn.execute(sql_query)
+        conn.execute(query)
```
To refactor `taxi_trips` to use the `database` resource, we had to:
@@ -100,7 +107,14 @@ To refactor `taxi_trips` to use the `database` resource, we had to:
3. Replace the lines that connect to DuckDB and execute a query:
```python
-    conn = duckdb.connect(os.getenv("DUCKDB_DATABASE"))
+    conn = backoff(
+        fn=duckdb.connect,
+        retry_on=(RuntimeError, duckdb.IOException),
+        kwargs={
+            "database": os.getenv("DUCKDB_DATABASE"),
+        },
+        max_retries=10,
+    )
conn.execute(query)
```
@@ -111,6 +125,8 @@ To refactor `taxi_trips` to use the `database` resource, we had to:
conn.execute(query)
```
+ Notice that we no longer need to use the `backoff` function. The Dagster `DuckDBResource` handles this functionality for us. A sketch of the resource wiring is shown below.
+
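+A sketch of how that might look in your `Definitions` object, assuming the `taxi_trips` asset from this lesson and a `DuckDBResource` pointed at the `DUCKDB_DATABASE` environment variable:
+
+```python
+from dagster import Definitions, EnvVar
+from dagster_duckdb import DuckDBResource
+
+defs = Definitions(
+    assets=[taxi_trips],
+    resources={
+        "database": DuckDBResource(database=EnvVar("DUCKDB_DATABASE")),
+    },
+)
+```
+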
---
## Before you continue
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-7/what-are-schedules.md b/docs/dagster-university/pages/dagster-essentials/lesson-7/what-are-schedules.md
index 7abd4f66c4963..f91b4cd4900f0 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-7/what-are-schedules.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-7/what-are-schedules.md
@@ -92,7 +92,7 @@ Despite many schedulers and orchestrators replacing the cron program since then,
Consider the following example:
-```python
+```
15 5 * * 1-5
```
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-8/adding-partitions-to-assets.md b/docs/dagster-university/pages/dagster-essentials/lesson-8/adding-partitions-to-assets.md
index cdfdd00cf041e..c44c2b93eafc9 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-8/adding-partitions-to-assets.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-8/adding-partitions-to-assets.md
@@ -63,7 +63,7 @@ To add the partition to the asset:
@asset(
partitions_def=monthly_partition
)
- def taxi_trips_file(context) -> None:
+ def taxi_trips_file(context: AssetExecutionContext) -> None:
partition_date_str = context.partition_key
```
@@ -73,7 +73,7 @@ To add the partition to the asset:
@asset(
partitions_def=monthly_partition
)
- def taxi_trips_file(context) -> None:
+ def taxi_trips_file(context: AssetExecutionContext) -> None:
partition_date_str = context.partition_key
month_to_fetch = partition_date_str[:-3]
```
@@ -86,7 +86,7 @@ from ..partitions import monthly_partition
@asset(
partitions_def=monthly_partition
)
-def taxi_trips_file(context) -> None:
+def taxi_trips_file(context: AssetExecutionContext) -> None:
"""
The raw parquet files for the taxi trips dataset. Sourced from the NYC Open Data portal.
"""
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-8/coding-practice-partition-taxi-trips.md b/docs/dagster-university/pages/dagster-essentials/lesson-8/coding-practice-partition-taxi-trips.md
index f2aa0b5ffc093..606ee4b3847c0 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-8/coding-practice-partition-taxi-trips.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-8/coding-practice-partition-taxi-trips.md
@@ -17,7 +17,7 @@ To practice what you’ve learned, partition the `taxi_trips` asset by month usi
{% callout %}
You’ll need to drop the existing `taxi_trips` because of the new `partition_date` column. In a Python REPL or scratch script, run the following:
- ```yaml
+ ```
import duckdb
conn = duckdb.connect(database="data/staging/data.duckdb")
conn.execute("drop table trips;")
diff --git a/docs/dagster-university/pages/dagster-essentials/lesson-8/creating-a-schedule-with-a-date-based-partition.md b/docs/dagster-university/pages/dagster-essentials/lesson-8/creating-a-schedule-with-a-date-based-partition.md
index a6327cb6ad954..cd8793b6755fe 100644
--- a/docs/dagster-university/pages/dagster-essentials/lesson-8/creating-a-schedule-with-a-date-based-partition.md
+++ b/docs/dagster-university/pages/dagster-essentials/lesson-8/creating-a-schedule-with-a-date-based-partition.md
@@ -36,12 +36,14 @@ To add partition to the job, make the following changes:
The job should now look like this:
```python
-from dagster import define_asset_job, AssetSelection, AssetKey
+from dagster import define_asset_job, AssetSelection
from ..partitions import monthly_partition
+trips_by_week = AssetSelection.assets("trips_by_week")
+
trip_update_job = define_asset_job(
name="trip_update_job",
partitions_def=monthly_partition, # partitions added here
-    selection=AssetSelection.all() - AssetSelection.assets(["trips_by_week"])
+    selection=AssetSelection.all() - trips_by_week
)
```
diff --git a/docs/docs-beta/CONTRIBUTING.md b/docs/docs-beta/CONTRIBUTING.md
new file mode 100644
index 0000000000000..c0b8e52ba5712
--- /dev/null
+++ b/docs/docs-beta/CONTRIBUTING.md
@@ -0,0 +1,142 @@
+# Contributing
+
+## Migration from legacy docs
+
+There are some features in the previous docs that require changes to be made to work in the new Docusaurus-based documentation site.
+
+### Images
+
+Before:
+
+```
+<Image
+alt="Alt text for the image"
+src="/images/path/to/image.png"
+width={1152}
+height={320}
+/>
+```
+
+After:
+
+```
+![Alt text for the image](/images/path/to/image.png)
+```
+
+### Notes
+
+Before:
+
+```
+<Note>
+This guide is applicable to Dagster+.
+</Note>
+```
+
+After:
+
+```
+:::note
+This guide is applicable to Dagster+
+:::
+```
+
+### Tabs
+
+Before:
+
+```
+<TabGroup>
+<TabItem name="Tab one">
+ ...
+</TabItem>
+</TabGroup>
+```
+
+After:
+
+```
+<Tabs>
+<TabItem value="tab-one" label="Tab one">
+ ...
+</TabItem>
+</Tabs>
+```
+
+### Header boundaries
+
+Previously, horizontal rules had to be defined between each level-two header: `---`.
+
+This is no longer required, as the horizontal rule has been included in the CSS rules.
+
+### Reference tables
+
+Before:
+
+```
+<ReferenceTable>
+  <ReferenceTableItem propertyName="DAGSTER_CLOUD_DEPLOYMENT_NAME">
+    The name of the Dagster+ deployment. For example, prod.
+  </ReferenceTableItem>
+  <ReferenceTableItem propertyName="DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT">
+    If 1, the deployment is a{" "}
+    branch deployment
+    . Refer to the
+    Branch Deployment variables section
+    for a list of variables available in branch deployments.
+  </ReferenceTableItem>
+</ReferenceTable>
+```
+
+After:
+
+| Key | Value |
+|---|---|
+| `DAGSTER_CLOUD_DEPLOYMENT_NAME` | The name of the Dagster+ deployment. **Example:** `prod`. |
+| `DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT` | `1` if the deployment is a [branch deployment](/dagster-plus/features/ci-cd/branch-deployments/index.md). |
+
+### Whitespace via `{" "}`
+
+Forcing empty space using the `{" "}` interpolation is not supported, and must be removed.
+
+---
+
+## Diagrams
+
+You can use [Mermaid.js](https://mermaid.js.org/syntax/flowchart.html) to create diagrams. For example:
+
+```mermaid
+flowchart LR
+ Start --> Stop
+```
+
+Refer to the [Mermaid.js documentation](https://mermaid.js.org/) for more info.
+
+---
+
+## Code examples
+
+To include code snippets, use the following format:
+
+```
+<CodeExample filePath="path/to/file.py" />
+```
+
+The `filePath` is relative to the `./examples/docs_beta_snippets/docs_beta_snippets/` directory.
+
+At minimum, all `.py` files in the `docs_beta_snippets` directory are tested by attempting to load the Python files.
+You can write additional tests for them in the `docs_beta_snippets_test` folder. See the folder for more information.
+
+To type-check the code snippets during development, run the following command from the Dagster root folder.
+This will run `pyright` on all new/changed files relative to the master branch.
+
+```
+make quick_pyright
+```
diff --git a/docs/docs-beta/README.md b/docs/docs-beta/README.md
index 33e2e5fbf0ae8..f07c8ddd61daa 100644
--- a/docs/docs-beta/README.md
+++ b/docs/docs-beta/README.md
@@ -5,6 +5,26 @@ The documentation site is built using [Docusaurus](https://docusaurus.io/), a mo
---
+## Overview of the docs
+
+- `./src` contains custom components, styles, themes, and layouts.
+- `./content-templates` contains the templates for the documentation pages.
+- `./docs/` is the source of truth for the documentation.
+- `/examples/docs_beta_snippets/docs_beta_snippets/` contains all code examples for the documentation.
+
+The docs are broken down into the following sections:
+
+- Docs - includes content from [getting-started](./docs/getting-started/) and [guides](./docs/guides/)
+- [Integrations](./docs/integrations/)
+- [Dagster+](./docs/dagster-plus/)
+- [API reference](./docs/api/)
+
+`sidebar.ts` and `docusaurus.config.ts` are the main configuration files for the documentation.
+
+For formatting guidelines, see the [CONTRIBUTING](CONTRIBUTING.md) guide.
+
+---
+
## Installation
The site uses [yarn](https://yarnpkg.com/) for package management.
@@ -31,22 +51,6 @@ pip install vale
---
-## Overview of the docs
-
-- `./src` contains custom components, styles, themes, and layouts.
-- `./content-templates` contains the templates for the documentation pages.
-- `./docs/` is the source of truth for the documentation.
-- `/examples/docs_beta_snippets/docs_beta_snippets/` contains all code examples for the documentation.
-
-The docs are broken down into the following sections:
-
-- [Tutorials](./docs/tutorials/)
-- [Guides](./docs/guides/)
-
-`sidebar.ts` and `docusaurus.config.ts` are the main configuration files for the documentation.
-
----
-
## Local Development
To start the local development server:
@@ -72,37 +76,6 @@ yarn vale /path/to/file ## check individual file
yarn vale --no-wrap ## remove wrapping from output
```
-### Diagrams
-
-You can use [Mermaid.js](https://mermaid.js.org/syntax/flowchart.html) to create diagrams. For example:
-
-```mermaid
-flowchart LR
- Start --> Stop
-```
-
-Refer to the [Mermaid.js documentation](https://mermaid.js.org/) for more info.
-
-### Code examples
-
-To include code snippets, use the following format:
-
-```
-
-```
-
-The `filePath` is relative to the `./examples/docs_beta_snippets/docs_beta_snippets/` directory.
-
-At minimum, all `.py` files in the `docs_beta_snippets` directory are tested by attempting to load the Python files.
-You can write additional tests for them in the `docs_beta_snippets_test` folder. See the folder for more information.
-
-To type-check the code snippets during development, run the following command from the Dagster root folder.
-This will run `pyright` on all new/changed files relative to the master branch.
-
-```
-make quick_pyright
-```
-
---
## Build
@@ -115,6 +88,8 @@ yarn build
This command generates static content into the `build` directory and can be served using any static contents hosting service. This also checks for any broken links in the documentation. Note that you will need to store Algolia credentials in local environment variables to build the site for production.
+---
+
## Deployment
This site is built and deployed using Vercel.
@@ -129,6 +104,8 @@ yarn sync-api-docs && yarn build
This runs the `scripts/vercel-sync-api-docs.sh` script which builds the MDX files using the custom `sphinx-mdx-builder`, and copies the resulting MDX files to `docs/api/python-api`.
+---
+
## Search
Algolia search is used for search results on the website, as configured in `docusaurus.config.ts`.
diff --git a/docs/docs-beta/docs/dagster-plus/features/code-locations/code-location-history.md b/docs/docs-beta/docs/dagster-plus/deployment/code-locations/code-location-history.md
similarity index 97%
rename from docs/docs-beta/docs/dagster-plus/features/code-locations/code-location-history.md
rename to docs/docs-beta/docs/dagster-plus/deployment/code-locations/code-location-history.md
index bfdaa044ac9ce..a3b242c3b3fb7 100644
--- a/docs/docs-beta/docs/dagster-plus/features/code-locations/code-location-history.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/code-locations/code-location-history.md
@@ -49,5 +49,5 @@ If you notice an issue with newly deployed code, or your code fails to deploy su
## Next steps
-- Learn more about [Code Locations](/dagster-plus/features/code-locations)
+- Learn more about [Code Locations](/dagster-plus/deployment/code-locations)
- Learn how to [Alert when a code location fails to load](/dagster-plus/features/alerts/creating-alerts#alerting-when-a-code-location-fails-to-load)
diff --git a/docs/docs-beta/docs/dagster-plus/features/code-locations/dagster-cloud-yaml.md b/docs/docs-beta/docs/dagster-plus/deployment/code-locations/dagster-cloud-yaml.md
similarity index 100%
rename from docs/docs-beta/docs/dagster-plus/features/code-locations/dagster-cloud-yaml.md
rename to docs/docs-beta/docs/dagster-plus/deployment/code-locations/dagster-cloud-yaml.md
diff --git a/docs/docs-beta/docs/dagster-plus/features/code-locations/index.md b/docs/docs-beta/docs/dagster-plus/deployment/code-locations/index.md
similarity index 99%
rename from docs/docs-beta/docs/dagster-plus/features/code-locations/index.md
rename to docs/docs-beta/docs/dagster-plus/deployment/code-locations/index.md
index 946b831d0b34e..facde90edf2aa 100644
--- a/docs/docs-beta/docs/dagster-plus/features/code-locations/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/code-locations/index.md
@@ -1,6 +1,6 @@
---
title: "Code locations"
-sidebar_position: 20
+sidebar_position: 40
---
Separate code locations allow you to deploy different projects that still roll up into a single Dagster+ deployment with one global lineage graph.
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/amazon-ecs/index.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/amazon-ecs/index.md
index ade5c5b935ffa..0ab0a059c490b 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/amazon-ecs/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/amazon-ecs/index.md
@@ -1,6 +1,6 @@
---
title: Amazon ECS agent
-sidebar_position: 50
+sidebar_position: 30
---
import DocCardList from '@theme/DocCardList';
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/architecture.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/architecture.md
index 7c21d41a38430..8afa1ecb5e76a 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/architecture.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/architecture.md
@@ -5,25 +5,13 @@ sidebar_position: 10
The Hybrid architecture is the most flexible and secure way to deploy Dagster+. It allows you to run your user code in your environment while leveraging Dagster+'s infrastructure for orchestration and metadata management
-
- Pre-requisites
-
-Before you begin, you should have:
-
-- A [Dagster+ account](/dagster-plus/getting-started)
-- [Basic familiarity with Dagster](/getting-started/quickstart)
-
-
-
----
-
## Hybrid architecture overview
A **hybrid deployment** utilizes a combination of your infrastructure and Dagster-hosted backend services.
-The Dagster backend services - including the web frontend, GraphQL API, metadata database, and daemons (responsible for executing schedules and sensors) - are hosted in Dagster+. You are responsible for running an [agent](/todo) in your environment.
+The Dagster backend services - including the web frontend, GraphQL API, metadata database, and daemons (responsible for executing schedules and sensors) - are hosted in Dagster+. You are responsible for running an [agent](index.md#dagster-hybrid-agents) in your environment.
-![Dagster+ Hybrid deployment architecture](/img/placeholder.svg)
+![Dagster+ Hybrid deployment architecture](/images/dagster-cloud/deployment/hybrid-architecture.png)
Work is enqueued for your agent when:
@@ -35,27 +23,31 @@ The agent polls the agent API to see if any work needs to be done and launches u
All user code runs within your environment, in isolation from Dagster system code.
----
-
## The agent
Because the agent communicates with the Dagster+ control plane over the agent API, it's possible to support agents that operate in arbitrary compute environments.
This means that over time, Dagster+'s support for different user deployment environments will expand and custom agents can take advantage of bespoke compute environments such as HPC.
-Refer to the [Agents documentation](/todo) for more info, including the agents that are currently supported.
-
----
+See the [setup page](index.md#dagster-hybrid-agents) for a list of agents that are currently supported.
## Security
-This section describes how Dagster+ interacts with user code. To summarize:
+Dagster+ Hybrid relies on a shared security model.
+
+The Dagster+ control plane is SOC 2 Type II certified and follows best practices such as:
+- encrypting data at rest (AES 256) and in transit (TLS 1.2+)
+- maintaining high availability, with disaster recovery and backup strategies
+- storing only metadata such as pipeline names, execution status, and run duration
+
+The execution environment is managed by the customer:
+- Dagster+ doesn't have access to user code—your code never leaves your environment. Metadata about the code is fetched over constrained APIs.
+- All connections to databases, file systems, and other resources are made from your environment.
+- The execution environment only requires egress access to Dagster+. No ingress is required from Dagster+ to user environments.
-- No ingress is required from Dagster+ to user environments
-- Dagster+ doesn't have access to user code. Metadata about the code is fetched over constrained APIs.
-- The Dagster+ agent is [open source and auditable](https://github.com/dagster-io/dagster-cloud)
+Additionally, the Dagster+ agent is [open source and auditable](https://github.com/dagster-io/dagster-cloud).
-These highlights are described in more detail below:
+The following highlights are described in more detail below:
- [Interactions and queries](#interactions-and-queries)
- [Runs](#runs)
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/docker/index.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/docker/index.md
index cb00c69f21296..81ebfa807f20a 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/docker/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/docker/index.md
@@ -1,6 +1,6 @@
---
title: Docker agent
-sidebar_position: 30
+sidebar_position: 40
---
import DocCardList from '@theme/DocCardList';
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/index.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/index.md
index f726f15d4a12b..5a4c660304db3 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/index.md
@@ -6,25 +6,26 @@ sidebar_position: 20
In a Dagster+ Hybrid deployment, the orchestration control plane is run by Dagster+ while your Dagster code is executed within your environment.
-[comment]: <> (TODO: Architecture diagram)
+:::note
+For an overview of the Hybrid design, including security considerations, see [Dagster+ Hybrid architecture](architecture.md).
+:::
## Get started
-To get started with a Hybrid deployment you'll need to:
+To get started with a Hybrid deployment, you'll need to:
1. Create a [Dagster+ organization](https://dagster.cloud/signup)
-2. Install a Dagster+ Hybrid Agent
-3. [Add a code location](/dagster-plus/features/code-locations), typically using a Git repository and CI/CD
+2. [Install a Dagster+ Hybrid agent](#dagster-hybrid-agents)
+3. [Add a code location](/dagster-plus/deployment/code-locations), typically using a Git repository and CI/CD
## Dagster+ Hybrid agents
-The Dagster+ agent is a long-lived process that polls Dagster+'s API servers for new work.
+The Dagster+ agent is a long-lived process that polls Dagster+'s API servers for new work. Currently supported agents include:
-See the following guides for setting up an agent:
- [Kubernetes](/dagster-plus/deployment/deployment-types/hybrid/kubernetes)
- [AWS ECS](/dagster-plus/deployment/deployment-types/hybrid/amazon-ecs/new-vpc)
- [Docker](/dagster-plus/deployment/deployment-types/hybrid/docker)
- - [Locally](/dagster-plus/deployment/deployment-types/hybrid/local)
+ - [Local agent](/dagster-plus/deployment/deployment-types/hybrid/local)
## What you'll see in your environment
@@ -44,20 +45,10 @@ When a run needs to be launched, Dagster+ enqueues instructions for your agent t
Your agent will send Dagster+ metadata letting us know the run has been launched. Your run's container will also send Dagster+ metadata informing us of how the run is progressing. The Dagster+ backend services will monitor this stream of metadata to make additional orchestration decisions, monitor for failure, or send alerts.
-## Security
+## Best practices
-Dagster+ hybrid relies on a shared security model.
+### Security
-The Dagster+ control plane is SOC 2 Type II certified and follows best practices such as:
-- encrypting data at rest (AES 256) and in transit (TLS 1.2+)
-- highly available, with disaster recovery and backup strategies
-- only manages metadata such as pipeline names, execution status, and run duration
-
-The execution environment is managed by the customer:
-- your code never leaves your environment
-- all connections to databases, file systems, and other resources are made from your environment
-- the execution environment only requires egress access to Dagster+
-
-Common security considerations in Dagster+ hybrid include:
-- [disabling log forwarding](/todo)
-- [managing tokens](/todo)
+You can do the following to make your Dagster+ Hybrid deployment more secure:
+- [Disable log forwarding](/dagster-plus/deployment/management/settings/customizing-agent-settings#disabling-compute-logs)
+- [Manage tokens](/dagster-plus/deployment/management/tokens/agent-tokens)
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/index.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/index.md
index d744a447c3b87..3b4e765984d84 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/index.md
@@ -1,6 +1,6 @@
---
title: Kubernetes agent
-sidebar_position: 40
+sidebar_position: 20
---
import DocCardList from '@theme/DocCardList';
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/setup.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/setup.md
index 69162ce92d7eb..f6cd53f5dbb09 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/setup.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/kubernetes/setup.md
@@ -442,7 +442,7 @@ Another option is to launch a pod for each asset by telling Dagster to use the K
-Dagster can launch and manage existing Docker images as Kubernetes jobs using the [Dagster kubernetes pipes integration](/integrations/kubernetes). To request resources for these jobs by supplying the appropriate Kubernetes pod spec.
+Dagster can launch and manage existing Docker images as Kubernetes jobs using the [Dagster kubernetes pipes integration](/integrations/libraries/kubernetes). You can request resources for these jobs by supplying the appropriate Kubernetes pod spec.
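+For example, here is a rough sketch using the `PipesK8sClient` from `dagster-k8s`; the image name, container name, and resource values below are illustrative, and the exact pod-spec fields you need may differ for your cluster:
+
+```python
+import dagster as dg
+from dagster_k8s import PipesK8sClient
+
+@dg.asset
+def k8s_pipes_asset(context: dg.AssetExecutionContext, k8s_pipes_client: PipesK8sClient):
+    return k8s_pipes_client.run(
+        context=context,
+        image="docker.io/my-org/my-image:latest",  # illustrative image
+        base_pod_spec={
+            "containers": [
+                {
+                    "name": "main",  # illustrative container name
+                    # Resource requests and limits for the launched pod
+                    "resources": {
+                        "requests": {"cpu": "250m", "memory": "64Mi"},
+                        "limits": {"cpu": "500m", "memory": "128Mi"},
+                    },
+                }
+            ]
+        },
+    ).get_materialize_result()
+
+defs = dg.Definitions(
+    assets=[k8s_pipes_asset],
+    resources={"k8s_pipes_client": PipesK8sClient()},
+)
+```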
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/local.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/local.md
index 53c43ba09d468..dc199cc03e4a8 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/local.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/hybrid/local.md
@@ -1,6 +1,6 @@
---
title: Running a local agent
-sidebar_position: 20
+sidebar_position: 50
sidebar_label: Local agent
---
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/ci-cd-in-serverless.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/ci-cd-in-serverless.md
index 265a378ea82b0..dc094da1f98fb 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/ci-cd-in-serverless.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/ci-cd-in-serverless.md
@@ -83,5 +83,3 @@ dagster-cloud serverless deploy-python-executable ./my-dagster-project \
-
----
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/index.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/index.md
index 02b9ce40ada82..672ded0e54bbc 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/index.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/index.md
@@ -8,9 +8,7 @@ sidebar_position: 10
Dagster+ Serverless is a fully managed version of Dagster+ and is the easiest way to get started with Dagster. With a Serverless deployment, you can run your Dagster jobs without spinning up any infrastructure yourself.
----
-
-## When to choose Serverless \{#when-to-choose-serverless}
+## Serverless vs Hybrid
Serverless works best with workloads that primarily orchestrate other services or perform light computation. Most workloads fit into this category, especially those that orchestrate third-party SaaS products like cloud data warehouses and ETL tools.
@@ -21,9 +19,7 @@ If any of the following are applicable, you should select [Hybrid deployment](/d
- You need to distribute computation across many nodes for a single run. Dagster+ runs currently execute on a single node with 4 CPUs
- You don't want to add Dagster Labs as a data processor
----
-
-## Limitations \{#limitations}
+## Limitations
Serverless is subject to the following limitations:
@@ -36,8 +32,6 @@ Serverless is subject to the following limitations:
Dagster+ Pro customers may request a quota increase by [contacting Sales](https://dagster.io/contact).
----
-
## Next steps
-To start using Dagster+ Serverless, follow our [Getting started with Dagster+](/dagster-plus/getting-started) guide.
+To start using Dagster+ Serverless, follow the steps in [Getting started with Dagster+](/dagster-plus/getting-started).
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/run-isolation.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/run-isolation.md
index c9c5d04ab6756..ec45953a7632f 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/run-isolation.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/run-isolation.md
@@ -15,8 +15,6 @@ To follow the steps in this guide, you'll need:
- An understanding of [Dagster+ deployment settings](/dagster-plus/deployment/management/settings/deployment-settings)
----
-
## Differences between isolated and non-isolated runs
- [**Isolated runs**](#isolated-runs-default) execute in their own container. They're the default and are intended for production and compute-heavy use cases.
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/runtime-environment.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/runtime-environment.md
index 750eefa59919a..ae13107a69f9b 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/runtime-environment.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/runtime-environment.md
@@ -7,13 +7,13 @@ sidebar_position: 100
By default, Dagster+ Serverless packages your code as PEX files and deploys them on Docker images. Using PEX files significantly reduces the time to deploy, since it does not require building a new Docker image and provisioning a new container for every code change. However, you can customize the Serverless runtime environment in various ways:
- [Add dependencies](#add-dependencies)
-- [Use a different Python version](#python-version)
-- [Use a different base image](#base-image)
-- [Include data files](#data-files)
-- [Disable PEX deploys](#disable-pex)
-- [Use private Python packages](#private-packages)
+- [Use a different Python version](#use-a-different-python-version)
+- [Use a different base image](#use-a-different-base-image)
+- [Include data files](#include-data-files)
+- [Disable PEX deploys](#disable-pex-deploys)
+- [Use private Python packages](#use-private-python-packages)
-## Add dependencies \{#add-dependencies}
+## Add dependencies
You can add dependencies by including the corresponding Python libraries in your Dagster project's `setup.py` file. These should follow [PEP 508](https://peps.python.org/pep-0508/).
@@ -39,9 +39,9 @@ setup(
)
```
-To add a package from a private GitHub repository, see: [Use private Python packages](#private-packages)
+To add a package from a private GitHub repository, see [Use private Python packages](#use-private-python-packages).
-## Use a different Python version \{#python-version}
+## Use a different Python version
The default Python version for Dagster+ Serverless is Python 3.9. Python versions 3.10 through 3.12 are also supported. You can specify the Python version you want to use in your GitHub or GitLab workflow, or by using the `dagster-cloud` CLI.
@@ -70,7 +70,7 @@ dagster-cloud serverless deploy-python-executable --python-version=3.11 --locati
-## Use a different base image \{#base-image}
+## Use a different base image
Dagster+ runs your code on a Docker image that we build as follows:
@@ -117,7 +117,7 @@ Setting a custom base image isn't supported for GitLab CI/CD workflows out of th
-## Include data files \{#data-files}
+## Include data files
To add data files to your deployment, use the [Data Files Support](https://setuptools.pypa.io/en/latest/userguide/datafiles.html) built into Python's `setup.py`. This requires adding a `package_data` or `include_package_data` keyword in the call to `setup()` in `setup.py`. For example, given this directory structure:
@@ -134,7 +134,7 @@ To add data files to your deployment, use the [Data Files Support](https://setup
If you want to include the data folder, modify your `setup.py` to add the `package_data` line:
-## Disable PEX deploys \{#disable-pex}
+## Disable PEX deploys
You can disable PEX-based deploys and deploy using a Docker image instead. You can disable PEX in your GitHub or GitLab workflow, or by using the `dagster-cloud` CLI.
@@ -200,7 +200,7 @@ Setting a custom base image isn't supported for GitLab CI/CD workflows out of th
-## Use private Python packages \{#private-packages}
+## Use private Python packages
If you use PEX deploys in your workflow (`ENABLE_FAST_DEPLOYS: 'true'`), the following steps can install a package from a private GitHub repository, e.g. `my-org/private-repo`, as a dependency:
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/security.md b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/security.md
index bd1b23f2e1c6e..1996c5fd07abf 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/security.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/deployment-types/serverless/security.md
@@ -32,8 +32,6 @@ To prevent this, you can use [another I/O manager](/guides/build/configure/io-ma
You must have [boto3](https://pypi.org/project/boto3/) or `dagster-cloud[serverless]` installed as a project dependency; otherwise, the Dagster+ managed storage can fail and silently fall back to using the default I/O manager.
:::
-## Adding environment variables and secrets \{#adding-secrets}
+## Adding environment variables and secrets
Often you'll need to securely access secrets from your jobs. Dagster+ supports several methods for adding secrets—refer to the [Dagster+ environment variables documentation](/dagster-plus/deployment/management/environment-variables) for more information.
-
----
diff --git a/docs/docs-beta/docs/dagster-plus/deployment/management/settings/customizing-agent-settings.md b/docs/docs-beta/docs/dagster-plus/deployment/management/settings/customizing-agent-settings.md
index df595ee48e153..e84fb830f287d 100644
--- a/docs/docs-beta/docs/dagster-plus/deployment/management/settings/customizing-agent-settings.md
+++ b/docs/docs-beta/docs/dagster-plus/deployment/management/settings/customizing-agent-settings.md
@@ -4,4 +4,8 @@ sidebar_position: 80
unlisted: true
---
-{/* TODO move from https://docs.dagster.io/dagster-plus/deployment/agents/customizing-configuration */}
\ No newline at end of file
+{/* TODO move from https://docs.dagster.io/dagster-plus/deployment/agents/customizing-configuration */}
+
+## Disabling compute logs
+
+{/* NOTE this is a placeholder section so the Hybrid deployment index page has somewhere to link to */}
diff --git a/docs/docs-beta/docs/dagster-plus/features/authentication-and-access-control/rbac/user-roles-permissions.md b/docs/docs-beta/docs/dagster-plus/features/authentication-and-access-control/rbac/user-roles-permissions.md
index 5b2ce29700e46..05b98b558c1e1 100644
--- a/docs/docs-beta/docs/dagster-plus/features/authentication-and-access-control/rbac/user-roles-permissions.md
+++ b/docs/docs-beta/docs/dagster-plus/features/authentication-and-access-control/rbac/user-roles-permissions.md
@@ -42,7 +42,7 @@ Dagster+ Pro users can create teams of users and assign default permission sets.
With the exception of the **Organization Admin** role, user and team roles are set on a per-deployment basis.
-Organization Admins have access to the entire organization, including all [deployments](/todo), [code locations](/dagster-plus/features/code-locations), and [Branch Deployments](dagster-plus/features/ci-cd/branch-deployments/index.md).
+Organization Admins have access to the entire organization, including all [deployments](/todo), [code locations](/dagster-plus/deployment/code-locations), and [Branch Deployments](dagster-plus/features/ci-cd/branch-deployments/index.md).
| Level | Plan | Description |
| ------------------ | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
diff --git a/docs/docs-beta/docs/dagster-plus/getting-started.md b/docs/docs-beta/docs/dagster-plus/getting-started.md
index 9be3ffb5f46d5..468e1a248869e 100644
--- a/docs/docs-beta/docs/dagster-plus/getting-started.md
+++ b/docs/docs-beta/docs/dagster-plus/getting-started.md
@@ -2,12 +2,16 @@
title: "Getting started with Dagster+"
---
-First [create a Dagster+ organization](https://dagster.plus/signup). Note: you can sign up with:
+To get started with Dagster+, you will need to create a Dagster+ organization and choose your deployment type (Serverless or Hybrid).
+
+## Create a Dagster+ organization
+
+First, [create a Dagster+ organization](https://dagster.plus/signup). You can sign up with:
- a Google email address
- a GitHub account
-- a one-time email link, great if you are using a corporate email. You can setup SSO after completing these steps.
+- a one-time email link (ideal if you are using a corporate email). You can set up SSO after completing these steps.
-Next, pick your deployment type. Not sure?
+## Choose your deployment type
- [Dagster+ Serverless](/dagster-plus/deployment/deployment-types/serverless) is the easiest way to get started and is great for teams with limited DevOps support. In Dagster+ Serverless, your Dagster code is executed in Dagster+. You will need to be okay [giving Dagster+ the credentials](/dagster-plus/deployment/management/environment-variables) to connect to the tools you want to orchestrate.
@@ -20,14 +24,12 @@ The remaining steps depend on your deployment type.
We recommend following the steps in Dagster+ to add a new project.
-![Screenshot of Dagster+ serverless NUX](/img/placeholder.svg)
-
-The Dagster+ on-boarding will guide you through:
+The Dagster+ onboarding will guide you through:
- creating a Git repository containing your Dagster code
- setting up the necessary CI/CD actions to deploy that repository to Dagster+
:::tip
-If you don't have any Dagster code yet, you will have the option to select an example quickstart project or import an existing dbt project
+If you don't have any Dagster code yet, you can select an example project or import an existing dbt project.
:::
See the guide on [adding code locations](/dagster-plus/features/code-locations) for details.
@@ -35,12 +37,12 @@ See the guide on [adding code locations](/dagster-plus/features/code-locations)
-## Install a Dagster+ Hybrid agent
+**Install a Dagster+ Hybrid agent**
-Follow [these guides](/dagster-plus/deployment/deployment-types/hybrid) for installing a Dagster+ Hybrid agent. Not sure which agent to pick? We recommend using the Dagster+ Kubernetes agent in most cases.
+Follow [these guides](/dagster-plus/deployment/deployment-types/hybrid) for installing a Dagster+ Hybrid agent. If you're not sure which agent to use, we recommend the [Dagster+ Kubernetes agent](/dagster-plus/deployment/deployment-types/hybrid/kubernetes/index.md) in most cases.
-## Setup CI/CD
+**Set up CI/CD**
In most cases, your CI/CD process will be responsible for:
- building your Dagster code into a Docker image
diff --git a/docs/docs-beta/docs/guides/build/integrate/ingesting-data.md b/docs/docs-beta/docs/guides/build/integrate/ingesting-data.md
index a6290c8c37c1d..7d4d0467ff063 100644
--- a/docs/docs-beta/docs/guides/build/integrate/ingesting-data.md
+++ b/docs/docs-beta/docs/guides/build/integrate/ingesting-data.md
@@ -33,10 +33,10 @@ As a data orchestrator, Dagster helps with data ingestion as it can:
Dagster currently integrates with the following data ingestion tools, enabling you to sync diverse data sources into data warehouse tables using pre-built connectors:
-- [Airbyte](/integrations/airbyte)
-- [dlt](/integrations/dlt)
-- [Fivetran](/integrations/fivetran)
-- [Sling](/integrations/sling)
+- [Airbyte](/integrations/libraries/airbyte)
+- [dlt](/integrations/libraries/dlt)
+- [Fivetran](/integrations/libraries/fivetran)
+- [Sling](/integrations/libraries/sling)
## Writing custom data ingestion pipelines
diff --git a/docs/docs-beta/docs/guides/operate/index.md b/docs/docs-beta/docs/guides/operate/index.md
deleted file mode 100644
index 6d95bde0b7fb9..0000000000000
--- a/docs/docs-beta/docs/guides/operate/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Operate
-sidebar_class_name: hidden
----
-
-import DocCardList from '@theme/DocCardList';
-
-
\ No newline at end of file
diff --git a/docs/docs-beta/docs/guides/test/asset-checks.md b/docs/docs-beta/docs/guides/test/asset-checks.md
index 9b1540dc2447b..b2feaead928be 100644
--- a/docs/docs-beta/docs/guides/test/asset-checks.md
+++ b/docs/docs-beta/docs/guides/test/asset-checks.md
@@ -33,7 +33,7 @@ To get started with asset checks, follow these general steps:
## Defining a single asset check \{#single-check}
:::tip
-Dagster's dbt integration can model existing dbt tests as asset checks. Refer to the [dagster-dbt documentation](/integrations/dbt) for more information.
+Dagster's dbt integration can model existing dbt tests as asset checks. Refer to the [dagster-dbt documentation](/integrations/libraries/dbt) for more information.
:::
An asset check is defined using the `@asset_check` decorator.
diff --git a/docs/docs-beta/docs/integrations/fivetran.md b/docs/docs-beta/docs/integrations/fivetran.md
deleted file mode 100644
index 9235e4e3788e5..0000000000000
--- a/docs/docs-beta/docs/integrations/fivetran.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: Integration
-status: published
-name: Fivetran
-title: Dagster & Fivetran
-sidebar_label: Fivetran
-excerpt: Orchestrate Fivetran connectors and schedule syncs with upstream or downstream dependencies.
-date: 2022-11-07
-apireflink: https://docs.dagster.io/_apidocs/libraries/dagster-fivetran
-docslink: https://docs.dagster.io/integrations/fivetran
-partnerlink: https://www.fivetran.com/
-logo: /integrations/Fivetran.svg
-categories:
- - ETL
-enabledBy:
-enables:
----
-
-### About this integration
-
-The Dagster-Fivetran integration enables you to orchestrate data ingestion as part of a larger pipeline. Programmatically interact with the Fivetran REST API to initiate syncs and monitor their progress.
-
-### Installation
-
-```bash
-pip install dagster-fivetran
-```
-
-### Example
-
-
-
-### About Fivetran
-
-**Fivetran** ingests data from SaaS applications, databases, and servers. The data is stored and typically used for analytics.
diff --git a/docs/docs-beta/docs/integrations/guides/multi-asset-integration.md b/docs/docs-beta/docs/integrations/guides/multi-asset-integration.md
new file mode 100644
index 0000000000000..df0615e217fab
--- /dev/null
+++ b/docs/docs-beta/docs/integrations/guides/multi-asset-integration.md
@@ -0,0 +1,5 @@
+---
+title: Creating a multi-asset integration
+---
+
+{/* TODO write this */}
\ No newline at end of file
diff --git a/docs/docs-beta/docs/integrations/index.md b/docs/docs-beta/docs/integrations/index.md
deleted file mode 100644
index 233bd07d0fdc0..0000000000000
--- a/docs/docs-beta/docs/integrations/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-sidebar_class_name: hidden
-title: Integrations
----
-
-import DocCardList from '@theme/DocCardList';
-
-
diff --git a/docs/docs-beta/docs/integrations/airbyte.md b/docs/docs-beta/docs/integrations/libraries/airbyte.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/airbyte.md
rename to docs/docs-beta/docs/integrations/libraries/airbyte.md
index eec45c87481fb..36e160f2986d9 100644
--- a/docs/docs-beta/docs/integrations/airbyte.md
+++ b/docs/docs-beta/docs/integrations/libraries/airbyte.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [dagster-supported, etl]
---
-### About this integration
+
Using this integration, you can trigger Airbyte syncs and orchestrate your Airbyte connections from within Dagster, making it easy to chain an Airbyte sync with upstream or downstream steps in your workflow.
diff --git a/docs/docs-beta/docs/integrations/aws/athena.md b/docs/docs-beta/docs/integrations/libraries/aws/athena.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/aws/athena.md
rename to docs/docs-beta/docs/integrations/libraries/aws/athena.md
index e17f95e077147..f28c6a5a8fae3 100644
--- a/docs/docs-beta/docs/integrations/aws/athena.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/athena.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
This integration allows you to connect to AWS Athena, a serverless interactive query service that makes it easy to analyze data in Amazon S3 using standard SQL. Using this integration, you can issue queries to Athena, fetch results, and handle query execution states within your Dagster pipelines.
diff --git a/docs/docs-beta/docs/integrations/aws/cloudwatch.md b/docs/docs-beta/docs/integrations/libraries/aws/cloudwatch.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/aws/cloudwatch.md
rename to docs/docs-beta/docs/integrations/libraries/aws/cloudwatch.md
index 6f31e5b7fbf02..8f27e767cd512 100644
--- a/docs/docs-beta/docs/integrations/aws/cloudwatch.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/cloudwatch.md
@@ -14,9 +14,10 @@ categories:
- Monitoring
enabledBy:
enables:
+tags: [dagster-supported, monitoring]
---
-### About this integration
+
This integration allows you to send Dagster logs to AWS CloudWatch, enabling centralized logging and monitoring of your Dagster jobs. By using AWS CloudWatch, you can take advantage of its powerful log management features, such as real-time log monitoring, log retention policies, and alerting capabilities.
diff --git a/docs/docs-beta/docs/integrations/aws/ecr.md b/docs/docs-beta/docs/integrations/libraries/aws/ecr.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/aws/ecr.md
rename to docs/docs-beta/docs/integrations/libraries/aws/ecr.md
index dfaec5dea91f8..a3b2487794a0b 100644
--- a/docs/docs-beta/docs/integrations/aws/ecr.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/ecr.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
This integration allows you to connect to AWS Elastic Container Registry (ECR). It provides resources to interact with AWS ECR, enabling you to manage your container images.
diff --git a/docs/docs-beta/docs/integrations/aws/emr.md b/docs/docs-beta/docs/integrations/libraries/aws/emr.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/aws/emr.md
rename to docs/docs-beta/docs/integrations/libraries/aws/emr.md
index 4a055872d2a1f..db5ba4e4bc91d 100644
--- a/docs/docs-beta/docs/integrations/aws/emr.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/emr.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
The `dagster-aws` integration provides ways of orchestrating data pipelines that leverage AWS services, including AWS EMR (Elastic MapReduce). This integration allows you to run and scale big data workloads using open source tools such as Apache Spark, Hive, Presto, and more.
diff --git a/docs/docs-beta/docs/integrations/aws/glue.md b/docs/docs-beta/docs/integrations/libraries/aws/glue.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/aws/glue.md
rename to docs/docs-beta/docs/integrations/libraries/aws/glue.md
index e06ce1494ba57..629df3adc218d 100644
--- a/docs/docs-beta/docs/integrations/aws/glue.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/glue.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
The `dagster-aws` integration library provides the `PipesGlueClient` resource, enabling you to launch AWS Glue jobs directly from Dagster assets and ops. This integration allows you to pass parameters to Glue code while Dagster receives real-time events, such as logs, asset checks, and asset materializations, from the initiated jobs. With minimal code changes required on the job side, this integration is both efficient and easy to implement.
diff --git a/docs/docs-beta/docs/integrations/aws/index.md b/docs/docs-beta/docs/integrations/libraries/aws/index.md
similarity index 80%
rename from docs/docs-beta/docs/integrations/aws/index.md
rename to docs/docs-beta/docs/integrations/libraries/aws/index.md
index 481931c7b81e2..a95102a5e87c8 100644
--- a/docs/docs-beta/docs/integrations/aws/index.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/index.md
@@ -4,4 +4,4 @@ title: AWS
import DocCardList from '@theme/DocCardList';
-
+
\ No newline at end of file
diff --git a/docs/docs-beta/docs/integrations/aws/lambda.md b/docs/docs-beta/docs/integrations/libraries/aws/lambda.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/aws/lambda.md
rename to docs/docs-beta/docs/integrations/libraries/aws/lambda.md
index 4dd4ba58e903d..5ec9c7c9c64ad 100644
--- a/docs/docs-beta/docs/integrations/aws/lambda.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/lambda.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
Using this integration, you can leverage AWS Lambda to execute external code as part of your Dagster pipelines. This is particularly useful for running serverless functions that can scale automatically and handle various workloads without the need for managing infrastructure. The `PipesLambdaClient` class allows you to invoke AWS Lambda functions and stream logs and structured metadata back to Dagster's UI and tools.
diff --git a/docs/docs-beta/docs/integrations/aws/redshift.md b/docs/docs-beta/docs/integrations/libraries/aws/redshift.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/aws/redshift.md
rename to docs/docs-beta/docs/integrations/libraries/aws/redshift.md
index 053bd366b417f..593516db98136 100644
--- a/docs/docs-beta/docs/integrations/aws/redshift.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/redshift.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
Using this integration, you can connect to an AWS Redshift cluster and issue queries against it directly from your Dagster assets. This allows you to seamlessly integrate Redshift into your data pipelines, leveraging the power of Redshift's data warehousing capabilities within your Dagster workflows.
diff --git a/docs/docs-beta/docs/integrations/aws/s3.md b/docs/docs-beta/docs/integrations/libraries/aws/s3.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/aws/s3.md
rename to docs/docs-beta/docs/integrations/libraries/aws/s3.md
index e617605730442..71e35378e38b1 100644
--- a/docs/docs-beta/docs/integrations/aws/s3.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/s3.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
The AWS S3 integration allows data engineers to easily read and write objects to durable AWS S3 storage, enabling a resilient storage layer when constructing their pipelines.
diff --git a/docs/docs-beta/docs/integrations/aws/secretsmanager.md b/docs/docs-beta/docs/integrations/libraries/aws/secretsmanager.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/aws/secretsmanager.md
rename to docs/docs-beta/docs/integrations/libraries/aws/secretsmanager.md
index 736b84fc56fb4..48b3b007bf1cd 100644
--- a/docs/docs-beta/docs/integrations/aws/secretsmanager.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/secretsmanager.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
This integration allows you to manage, retrieve, and rotate credentials, API keys, and other secrets using [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/).
diff --git a/docs/docs-beta/docs/integrations/aws/ssm.md b/docs/docs-beta/docs/integrations/libraries/aws/ssm.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/aws/ssm.md
rename to docs/docs-beta/docs/integrations/libraries/aws/ssm.md
index 36f480a509482..d0da33d8d7e2f 100644
--- a/docs/docs-beta/docs/integrations/aws/ssm.md
+++ b/docs/docs-beta/docs/integrations/libraries/aws/ssm.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
The Dagster AWS Systems Manager (SSM) Parameter Store integration allows you to manage and retrieve parameters stored in AWS SSM Parameter Store directly within your Dagster pipelines. This integration provides resources to fetch parameters by name, tags, or paths, and optionally set them as environment variables for your operations.
diff --git a/docs/docs-beta/docs/integrations/azure-adls2.md b/docs/docs-beta/docs/integrations/libraries/azure-adls2.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/azure-adls2.md
rename to docs/docs-beta/docs/integrations/libraries/azure-adls2.md
index 9f766bac09d09..780b800aa62dc 100644
--- a/docs/docs-beta/docs/integrations/azure-adls2.md
+++ b/docs/docs-beta/docs/integrations/libraries/azure-adls2.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
Dagster helps you use Azure Storage Accounts as part of your data pipeline. Azure Data Lake Storage Gen 2 (ADLS2) is our primary focus, but we also provide utilities for Azure Blob Storage.
diff --git a/docs/docs-beta/docs/integrations/census.md b/docs/docs-beta/docs/integrations/libraries/census.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/census.md
rename to docs/docs-beta/docs/integrations/libraries/census.md
index ae13177111531..1c3e4f8e2c903 100644
--- a/docs/docs-beta/docs/integrations/census.md
+++ b/docs/docs-beta/docs/integrations/libraries/census.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [community-supported, etl]
---
-### About this integration
+
With the `dagster-census` integration you can execute a Census sync and poll until that sync completes, raising an error if it's unsuccessful.
diff --git a/docs/docs-beta/docs/integrations/cube.md b/docs/docs-beta/docs/integrations/libraries/cube.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/cube.md
rename to docs/docs-beta/docs/integrations/libraries/cube.md
index 9bac9cd168c7b..e1e976090adf0 100644
--- a/docs/docs-beta/docs/integrations/cube.md
+++ b/docs/docs-beta/docs/integrations/libraries/cube.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [community-supported]
---
-### About this integration
+
With the `dagster_cube` integration you can set up Cube and Dagster to work together so that Dagster can push changes from upstream data sources to Cube using its integration API.
diff --git a/docs/docs-beta/docs/integrations/databricks.md b/docs/docs-beta/docs/integrations/libraries/databricks.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/databricks.md
rename to docs/docs-beta/docs/integrations/libraries/databricks.md
index 92ee6a0fc6a78..ff4736c3c0c4e 100644
--- a/docs/docs-beta/docs/integrations/databricks.md
+++ b/docs/docs-beta/docs/integrations/libraries/databricks.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
The `dagster-databricks` integration library provides the `PipesDatabricksClient` resource, enabling you to launch Databricks jobs directly from Dagster assets and ops. This integration allows you to pass parameters to Databricks code while Dagster receives real-time events, such as logs, asset checks, and asset materializations, from the initiated jobs. With minimal code changes required on the job side, this integration is both efficient and easy to implement.
diff --git a/docs/docs-beta/docs/integrations/datadog.md b/docs/docs-beta/docs/integrations/libraries/datadog.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/datadog.md
rename to docs/docs-beta/docs/integrations/libraries/datadog.md
index 9bac8f21649b5..4f2e867eb2fd7 100644
--- a/docs/docs-beta/docs/integrations/datadog.md
+++ b/docs/docs-beta/docs/integrations/libraries/datadog.md
@@ -14,9 +14,10 @@ categories:
- Monitoring
enabledBy:
enables:
+tags: [dagster-supported, monitoring]
---
-### About this integration
+
While Dagster provides comprehensive monitoring and observability of the pipelines it orchestrates, many teams look to centralize all their monitoring across apps, processes and infrastructure using Datadog's 'Cloud Monitoring as a Service'. The `dagster-datadog` integration allows you to publish metrics to Datadog from within Dagster ops.
diff --git a/docs/docs-beta/docs/integrations/dbt-cloud.md b/docs/docs-beta/docs/integrations/libraries/dbt-cloud.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/dbt-cloud.md
rename to docs/docs-beta/docs/integrations/libraries/dbt-cloud.md
index ae375f429e15e..80c07e9990d41 100644
--- a/docs/docs-beta/docs/integrations/dbt-cloud.md
+++ b/docs/docs-beta/docs/integrations/libraries/dbt-cloud.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [dagster-supported, etl]
---
-### About this integration
+
Dagster allows you to run dbt Cloud jobs alongside other technologies. You can schedule them to run as a step in a larger pipeline and manage them as a data asset.
diff --git a/docs/docs-beta/docs/integrations/dbt.md b/docs/docs-beta/docs/integrations/libraries/dbt.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/dbt.md
rename to docs/docs-beta/docs/integrations/libraries/dbt.md
index cd2b7f873e3f2..177dc9f4e45f3 100644
--- a/docs/docs-beta/docs/integrations/dbt.md
+++ b/docs/docs-beta/docs/integrations/libraries/dbt.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [dagster-supported, etl]
---
-### About this integration
+
Dagster orchestrates dbt alongside other technologies, so you can schedule dbt with Spark, Python, etc. in a single data pipeline.
diff --git a/docs/docs-beta/docs/integrations/deltalake.md b/docs/docs-beta/docs/integrations/libraries/deltalake.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/deltalake.md
rename to docs/docs-beta/docs/integrations/libraries/deltalake.md
index 175fc173c5534..3a3fc20373fef 100644
--- a/docs/docs-beta/docs/integrations/deltalake.md
+++ b/docs/docs-beta/docs/integrations/libraries/deltalake.md
@@ -15,9 +15,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [community-supported, storage]
---
-### About this integration
+
Delta Lake is a great storage format for Dagster workflows. With this integration, you can use the Delta Lake I/O Manager to read and write your Dagster assets.
diff --git a/docs/docs-beta/docs/integrations/dlt.md b/docs/docs-beta/docs/integrations/libraries/dlt.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/dlt.md
rename to docs/docs-beta/docs/integrations/libraries/dlt.md
index 9381022348790..19c54360eea56 100644
--- a/docs/docs-beta/docs/integrations/dlt.md
+++ b/docs/docs-beta/docs/integrations/libraries/dlt.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [dagster-supported, etl]
---
-### About this integration
+
This integration allows you to use [dlt](https://dlthub.com/) to easily ingest and replicate data between systems through Dagster.
diff --git a/docs/docs-beta/docs/integrations/docker.md b/docs/docs-beta/docs/integrations/libraries/docker.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/docker.md
rename to docs/docs-beta/docs/integrations/libraries/docker.md
index 16bdd0ab960c0..6a65e2818cee9 100644
--- a/docs/docs-beta/docs/integrations/docker.md
+++ b/docs/docs-beta/docs/integrations/libraries/docker.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
The `dagster-docker` integration library provides the `PipesDockerClient` resource, enabling you to launch Docker containers and execute external code directly from Dagster assets and ops. This integration allows you to pass parameters to Docker containers while Dagster receives real-time events, such as logs, asset checks, and asset materializations, from the initiated jobs. With minimal code changes required on the job side, this integration is both efficient and easy to implement.
diff --git a/docs/docs-beta/docs/integrations/duckdb.md b/docs/docs-beta/docs/integrations/libraries/duckdb.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/duckdb.md
rename to docs/docs-beta/docs/integrations/libraries/duckdb.md
index 5335df37db094..e8097b5040ed8 100644
--- a/docs/docs-beta/docs/integrations/duckdb.md
+++ b/docs/docs-beta/docs/integrations/libraries/duckdb.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
This library provides an integration with the DuckDB database, and allows for an out-of-the-box [I/O Manager](https://docs.dagster.io/concepts/io-management/io-managers) so that you can make DuckDB your storage of choice.
diff --git a/docs/docs-beta/docs/integrations/libraries/fivetran.md b/docs/docs-beta/docs/integrations/libraries/fivetran.md
new file mode 100644
index 0000000000000..33baaa8275c07
--- /dev/null
+++ b/docs/docs-beta/docs/integrations/libraries/fivetran.md
@@ -0,0 +1,87 @@
+---
+layout: Integration
+status: published
+name: Fivetran
+title: Using Dagster with Fivetran
+sidebar_label: Fivetran
+excerpt: Orchestrate Fivetran connector syncs with upstream or downstream dependencies.
+date: 2022-11-07
+apireflink: https://docs.dagster.io/_apidocs/libraries/dagster-fivetran
+docslink: https://docs.dagster.io/integrations/fivetran
+partnerlink: https://www.fivetran.com/
+logo: /integrations/Fivetran.svg
+categories:
+ - ETL
+enabledBy:
+enables:
+tags: [dagster-supported, etl]
+---
+
+This guide provides instructions for using Dagster with Fivetran using the `dagster-fivetran` library. Your Fivetran connector tables can be represented as assets in the Dagster asset graph, allowing you to track lineage and dependencies between Fivetran assets and data assets you are already modeling in Dagster. You can also use Dagster to orchestrate Fivetran connectors, allowing you to trigger syncs for these on a cadence or based on upstream data changes.
+
+## What you'll learn
+
+- How to represent Fivetran assets in the Dagster asset graph, including lineage to other Dagster assets.
+- How to customize asset definition metadata for these Fivetran assets.
+- How to materialize Fivetran connector tables from Dagster.
+- How to customize how Fivetran connector tables are materialized.
+
+
+## Prerequisites
+
+- The `dagster` and `dagster-fivetran` libraries installed in your environment
+- Familiarity with asset definitions and the Dagster asset graph
+- Familiarity with Dagster resources
+- Familiarity with Fivetran concepts, like connectors and connector tables
+- A Fivetran workspace
+- A Fivetran API key and API secret. For more information, see [Getting Started](https://fivetran.com/docs/rest-api/getting-started) in the Fivetran REST API documentation.
+
+
+
+## Set up your environment
+
+To get started, you'll need to install the `dagster` and `dagster-fivetran` Python packages:
+
+```bash
+pip install dagster dagster-fivetran
+```
+
+## Represent Fivetran assets in the asset graph
+
+To load Fivetran assets into the Dagster asset graph, you must first construct a `FivetranWorkspace` resource, which allows Dagster to communicate with your Fivetran workspace. You'll need to supply your account ID, API key and API secret. See [Getting Started](https://fivetran.com/docs/rest-api/getting-started) in the Fivetran REST API documentation for more information on how to create your API key and API secret.
+
+Dagster can automatically load all connector tables from your Fivetran workspace as asset specs. Call the `load_fivetran_asset_specs` function, which returns a list of `AssetSpec` objects representing your Fivetran assets. You can then include these asset specs in your `Definitions` object:
+
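+Here is a minimal sketch of what this can look like; the environment variable names and the `fivetran_workspace` variable name are illustrative:
+
+```python
+import dagster as dg
+from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
+
+# A FivetranWorkspace resource lets Dagster communicate with the Fivetran REST API
+fivetran_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+# Load every connector table in the workspace as an asset spec
+fivetran_specs = load_fivetran_asset_specs(fivetran_workspace)
+
+defs = dg.Definitions(
+    assets=fivetran_specs,
+    resources={"fivetran": fivetran_workspace},
+)
+```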
+
+
+### Sync and materialize Fivetran assets
+
+You can use Dagster to sync Fivetran connectors and materialize Fivetran connector tables. You can use the `build_fivetran_assets_definitions` factory to create all asset definitions for your Fivetran workspace.
+
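+For example, a minimal sketch of the factory in use (credential environment variable names are illustrative):
+
+```python
+import dagster as dg
+from dagster_fivetran import FivetranWorkspace, build_fivetran_assets_definitions
+
+fivetran_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+# One assets definition per connector in the workspace; materializing
+# them triggers a Fivetran sync and polls until it completes
+all_fivetran_assets = build_fivetran_assets_definitions(workspace=fivetran_workspace)
+
+defs = dg.Definitions(
+    assets=all_fivetran_assets,
+    resources={"fivetran": fivetran_workspace},
+)
+```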
+
+
+### Customize the materialization of Fivetran assets
+
+If you want to customize the sync of your connectors, you can use the `@fivetran_assets` decorator to do so. This allows you to execute custom code before and after the call to the Fivetran sync.
+
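+A sketch of a customized sync follows; the connector ID is a placeholder:
+
+```python
+import dagster as dg
+from dagster_fivetran import FivetranWorkspace, fivetran_assets
+
+fivetran_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+@fivetran_assets(
+    connector_id="my_connector_id",  # placeholder connector ID
+    workspace=fivetran_workspace,
+)
+def my_fivetran_connector_assets(
+    context: dg.AssetExecutionContext, fivetran: FivetranWorkspace
+):
+    # Custom code can run here, before the sync is triggered
+    yield from fivetran.sync_and_poll(context=context)
+    # ...and here, after the sync completes
+
+defs = dg.Definitions(
+    assets=[my_fivetran_connector_assets],
+    resources={"fivetran": fivetran_workspace},
+)
+```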
+
+
+### Customize asset definition metadata for Fivetran assets
+
+By default, Dagster will generate asset specs for each Fivetran asset and populate default metadata. You can further customize asset properties by passing an instance of a custom `DagsterFivetranTranslator` subclass to the `load_fivetran_asset_specs` function.
+
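+A sketch of a custom translator is below; this assumes the translator exposes a `get_asset_spec` method that receives `FivetranConnectorTableProps`, and the `"fivetran"` key prefix is an arbitrary example:
+
+```python
+import dagster as dg
+from dagster_fivetran import (
+    DagsterFivetranTranslator,
+    FivetranConnectorTableProps,
+    FivetranWorkspace,
+    load_fivetran_asset_specs,
+)
+
+class CustomDagsterFivetranTranslator(DagsterFivetranTranslator):
+    def get_asset_spec(self, props: FivetranConnectorTableProps) -> dg.AssetSpec:
+        # Generate the default asset spec first, then customize it
+        default_spec = super().get_asset_spec(props)
+        return default_spec.replace_attributes(
+            key=default_spec.key.with_prefix("fivetran"),
+        )
+
+fivetran_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+fivetran_specs = load_fivetran_asset_specs(
+    fivetran_workspace,
+    dagster_fivetran_translator=CustomDagsterFivetranTranslator(),
+)
+```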
+
+
+Note that `super()` is called in each of the overridden methods to generate the default asset spec. It is best practice to generate the default asset spec before customizing it.
+
+You can pass an instance of the custom translator to the `@fivetran_assets` decorator or the `build_fivetran_assets_definitions` factory.
+
+### Load Fivetran assets from multiple workspaces
+
+Definitions from multiple Fivetran workspaces can be combined by instantiating multiple `FivetranWorkspace` resources and merging their specs. This lets you view all your Fivetran assets in a single asset graph:
+
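+A sketch of combining two workspaces (the `sales`/`marketing` names and environment variables are illustrative):
+
+```python
+import dagster as dg
+from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
+
+sales_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_SALES_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_SALES_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_SALES_API_SECRET"),
+)
+marketing_workspace = FivetranWorkspace(
+    account_id=dg.EnvVar("FIVETRAN_MARKETING_ACCOUNT_ID"),
+    api_key=dg.EnvVar("FIVETRAN_MARKETING_API_KEY"),
+    api_secret=dg.EnvVar("FIVETRAN_MARKETING_API_SECRET"),
+)
+
+# Merge the specs from both workspaces into a single asset graph
+defs = dg.Definitions(
+    assets=[
+        *load_fivetran_asset_specs(sales_workspace),
+        *load_fivetran_asset_specs(marketing_workspace),
+    ],
+    resources={
+        "sales_fivetran": sales_workspace,
+        "marketing_fivetran": marketing_workspace,
+    },
+)
+```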
+
+
+### About Fivetran
+
+**Fivetran** ingests data from SaaS applications, databases, and servers. The data is stored and typically used for analytics.
\ No newline at end of file
diff --git a/docs/docs-beta/docs/integrations/gcp/bigquery.md b/docs/docs-beta/docs/integrations/libraries/gcp/bigquery.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/gcp/bigquery.md
rename to docs/docs-beta/docs/integrations/libraries/gcp/bigquery.md
index 3decbd33ad606..986a7f4070de2 100644
--- a/docs/docs-beta/docs/integrations/gcp/bigquery.md
+++ b/docs/docs-beta/docs/integrations/libraries/gcp/bigquery.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
The Google Cloud Platform BigQuery integration allows data engineers to easily query and store data in the BigQuery data warehouse through the use of the `BigQueryResource`.
diff --git a/docs/docs-beta/docs/integrations/gcp/dataproc.md b/docs/docs-beta/docs/integrations/libraries/gcp/dataproc.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/gcp/dataproc.md
rename to docs/docs-beta/docs/integrations/libraries/gcp/dataproc.md
index fbb9527fb6065..dc168778f798b 100644
--- a/docs/docs-beta/docs/integrations/gcp/dataproc.md
+++ b/docs/docs-beta/docs/integrations/libraries/gcp/dataproc.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
Using this integration, you can manage and interact with Google Cloud Platform's Dataproc service directly from Dagster. This integration allows you to create, manage, and delete Dataproc clusters, and submit and monitor jobs on these clusters.
diff --git a/docs/docs-beta/docs/integrations/gcp/gcs.md b/docs/docs-beta/docs/integrations/libraries/gcp/gcs.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/gcp/gcs.md
rename to docs/docs-beta/docs/integrations/libraries/gcp/gcs.md
index 4969db7e33882..5969fe8f89368 100644
--- a/docs/docs-beta/docs/integrations/gcp/gcs.md
+++ b/docs/docs-beta/docs/integrations/libraries/gcp/gcs.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
This integration allows you to interact with Google Cloud Storage (GCS) using Dagster. It provides resources, I/O Managers, and utilities to manage and store data in GCS, making it easier to integrate GCS into your data pipelines.
diff --git a/docs/docs-beta/docs/integrations/gcp/index.md b/docs/docs-beta/docs/integrations/libraries/gcp/index.md
similarity index 100%
rename from docs/docs-beta/docs/integrations/gcp/index.md
rename to docs/docs-beta/docs/integrations/libraries/gcp/index.md
diff --git a/docs/docs-beta/docs/integrations/github.md b/docs/docs-beta/docs/integrations/libraries/github.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/github.md
rename to docs/docs-beta/docs/integrations/libraries/github.md
index 19c278d44ea0a..8d4176eb8e940 100644
--- a/docs/docs-beta/docs/integrations/github.md
+++ b/docs/docs-beta/docs/integrations/libraries/github.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
This library provides an integration with _[GitHub Apps](https://docs.github.com/en/developers/apps/getting-started-with-apps/about-apps)_ as a thin wrapper on the GitHub v4 GraphQL API. This allows you to automate operations within your GitHub repositories, with the tighter permission scopes that GitHub Apps allow compared to a personal token.
diff --git a/docs/docs-beta/docs/integrations/hashicorp.md b/docs/docs-beta/docs/integrations/libraries/hashicorp.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/hashicorp.md
rename to docs/docs-beta/docs/integrations/libraries/hashicorp.md
index 5664934092637..5d65ae6106959 100644
--- a/docs/docs-beta/docs/integrations/hashicorp.md
+++ b/docs/docs-beta/docs/integrations/libraries/hashicorp.md
@@ -15,9 +15,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [community-supported]
---
-### About this integration
+
Package for integrating HashiCorp Vault into Dagster so that you can securely manage tokens and passwords.
diff --git a/docs/docs-beta/docs/integrations/hightouch.md b/docs/docs-beta/docs/integrations/libraries/hightouch.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/hightouch.md
rename to docs/docs-beta/docs/integrations/libraries/hightouch.md
index 11f81649565c9..eea47836e2390 100644
--- a/docs/docs-beta/docs/integrations/hightouch.md
+++ b/docs/docs-beta/docs/integrations/libraries/hightouch.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [community-supported, etl]
---
-### About this integration
+
With this integration you can trigger Hightouch syncs and monitor them from within Dagster. Fine-tune when Hightouch syncs kick off, visualize their dependencies, and monitor the steps in your data activation workflow.
diff --git a/docs/docs-beta/docs/integrations/libraries/index.md b/docs/docs-beta/docs/integrations/libraries/index.md
new file mode 100644
index 0000000000000..99a53949d12af
--- /dev/null
+++ b/docs/docs-beta/docs/integrations/libraries/index.md
@@ -0,0 +1,10 @@
+---
+title: Libraries
+sidebar_class_name: hidden
+---
+
+You can integrate Dagster with external services using our libraries and libraries supported by the community.
+
+import DocCardList from '@theme/DocCardList';
+
+
\ No newline at end of file
diff --git a/docs/docs-beta/docs/integrations/jupyter.md b/docs/docs-beta/docs/integrations/libraries/jupyter.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/jupyter.md
rename to docs/docs-beta/docs/integrations/libraries/jupyter.md
index f0ab1db8998cf..c24ab32e1f1d5 100644
--- a/docs/docs-beta/docs/integrations/jupyter.md
+++ b/docs/docs-beta/docs/integrations/libraries/jupyter.md
@@ -15,6 +15,7 @@ enabledBy:
categories:
- Compute
enables:
+tags: [dagster-supported, compute]
---
### About Jupyter
diff --git a/docs/docs-beta/docs/integrations/kubernetes.md b/docs/docs-beta/docs/integrations/libraries/kubernetes.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/kubernetes.md
rename to docs/docs-beta/docs/integrations/libraries/kubernetes.md
index dbe389b9b2536..bdff728e10bbf 100644
--- a/docs/docs-beta/docs/integrations/kubernetes.md
+++ b/docs/docs-beta/docs/integrations/libraries/kubernetes.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
The `dagster-k8s` integration library provides the `PipesK8sClient` resource, enabling you to launch Kubernetes pods and execute external code directly from Dagster assets and ops. This integration allows you to pass parameters to Kubernetes pods while Dagster receives real-time events, such as logs, asset checks, and asset materializations, from the initiated jobs. With minimal code changes required on the job side, this integration is both efficient and easy to implement.
diff --git a/docs/docs-beta/docs/integrations/lakefs.md b/docs/docs-beta/docs/integrations/libraries/lakefs.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/lakefs.md
rename to docs/docs-beta/docs/integrations/libraries/lakefs.md
index c4901e7bc28f6..64f3510405c21 100644
--- a/docs/docs-beta/docs/integrations/lakefs.md
+++ b/docs/docs-beta/docs/integrations/libraries/lakefs.md
@@ -15,9 +15,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [community-supported, storage]
---
-### About this integration
+
By integrating with lakeFS, a big data scale version control system, you can leverage the versioning capabilities of lakeFS to track changes to your data. This integration allows you to have a complete lineage of your data, from the initial raw data to the transformed and processed data, making it easier to understand and reproduce data transformations.
diff --git a/docs/docs-beta/docs/integrations/looker.md b/docs/docs-beta/docs/integrations/libraries/looker.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/looker.md
rename to docs/docs-beta/docs/integrations/libraries/looker.md
index 33b936b606125..4cff5bf5fddcd 100644
--- a/docs/docs-beta/docs/integrations/looker.md
+++ b/docs/docs-beta/docs/integrations/libraries/looker.md
@@ -15,9 +15,10 @@ categories:
- BI
enabledBy:
enables:
+tags: [dagster-supported, bi]
---
-### About this integration
+
Dagster allows you to represent your Looker project as assets, alongside your other technologies like dbt and Sling. This allows you to see how your Looker assets are connected to your other data assets, and how changes to other data assets might impact your Looker project.
diff --git a/docs/docs-beta/docs/integrations/meltano.md b/docs/docs-beta/docs/integrations/libraries/meltano.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/meltano.md
rename to docs/docs-beta/docs/integrations/libraries/meltano.md
index 3a36625b75b4c..c78e518e8ca24 100644
--- a/docs/docs-beta/docs/integrations/meltano.md
+++ b/docs/docs-beta/docs/integrations/libraries/meltano.md
@@ -15,9 +15,10 @@ categories:
communityIntegration: true
enabledBy:
enables:
+tags: [community-supported, etl]
---
-### About this integration
+
The `dagster-meltano` library allows you to run Meltano using Dagster. Design and configure ingestion jobs using the popular [Singer.io](https://singer.io) specification.
diff --git a/docs/docs-beta/docs/integrations/microsoft-teams.md b/docs/docs-beta/docs/integrations/libraries/microsoft-teams.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/microsoft-teams.md
rename to docs/docs-beta/docs/integrations/libraries/microsoft-teams.md
index bed3741fdcda1..183b9f3d54cab 100644
--- a/docs/docs-beta/docs/integrations/microsoft-teams.md
+++ b/docs/docs-beta/docs/integrations/libraries/microsoft-teams.md
@@ -14,10 +14,9 @@ categories:
- Alerting
enabledBy:
enables:
+tags: [dagster-supported, alerting]
---
-### About this integration
-
By configuring this resource, you can post messages to MS Teams from any Dagster op or asset.
### Installation
diff --git a/docs/docs-beta/docs/integrations/open-metadata.md b/docs/docs-beta/docs/integrations/libraries/open-metadata.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/open-metadata.md
rename to docs/docs-beta/docs/integrations/libraries/open-metadata.md
index 476eaaa032b48..65c6c5077fd86 100644
--- a/docs/docs-beta/docs/integrations/open-metadata.md
+++ b/docs/docs-beta/docs/integrations/libraries/open-metadata.md
@@ -15,9 +15,10 @@ categories:
- Metadata
enabledBy:
enables:
+tags: [community-supported, metadata]
---
-### About this integration
+
With this integration you can create an Open Metadata service to ingest metadata produced by the Dagster application. View the Ingestion Pipeline running from the Open Metadata Service Page.
diff --git a/docs/docs-beta/docs/integrations/openai.md b/docs/docs-beta/docs/integrations/libraries/openai.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/openai.md
rename to docs/docs-beta/docs/integrations/libraries/openai.md
index 16d3f84270a96..532e21a9018eb 100644
--- a/docs/docs-beta/docs/integrations/openai.md
+++ b/docs/docs-beta/docs/integrations/libraries/openai.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
The `dagster-openai` library allows you to easily interact with the OpenAI REST API using the OpenAI Python API to build AI steps into your Dagster pipelines. You can also log OpenAI API usage metadata in Dagster Insights, giving you detailed observability on API call credit consumption.
diff --git a/docs/docs-beta/docs/integrations/pagerduty.md b/docs/docs-beta/docs/integrations/libraries/pagerduty.md
similarity index 95%
rename from docs/docs-beta/docs/integrations/pagerduty.md
rename to docs/docs-beta/docs/integrations/libraries/pagerduty.md
index b2ac9dbcb111c..4c3b577b03fcc 100644
--- a/docs/docs-beta/docs/integrations/pagerduty.md
+++ b/docs/docs-beta/docs/integrations/libraries/pagerduty.md
@@ -14,9 +14,10 @@ categories:
- Alerting
enabledBy:
enables:
+tags: [dagster-supported, alerting]
---
-### About this integration
+
This library provides an integration between Dagster and PagerDuty to support creating alerts from your Dagster code.
diff --git a/docs/docs-beta/docs/integrations/pandas.md b/docs/docs-beta/docs/integrations/libraries/pandas.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/pandas.md
rename to docs/docs-beta/docs/integrations/libraries/pandas.md
index b051eb7a98023..6f7ea38111aac 100644
--- a/docs/docs-beta/docs/integrations/pandas.md
+++ b/docs/docs-beta/docs/integrations/libraries/pandas.md
@@ -14,9 +14,10 @@ categories:
- Metadata
enabledBy:
enables:
+tags: [dagster-supported, metadata]
---
-### About this integration
+
Perform data validation, emit summary statistics, and enable reliable DataFrame serialization/deserialization. The `dagster_pandas` library provides utilities for implementing validation on Pandas DataFrames. The Dagster type system generates documentation of your DataFrame constraints and makes it accessible in the Dagster UI.
diff --git a/docs/docs-beta/docs/integrations/pandera.md b/docs/docs-beta/docs/integrations/libraries/pandera.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/pandera.md
rename to docs/docs-beta/docs/integrations/libraries/pandera.md
index 6957b91cbe3f7..1549013311feb 100644
--- a/docs/docs-beta/docs/integrations/pandera.md
+++ b/docs/docs-beta/docs/integrations/libraries/pandera.md
@@ -14,9 +14,10 @@ categories:
- Metadata
enabledBy:
enables:
+tags: [dagster-supported, metadata]
---
-### About this integration
+
The `dagster-pandera` integration library provides an API for generating Dagster Types from [Pandera DataFrame schemas](https://pandera.readthedocs.io/en/stable/dataframe_schemas.html).
diff --git a/docs/docs-beta/docs/integrations/prometheus.md b/docs/docs-beta/docs/integrations/libraries/prometheus.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/prometheus.md
rename to docs/docs-beta/docs/integrations/libraries/prometheus.md
index a25da65aaaeaa..e90e0fe7fb10b 100644
--- a/docs/docs-beta/docs/integrations/prometheus.md
+++ b/docs/docs-beta/docs/integrations/libraries/prometheus.md
@@ -14,9 +14,10 @@ categories:
- Monitoring
enabledBy:
enables:
+tags: [dagster-supported, monitoring]
---
-### About this integration
+
This integration allows you to push metrics to the Prometheus gateway from within a Dagster pipeline.
diff --git a/docs/docs-beta/docs/integrations/sdf.md b/docs/docs-beta/docs/integrations/libraries/sdf.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/sdf.md
rename to docs/docs-beta/docs/integrations/libraries/sdf.md
index ce80dd510ce77..a155f37bfa613 100644
--- a/docs/docs-beta/docs/integrations/sdf.md
+++ b/docs/docs-beta/docs/integrations/libraries/sdf.md
@@ -15,9 +15,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [community-supported, etl]
---
-### About this integration
+
SDF can integrate seamlessly with your existing Dagster projects, providing the best-in-class transformation layer while enabling you to schedule, orchestrate, and monitor your dags in Dagster.
diff --git a/docs/docs-beta/docs/integrations/secoda.md b/docs/docs-beta/docs/integrations/libraries/secoda.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/secoda.md
rename to docs/docs-beta/docs/integrations/libraries/secoda.md
index 58c4c738eff32..3ec41ce364776 100644
--- a/docs/docs-beta/docs/integrations/secoda.md
+++ b/docs/docs-beta/docs/integrations/libraries/secoda.md
@@ -15,9 +15,10 @@ categories:
- Metadata
enabledBy:
enables:
+tags: [community-supported, metadata]
---
-### About this integration
+
Connect Dagster to Secoda and see metadata related to your Dagster assets, asset groups and jobs right in Secoda. Simplify your team's access, and remove the need to switch between tools.
diff --git a/docs/docs-beta/docs/integrations/shell.md b/docs/docs-beta/docs/integrations/libraries/shell.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/shell.md
rename to docs/docs-beta/docs/integrations/libraries/shell.md
index 0c5653a4f47c6..6b69731c6aeea 100644
--- a/docs/docs-beta/docs/integrations/shell.md
+++ b/docs/docs-beta/docs/integrations/libraries/shell.md
@@ -14,9 +14,10 @@ categories:
- Compute
enabledBy:
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
Dagster comes with a native `PipesSubprocessClient` resource that enables you to launch shell commands directly from Dagster assets and ops. This integration allows you to pass parameters to external shell scripts while Dagster receives real-time events, such as logs, asset checks, and asset materializations, from the initiated external execution. With minimal code changes required on the job side, this integration is both efficient and easy to implement.
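+
+A minimal sketch (the script path is a placeholder):
+
+```python
+import shutil
+
+import dagster as dg
+
+
+@dg.asset
+def shell_asset(
+    context: dg.AssetExecutionContext,
+    pipes_subprocess_client: dg.PipesSubprocessClient,
+) -> dg.MaterializeResult:
+    cmd = [shutil.which("bash"), "my_script.sh"]  # hypothetical script
+    # Streams logs and Pipes events from the subprocess back to Dagster.
+    return pipes_subprocess_client.run(
+        command=cmd, context=context
+    ).get_materialize_result()
+
+
+defs = dg.Definitions(
+    assets=[shell_asset],
+    resources={"pipes_subprocess_client": dg.PipesSubprocessClient()},
+)
+```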
diff --git a/docs/docs-beta/docs/integrations/slack.md b/docs/docs-beta/docs/integrations/libraries/slack.md
similarity index 95%
rename from docs/docs-beta/docs/integrations/slack.md
rename to docs/docs-beta/docs/integrations/libraries/slack.md
index ead87dc85b4b2..3c5d3b0253ec9 100644
--- a/docs/docs-beta/docs/integrations/slack.md
+++ b/docs/docs-beta/docs/integrations/libraries/slack.md
@@ -14,9 +14,10 @@ categories:
- Alerting
enabledBy:
enables:
+tags: [dagster-supported, alerting]
---
-### About this integration
+
This library provides an integration with Slack to support posting messages in your company's Slack workspace.
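+
+A minimal sketch (the channel name and env var are placeholders):
+
+```python
+from dagster_slack import SlackResource
+
+import dagster as dg
+
+
+@dg.asset
+def slack_message(slack: SlackResource):
+    # get_client() returns an authenticated slack_sdk WebClient.
+    slack.get_client().chat_postMessage(channel="#noise", text="Hello from Dagster!")
+
+
+defs = dg.Definitions(
+    assets=[slack_message],
+    resources={"slack": SlackResource(token=dg.EnvVar("SLACK_TOKEN"))},
+)
+```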
diff --git a/docs/docs-beta/docs/integrations/sling.md b/docs/docs-beta/docs/integrations/libraries/sling.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/sling.md
rename to docs/docs-beta/docs/integrations/libraries/sling.md
index 0c00aea6bb7e7..58cceb2bac261 100644
--- a/docs/docs-beta/docs/integrations/sling.md
+++ b/docs/docs-beta/docs/integrations/libraries/sling.md
@@ -14,9 +14,10 @@ categories:
- ETL
enabledBy:
enables:
+tags: [dagster-supported, etl]
---
-### About this integration
+
This integration allows you to use [Sling](https://slingdata.io/) to extract and load data from popular data sources to destinations with high performance and ease.
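+
+A minimal sketch of a Postgres-to-DuckDB replication (the connection names, stream, env var, and DuckDB path are placeholders):
+
+```python
+from dagster_sling import SlingConnectionResource, SlingResource, sling_assets
+
+import dagster as dg
+
+replication_config = {
+    "source": "MY_POSTGRES",
+    "target": "MY_DUCKDB",
+    "streams": {"public.accounts": None},
+}
+
+sling = SlingResource(
+    connections=[
+        SlingConnectionResource(
+            name="MY_POSTGRES",
+            type="postgres",
+            connection_string=dg.EnvVar("POSTGRES_URL"),
+        ),
+        SlingConnectionResource(name="MY_DUCKDB", type="duckdb", instance="local.duckdb"),
+    ]
+)
+
+
+@sling_assets(replication_config=replication_config)
+def my_sling_assets(context: dg.AssetExecutionContext, sling: SlingResource):
+    # Runs the replication and yields materialization events per stream.
+    yield from sling.replicate(context=context)
+
+
+defs = dg.Definitions(assets=[my_sling_assets], resources={"sling": sling})
+```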
diff --git a/docs/docs-beta/docs/integrations/snowflake.md b/docs/docs-beta/docs/integrations/libraries/snowflake.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/snowflake.md
rename to docs/docs-beta/docs/integrations/libraries/snowflake.md
index 1f8adf390a4be..3e3f45986ff76 100644
--- a/docs/docs-beta/docs/integrations/snowflake.md
+++ b/docs/docs-beta/docs/integrations/libraries/snowflake.md
@@ -14,9 +14,10 @@ categories:
- Storage
enabledBy:
enables:
+tags: [dagster-supported, storage]
---
-### About this integration
+
This library provides an integration with the Snowflake data warehouse. Connect to Snowflake as a resource, then use the integration-provided functions to construct an op to establish connections and execute Snowflake queries. Read and write natively to Snowflake from Dagster assets.
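+
+A minimal sketch of running a query through the resource (the env vars are placeholders):
+
+```python
+from dagster_snowflake import SnowflakeResource
+
+import dagster as dg
+
+
+@dg.asset
+def snowflake_query(snowflake: SnowflakeResource):
+    # get_connection() yields a snowflake-connector-python connection.
+    with snowflake.get_connection() as conn:
+        conn.cursor().execute("SELECT 1")
+
+
+defs = dg.Definitions(
+    assets=[snowflake_query],
+    resources={
+        "snowflake": SnowflakeResource(
+            account=dg.EnvVar("SNOWFLAKE_ACCOUNT"),
+            user=dg.EnvVar("SNOWFLAKE_USER"),
+            password=dg.EnvVar("SNOWFLAKE_PASSWORD"),
+        )
+    },
+)
+```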
diff --git a/docs/docs-beta/docs/integrations/spark.md b/docs/docs-beta/docs/integrations/libraries/spark.md
similarity index 96%
rename from docs/docs-beta/docs/integrations/spark.md
rename to docs/docs-beta/docs/integrations/libraries/spark.md
index a8e1b693e82b3..e14f63f73a10a 100644
--- a/docs/docs-beta/docs/integrations/spark.md
+++ b/docs/docs-beta/docs/integrations/libraries/spark.md
@@ -15,9 +15,10 @@ categories:
enabledBy:
- dagster-pyspark
enables:
+tags: [dagster-supported, compute]
---
-### About this integration
+
Spark jobs typically execute on infrastructure that's specialized for Spark. Spark applications are typically not containerized or executed on Kubernetes.
diff --git a/docs/docs-beta/docs/integrations/ssh-sftp.md b/docs/docs-beta/docs/integrations/libraries/ssh-sftp.md
similarity index 97%
rename from docs/docs-beta/docs/integrations/ssh-sftp.md
rename to docs/docs-beta/docs/integrations/libraries/ssh-sftp.md
index bd8d15ed34626..be2314fa07cbb 100644
--- a/docs/docs-beta/docs/integrations/ssh-sftp.md
+++ b/docs/docs-beta/docs/integrations/libraries/ssh-sftp.md
@@ -14,9 +14,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [dagster-supported]
---
-### About this integration
+
This integration provides a resource for SSH remote execution using [Paramiko](https://github.com/paramiko/paramiko). It allows you to establish secure connections to networked resources and execute commands remotely. The integration also provides an SFTP client for secure file transfers between the local and remote systems.
diff --git a/docs/docs-beta/docs/integrations/twilio.md b/docs/docs-beta/docs/integrations/libraries/twilio.md
similarity index 95%
rename from docs/docs-beta/docs/integrations/twilio.md
rename to docs/docs-beta/docs/integrations/libraries/twilio.md
index 2b2cb4d56f3af..4584fdb550781 100644
--- a/docs/docs-beta/docs/integrations/twilio.md
+++ b/docs/docs-beta/docs/integrations/libraries/twilio.md
@@ -14,9 +14,10 @@ categories:
- Alerting
enabledBy:
enables:
+tags: [dagster-supported, alerting]
---
-### About this integration
+
Use your Twilio `Account SID` and `Auth Token` to build Twilio tasks right into your Dagster pipeline.
diff --git a/docs/docs-beta/docs/integrations/wandb.md b/docs/docs-beta/docs/integrations/libraries/wandb.md
similarity index 98%
rename from docs/docs-beta/docs/integrations/wandb.md
rename to docs/docs-beta/docs/integrations/libraries/wandb.md
index 44a7ad762600c..363950d2fa10e 100644
--- a/docs/docs-beta/docs/integrations/wandb.md
+++ b/docs/docs-beta/docs/integrations/libraries/wandb.md
@@ -15,9 +15,10 @@ categories:
- Other
enabledBy:
enables:
+tags: [community-supported]
---
-### About this integration
+
Use Dagster and Weights & Biases (W&B) to orchestrate your MLOps pipelines and maintain ML assets. The integration with W&B makes it easy within Dagster to:
diff --git a/docs/docs-beta/docs/tags.yml b/docs/docs-beta/docs/tags.yml
new file mode 100644
index 0000000000000..c4ec13c2a2145
--- /dev/null
+++ b/docs/docs-beta/docs/tags.yml
@@ -0,0 +1,36 @@
+community-supported:
+ label: 'community-supported'
+ permalink: '/integrations/community-supported'
+ description: 'Community-supported integrations.'
+dagster-supported:
+ label: 'dagster-supported'
+ permalink: '/integrations/dagster-supported'
+ description: 'Dagster-supported integrations.'
+etl:
+ label: 'ETL'
+ permalink: '/integrations/etl'
+ description: 'ETL integrations.'
+storage:
+ label: 'storage'
+ permalink: '/integrations/storage'
+ description: 'Storage integrations.'
+compute:
+ label: 'compute'
+ permalink: '/integrations/compute'
+ description: 'Compute integrations.'
+bi:
+ label: 'BI'
+ permalink: '/integrations/bi'
+ description: 'BI integrations.'
+monitoring:
+ label: 'monitoring'
+ permalink: '/integrations/monitoring'
+ description: 'Monitoring integrations.'
+alerting:
+ label: 'alerting'
+ permalink: '/integrations/alerting'
+ description: 'Alerting integrations.'
+metadata:
+ label: 'metadata'
+ permalink: '/integrations/metadata'
+ description: 'Metadata integrations.'
diff --git a/docs/docs-beta/docusaurus.config.ts b/docs/docs-beta/docusaurus.config.ts
index 491dc86f6ba46..7a3f8dd9f75af 100644
--- a/docs/docs-beta/docusaurus.config.ts
+++ b/docs/docs-beta/docusaurus.config.ts
@@ -84,7 +84,7 @@ const config: Config = {
{
label: 'Integrations',
type: 'doc',
- docId: 'integrations/index',
+ docId: 'integrations/libraries/index',
position: 'left',
},
{
diff --git a/docs/docs-beta/sidebars.ts b/docs/docs-beta/sidebars.ts
index 129d82e4510bb..5bc6f64d74974 100644
--- a/docs/docs-beta/sidebars.ts
+++ b/docs/docs-beta/sidebars.ts
@@ -100,133 +100,24 @@ const sidebars: SidebarsConfig = {
integrations: [
{
type: 'category',
- label: 'Categories',
- collapsible: false,
- items: [
- {
- type: 'category',
- label: 'ETL',
- items: [
- 'integrations/airbyte',
- 'integrations/sdf',
- 'integrations/fivetran',
- 'integrations/dlt',
- 'integrations/census',
- 'integrations/dbt',
- 'integrations/dbt-cloud',
- 'integrations/sling',
- 'integrations/hightouch',
- 'integrations/meltano',
- ],
- },
- {
- type: 'category',
- label: 'Storage',
- items: [
- 'integrations/snowflake',
- 'integrations/gcp/bigquery',
- 'integrations/aws/athena',
- 'integrations/aws/s3',
- 'integrations/duckdb',
- 'integrations/deltalake',
- 'integrations/aws/redshift',
- 'integrations/gcp/gcs',
- 'integrations/azure-adls2',
- 'integrations/lakefs',
- ],
- },
- {
- type: 'category',
- label: 'Compute',
- items: [
- 'integrations/kubernetes',
- 'integrations/spark',
- 'integrations/aws/glue',
- 'integrations/jupyter',
- 'integrations/aws/emr',
- 'integrations/databricks',
- 'integrations/aws/lambda',
- 'integrations/docker',
- 'integrations/shell',
- 'integrations/gcp/dataproc',
- ],
- },
- {
- type: 'category',
- label: 'BI',
- items: ['integrations/looker'],
- },
- {
- type: 'category',
- label: 'Monitoring',
- items: ['integrations/prometheus', 'integrations/datadog', 'integrations/aws/cloudwatch'],
- },
- {
- type: 'category',
- label: 'Alerting',
- items: [
- 'integrations/slack',
- 'integrations/twilio',
- 'integrations/pagerduty',
- 'integrations/microsoft-teams',
- ],
- },
- {
- type: 'category',
- label: 'Metadata',
- items: [
- 'integrations/secoda',
- 'integrations/pandera',
- 'integrations/open-metadata',
- 'integrations/pandas',
- ],
- },
- {
- type: 'category',
- label: 'Other',
- items: [
- 'integrations/cube',
- 'integrations/aws/secretsmanager',
- 'integrations/openai',
- 'integrations/ssh-sftp',
- 'integrations/github',
- 'integrations/aws/ssm',
- 'integrations/aws/ecr',
- 'integrations/wandb',
- 'integrations/hashicorp',
- ],
- },
- ],
- },
- {
- type: 'category',
- label: 'Community Supported',
+ label: 'Guides',
+ collapsed: false,
items: [
- 'integrations/secoda',
- 'integrations/cube',
- 'integrations/sdf',
- 'integrations/open-metadata',
- 'integrations/census',
- 'integrations/deltalake',
- 'integrations/hightouch',
- 'integrations/wandb',
- 'integrations/meltano',
- 'integrations/hashicorp',
- 'integrations/lakefs',
- ],
+ 'integrations/guides/multi-asset-integration'
+ ]
},
{
type: 'category',
- label: 'All Integrations',
- collapsed: true,
- // link: {type: 'doc', id: 'integrations'},
+ label: 'Libraries',
+ collapsible: false,
+ link: {type: 'doc', id: 'integrations/libraries/index'},
items: [
{
type: 'autogenerated',
- dirName: 'integrations',
- },
- ],
- },
+ dirName: 'integrations/libraries'
+ }
+ ]
+ }
],
dagsterPlus: [
'dagster-plus/index',
diff --git a/docs/docs-beta/static/images/dagster-cloud/deployment/hybrid-architecture.png b/docs/docs-beta/static/images/dagster-cloud/deployment/hybrid-architecture.png
new file mode 100644
index 0000000000000..89ce1b90c0b59
Binary files /dev/null and b/docs/docs-beta/static/images/dagster-cloud/deployment/hybrid-architecture.png differ
diff --git a/docs/next/.versioned_content/_versions_with_static_links.json b/docs/next/.versioned_content/_versions_with_static_links.json
index 03a000fd36b18..5b12b5f2ea18d 100644
--- a/docs/next/.versioned_content/_versions_with_static_links.json
+++ b/docs/next/.versioned_content/_versions_with_static_links.json
@@ -606,5 +606,9 @@
{
"url": "https://release-1-9-4.dagster.dagster-docs.io/",
"version": "1.9.4"
+ },
+ {
+ "url": "https://release-1-9-5.dagster.dagster-docs.io/",
+ "version": "1.9.5"
}
]
\ No newline at end of file
diff --git a/docs/next/package.json b/docs/next/package.json
index 46e996b0c0a16..5a8f3fc40d108 100644
--- a/docs/next/package.json
+++ b/docs/next/package.json
@@ -40,7 +40,7 @@
"lodash": "^4.17.21",
"mdast-util-toc": "^5.1.0",
"new-github-issue-url": "^0.2.1",
- "next": "^14.2.10",
+ "next": "^14.2.15",
"next-mdx-remote": "^2.1.4",
"next-remote-watch": "^2.0.0",
"next-seo": "^4.17.0",
diff --git a/docs/next/public/images/concepts/metadata-tags/kinds/icons/tool-googledrive-color.svg b/docs/next/public/images/concepts/metadata-tags/kinds/icons/tool-googledrive-color.svg
new file mode 100644
index 0000000000000..2d94beff46945
--- /dev/null
+++ b/docs/next/public/images/concepts/metadata-tags/kinds/icons/tool-googledrive-color.svg
@@ -0,0 +1,3 @@
+
diff --git a/docs/next/public/objects.inv b/docs/next/public/objects.inv
index fd50d97032019..dc486e70947de 100644
Binary files a/docs/next/public/objects.inv and b/docs/next/public/objects.inv differ
diff --git a/docs/next/yarn.lock b/docs/next/yarn.lock
index c1c47a3337653..b030a96106a71 100644
--- a/docs/next/yarn.lock
+++ b/docs/next/yarn.lock
@@ -2252,10 +2252,10 @@ __metadata:
languageName: node
linkType: hard
-"@next/env@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/env@npm:14.2.12"
- checksum: 10/9e1f36da7d794a29db42ebc68e24cc7ab19ab2d1fd86d6cdf872fac0f56cbce97d6df9ff43f526ec083c505feea716b86668c7fcc410d809ad136bb656a45d03
+"@next/env@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/env@npm:14.2.20"
+ checksum: 10/3aaf2ba16344d7cede12a846859fddffa172e951f2dc28bb66f8b7c24cb2c207d2a49c84fea965ae964714aeb2269cff7a91723b57631765f78fd02b9465d1f2
languageName: node
linkType: hard
@@ -2278,65 +2278,65 @@ __metadata:
languageName: node
linkType: hard
-"@next/swc-darwin-arm64@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-darwin-arm64@npm:14.2.12"
+"@next/swc-darwin-arm64@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-darwin-arm64@npm:14.2.20"
conditions: os=darwin & cpu=arm64
languageName: node
linkType: hard
-"@next/swc-darwin-x64@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-darwin-x64@npm:14.2.12"
+"@next/swc-darwin-x64@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-darwin-x64@npm:14.2.20"
conditions: os=darwin & cpu=x64
languageName: node
linkType: hard
-"@next/swc-linux-arm64-gnu@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-linux-arm64-gnu@npm:14.2.12"
+"@next/swc-linux-arm64-gnu@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-linux-arm64-gnu@npm:14.2.20"
conditions: os=linux & cpu=arm64 & libc=glibc
languageName: node
linkType: hard
-"@next/swc-linux-arm64-musl@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-linux-arm64-musl@npm:14.2.12"
+"@next/swc-linux-arm64-musl@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-linux-arm64-musl@npm:14.2.20"
conditions: os=linux & cpu=arm64 & libc=musl
languageName: node
linkType: hard
-"@next/swc-linux-x64-gnu@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-linux-x64-gnu@npm:14.2.12"
+"@next/swc-linux-x64-gnu@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-linux-x64-gnu@npm:14.2.20"
conditions: os=linux & cpu=x64 & libc=glibc
languageName: node
linkType: hard
-"@next/swc-linux-x64-musl@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-linux-x64-musl@npm:14.2.12"
+"@next/swc-linux-x64-musl@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-linux-x64-musl@npm:14.2.20"
conditions: os=linux & cpu=x64 & libc=musl
languageName: node
linkType: hard
-"@next/swc-win32-arm64-msvc@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-win32-arm64-msvc@npm:14.2.12"
+"@next/swc-win32-arm64-msvc@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-win32-arm64-msvc@npm:14.2.20"
conditions: os=win32 & cpu=arm64
languageName: node
linkType: hard
-"@next/swc-win32-ia32-msvc@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-win32-ia32-msvc@npm:14.2.12"
+"@next/swc-win32-ia32-msvc@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-win32-ia32-msvc@npm:14.2.20"
conditions: os=win32 & cpu=ia32
languageName: node
linkType: hard
-"@next/swc-win32-x64-msvc@npm:14.2.12":
- version: 14.2.12
- resolution: "@next/swc-win32-x64-msvc@npm:14.2.12"
+"@next/swc-win32-x64-msvc@npm:14.2.20":
+ version: 14.2.20
+ resolution: "@next/swc-win32-x64-msvc@npm:14.2.20"
conditions: os=win32 & cpu=x64
languageName: node
linkType: hard
@@ -4346,7 +4346,7 @@ __metadata:
lodash: "npm:^4.17.21"
mdast-util-toc: "npm:^5.1.0"
new-github-issue-url: "npm:^0.2.1"
- next: "npm:^14.2.10"
+ next: "npm:^14.2.15"
next-mdx-remote: "npm:^2.1.4"
next-remote-watch: "npm:^2.0.0"
next-seo: "npm:^4.17.0"
@@ -8456,20 +8456,20 @@ __metadata:
languageName: node
linkType: hard
-"next@npm:^14.2.10":
- version: 14.2.12
- resolution: "next@npm:14.2.12"
+"next@npm:^14.2.15":
+ version: 14.2.20
+ resolution: "next@npm:14.2.20"
dependencies:
- "@next/env": "npm:14.2.12"
- "@next/swc-darwin-arm64": "npm:14.2.12"
- "@next/swc-darwin-x64": "npm:14.2.12"
- "@next/swc-linux-arm64-gnu": "npm:14.2.12"
- "@next/swc-linux-arm64-musl": "npm:14.2.12"
- "@next/swc-linux-x64-gnu": "npm:14.2.12"
- "@next/swc-linux-x64-musl": "npm:14.2.12"
- "@next/swc-win32-arm64-msvc": "npm:14.2.12"
- "@next/swc-win32-ia32-msvc": "npm:14.2.12"
- "@next/swc-win32-x64-msvc": "npm:14.2.12"
+ "@next/env": "npm:14.2.20"
+ "@next/swc-darwin-arm64": "npm:14.2.20"
+ "@next/swc-darwin-x64": "npm:14.2.20"
+ "@next/swc-linux-arm64-gnu": "npm:14.2.20"
+ "@next/swc-linux-arm64-musl": "npm:14.2.20"
+ "@next/swc-linux-x64-gnu": "npm:14.2.20"
+ "@next/swc-linux-x64-musl": "npm:14.2.20"
+ "@next/swc-win32-arm64-msvc": "npm:14.2.20"
+ "@next/swc-win32-ia32-msvc": "npm:14.2.20"
+ "@next/swc-win32-x64-msvc": "npm:14.2.20"
"@swc/helpers": "npm:0.5.5"
busboy: "npm:1.6.0"
caniuse-lite: "npm:^1.0.30001579"
@@ -8510,7 +8510,7 @@ __metadata:
optional: true
bin:
next: dist/bin/next
- checksum: 10/4dcae15547930cdaeb8a1d935dec3ab0c82a65347b0835988fd70fa5b108f1c301b75f98acf063c253858719e2969301fb2b0c30d6b2a46086ec19419430b119
+ checksum: 10/baddcaeffa82e321cda87ad727540fc8ad639af5439ccc69b349c2b9a4315244d55c4aeed391c7bcd79edd634d6410b9e4a718ca02cc9e910046960444bb0c64
languageName: node
linkType: hard
diff --git a/docs/sphinx/sections/api/apidocs/libraries/dagster-aws.rst b/docs/sphinx/sections/api/apidocs/libraries/dagster-aws.rst
index e8774ae92ed5d..15d161197def5 100644
--- a/docs/sphinx/sections/api/apidocs/libraries/dagster-aws.rst
+++ b/docs/sphinx/sections/api/apidocs/libraries/dagster-aws.rst
@@ -49,6 +49,9 @@ ECS
.. autoconfigurable:: dagster_aws.ecs.EcsRunLauncher
:annotation: RunLauncher
+.. autoconfigurable:: dagster_aws.ecs.ecs_executor
+ :annotation: ExecutorDefinition
+
Redshift
--------
diff --git a/examples/assets_modern_data_stack/setup.py b/examples/assets_modern_data_stack/setup.py
index 419a06850ae31..2b0aabbd2d2f1 100644
--- a/examples/assets_modern_data_stack/setup.py
+++ b/examples/assets_modern_data_stack/setup.py
@@ -23,11 +23,6 @@
"dagster-webserver",
"pytest",
],
- "test": [
- # cant build psycopg2 in buildkite
- # something about the 1.8.0 dependency setup to avoid psycopg2-binary on linux
- # seems to prevent that dependency from being used even if explicitly added
- "dbt-postgres<1.8.0"
- ],
+ "test": [],
},
)
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran.py
deleted file mode 100644
index 9446ff76d0ce6..0000000000000
--- a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-from dagster_fivetran import FivetranResource, load_assets_from_fivetran_instance
-
-import dagster as dg
-
-fivetran_assets = load_assets_from_fivetran_instance(
- # Connect to your Fivetran instance
- FivetranResource(
- api_key="some_key",
- api_secret=dg.EnvVar("FIVETRAN_SECRET"),
- )
-)
-
-
-defs = dg.Definitions(
- assets=[fivetran_assets],
-)
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/__init__.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_asset_defs.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_asset_defs.py
new file mode 100644
index 0000000000000..b195ed0c134c3
--- /dev/null
+++ b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_asset_defs.py
@@ -0,0 +1,29 @@
+from dagster_fivetran import FivetranWorkspace, fivetran_assets
+
+import dagster as dg
+
+fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+
+@fivetran_assets(
+ connector_id="fivetran_connector_id",
+ name="fivetran_connector_id",
+ group_name="fivetran_connector_id",
+ workspace=fivetran_workspace,
+)
+def fivetran_connector_assets(
+ context: dg.AssetExecutionContext, fivetran: FivetranWorkspace
+):
+ # Do something before the materialization...
+ yield from fivetran.sync_and_poll(context=context)
+ # Do something after the materialization...
+
+
+defs = dg.Definitions(
+ assets=[fivetran_connector_assets],
+ resources={"fivetran": fivetran_workspace},
+)
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_translator_asset_spec.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_translator_asset_spec.py
new file mode 100644
index 0000000000000..8debde35e6eba
--- /dev/null
+++ b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/customize_fivetran_translator_asset_spec.py
@@ -0,0 +1,33 @@
+from dagster_fivetran import (
+ DagsterFivetranTranslator,
+ FivetranConnectorTableProps,
+ FivetranWorkspace,
+ load_fivetran_asset_specs,
+)
+
+import dagster as dg
+
+fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+
+# A translator class lets us customize properties of the built
+# Fivetran assets, such as the owners or asset key
+class MyCustomFivetranTranslator(DagsterFivetranTranslator):
+ def get_asset_spec(self, props: FivetranConnectorTableProps) -> dg.AssetSpec:
+ # We create the default asset spec using super()
+ default_spec = super().get_asset_spec(props)
+ # We customize the metadata and asset key prefix for all assets
+ return default_spec.replace_attributes(
+ key=default_spec.key.with_prefix("prefix"),
+ ).merge_attributes(metadata={"custom": "metadata"})
+
+
+fivetran_specs = load_fivetran_asset_specs(
+ fivetran_workspace, dagster_fivetran_translator=MyCustomFivetranTranslator()
+)
+
+defs = dg.Definitions(assets=fivetran_specs, resources={"fivetran": fivetran_workspace})
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/multiple_fivetran_workspaces.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/multiple_fivetran_workspaces.py
new file mode 100644
index 0000000000000..d28203bf3bcdf
--- /dev/null
+++ b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/multiple_fivetran_workspaces.py
@@ -0,0 +1,26 @@
+from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
+
+import dagster as dg
+
+sales_fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_SALES_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_SALES_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_SALES_API_SECRET"),
+)
+marketing_fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_MARKETING_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_MARKETING_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_MARKETING_API_SECRET"),
+)
+
+sales_fivetran_specs = load_fivetran_asset_specs(sales_fivetran_workspace)
+marketing_fivetran_specs = load_fivetran_asset_specs(marketing_fivetran_workspace)
+
+# Merge the specs into a single set of definitions
+defs = dg.Definitions(
+ assets=[*sales_fivetran_specs, *marketing_fivetran_specs],
+ resources={
+ "marketing_fivetran": marketing_fivetran_workspace,
+ "sales_fivetran": sales_fivetran_workspace,
+ },
+)
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/representing_fivetran_assets.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/representing_fivetran_assets.py
new file mode 100644
index 0000000000000..7637330c175cd
--- /dev/null
+++ b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/representing_fivetran_assets.py
@@ -0,0 +1,12 @@
+from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
+
+import dagster as dg
+
+fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+fivetran_specs = load_fivetran_asset_specs(fivetran_workspace)
+defs = dg.Definitions(assets=fivetran_specs, resources={"fivetran": fivetran_workspace})
diff --git a/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/sync_and_materialize_fivetran_assets.py b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/sync_and_materialize_fivetran_assets.py
new file mode 100644
index 0000000000000..65a3490f59486
--- /dev/null
+++ b/examples/docs_beta_snippets/docs_beta_snippets/integrations/fivetran/sync_and_materialize_fivetran_assets.py
@@ -0,0 +1,16 @@
+from dagster_fivetran import FivetranWorkspace, build_fivetran_assets_definitions
+
+import dagster as dg
+
+fivetran_workspace = FivetranWorkspace(
+ account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
+ api_key=dg.EnvVar("FIVETRAN_API_KEY"),
+ api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
+)
+
+all_fivetran_assets = build_fivetran_assets_definitions(workspace=fivetran_workspace)
+
+defs = dg.Definitions(
+ assets=all_fivetran_assets,
+ resources={"fivetran": fivetran_workspace},
+)
diff --git a/examples/docs_beta_snippets/docs_beta_snippets_tests/test_integration_files_load.py b/examples/docs_beta_snippets/docs_beta_snippets_tests/test_integration_files_load.py
index faaf20053a482..89c5039563a98 100644
--- a/examples/docs_beta_snippets/docs_beta_snippets_tests/test_integration_files_load.py
+++ b/examples/docs_beta_snippets/docs_beta_snippets_tests/test_integration_files_load.py
@@ -19,7 +19,11 @@
f"{snippets_folder}/sdf.py",
f"{snippets_folder}/airbyte.py",
f"{snippets_folder}/dlt.py",
- f"{snippets_folder}/fivetran.py",
+ f"{snippets_folder}/fivetran/customize_fivetran_asset_defs.py",
+ f"{snippets_folder}/fivetran/customize_fivetran_translator_asset_spec.py",
+ f"{snippets_folder}/fivetran/multiple_fivetran_workspaces.py",
+ f"{snippets_folder}/fivetran/representing_fivetran_assets.py",
+ f"{snippets_folder}/fivetran/sync_and_materialize_fivetran_assets.py",
# FIXME: this breaks on py3.8 and seems related to the non-dagster dependencies
f"{snippets_folder}/pandera.py",
}
diff --git a/examples/docs_beta_snippets/tox.ini b/examples/docs_beta_snippets/tox.ini
index 44645d7b1f94e..f294484e22ad2 100644
--- a/examples/docs_beta_snippets/tox.ini
+++ b/examples/docs_beta_snippets/tox.ini
@@ -12,6 +12,11 @@ install_command = uv pip install {opts} {packages}
deps =
duckdb
plotly
+ ####
+    # dependencies of dagster-cloud that must be listed explicitly, since we install with --no-deps below to avoid reinstalling dagster packages
+ opentelemetry-api
+ opentelemetry-sdk
+ ####
-e ../../python_modules/dagster[test]
-e ../../python_modules/dagster-pipes
-e ../../python_modules/dagster-graphql
diff --git a/examples/docs_snippets/docs_snippets/guides/migrations/migrating_airflow_to_dagster.py b/examples/docs_snippets/docs_snippets/guides/migrations/migrating_airflow_to_dagster.py
index c546978f13096..e88e8c01e467a 100644
--- a/examples/docs_snippets/docs_snippets/guides/migrations/migrating_airflow_to_dagster.py
+++ b/examples/docs_snippets/docs_snippets/guides/migrations/migrating_airflow_to_dagster.py
@@ -1,5 +1,6 @@
# ruff: isort: skip_file
# ruff: noqa: T201,D415
+# type: ignore # problematic imports in example code
def scope_simple_airflow_task():
diff --git a/examples/docs_snippets/docs_snippets/integrations/airlift/operator_migration/kubernetes_pod_operator.py b/examples/docs_snippets/docs_snippets/integrations/airlift/operator_migration/kubernetes_pod_operator.py
index bde2a3e5b1a86..aeafe9c74b84f 100644
--- a/examples/docs_snippets/docs_snippets/integrations/airlift/operator_migration/kubernetes_pod_operator.py
+++ b/examples/docs_snippets/docs_snippets/integrations/airlift/operator_migration/kubernetes_pod_operator.py
@@ -1,3 +1,4 @@
+# type: ignore
from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator
k8s_hello_world = KubernetesPodOperator(
diff --git a/examples/experimental/external_assets/airflow_example.py b/examples/experimental/external_assets/airflow_example.py
index 9585d06d701f6..b326ea323bfbc 100644
--- a/examples/experimental/external_assets/airflow_example.py
+++ b/examples/experimental/external_assets/airflow_example.py
@@ -1,3 +1,4 @@
+# type: ignore
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from pendulum import datetime
diff --git a/examples/project_atproto_dashboard/.env.example b/examples/project_atproto_dashboard/.env.example
new file mode 100644
index 0000000000000..4ea1e239f6bb6
--- /dev/null
+++ b/examples/project_atproto_dashboard/.env.example
@@ -0,0 +1,17 @@
+AWS_ENDPOINT_URL=
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+AWS_BUCKET_NAME=
+AWS_ACCOUNT_ID=
+
+MOTHERDUCK_TOKEN=
+
+BSKY_LOGIN=
+BSKY_APP_PASSWORD=
+
+DBT_TARGET=
+
+AZURE_POWERBI_CLIENT_ID=
+AZURE_POWERBI_CLIENT_SECRET=
+AZURE_POWERBI_TENANT_ID=
+AZURE_POWERBI_WORKSPACE_ID=
diff --git a/examples/project_atproto_dashboard/.gitignore b/examples/project_atproto_dashboard/.gitignore
new file mode 100644
index 0000000000000..ace8bc76e6a41
--- /dev/null
+++ b/examples/project_atproto_dashboard/.gitignore
@@ -0,0 +1,5 @@
+tmp*/
+storage/
+schedules/
+history/
+atproto-session.txt
diff --git a/examples/project_atproto_dashboard/README.md b/examples/project_atproto_dashboard/README.md
new file mode 100644
index 0000000000000..4c15d93cc89cf
--- /dev/null
+++ b/examples/project_atproto_dashboard/README.md
@@ -0,0 +1,52 @@
+# project_atproto_dashboard
+
+An end-to-end demonstration of ingesting data from the ATProto API, modeling it with dbt, and presenting it with Power BI.
+
+![Architecture Diagram](./architecture-diagram.png)
+
+![Project asset lineage](./lineage.svg)
+
+## Features used
+
+1. Ingestion of data-related Bluesky posts
+ - Dynamic partitions
+ - Declarative automation
+ - Concurrency limits
+2. Modeling data using _dbt_
+3. Representing data in a dashboard
+
+## Getting started
+
+### Environment setup
+
+Ensure the following environment variables have been populated in your `.env` file. Start by copying the
+template.
+
+```
+cp .env.example .env
+```
+
+And then populate the fields.
+
+### Development
+
+Install the project dependencies:
+
+ pip install -e ".[dev]"
+
+Start Dagster:
+
+ DAGSTER_HOME=$(pwd) dagster dev
+
+### Unit testing
+
+Tests are in the `project_atproto_dashboard_tests` directory, and you can run them using `pytest`:
+
+ pytest project_atproto_dashboard_tests
+
+## Resources
+
+- https://docs.bsky.app/docs/tutorials/viewing-feeds
+- https://docs.bsky.app/docs/advanced-guides/rate-limits
+- https://atproto.blue/en/latest/atproto_client/auth.html#session-string
+- https://tenacity.readthedocs.io/en/latest/#waiting-before-retrying
diff --git a/examples/project_atproto_dashboard/architecture-diagram.png b/examples/project_atproto_dashboard/architecture-diagram.png
new file mode 100644
index 0000000000000..af16cc6c57430
Binary files /dev/null and b/examples/project_atproto_dashboard/architecture-diagram.png differ
diff --git a/examples/project_atproto_dashboard/dagster.yaml b/examples/project_atproto_dashboard/dagster.yaml
new file mode 100644
index 0000000000000..c9705420e83ca
--- /dev/null
+++ b/examples/project_atproto_dashboard/dagster.yaml
@@ -0,0 +1,6 @@
+run_coordinator:
+ module: dagster.core.run_coordinator
+ class: QueuedRunCoordinator
+
+concurrency:
+ default_op_concurrency_limit: 1
diff --git a/examples/project_atproto_dashboard/dbt_project/.gitignore b/examples/project_atproto_dashboard/dbt_project/.gitignore
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/project_atproto_dashboard/dbt_project/.sqlfluff b/examples/project_atproto_dashboard/dbt_project/.sqlfluff
new file mode 100644
index 0000000000000..6fffb098b0115
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff:rules:capitalisation.keywords]
+capitalisation_policy = upper
diff --git a/examples/project_atproto_dashboard/dbt_project/dbt_project.yml b/examples/project_atproto_dashboard/dbt_project/dbt_project.yml
new file mode 100644
index 0000000000000..5dc13e8c3997a
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/dbt_project.yml
@@ -0,0 +1,13 @@
+name: "dbt_project"
+version: "1.0.0"
+config-version: 2
+
+profile: "bluesky"
+
+target-path: "target"
+clean-targets:
+ - "target"
+ - "dbt_packages"
+
+models:
+ +materialized: table
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/activity_over_time.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/activity_over_time.sql
new file mode 100644
index 0000000000000..794065c8723e7
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/activity_over_time.sql
@@ -0,0 +1,14 @@
+WITH final AS (
+ SELECT
+ date_trunc('day', created_at) AS post_date,
+ count(DISTINCT post_text) AS unique_posts,
+ count(DISTINCT author_handle) AS active_authors,
+ sum(likes) AS total_likes,
+ sum(replies) AS total_comments,
+ sum(quotes) AS total_quotes
+ FROM {{ ref("latest_feed") }}
+ GROUP BY date_trunc('day', created_at)
+ ORDER BY date_trunc('day', created_at) DESC
+)
+
+SELECT * FROM final
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/all_profiles.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/all_profiles.sql
new file mode 100644
index 0000000000000..5f4e21734bff1
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/all_profiles.sql
@@ -0,0 +1,105 @@
+WITH max_profile_data AS (
+ SELECT
+ json_extract_string(json, '$.subject.did') AS profile_did,
+ max(
+ strptime(
+ regexp_extract(
+ filename,
+ 'dagster-demo/atproto_starter_pack_snapshot/(\d{4}-\d{2}-\d{2}/\d{2}/\d{2})',
+ 1
+ ),
+ '%Y-%m-%d/%H/%M'
+ )
+ ) AS max_extracted_timestamp
+ FROM {{ ref("stg_profiles") }}
+ GROUP BY
+ json_extract_string(json, '$.subject.did')
+),
+
+profiles AS (
+ SELECT
+ json_extract_string(json, '$.subject.handle') AS handle_subject,
+ json_extract_string(json, '$.subject.did') AS profile_did,
+ json_extract_string(json, '$.subject.avatar') AS profile_avatar,
+ json_extract_string(json, '$.subject.display_name')
+ AS profile_display_name,
+ json_extract_string(json, '$.subject.created_at')
+ AS profile_created_date,
+ json_extract_string(json, '$.subject.description')
+ AS profile_description
+ FROM {{ ref("stg_profiles") }} stg_prof
+ JOIN max_profile_data
+ ON
+ json_extract_string(stg_prof.json, '$.subject.did')
+ = max_profile_data.profile_did
+ AND strptime(
+ regexp_extract(
+ stg_prof.filename,
+ 'dagster-demo/atproto_starter_pack_snapshot/(\d{4}-\d{2}-\d{2}/\d{2}/\d{2})',
+ 1
+ ),
+ '%Y-%m-%d/%H/%M'
+ )
+ = max_profile_data.max_extracted_timestamp
+),
+
+user_aggregates AS (
+ SELECT
+ replace(author_handle, '"', '') AS author_handle,
+ count(*) AS num_posts,
+ avg(cast(lf.likes AS int)) AS average_likes,
+ sum(cast(lf.likes AS int)) AS total_likes,
+ sum(cast(lf.replies AS int)) AS total_replies,
+ sum(cast(lf.likes AS int)) / count(*) AS total_likes_by_num_of_posts,
+ round(
+ count(*)
+ / count(DISTINCT date_trunc('day', cast(created_at AS timestamp))),
+ 2
+ ) AS avg_posts_per_day,
+ ntile(100)
+ OVER (
+ ORDER BY sum(cast(lf.likes AS int))
+ )
+ AS likes_percentile,
+ ntile(100)
+ OVER (
+ ORDER BY sum(cast(lf.replies AS int))
+ )
+ AS replies_percentile,
+ ntile(100) OVER (
+ ORDER BY count(*)
+ ) AS posts_percentile,
+ (ntile(100) OVER (
+ ORDER BY sum(cast(lf.likes AS int))) + ntile(100) OVER (
+ ORDER BY sum(cast(lf.replies AS int))) + ntile(100) OVER (
+ ORDER BY count(*)
+ ))
+ / 3.0 AS avg_score
+ FROM {{ ref("latest_feed") }} lf
+ GROUP BY replace(author_handle, '"', '')
+),
+
+final AS (
+ SELECT DISTINCT
+ profiles.handle_subject AS profile_handle,
+ profiles.profile_did,
+ profiles.profile_display_name,
+ profiles.profile_avatar,
+ profiles.profile_created_date,
+ profiles.profile_description,
+ user_aggregates.num_posts,
+ user_aggregates.average_likes,
+ user_aggregates.total_likes,
+ user_aggregates.total_replies,
+ user_aggregates.total_likes_by_num_of_posts,
+ user_aggregates.avg_posts_per_day,
+ user_aggregates.likes_percentile,
+ user_aggregates.replies_percentile,
+ user_aggregates.posts_percentile,
+ user_aggregates.avg_score
+ FROM profiles
+ LEFT JOIN user_aggregates
+ ON user_aggregates.author_handle = profiles.handle_subject
+)
+
+SELECT * FROM final
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/calendar.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/calendar.sql
new file mode 100644
index 0000000000000..91f1ae0ea62e0
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/calendar.sql
@@ -0,0 +1,45 @@
+WITH date_spine AS (
+ SELECT CAST(range AS DATE) AS date_key
+ FROM RANGE(
+ (SELECT MIN(created_at) FROM {{ ref("latest_feed") }}),
+ CURRENT_DATE(),
+ INTERVAL 1 DAY
+ )
+)
+
+SELECT
+ date_key AS date_key,
+ DAYOFYEAR(date_key) AS day_of_year,
+ WEEKOFYEAR(date_key) AS week_of_year,
+ DAYOFWEEK(date_key) AS day_of_week,
+ ISODOW(date_key) AS iso_day_of_week,
+ DAYNAME(date_key) AS day_name,
+ DATE_TRUNC('week', date_key) AS first_day_of_week,
+ DATE_TRUNC('week', date_key) + 6 AS last_day_of_week,
+ YEAR(date_key) || RIGHT('0' || MONTH(date_key), 2) AS month_key,
+ MONTH(date_key) AS month_of_year,
+ DAYOFMONTH(date_key) AS day_of_month,
+ LEFT(MONTHNAME(date_key), 3) AS month_name_short,
+ MONTHNAME(date_key) AS month_name,
+ DATE_TRUNC('month', date_key) AS first_day_of_month,
+ LAST_DAY(date_key) AS last_day_of_month,
+ CAST(YEAR(date_key) || QUARTER(date_key) AS INT) AS quarter_key,
+ QUARTER(date_key) AS quarter_of_year,
+ CAST(date_key - DATE_TRUNC('Quarter', date_key) + 1 AS INT)
+ AS day_of_quarter,
+ ('Q' || QUARTER(date_key)) AS quarter_desc_short,
+ ('Quarter ' || QUARTER(date_key)) AS quarter_desc,
+ DATE_TRUNC('quarter', date_key) AS first_day_of_quarter,
+ LAST_DAY(DATE_TRUNC('quarter', date_key) + INTERVAL 2 MONTH)
+ AS last_day_of_quarter,
+ CAST(YEAR(date_key) AS INT) AS year_key,
+ DATE_TRUNC('Year', date_key) AS first_day_of_year,
+ DATE_TRUNC('Year', date_key) - 1 + INTERVAL 1 YEAR AS last_day_of_year,
+ ROW_NUMBER()
+ OVER (
+ PARTITION BY YEAR(date_key), MONTH(date_key), DAYOFWEEK(date_key)
+ ORDER BY date_key
+ )
+ AS ordinal_weekday_of_month
+FROM date_spine
+WHERE CAST(YEAR(date_key) AS INT) >= 2020
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/latest_feed.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/latest_feed.sql
new file mode 100644
index 0000000000000..09f0e23ab1855
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/latest_feed.sql
@@ -0,0 +1,57 @@
+WITH max_update AS (
+ SELECT
+ max(
+ strptime(
+ regexp_extract(
+ filename,
+ 'dagster-demo/atproto_actor_feed_snapshot/(\d{4}-\d{2}-\d{2}/\d{2}/\d{2})',
+ 1
+ ),
+ '%Y-%m-%d/%H/%M'
+ )
+ ) AS max_extracted_timestamp,
+ regexp_extract(filename, 'did:(.*?)\.json') AS profile_id
+ FROM {{ ref("stg_feed_snapshots") }}
+ GROUP BY
+ regexp_extract(filename, 'did:(.*?)\.json')
+),
+
+final AS (
+ SELECT
+ json_extract_string(sfs.json, '$.post.author.handle') AS author_handle,
+ json_extract_string(sfs.json, '$.post.author.did') AS author_id,
+ cast(sfs.json.post.like_count AS int) AS likes,
+ cast(sfs.json.post.quote_count AS int) AS quotes,
+ cast(sfs.json.post.reply_count AS int) AS replies,
+ json_extract_string(sfs.json, '$.post.record.text') AS post_text,
+ sfs.json.post.record.embed,
+ json_extract_string(
+ sfs.json, '$.post.record.embed.external.description'
+ ) AS external_embed_description,
+ json_extract_string(sfs.json, '$.post.record.embed.external.uri')
+ AS external_embed_link,
+ sfs.json.post.record.embed.external.thumb AS external_embed_thumbnail,
+ cast(sfs.json.post.record.created_at AS timestamp) AS created_at,
+ CASE
+ WHEN json_extract_string(sfs.json.post.record.embed, '$.images[0].image.ref.link') IS NULL THEN NULL
+ ELSE concat('https://cdn.bsky.app/img/feed_thumbnail/plain/', json_extract_string(sfs.json, '$.post.author.did') ,'/' ,json_extract_string(sfs.json.post.record.embed, '$.images[0].image.ref.link'), '@jpeg')
+ END AS image_url,
+ max_update.max_extracted_timestamp,
+ max_update.profile_id
+ FROM {{ ref("stg_feed_snapshots") }} sfs
+ JOIN max_update
+ ON
+ max_update.profile_id
+ = regexp_extract(sfs.filename, 'did:(.*?)\.json')
+ AND max_update.max_extracted_timestamp
+ = strptime(
+ regexp_extract(
+ sfs.filename,
+ 'dagster-demo/atproto_actor_feed_snapshot/(\d{4}-\d{2}-\d{2}/\d{2}/\d{2})',
+ 1
+ ),
+ '%Y-%m-%d/%H/%M'
+ )
+)
+
+SELECT * FROM final
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/schema.yml b/examples/project_atproto_dashboard/dbt_project/models/analysis/schema.yml
new file mode 100644
index 0000000000000..404d89541a5db
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/schema.yml
@@ -0,0 +1,18 @@
+version: 2
+
+models:
+ - name: all_profiles
+    description: "table of all profiles that posts are collected from, along with high-level statistics"
+ columns:
+ - name: profile_handle
+ data_tests:
+ - unique
+ - not_null
+ - name: latest_feed
+ description: "the latest feed of posts"
+ - name: activity_over_time
+    description: "daily activity of posts over time"
+ - name: top_daily_posts
+ description: "top posts ranked for a given day"
+ - name: top_external_links
+    description: "top external content shared in the community, grouped by type"
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/top_daily_posts.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/top_daily_posts.sql
new file mode 100644
index 0000000000000..a53c8435f60c6
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/top_daily_posts.sql
@@ -0,0 +1,46 @@
+WITH distinct_posts AS (
+ SELECT DISTINCT ON (author_handle, post_text, date_trunc('day', created_at))
+ author_handle,
+ post_text,
+ likes,
+ quotes,
+ replies,
+ image_url,
+ external_embed_link,
+ external_embed_thumbnail,
+ external_embed_description,
+ created_at
+ FROM {{ ref("latest_feed") }}
+),
+
+scored_posts AS (
+ SELECT
+ *,
+ (likes * 0.2) + (quotes * 0.4) + (replies * 0.4) AS engagement_score,
+ date_trunc('day', created_at) AS post_date,
+ row_number() OVER (
+ PARTITION BY date_trunc('day', created_at)
+ ORDER BY (likes * 0.2) + (quotes * 0.4) + (replies * 0.4) DESC
+ ) AS daily_rank
+ FROM distinct_posts
+),
+
+final AS (
+ SELECT
+ post_date,
+ author_handle,
+ post_text,
+ likes,
+ quotes,
+ replies,
+ image_url,
+ external_embed_link,
+ external_embed_thumbnail,
+ external_embed_description,
+ round(engagement_score, 2) AS engagement_score,
+ daily_rank
+ FROM scored_posts
+ WHERE daily_rank <= 10
+)
+
+SELECT * FROM final
diff --git a/examples/project_atproto_dashboard/dbt_project/models/analysis/top_external_links.sql b/examples/project_atproto_dashboard/dbt_project/models/analysis/top_external_links.sql
new file mode 100644
index 0000000000000..5e207b0d664be
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/analysis/top_external_links.sql
@@ -0,0 +1,73 @@
+WITH distinct_posts AS (
+ SELECT DISTINCT ON (author_handle, post_text, date_trunc('day', created_at))
+ author_handle,
+ post_text,
+ likes,
+ quotes,
+ replies,
+ created_at,
+ image_url,
+ embed,
+ external_embed_link,
+ external_embed_thumbnail,
+ external_embed_description,
+ CASE
+ WHEN external_embed_link LIKE '%youtu%' THEN 'YouTube'
+ WHEN external_embed_link LIKE '%docs%' THEN 'Docs'
+ WHEN external_embed_link LIKE '%github%' THEN 'GitHub'
+ WHEN external_embed_link LIKE '%substack%' THEN 'SubStack'
+ WHEN external_embed_link LIKE '%twitch%' THEN 'Twitch'
+ WHEN external_embed_link LIKE '%msnbc%' THEN 'News'
+ WHEN external_embed_link LIKE '%theguardian%' THEN 'News'
+ WHEN external_embed_link LIKE '%foreignpolicy%' THEN 'News'
+ WHEN external_embed_link LIKE '%nytimes%' THEN 'News'
+ WHEN external_embed_link LIKE '%wsj%' THEN 'News'
+ WHEN external_embed_link LIKE '%bloomberg%' THEN 'News'
+ WHEN external_embed_link LIKE '%theverge%' THEN 'News'
+ WHEN external_embed_link LIKE '%cnbc%' THEN 'News'
+ WHEN external_embed_link LIKE '%.ft.%' THEN 'News'
+ WHEN external_embed_link LIKE '%washingtonpost%' THEN 'News'
+ WHEN external_embed_link LIKE '%newrepublic%' THEN 'News'
+ WHEN external_embed_link LIKE '%huffpost%' THEN 'News'
+ WHEN external_embed_link LIKE '%wired%' THEN 'News'
+ WHEN external_embed_link LIKE '%medium%' THEN 'Medium'
+ WHEN external_embed_link LIKE '%reddit%' THEN 'Reddit'
+ WHEN external_embed_link LIKE '%/blog/%' THEN 'Blog'
+ ELSE 'Other'
+ END AS external_link_type
+ FROM {{ ref("latest_feed") }}
+ WHERE external_embed_link IS NOT null
+),
+
+scored_posts AS (
+ SELECT
+ *,
+ (likes * 0.2) + (quotes * 0.4) + (replies * 0.4) AS engagement_score,
+ date_trunc('day', created_at) AS post_date,
+ row_number() OVER (
+ PARTITION BY date_trunc('day', created_at), external_link_type
+ ORDER BY (likes * 0.2) + (quotes * 0.4) + (replies * 0.4) DESC
+ ) AS daily_rank
+ FROM distinct_posts
+),
+
+final AS (
+ SELECT
+ post_date,
+ author_handle,
+ post_text,
+ likes,
+ quotes,
+ replies,
+ round(engagement_score, 2) AS engagement_score,
+ daily_rank,
+ embed,
+ external_embed_link,
+ external_embed_thumbnail,
+ external_embed_description,
+ external_link_type
+ FROM scored_posts
+ WHERE daily_rank <= 10
+)
+
+SELECT * FROM final
diff --git a/examples/project_atproto_dashboard/dbt_project/models/sources.yml b/examples/project_atproto_dashboard/dbt_project/models/sources.yml
new file mode 100644
index 0000000000000..8b9e72a31b2bc
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/sources.yml
@@ -0,0 +1,14 @@
+version: 2
+
+sources:
+ - name: r2_bucket
+ tables:
+ - name: actor_feed_snapshot
+ description: "external r2 bucket with json files of actor feeds"
+ meta:
+ external_location: "read_ndjson_objects('r2://dagster-demo/atproto_actor_feed_snapshot/**/*.json', filename=true)"
+ - name: starter_pack_snapshot
+      description: "external r2 bucket with json files of starter pack snapshots"
+ meta:
+ external_location: "read_ndjson_objects('r2://dagster-demo/atproto_starter_pack_snapshot/**/*.json', filename=true)"
+
diff --git a/examples/project_atproto_dashboard/dbt_project/models/staging/schema.yml b/examples/project_atproto_dashboard/dbt_project/models/staging/schema.yml
new file mode 100644
index 0000000000000..61f4b3d774b18
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/staging/schema.yml
@@ -0,0 +1,7 @@
+version: 2
+
+models:
+ - name: stg_profiles
+    description: "raw profile data from r2 bucket"
+ - name: stg_feed_snapshots
+ description: "raw posts data from r2 bucket"
\ No newline at end of file
diff --git a/examples/project_atproto_dashboard/dbt_project/models/staging/stg_feed_snapshots.sql b/examples/project_atproto_dashboard/dbt_project/models/staging/stg_feed_snapshots.sql
new file mode 100644
index 0000000000000..92674ee054769
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/staging/stg_feed_snapshots.sql
@@ -0,0 +1,5 @@
+WITH raw AS (
+ SELECT * FROM {{ source('r2_bucket', 'actor_feed_snapshot') }}
+)
+
+SELECT * FROM raw
diff --git a/examples/project_atproto_dashboard/dbt_project/models/staging/stg_profiles.sql b/examples/project_atproto_dashboard/dbt_project/models/staging/stg_profiles.sql
new file mode 100644
index 0000000000000..7e4eeba113e2c
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/models/staging/stg_profiles.sql
@@ -0,0 +1,5 @@
+WITH raw AS (
+ SELECT * FROM {{ source('r2_bucket', 'starter_pack_snapshot') }}
+)
+
+SELECT * FROM raw
diff --git a/examples/project_atproto_dashboard/dbt_project/profiles.yml b/examples/project_atproto_dashboard/dbt_project/profiles.yml
new file mode 100644
index 0000000000000..462a3a6a9e31b
--- /dev/null
+++ b/examples/project_atproto_dashboard/dbt_project/profiles.yml
@@ -0,0 +1,27 @@
+bluesky:
+ target: prod
+ outputs:
+ dev:
+ type: duckdb
+ schema: bluesky_dev
+ path: "local.duckdb"
+ threads: 16
+ extensions:
+ - httpfs
+ settings:
+ s3_region: "auto"
+ s3_access_key_id: "{{ env_var('AWS_ACCESS_KEY_ID') }}"
+ s3_secret_access_key: "{{ env_var('AWS_SECRET_ACCESS_KEY') }}"
+ s3_endpoint: "{{ env_var('AWS_ENDPOINT_URL') | replace('https://', '') }}"
+ prod:
+ type: duckdb
+ schema: bluesky
+ path: "md:prod_bluesky?MOTHERDUCK_TOKEN={{ env_var('MOTHERDUCK_TOKEN') }}"
+ threads: 16
+ extensions:
+ - httpfs
+ settings:
+ s3_region: "auto"
+ s3_access_key_id: "{{ env_var('AWS_ACCESS_KEY_ID') }}"
+ s3_secret_access_key: "{{ env_var('AWS_SECRET_ACCESS_KEY') }}"
+ s3_endpoint: "{{ env_var('AWS_ENDPOINT_URL') | replace('https://', '') }}"
diff --git a/examples/project_atproto_dashboard/lineage.svg b/examples/project_atproto_dashboard/lineage.svg
new file mode 100644
index 0000000000000..578247eaff975
--- /dev/null
+++ b/examples/project_atproto_dashboard/lineage.svg
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard/__init__.py
new file mode 100644
index 0000000000000..8b137891791fe
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/__init__.py
@@ -0,0 +1 @@
+
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/dashboard/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard/dashboard/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/dashboard/definitions.py b/examples/project_atproto_dashboard/project_atproto_dashboard/dashboard/definitions.py
new file mode 100644
index 0000000000000..dba89ead146d3
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/dashboard/definitions.py
@@ -0,0 +1,49 @@
+import dagster as dg
+from dagster_powerbi import (
+ DagsterPowerBITranslator,
+ PowerBIServicePrincipal,
+ PowerBIWorkspace,
+ load_powerbi_asset_specs,
+)
+from dagster_powerbi.translator import PowerBIContentData
+
+power_bi_workspace = PowerBIWorkspace(
+ credentials=PowerBIServicePrincipal(
+ client_id=dg.EnvVar("AZURE_POWERBI_CLIENT_ID"),
+ client_secret=dg.EnvVar("AZURE_POWERBI_CLIENT_SECRET"),
+ tenant_id=dg.EnvVar("AZURE_POWERBI_TENANT_ID"),
+ ),
+ workspace_id=dg.EnvVar("AZURE_POWERBI_WORKSPACE_ID"),
+)
+
+
+class CustomDagsterPowerBITranslator(DagsterPowerBITranslator):
+ def get_report_spec(self, data: PowerBIContentData) -> dg.AssetSpec:
+ return (
+ super()
+ .get_report_spec(data)
+ .replace_attributes(
+ group_name="reporting",
+ )
+ )
+
+ def get_semantic_model_spec(self, data: PowerBIContentData) -> dg.AssetSpec:
+        upstream_table_deps = [
+ dg.AssetKey(table.get("name")) for table in data.properties.get("tables", [])
+ ]
+ return (
+ super()
+ .get_semantic_model_spec(data)
+ .replace_attributes(
+ group_name="reporting",
+                deps=upstream_table_deps,
+ )
+ )
+
+
+power_bi_specs = load_powerbi_asset_specs(
+ power_bi_workspace,
+ dagster_powerbi_translator=CustomDagsterPowerBITranslator,
+)
+
+defs = dg.Definitions(assets=[*power_bi_specs], resources={"power_bi": power_bi_workspace})
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/definitions.py b/examples/project_atproto_dashboard/project_atproto_dashboard/definitions.py
new file mode 100644
index 0000000000000..f2a2b5ebd5f2e
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/definitions.py
@@ -0,0 +1,9 @@
+import dagster as dg
+
+import project_atproto_dashboard.dashboard.definitions as dashboard_definitions
+import project_atproto_dashboard.ingestion.definitions as ingestion_definitions
+import project_atproto_dashboard.modeling.definitions as modeling_definitions
+
+defs = dg.Definitions.merge(
+ ingestion_definitions.defs, modeling_definitions.defs, dashboard_definitions.defs
+)
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/definitions.py b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/definitions.py
new file mode 100644
index 0000000000000..41b2f2e969c6b
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/definitions.py
@@ -0,0 +1,138 @@
+import os
+from datetime import datetime
+
+import dagster as dg
+from dagster_aws.s3 import S3Resource
+
+from project_atproto_dashboard.ingestion.resources import ATProtoResource
+from project_atproto_dashboard.ingestion.utils.atproto import (
+ get_all_feed_items,
+ get_all_starter_pack_members,
+)
+
+AWS_BUCKET_NAME = os.environ.get("AWS_BUCKET_NAME", "dagster-demo")
+
+
+atproto_did_dynamic_partition = dg.DynamicPartitionsDefinition(name="atproto_did_dynamic_partition")
+
+
+@dg.asset(
+ partitions_def=dg.StaticPartitionsDefinition(
+ partition_keys=[
+ "at://did:plc:lc5jzrr425fyah724df3z5ik/app.bsky.graph.starterpack/3l7cddlz5ja24", # https://bsky.app/starter-pack/christiannolan.bsky.social/3l7cddlz5ja24
+ ]
+ ),
+ automation_condition=dg.AutomationCondition.on_cron("0 0 * * *"), # Midnight
+ kinds={"python"},
+ group_name="ingestion",
+)
+def starter_pack_snapshot(
+ context: dg.AssetExecutionContext,
+ atproto_resource: ATProtoResource,
+ s3_resource: S3Resource,
+) -> dg.MaterializeResult:
+ """Snapshot of members in a Bluesky starter pack partitioned by starter pack ID and written to S3 storage.
+
+ Args:
+        context (AssetExecutionContext): Dagster context
+        atproto_resource (ATProtoResource): Resource for interfacing with the AT Protocol
+        s3_resource (S3Resource): Resource for uploading files to S3 storage
+
+ """
+ atproto_client = atproto_resource.get_client()
+
+ starter_pack_uri = context.partition_key
+
+ list_items = get_all_starter_pack_members(atproto_client, starter_pack_uri)
+
+ _bytes = os.linesep.join([member.model_dump_json() for member in list_items]).encode("utf-8")
+
+ datetime_now = datetime.now()
+ object_key = "/".join(
+ (
+ "atproto_starter_pack_snapshot",
+ datetime_now.strftime("%Y-%m-%d"),
+ datetime_now.strftime("%H"),
+ datetime_now.strftime("%M"),
+ f"{starter_pack_uri}.json",
+ )
+ )
+
+ s3_resource.get_client().put_object(Body=_bytes, Bucket=AWS_BUCKET_NAME, Key=object_key)
+
+ context.instance.add_dynamic_partitions(
+ partitions_def_name="atproto_did_dynamic_partition",
+ partition_keys=[list_item_view.subject.did for list_item_view in list_items],
+ )
+
+ return dg.MaterializeResult(
+ metadata={
+ "len_members": len(list_items),
+ "s3_object_key": object_key,
+ }
+ )
+
+
+@dg.asset(
+ partitions_def=atproto_did_dynamic_partition,
+ deps=[dg.AssetDep(starter_pack_snapshot, partition_mapping=dg.AllPartitionMapping())],
+ automation_condition=dg.AutomationCondition.eager(),
+ kinds={"python"},
+ group_name="ingestion",
+ op_tags={"dagster/concurrency_key": "ingestion"},
+)
+def actor_feed_snapshot(
+ context: dg.AssetExecutionContext,
+ atproto_resource: ATProtoResource,
+ s3_resource: S3Resource,
+) -> dg.MaterializeResult:
+ """Snapshot of full user feed written to S3 storage."""
+ client = atproto_resource.get_client()
+ actor_did = context.partition_key
+
+ # NOTE: we may need to yield chunks to be more memory efficient
+ items = get_all_feed_items(client, actor_did)
+
+ datetime_now = datetime.now()
+
+ object_key = "/".join(
+ (
+ "atproto_actor_feed_snapshot",
+ datetime_now.strftime("%Y-%m-%d"),
+ datetime_now.strftime("%H"),
+ datetime_now.strftime("%M"),
+ f"{actor_did}.json",
+ )
+ )
+
+ _bytes = os.linesep.join([item.model_dump_json() for item in items]).encode("utf-8")
+
+ s3_resource.get_client().put_object(Body=_bytes, Bucket=AWS_BUCKET_NAME, Key=object_key)
+
+ return dg.MaterializeResult(
+ metadata={
+ "len_feed_items": len(items),
+ "s3_object_key": object_key,
+ }
+ )
+
+
+atproto_resource = ATProtoResource(
+ login=dg.EnvVar("BSKY_LOGIN"), password=dg.EnvVar("BSKY_APP_PASSWORD")
+)
+
+s3_resource = S3Resource(
+ endpoint_url=dg.EnvVar("AWS_ENDPOINT_URL"),
+ aws_access_key_id=dg.EnvVar("AWS_ACCESS_KEY_ID"),
+ aws_secret_access_key=dg.EnvVar("AWS_SECRET_ACCESS_KEY"),
+ region_name="auto",
+)
+
+
+defs = dg.Definitions(
+ assets=[starter_pack_snapshot, actor_feed_snapshot],
+ resources={
+ "atproto_resource": atproto_resource,
+ "s3_resource": s3_resource,
+ },
+)
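
A local sketch of the partition flow above (illustrative, not part of this diff; it assumes valid Bluesky and S3 credentials in the environment): materializing the single static starter-pack partition registers one dynamic partition per member DID, and `actor_feed_snapshot` then fans out over those DIDs.

import dagster as dg

from project_atproto_dashboard.ingestion.definitions import (
    atproto_resource,
    s3_resource,
    starter_pack_snapshot,
)

instance = dg.DagsterInstance.ephemeral()

# Materializing the starter-pack partition calls add_dynamic_partitions(...)
# with every member DID it finds.
dg.materialize(
    [starter_pack_snapshot],
    instance=instance,
    partition_key="at://did:plc:lc5jzrr425fyah724df3z5ik/app.bsky.graph.starterpack/3l7cddlz5ja24",
    resources={"atproto_resource": atproto_resource, "s3_resource": s3_resource},
)

# Each registered DID is now a partition key of actor_feed_snapshot.
print(instance.get_dynamic_partitions("atproto_did_dynamic_partition"))
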
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/resources.py b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/resources.py
new file mode 100644
index 0000000000000..38163e85896df
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/resources.py
@@ -0,0 +1,29 @@
+import os
+
+import dagster as dg
+from atproto import Client
+
+
+class ATProtoResource(dg.ConfigurableResource):
+ login: str
+ password: str
+ session_cache_path: str = "atproto-session.txt"
+
+ def _login(self, client):
+ """Create a re-usable session to be used across resource instances; we are rate limited to 30/5 minutes or 300/day session."""
+ if os.path.exists(self.session_cache_path):
+ with open(self.session_cache_path, "r") as f:
+ session_string = f.read()
+ client.login(session_string=session_string)
+ else:
+ client.login(login=self.login, password=self.password)
+ session_string = client.export_session_string()
+ with open(self.session_cache_path, "w") as f:
+ f.write(session_string)
+
+ def get_client(
+ self,
+ ) -> Client:
+ client = Client()
+ self._login(client)
+ return client
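
Illustrative use of the session cache (not part of this diff; the handle and app password are hypothetical): the first `get_client()` performs a credentialed login and writes `atproto-session.txt`; later calls reuse the exported session string, which matters given the login rate limits noted above.

resource = ATProtoResource(
    login="example.bsky.social",   # hypothetical handle
    password="app-password-here",  # hypothetical app password
)

client = resource.get_client()        # no cache yet: credentialed login, writes atproto-session.txt
client_again = resource.get_client()  # reuses the cached session string instead of logging in again
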
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/utils/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/utils/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/utils/atproto.py b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/utils/atproto.py
new file mode 100644
index 0000000000000..fe8fadb7e857a
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/ingestion/utils/atproto.py
@@ -0,0 +1,59 @@
+from typing import TYPE_CHECKING, List, Optional
+
+from atproto import Client
+
+if TYPE_CHECKING:
+ from atproto_client import models
+
+
+def get_all_feed_items(client: Client, actor: str) -> List["models.AppBskyFeedDefs.FeedViewPost"]:
+ """Retrieves all author feed items for a given `actor`.
+
+ Args:
+ client (Client): AT Protocol client
+ actor (str): author identifier (did)
+
+ Returns:
+        List['models.AppBskyFeedDefs.FeedViewPost']: list of feed items
+
+ """
+ import math
+
+ import tenacity
+
+ @tenacity.retry(
+ stop=tenacity.stop_after_attempt(5),
+ wait=tenacity.wait_fixed(math.ceil(60 * 2.5)),
+ )
+ def _get_feed_with_retries(client: Client, actor: str, cursor: Optional[str]):
+ return client.get_author_feed(actor=actor, cursor=cursor, limit=100)
+
+ feed = []
+ cursor = None
+ while True:
+ data = _get_feed_with_retries(client, actor, cursor)
+ feed.extend(data.feed)
+ cursor = data.cursor
+ if not cursor:
+ break
+
+ return feed
+
+
+def get_all_list_members(client: Client, list_uri: str):
+ cursor = None
+ members = []
+ while True:
+ response = client.app.bsky.graph.get_list(
+ {"list": list_uri, "cursor": cursor, "limit": 100}
+ )
+ members.extend(response.items)
+ if not response.cursor:
+ break
+ cursor = response.cursor
+ return members
+
+
+def get_all_starter_pack_members(client: Client, starter_pack_uri: str):
+ response = client.app.bsky.graph.get_starter_pack({"starter_pack": starter_pack_uri})
+ return get_all_list_members(client, response.starter_pack.list.uri)
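
Hypothetical usage of the helpers above (not part of this diff), assuming a logged-in client and a made-up DID; `get_all_feed_items` walks every cursor page and retries each page fetch up to five times, waiting 150 seconds between attempts.

from atproto import Client

client = Client()
client.login(login="example.bsky.social", password="app-password-here")  # hypothetical credentials

feed = get_all_feed_items(client, "did:plc:example")  # hypothetical DID
print(f"fetched {len(feed)} feed items across all cursor pages")
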
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/modeling/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard/modeling/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard/modeling/definitions.py b/examples/project_atproto_dashboard/project_atproto_dashboard/modeling/definitions.py
new file mode 100644
index 0000000000000..6e7acf2d55f78
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard/modeling/definitions.py
@@ -0,0 +1,45 @@
+import os
+from pathlib import Path
+from typing import Any, Mapping, Optional
+
+import dagster as dg
+from dagster_dbt import DagsterDbtTranslator, DbtCliResource, DbtProject, dbt_assets
+
+dbt_project = DbtProject(
+ project_dir=Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve(),
+ target=os.getenv("DBT_TARGET"),
+)
+dbt_project.prepare_if_dev()
+dbt_resource = DbtCliResource(project_dir=dbt_project)
+
+
+class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):
+ def get_group_name(self, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
+ asset_path = dbt_resource_props["fqn"][1:-1]
+ if asset_path:
+ return "_".join(asset_path)
+ return "default"
+
+ def get_asset_key(self, dbt_resource_props):
+ resource_type = dbt_resource_props["resource_type"]
+ name = dbt_resource_props["name"]
+ if resource_type == "source":
+ return dg.AssetKey(name)
+ else:
+ return super().get_asset_key(dbt_resource_props)
+
+
+@dbt_assets(
+ manifest=dbt_project.manifest_path,
+ dagster_dbt_translator=CustomizedDagsterDbtTranslator(),
+)
+def dbt_bluesky(context: dg.AssetExecutionContext, dbt: DbtCliResource):
+ yield from (dbt.cli(["build"], context=context).stream().fetch_row_counts())
+
+
+defs = dg.Definitions(
+ assets=[dbt_bluesky],
+ resources={
+ "dbt": dbt_resource,
+ },
+)
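
A sketch of how the translator above maps dbt metadata (illustrative only; the resource-props dict is a simplified, hypothetical stand-in for what dbt's manifest provides):

translator = CustomizedDagsterDbtTranslator()

props = {
    "resource_type": "source",
    "name": "latest_feed",
    "fqn": ["dbt_project", "staging", "latest_feed"],
}

# fqn[1:-1] drops the project name and the node's own name, leaving ["staging"].
assert translator.get_group_name(props) == "staging"

# Sources are keyed by bare name; other node types fall through to the default mapping.
assert translator.get_asset_key(props) == dg.AssetKey("latest_feed")
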
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard_tests/__init__.py b/examples/project_atproto_dashboard/project_atproto_dashboard_tests/__init__.py
new file mode 100644
index 0000000000000..8b137891791fe
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard_tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/examples/project_atproto_dashboard/project_atproto_dashboard_tests/test_assets.py b/examples/project_atproto_dashboard/project_atproto_dashboard_tests/test_assets.py
new file mode 100644
index 0000000000000..8b137891791fe
--- /dev/null
+++ b/examples/project_atproto_dashboard/project_atproto_dashboard_tests/test_assets.py
@@ -0,0 +1 @@
+
diff --git a/examples/project_atproto_dashboard/pyproject.toml b/examples/project_atproto_dashboard/pyproject.toml
new file mode 100644
index 0000000000000..068f8e743f8a5
--- /dev/null
+++ b/examples/project_atproto_dashboard/pyproject.toml
@@ -0,0 +1,34 @@
+[project]
+name = "project_atproto_dashboard"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.9,<3.13"
+dependencies = [
+ "atproto",
+ "dagster",
+ "dagster-aws",
+ "dagster-dbt",
+ "dagster-duckdb",
+ "dagster-powerbi",
+ "dbt-duckdb",
+ "tenacity",
+]
+
+[project.optional-dependencies]
+dev = [
+ "dagster-webserver",
+ "pytest",
+ "ruff",
+]
+
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[tool.dagster]
+module_name = "project_atproto_dashboard.definitions"
+project_name = "project_atproto_dashboard"
+
+[tool.setuptools.packages.find]
+exclude=["project_atproto_dashboard_tests"]
diff --git a/examples/starlift-demo/dbt_example/dagster_defs/utils.py b/examples/starlift-demo/dbt_example/dagster_defs/utils.py
index ea4fa0740c2e4..1087a7d454573 100644
--- a/examples/starlift-demo/dbt_example/dagster_defs/utils.py
+++ b/examples/starlift-demo/dbt_example/dagster_defs/utils.py
@@ -2,9 +2,7 @@
from dagster import AssetsDefinition, AssetSpec, AutomationCondition, Definitions, Nothing
from dagster._core.definitions.asset_key import AssetKey
-from dagster._core.definitions.decorators.decorator_assets_definition_builder import (
- stringify_asset_key_to_input_name,
-)
+from dagster._core.definitions.assets import stringify_asset_key_to_input_name
from dagster._core.definitions.input import In
diff --git a/helm/dagster/schema/schema/charts/dagster/subschema/daemon.py b/helm/dagster/schema/schema/charts/dagster/subschema/daemon.py
index 5f9a5524f54b8..04cfa9f039e22 100644
--- a/helm/dagster/schema/schema/charts/dagster/subschema/daemon.py
+++ b/helm/dagster/schema/schema/charts/dagster/subschema/daemon.py
@@ -94,6 +94,7 @@ class Daemon(BaseModel, extra="forbid"):
podSecurityContext: kubernetes.PodSecurityContext
securityContext: kubernetes.SecurityContext
resources: kubernetes.Resources
+ checkDbReadyInitContainer: Optional[bool] = None
livenessProbe: kubernetes.LivenessProbe
readinessProbe: kubernetes.ReadinessProbe
startupProbe: kubernetes.StartupProbe
diff --git a/helm/dagster/schema/schema/charts/dagster/subschema/flower.py b/helm/dagster/schema/schema/charts/dagster/subschema/flower.py
index 6742e3c090f4b..f2a93daae60f3 100644
--- a/helm/dagster/schema/schema/charts/dagster/subschema/flower.py
+++ b/helm/dagster/schema/schema/charts/dagster/subschema/flower.py
@@ -13,6 +13,7 @@ class Flower(BaseModel):
tolerations: kubernetes.Tolerations
podSecurityContext: kubernetes.PodSecurityContext
securityContext: kubernetes.SecurityContext
+ checkDbReadyInitContainer: Optional[bool] = None
resources: kubernetes.Resources
livenessProbe: kubernetes.LivenessProbe
startupProbe: kubernetes.StartupProbe
diff --git a/helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py b/helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py
index 7b6d9eaaa143f..c73e9132573c2 100644
--- a/helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py
+++ b/helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py
@@ -40,6 +40,7 @@ class CeleryK8sRunLauncherConfig(BaseModel):
podSecurityContext: kubernetes.PodSecurityContext
securityContext: kubernetes.SecurityContext
resources: kubernetes.Resources
+ checkDbReadyInitContainer: Optional[bool] = None
livenessProbe: kubernetes.LivenessProbe
volumeMounts: List[kubernetes.VolumeMount]
volumes: List[kubernetes.Volume]
diff --git a/helm/dagster/schema/schema/charts/dagster/subschema/webserver.py b/helm/dagster/schema/schema/charts/dagster/subschema/webserver.py
index fda17a8acf6fc..d84ddf8c27178 100644
--- a/helm/dagster/schema/schema/charts/dagster/subschema/webserver.py
+++ b/helm/dagster/schema/schema/charts/dagster/subschema/webserver.py
@@ -34,6 +34,7 @@ class Webserver(BaseModel, extra="forbid"):
tolerations: kubernetes.Tolerations
podSecurityContext: kubernetes.PodSecurityContext
securityContext: kubernetes.SecurityContext
+ checkDbReadyInitContainer: Optional[bool] = None
resources: kubernetes.Resources
readinessProbe: kubernetes.ReadinessProbe
livenessProbe: kubernetes.LivenessProbe
diff --git a/helm/dagster/schema/schema_tests/test_celery_queues.py b/helm/dagster/schema/schema_tests/test_celery_queues.py
index 7b84118c5a941..c9548112b795b 100644
--- a/helm/dagster/schema/schema_tests/test_celery_queues.py
+++ b/helm/dagster/schema/schema_tests/test_celery_queues.py
@@ -307,3 +307,49 @@ def test_scheduler_name(deployment_template: HelmTemplate):
deployment = celery_queue_deployments[0]
assert deployment.spec.template.spec.scheduler_name == "custom"
+
+
+def test_check_db_container_toggle(deployment_template: HelmTemplate):
+ # Off test
+ helm_values = DagsterHelmValues.construct(
+ runLauncher=RunLauncher(
+ type=RunLauncherType.CELERY,
+ config=RunLauncherConfig(
+ celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
+ checkDbReadyInitContainer=False
+ )
+ ),
+ )
+ )
+ [daemon_deployment] = deployment_template.render(helm_values)
+ assert daemon_deployment.spec.template.spec.init_containers is None or "check-db-ready" not in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
+
+ # On test
+ helm_values = DagsterHelmValues.construct(
+ runLauncher=RunLauncher(
+ type=RunLauncherType.CELERY,
+ config=RunLauncherConfig(
+ celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
+ checkDbReadyInitContainer=True
+ )
+ ),
+ )
+ )
+ [daemon_deployment] = deployment_template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
+
+ # Default test
+ helm_values = DagsterHelmValues.construct(
+ runLauncher=RunLauncher(
+ type=RunLauncherType.CELERY,
+ config=RunLauncherConfig(celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct()),
+ )
+ )
+ [daemon_deployment] = deployment_template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
diff --git a/helm/dagster/schema/schema_tests/test_dagit.py b/helm/dagster/schema/schema_tests/test_dagit.py
index 85b2c9b9c44ab..67ade02507457 100644
--- a/helm/dagster/schema/schema_tests/test_dagit.py
+++ b/helm/dagster/schema/schema_tests/test_dagit.py
@@ -596,3 +596,34 @@ def test_env_configmap(configmap_template):
assert len(cm.data) == 6
assert cm.data["DAGSTER_HOME"] == "/opt/dagster/dagster_home"
assert cm.data["TEST_ENV"] == "test_value"
+
+
+def test_check_db_container_toggle(deployment_template: HelmTemplate):
+ # Off test
+ helm_values = DagsterHelmValues.construct(
+ dagsterWebserver=Webserver.construct(checkDbReadyInitContainer=False)
+ )
+ [webserver_deployment] = deployment_template.render(helm_values)
+ assert (
+ webserver_deployment.spec.template.spec.init_containers is None
+ or "check-db-ready"
+ not in [
+ container.name for container in webserver_deployment.spec.template.spec.init_containers
+ ]
+ )
+
+ # On test
+ helm_values = DagsterHelmValues.construct(
+ dagsterWebserver=Webserver.construct(checkDbReadyInitContainer=True)
+ )
+ [webserver_deployment] = deployment_template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in webserver_deployment.spec.template.spec.init_containers
+ ]
+
+ # Default test
+ helm_values = DagsterHelmValues.construct(dagsterWebserver=Webserver.construct())
+ [webserver_deployment] = deployment_template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in webserver_deployment.spec.template.spec.init_containers
+ ]
diff --git a/helm/dagster/schema/schema_tests/test_dagster_daemon.py b/helm/dagster/schema/schema_tests/test_dagster_daemon.py
index eada9cc4d5a5f..00ce79f69c47d 100644
--- a/helm/dagster/schema/schema_tests/test_dagster_daemon.py
+++ b/helm/dagster/schema/schema_tests/test_dagster_daemon.py
@@ -679,3 +679,30 @@ def test_env_configmap(env_configmap_template):
assert len(cm.data) == 6
assert cm.data["DAGSTER_HOME"] == "/opt/dagster/dagster_home"
assert cm.data["TEST_ENV"] == "test_value"
+
+
+def test_check_db_container_toggle(template: HelmTemplate):
+ # Off test
+ helm_values = DagsterHelmValues.construct(
+ dagsterDaemon=Daemon.construct(checkDbReadyInitContainer=False)
+ )
+ [daemon_deployment] = template.render(helm_values)
+ assert daemon_deployment.spec.template.spec.init_containers is None or "check-db-ready" not in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
+
+ # On test
+ helm_values = DagsterHelmValues.construct(
+ dagsterDaemon=Daemon.construct(checkDbReadyInitContainer=True)
+ )
+ [daemon_deployment] = template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
+
+ # Default test
+ helm_values = DagsterHelmValues.construct(dagsterDaemon=Daemon.construct())
+ [daemon_deployment] = template.render(helm_values)
+ assert "check-db-ready" in [
+ container.name for container in daemon_deployment.spec.template.spec.init_containers
+ ]
diff --git a/helm/dagster/templates/deployment-celery-queues.yaml b/helm/dagster/templates/deployment-celery-queues.yaml
index bbd8f1649be5a..c2f803376c99f 100644
--- a/helm/dagster/templates/deployment-celery-queues.yaml
+++ b/helm/dagster/templates/deployment-celery-queues.yaml
@@ -40,12 +40,14 @@ spec:
securityContext:
{{- toYaml $celeryK8sRunLauncherConfig.podSecurityContext | nindent 8 }}
initContainers:
+ {{- if $celeryK8sRunLauncherConfig.checkDbReadyInitContainer }}
- name: check-db-ready
image: "{{- $.Values.postgresql.image.repository -}}:{{- $.Values.postgresql.image.tag -}}"
imagePullPolicy: "{{- $.Values.postgresql.image.pullPolicy -}}"
command: ['sh', '-c', {{ include "dagster.postgresql.pgisready" $ | squote }}]
securityContext:
{{- toYaml $celeryK8sRunLauncherConfig.securityContext | nindent 12 }}
+ {{- end }}
{{- if $.Values.rabbitmq.enabled }}
- name: check-rabbitmq-ready
image: {{ include "dagster.externalImage.name" $.Values.busybox.image | quote }}
diff --git a/helm/dagster/templates/deployment-daemon.yaml b/helm/dagster/templates/deployment-daemon.yaml
index b28149be77e10..91b86b5c7ec73 100644
--- a/helm/dagster/templates/deployment-daemon.yaml
+++ b/helm/dagster/templates/deployment-daemon.yaml
@@ -49,6 +49,7 @@ spec:
securityContext:
{{- toYaml .Values.dagsterDaemon.podSecurityContext | nindent 8 }}
initContainers:
+ {{- if .Values.dagsterDaemon.checkDbReadyInitContainer }}
- name: check-db-ready
image: {{ include "dagster.externalImage.name" $.Values.postgresql.image | quote }}
imagePullPolicy: "{{- $.Values.postgresql.image.pullPolicy -}}"
@@ -57,6 +58,7 @@ spec:
{{- toYaml .Values.dagsterDaemon.securityContext | nindent 12 }}
resources:
{{- toYaml .Values.dagsterDaemon.initContainerResources | nindent 12 }}
+ {{- end }}
{{- if (and $userDeployments.enabled $userDeployments.enableSubchart) }}
{{- range $deployment := $userDeployments.deployments }}
- name: "init-user-deployment-{{- $deployment.name -}}"
diff --git a/helm/dagster/templates/deployment-flower.yaml b/helm/dagster/templates/deployment-flower.yaml
index 18dd7b3d6a479..63b9c7d44a515 100644
--- a/helm/dagster/templates/deployment-flower.yaml
+++ b/helm/dagster/templates/deployment-flower.yaml
@@ -36,12 +36,14 @@ spec:
securityContext:
{{- toYaml .Values.flower.podSecurityContext | nindent 8 }}
initContainers:
+ {{- if .Values.flower.checkDbReadyInitContainer }}
- name: check-db-ready
image: "{{- $.Values.postgresql.image.repository -}}:{{- $.Values.postgresql.image.tag -}}"
imagePullPolicy: "{{- $.Values.postgresql.image.pullPolicy -}}"
command: ['sh', '-c', {{ include "dagster.postgresql.pgisready" . | squote }}]
securityContext:
{{- toYaml .Values.flower.securityContext | nindent 12 }}
+ {{- end }}
containers:
- name: {{ .Chart.Name }}
securityContext:
diff --git a/helm/dagster/templates/helpers/_deployment-webserver.tpl b/helm/dagster/templates/helpers/_deployment-webserver.tpl
index 2c5f649485d61..9427646bf4999 100644
--- a/helm/dagster/templates/helpers/_deployment-webserver.tpl
+++ b/helm/dagster/templates/helpers/_deployment-webserver.tpl
@@ -47,6 +47,7 @@ spec:
securityContext:
{{- toYaml $_.Values.dagsterWebserver.podSecurityContext | nindent 8 }}
initContainers:
+ {{- if .Values.dagsterWebserver.checkDbReadyInitContainer }}
- name: check-db-ready
image: {{ include "dagster.externalImage.name" .Values.postgresql.image | quote }}
imagePullPolicy: {{ .Values.postgresql.image.pullPolicy }}
@@ -57,6 +58,7 @@ spec:
resources:
{{- toYaml $_.Values.dagsterWebserver.initContainerResources | nindent 12 }}
{{- end }}
+ {{- end }}
{{- if (and $userDeployments.enabled $userDeployments.enableSubchart) }}
{{- range $deployment := $userDeployments.deployments }}
- name: "init-user-deployment-{{- $deployment.name -}}"
diff --git a/helm/dagster/values.schema.json b/helm/dagster/values.schema.json
index dcf24a595a95d..eb73d8863ccc7 100644
--- a/helm/dagster/values.schema.json
+++ b/helm/dagster/values.schema.json
@@ -216,6 +216,18 @@
"resources": {
"$ref": "#/$defs/Resources"
},
+ "checkDbReadyInitContainer": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "title": "Checkdbreadyinitcontainer"
+ },
"livenessProbe": {
"$ref": "#/$defs/LivenessProbe"
},
@@ -647,6 +659,18 @@
"resources": {
"$ref": "#/$defs/Resources"
},
+ "checkDbReadyInitContainer": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "title": "Checkdbreadyinitcontainer"
+ },
"livenessProbe": {
"$ref": "#/$defs/LivenessProbe"
},
@@ -837,6 +861,18 @@
"securityContext": {
"$ref": "#/$defs/SecurityContext"
},
+ "checkDbReadyInitContainer": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "title": "Checkdbreadyinitcontainer"
+ },
"resources": {
"$ref": "#/$defs/Resources"
},
@@ -3137,6 +3173,18 @@
"securityContext": {
"$ref": "#/$defs/SecurityContext"
},
+ "checkDbReadyInitContainer": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "title": "Checkdbreadyinitcontainer"
+ },
"resources": {
"$ref": "#/$defs/Resources"
},
diff --git a/helm/dagster/values.yaml b/helm/dagster/values.yaml
index ffd28ca0b84d5..3e7c6cb41c5da 100644
--- a/helm/dagster/values.yaml
+++ b/helm/dagster/values.yaml
@@ -165,6 +165,10 @@ dagsterWebserver:
# Configure initContainer resources separately from main container
initContainerResources: {}
+
+ # Enable the check-db-ready initContainer
+ checkDbReadyInitContainer: true
+
# Override the default K8s scheduler
# schedulerName: ~
@@ -689,6 +693,8 @@ runLauncher:
# memory: 128Mi
resources: {}
+ # Enable the check-db-ready initContainer
+ checkDbReadyInitContainer: true
# Override the default K8s scheduler
# schedulerName: ~
@@ -885,6 +891,8 @@ flower:
podSecurityContext: {}
securityContext: {}
+ # Enable the check-db-ready initContainer
+ checkDbReadyInitContainer: true
# Override the default K8s scheduler
# schedulerName: ~
@@ -1219,6 +1227,8 @@ dagsterDaemon:
# Configure initContainer resources separately from main container
initContainerResources: {}
+ # Enable the check-db-ready initContainer
+ checkDbReadyInitContainer: true
# Override the default K8s scheduler
# schedulerName: ~
diff --git a/integration_tests/test_suites/daemon-test-suite/auto_run_reexecution_tests/test_auto_run_reexecution.py b/integration_tests/test_suites/daemon-test-suite/auto_run_reexecution_tests/test_auto_run_reexecution.py
index 49c839775fbb4..3b58a6453ea62 100644
--- a/integration_tests/test_suites/daemon-test-suite/auto_run_reexecution_tests/test_auto_run_reexecution.py
+++ b/integration_tests/test_suites/daemon-test-suite/auto_run_reexecution_tests/test_auto_run_reexecution.py
@@ -14,6 +14,7 @@
AUTO_RETRY_RUN_ID_TAG,
MAX_RETRIES_TAG,
PARENT_RUN_ID_TAG,
+ RESUME_RETRY_TAG,
RETRY_ON_ASSET_OR_OP_FAILURE_TAG,
RETRY_STRATEGY_TAG,
ROOT_RUN_ID_TAG,
@@ -395,7 +396,15 @@ def test_consume_new_runs_for_automatic_reexecution(instance, workspace_context)
assert len(instance.run_coordinator.queue()) == 0
# retries failure
- run = create_run(instance, status=DagsterRunStatus.STARTED, tags={MAX_RETRIES_TAG: "2"})
+ run = create_run(
+ instance,
+ status=DagsterRunStatus.STARTED,
+ tags={
+ MAX_RETRIES_TAG: "2",
+ RESUME_RETRY_TAG: "true",
+ RETRY_STRATEGY_TAG: "ALL_STEPS",
+ },
+ )
dagster_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_FAILURE.value,
job_name="foo",
@@ -427,6 +436,10 @@ def test_consume_new_runs_for_automatic_reexecution(instance, workspace_context)
run = instance.get_run_by_id(run.run_id)
assert run.tags.get(AUTO_RETRY_RUN_ID_TAG) == first_retry.run_id
+    # the retry strategy tag is copied; "is_resume_retry" is not, since the retry strategy is ALL_STEPS
+ assert RESUME_RETRY_TAG not in first_retry.tags
+ assert first_retry.tags.get(RETRY_STRATEGY_TAG) == "ALL_STEPS"
+
# doesn't retry again
list(
consume_new_runs_for_automatic_reexecution(
diff --git a/js_modules/dagster-ui/.gitattributes b/js_modules/dagster-ui/.gitattributes
index bf95c09ab463e..9428a42a1d54b 100644
--- a/js_modules/dagster-ui/.gitattributes
+++ b/js_modules/dagster-ui/.gitattributes
@@ -10,3 +10,4 @@ packages/ui-core/client.json linguist-generated=true
packages/ui-core/src/asset-selection/generated/* linguist-generated=true
packages/ui-core/src/selection/generated/* linguist-generated=true
packages/ui-core/src/run-selection/generated/* linguist-generated=true
+packages/ui-core/src/op-selection/generated/* linguist-generated=true
diff --git a/js_modules/dagster-ui/packages/app-oss/package.json b/js_modules/dagster-ui/packages/app-oss/package.json
index f6d3fb76365a6..82cff1e839639 100644
--- a/js_modules/dagster-ui/packages/app-oss/package.json
+++ b/js_modules/dagster-ui/packages/app-oss/package.json
@@ -14,7 +14,7 @@
"@rive-app/react-canvas": "^3.0.34",
"eslint-config-next": "^13.5.3",
"graphql": "^16.8.1",
- "next": "^14.2.10",
+ "next": "^14.2.15",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-is": "^18.3.1",
diff --git a/js_modules/dagster-ui/packages/ui-core/package.json b/js_modules/dagster-ui/packages/ui-core/package.json
index af7ba534148f7..ff98bbec75a34 100644
--- a/js_modules/dagster-ui/packages/ui-core/package.json
+++ b/js_modules/dagster-ui/packages/ui-core/package.json
@@ -17,6 +17,7 @@
"generate-asset-selection": "ts-node -O '{\"module\": \"commonjs\"}' ./src/scripts/generateAssetSelection.ts && eslint src/asset-selection/generated/ --fix -c .eslintrc.js",
"generate-selection-autocomplete": "ts-node -O '{\"module\": \"commonjs\"}' ./src/scripts/generateSelection.ts && eslint src/selection/generated/ --fix -c .eslintrc.js",
"generate-run-selection": "ts-node -O '{\"module\": \"commonjs\"}' ./src/scripts/generateRunSelection.ts && eslint src/run-selection/generated/ --fix -c .eslintrc.js",
+ "generate-op-selection": "ts-node -O '{\"module\": \"commonjs\"}' ./src/scripts/generateOpSelection.ts && eslint src/op-selection/generated/ --fix -c .eslintrc.js",
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build"
},
diff --git a/js_modules/dagster-ui/packages/ui-core/src/app/DefaultFeatureFlags.oss.tsx b/js_modules/dagster-ui/packages/ui-core/src/app/DefaultFeatureFlags.oss.tsx
index 63a0632523530..4dab268bccfc8 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/app/DefaultFeatureFlags.oss.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/app/DefaultFeatureFlags.oss.tsx
@@ -8,6 +8,9 @@ export const DEFAULT_FEATURE_FLAG_VALUES: Partial<Record<FeatureFlag, boolean>>
[FeatureFlag.flagAssetSelectionSyntax]: new URLSearchParams(global?.location?.search ?? '').has(
'new-asset-selection-syntax',
),
+ [FeatureFlag.flagRunSelectionSyntax]: new URLSearchParams(global?.location?.search ?? '').has(
+ 'new-run-selection-syntax',
+ ),
// Flags for tests
[FeatureFlag.__TestFlagDefaultTrue]: true,
diff --git a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionInput.oss.tsx b/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionInput.oss.tsx
index f3060d7f460f8..ef87451179f9b 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionInput.oss.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionInput.oss.tsx
@@ -1,19 +1,15 @@
-import {Colors, Icon, Icons} from '@dagster-io/ui-components';
-import CodeMirror, {Editor, HintFunction} from 'codemirror';
-import {useLayoutEffect, useMemo, useRef} from 'react';
-import styled, {createGlobalStyle, css} from 'styled-components';
+import {Icons} from '@dagster-io/ui-components';
+import {useMemo} from 'react';
+import styled from 'styled-components';
-import {lintAssetSelection} from './AssetSelectionLinter';
import {assertUnreachable} from '../../app/Util';
import {AssetGraphQueryItem} from '../../asset-graph/useAssetGraphData';
-import {useUpdatingRef} from '../../hooks/useUpdatingRef';
-import {createSelectionHint} from '../../selection/SelectionAutoComplete';
-import {
- SelectionAutoCompleteInputCSS,
- applyStaticSyntaxHighlighting,
-} from '../../selection/SelectionAutoCompleteHighlighter';
+import {SelectionAutoCompleteInput, iconStyle} from '../../selection/SelectionAutoCompleteInput';
+import {createSelectionLinter} from '../../selection/createSelectionLinter';
import {placeholderTextForItems} from '../../ui/GraphQueryInput';
import {buildRepoPathForHuman} from '../../workspace/buildRepoAddress';
+import {AssetSelectionLexer} from '../generated/AssetSelectionLexer';
+import {AssetSelectionParser} from '../generated/AssetSelectionParser';
import 'codemirror/addon/edit/closebrackets';
import 'codemirror/lib/codemirror.css';
@@ -32,215 +28,86 @@ interface AssetSelectionInputProps {
const FUNCTIONS = ['sinks', 'roots'];
export const AssetSelectionInput = ({value, onChange, assets}: AssetSelectionInputProps) => {
-  const editorRef = useRef<HTMLDivElement | null>(null);
-  const cmInstance = useRef<Editor | null>(null);
-
- const currentValueRef = useRef(value);
-
- const hintRef = useUpdatingRef(
- useMemo(() => {
-      const assetNamesSet: Set<string> = new Set();
-      const tagNamesSet: Set<string> = new Set();
-      const ownersSet: Set<string> = new Set();
-      const groupsSet: Set<string> = new Set();
-      const kindsSet: Set<string> = new Set();
-      const codeLocationSet: Set<string> = new Set();
-
- assets.forEach((asset) => {
- assetNamesSet.add(asset.name);
- asset.node.tags.forEach((tag) => {
- if (tag.key && tag.value) {
- tagNamesSet.add(`${tag.key}=${tag.value}`);
- } else {
- tagNamesSet.add(tag.key);
- }
- });
- asset.node.owners.forEach((owner) => {
- switch (owner.__typename) {
- case 'TeamAssetOwner':
- ownersSet.add(owner.team);
- break;
- case 'UserAssetOwner':
- ownersSet.add(owner.email);
- break;
- default:
- assertUnreachable(owner);
- }
- });
- if (asset.node.groupName) {
- groupsSet.add(asset.node.groupName);
+ const attributesMap = useMemo(() => {
+    const assetNamesSet: Set<string> = new Set();
+    const tagNamesSet: Set<string> = new Set();
+    const ownersSet: Set<string> = new Set();
+    const groupsSet: Set<string> = new Set();
+    const kindsSet: Set<string> = new Set();
+    const codeLocationSet: Set<string> = new Set();
+
+ assets.forEach((asset) => {
+ assetNamesSet.add(asset.name);
+ asset.node.tags.forEach((tag) => {
+ if (tag.key && tag.value) {
+ tagNamesSet.add(`${tag.key}=${tag.value}`);
+ } else {
+ tagNamesSet.add(tag.key);
}
- asset.node.kinds.forEach((kind) => {
- kindsSet.add(kind);
- });
- const location = buildRepoPathForHuman(
- asset.node.repository.name,
- asset.node.repository.location.name,
- );
- codeLocationSet.add(location);
- });
-
- const assetNames = Array.from(assetNamesSet);
- const tagNames = Array.from(tagNamesSet);
- const owners = Array.from(ownersSet);
- const groups = Array.from(groupsSet);
- const kinds = Array.from(kindsSet);
- const codeLocations = Array.from(codeLocationSet);
-
- return createSelectionHint(
- 'key',
- {
- key: assetNames,
- tag: tagNames,
- owner: owners,
- group: groups,
- kind: kinds,
- code_location: codeLocations,
- },
- FUNCTIONS,
- );
- }, [assets]),
- );
-
- useLayoutEffect(() => {
- if (editorRef.current && !cmInstance.current) {
- cmInstance.current = CodeMirror(editorRef.current, {
- value,
- mode: 'assetSelection',
- lineNumbers: false,
- lineWrapping: false,
- scrollbarStyle: 'native',
- autoCloseBrackets: true,
- lint: {
- getAnnotations: lintAssetSelection,
- async: false,
- },
- placeholder: placeholderTextForItems('Type an asset subset…', assets),
- extraKeys: {
- 'Ctrl-Space': 'autocomplete',
- Tab: (cm: Editor) => {
- cm.replaceSelection(' ', 'end');
- },
- },
});
-
- cmInstance.current.setSize('100%', 20);
-
- // Enforce single line by preventing newlines
- cmInstance.current.on('beforeChange', (_instance: Editor, change) => {
- if (change.text.some((line) => line.includes('\n'))) {
- change.cancel();
- }
- });
-
- cmInstance.current.on('change', (instance: Editor, change) => {
- const newValue = instance.getValue().replace(/\s+/g, ' ');
- currentValueRef.current = newValue;
- onChange(newValue);
-
- if (change.origin === 'complete' && change.text[0]?.endsWith('()')) {
- // Set cursor inside the right parenthesis
- const cursor = instance.getCursor();
- instance.setCursor({...cursor, ch: cursor.ch - 1});
+ asset.node.owners.forEach((owner) => {
+ switch (owner.__typename) {
+ case 'TeamAssetOwner':
+ ownersSet.add(owner.team);
+ break;
+ case 'UserAssetOwner':
+ ownersSet.add(owner.email);
+ break;
+ default:
+ assertUnreachable(owner);
}
});
-
- cmInstance.current.on('inputRead', (instance: Editor) => {
- showHint(instance, hintRef.current);
- });
-
- cmInstance.current.on('cursorActivity', (instance: Editor) => {
- applyStaticSyntaxHighlighting(instance);
- showHint(instance, hintRef.current);
- });
-
- requestAnimationFrame(() => {
- if (!cmInstance.current) {
- return;
- }
-
- applyStaticSyntaxHighlighting(cmInstance.current);
+ if (asset.node.groupName) {
+ groupsSet.add(asset.node.groupName);
+ }
+ asset.node.kinds.forEach((kind) => {
+ kindsSet.add(kind);
});
- }
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, []);
-
- // Update CodeMirror when value prop changes
- useLayoutEffect(() => {
- const noNewLineValue = value.replace('\n', ' ');
- if (cmInstance.current && cmInstance.current.getValue() !== noNewLineValue) {
- const instance = cmInstance.current;
- const cursor = instance.getCursor();
- instance.setValue(noNewLineValue);
- instance.setCursor(cursor);
- showHint(instance, hintRef.current);
- }
- }, [hintRef, value]);
+ const location = buildRepoPathForHuman(
+ asset.node.repository.name,
+ asset.node.repository.location.name,
+ );
+ codeLocationSet.add(location);
+ });
+ const assetNames = Array.from(assetNamesSet);
+ const tagNames = Array.from(tagNamesSet);
+ const owners = Array.from(ownersSet);
+ const groups = Array.from(groupsSet);
+ const kinds = Array.from(kindsSet);
+ const codeLocations = Array.from(codeLocationSet);
+
+ return {
+ key: assetNames,
+ tag: tagNames,
+ owner: owners,
+ group: groups,
+ kind: kinds,
+ code_location: codeLocations,
+ };
+ }, [assets]);
+
+ const linter = useMemo(
+ () => createSelectionLinter({Lexer: AssetSelectionLexer, Parser: AssetSelectionParser}),
+ [],
+ );
return (
- <>
-
-
-
-
-
-
- >
+
+
+
);
};
-const iconStyle = (img: string) => css`
- &:before {
- content: ' ';
- width: 14px;
- mask-size: contain;
- mask-repeat: no-repeat;
- mask-position: center;
- mask-image: url(${img});
- background: ${Colors.accentPrimary()};
- display: inline-block;
- }
-`;
-
-const InputDiv = styled.div`
- ${SelectionAutoCompleteInputCSS}
+const WrapperDiv = styled.div`
.attribute-owner {
${iconStyle(Icons.owner.src)}
}
`;
-
-const GlobalHintStyles = createGlobalStyle`
- .CodeMirror-hints {
- background: ${Colors.popoverBackground()};
- border: none;
- border-radius: 4px;
- padding: 8px 4px;
- .CodeMirror-hint {
- border-radius: 4px;
- font-size: 14px;
- padding: 6px 8px 6px 12px;
- color: ${Colors.textDefault()};
- &.CodeMirror-hint-active {
- background-color: ${Colors.backgroundBlue()};
- color: ${Colors.textDefault()};
- }
- }
- }
-`;
-
-function showHint(instance: Editor, hint: HintFunction) {
- requestAnimationFrame(() => {
- instance.showHint({
- hint,
- completeSingle: false,
- moveOnOverlap: true,
- updateOnCursorActivity: true,
- });
- });
-}
diff --git a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionLinter.ts b/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionLinter.ts
deleted file mode 100644
index f1f1eb059c23e..0000000000000
--- a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionLinter.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import {CharStreams, CommonTokenStream} from 'antlr4ts';
-import CodeMirror from 'codemirror';
-
-import {AssetSelectionSyntaxErrorListener} from './AssetSelectionSyntaxErrorListener';
-import {AssetSelectionLexer} from '../generated/AssetSelectionLexer';
-import {AssetSelectionParser} from '../generated/AssetSelectionParser';
-
-export const lintAssetSelection = (text: string) => {
- const errorListener = new AssetSelectionSyntaxErrorListener();
-
- const inputStream = CharStreams.fromString(text);
- const lexer = new AssetSelectionLexer(inputStream);
-
- lexer.removeErrorListeners();
- lexer.addErrorListener(errorListener);
-
- const tokens = new CommonTokenStream(lexer);
- const parser = new AssetSelectionParser(tokens);
-
- parser.removeErrorListeners(); // Remove default console error listener
- parser.addErrorListener(errorListener);
-
- parser.start();
-
- // Map syntax errors to CodeMirror's lint format
- const lintErrors = errorListener.errors.map((error) => ({
- message: error.message.replace(', ', ''),
- severity: 'error',
- from: CodeMirror.Pos(error.line, error.column),
- to: CodeMirror.Pos(error.line, text.length),
- }));
-
- return lintErrors;
-};
diff --git a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionSyntaxErrorListener.tsx b/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionSyntaxErrorListener.tsx
deleted file mode 100644
index 89d87f2dbf09e..0000000000000
--- a/js_modules/dagster-ui/packages/ui-core/src/asset-selection/input/AssetSelectionSyntaxErrorListener.tsx
+++ /dev/null
@@ -1,26 +0,0 @@
-import {ANTLRErrorListener, RecognitionException, Recognizer} from 'antlr4ts';
-
-interface SyntaxError {
- message: string;
- line: number;
- column: number;
-}
-
-export class AssetSelectionSyntaxErrorListener implements ANTLRErrorListener<any> {
- public errors: SyntaxError[] = [];
-
-  syntaxError<T>(
-    _recognizer: Recognizer<T, any>,
- _offendingSymbol: T | undefined,
- line: number,
- charPositionInLine: number,
- msg: string,
- _e: RecognitionException | undefined,
- ): void {
- this.errors.push({
- message: msg,
- line: line - 1, // CodeMirror lines are 0-based
- column: charPositionInLine,
- });
- }
-}
diff --git a/js_modules/dagster-ui/packages/ui-core/src/assets/BackfillPreviewModal.tsx b/js_modules/dagster-ui/packages/ui-core/src/assets/BackfillPreviewModal.tsx
index 31607e4e01b28..f78dda46931d0 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/assets/BackfillPreviewModal.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/assets/BackfillPreviewModal.tsx
@@ -57,7 +57,7 @@ export const BackfillPreviewModal = ({
skip: !isOpen,
},
);
- const {data} = queryResult;
+ const {data, loading} = queryResult;
const partitionsByAssetToken = useMemo(() => {
return Object.fromEntries(
@@ -107,8 +107,10 @@ export const BackfillPreviewModal = ({
{partitions ? (
- ) : (
+ ) : loading ? (
+ ) : (
+ 'No partitions available to materialize'
)}
diff --git a/js_modules/dagster-ui/packages/ui-core/src/gantt/RunGroupPanel.tsx b/js_modules/dagster-ui/packages/ui-core/src/gantt/RunGroupPanel.tsx
index 00f35180d3e53..6d65f30ae1767 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/gantt/RunGroupPanel.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/gantt/RunGroupPanel.tsx
@@ -96,7 +96,7 @@ export const RunGroupPanel = ({
});
return (
-
+
<>
{runs.map((g, idx) =>
g ? (
diff --git a/js_modules/dagster-ui/packages/ui-core/src/graph/OpTags.tsx b/js_modules/dagster-ui/packages/ui-core/src/graph/OpTags.tsx
index 00ac437c859c8..b87007caaf369 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/graph/OpTags.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/graph/OpTags.tsx
@@ -62,6 +62,7 @@ import gitlab from './kindtag-images/tool-gitlab-color.svg';
import go from './kindtag-images/tool-go-color.svg';
import google from './kindtag-images/tool-google-color.svg';
import googlecloud from './kindtag-images/tool-googlecloud-color.svg';
+import googledrive from './kindtag-images/tool-googledrive-color.svg';
import googlesheets from './kindtag-images/tool-googlesheets-color.svg';
import graphql from './kindtag-images/tool-graphql-color.svg';
import greatexpectations from './kindtag-images/tool-greatexpectations-color.svg';
@@ -248,6 +249,7 @@ export type KnownTagType =
| 'powerbi'
| 'gcp'
| 'googlecloud'
+ | 'googledrive'
| 'looker'
| 'tableau'
| 'segment'
@@ -580,6 +582,10 @@ export const KNOWN_TAGS: Record = {
icon: googlecloud,
content: 'Google Cloud',
},
+ googledrive: {
+ icon: googledrive,
+ content: 'Google Drive',
+ },
looker: {
icon: looker,
content: 'Looker',
diff --git a/js_modules/dagster-ui/packages/ui-core/src/graph/kindtag-images/tool-googledrive-color.svg b/js_modules/dagster-ui/packages/ui-core/src/graph/kindtag-images/tool-googledrive-color.svg
new file mode 100644
index 0000000000000..2d94beff46945
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/graph/kindtag-images/tool-googledrive-color.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/js_modules/dagster-ui/packages/ui-core/src/launchpad/useLaunchMultipleRunsWithTelemetry.ts b/js_modules/dagster-ui/packages/ui-core/src/launchpad/useLaunchMultipleRunsWithTelemetry.ts
index d9b7d60528a77..9249da1025e91 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/launchpad/useLaunchMultipleRunsWithTelemetry.ts
+++ b/js_modules/dagster-ui/packages/ui-core/src/launchpad/useLaunchMultipleRunsWithTelemetry.ts
@@ -29,7 +29,9 @@ export function useLaunchMultipleRunsWithTelemetry() {
const executionParamsList = Array.isArray(variables.executionParamsList)
? variables.executionParamsList
: [variables.executionParamsList];
- const jobNames = executionParamsList.map((params) => params.selector?.jobName);
+ const jobNames = executionParamsList.map(
+ (params) => params.selector.jobName || params.selector.pipelineName,
+ );
if (
jobNames.length !== executionParamsList.length ||
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelection.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelection.ts
new file mode 100644
index 0000000000000..614bde690e347
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelection.ts
@@ -0,0 +1,42 @@
+import {CharStreams, CommonTokenStream} from 'antlr4ts';
+
+import {AntlrOpSelectionVisitor} from './AntlrOpSelectionVisitor';
+import {GraphQueryItem} from '../app/GraphQueryImpl';
+import {AntlrInputErrorListener} from '../asset-selection/AntlrAssetSelection';
+import {OpSelectionLexer} from './generated/OpSelectionLexer';
+import {OpSelectionParser} from './generated/OpSelectionParser';
+
+type OpSelectionQueryResult = {
+ all: GraphQueryItem[];
+ focus: GraphQueryItem[];
+};
+
+export const parseOpSelectionQuery = (
+ all_ops: GraphQueryItem[],
+ query: string,
+): OpSelectionQueryResult | Error => {
+ try {
+ const lexer = new OpSelectionLexer(CharStreams.fromString(query));
+ lexer.removeErrorListeners();
+ lexer.addErrorListener(new AntlrInputErrorListener());
+
+ const tokenStream = new CommonTokenStream(lexer);
+
+ const parser = new OpSelectionParser(tokenStream);
+ parser.removeErrorListeners();
+ parser.addErrorListener(new AntlrInputErrorListener());
+
+ const tree = parser.start();
+
+ const visitor = new AntlrOpSelectionVisitor(all_ops);
+ const all_selection = visitor.visit(tree);
+ const focus_selection = visitor.focus_ops;
+
+ return {
+ all: Array.from(all_selection),
+ focus: Array.from(focus_selection),
+ };
+ } catch (e) {
+ return e as Error;
+ }
+};
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelectionVisitor.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelectionVisitor.ts
new file mode 100644
index 0000000000000..7af548928dfe6
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/AntlrOpSelectionVisitor.ts
@@ -0,0 +1,123 @@
+import {AbstractParseTreeVisitor} from 'antlr4ts/tree/AbstractParseTreeVisitor';
+
+import {GraphQueryItem, GraphTraverser} from '../app/GraphQueryImpl';
+import {
+ AllExpressionContext,
+ AndExpressionContext,
+ AttributeExpressionContext,
+ DownTraversalExpressionContext,
+ NameExprContext,
+ NameSubstringExprContext,
+ NotExpressionContext,
+ OrExpressionContext,
+ ParenthesizedExpressionContext,
+ StartContext,
+ TraversalAllowedExpressionContext,
+ UpAndDownTraversalExpressionContext,
+ UpTraversalExpressionContext,
+} from './generated/OpSelectionParser';
+import {OpSelectionVisitor} from './generated/OpSelectionVisitor';
+import {getTraversalDepth, getValue} from '../asset-selection/AntlrAssetSelectionVisitor';
+
+export class AntlrOpSelectionVisitor
+  extends AbstractParseTreeVisitor<Set<GraphQueryItem>>
+  implements OpSelectionVisitor<Set<GraphQueryItem>>
+{
+  all_ops: Set<GraphQueryItem>;
+  focus_ops: Set<GraphQueryItem>;
+  traverser: GraphTraverser<GraphQueryItem>;
+
+ protected defaultResult() {
+    return new Set<GraphQueryItem>();
+ }
+
+ constructor(all_ops: GraphQueryItem[]) {
+ super();
+ this.all_ops = new Set(all_ops);
+ this.focus_ops = new Set();
+ this.traverser = new GraphTraverser(all_ops);
+ }
+
+ visitStart(ctx: StartContext) {
+ return this.visit(ctx.expr());
+ }
+
+ visitTraversalAllowedExpression(ctx: TraversalAllowedExpressionContext) {
+ return this.visit(ctx.traversalAllowedExpr());
+ }
+
+ visitUpAndDownTraversalExpression(ctx: UpAndDownTraversalExpressionContext) {
+ const selection = this.visit(ctx.traversalAllowedExpr());
+ const up_depth: number = getTraversalDepth(ctx.traversal(0));
+ const down_depth: number = getTraversalDepth(ctx.traversal(1));
+ const selection_copy = new Set(selection);
+ for (const item of selection_copy) {
+ this.traverser.fetchUpstream(item, up_depth).forEach((i) => selection.add(i));
+ this.traverser.fetchDownstream(item, down_depth).forEach((i) => selection.add(i));
+ }
+ return selection;
+ }
+
+ visitUpTraversalExpression(ctx: UpTraversalExpressionContext) {
+ const selection = this.visit(ctx.traversalAllowedExpr());
+ const traversal_depth: number = getTraversalDepth(ctx.traversal());
+ const selection_copy = new Set(selection);
+ for (const item of selection_copy) {
+ this.traverser.fetchUpstream(item, traversal_depth).forEach((i) => selection.add(i));
+ }
+ return selection;
+ }
+
+ visitDownTraversalExpression(ctx: DownTraversalExpressionContext) {
+ const selection = this.visit(ctx.traversalAllowedExpr());
+ const traversal_depth: number = getTraversalDepth(ctx.traversal());
+ const selection_copy = new Set(selection);
+ for (const item of selection_copy) {
+ this.traverser.fetchDownstream(item, traversal_depth).forEach((i) => selection.add(i));
+ }
+ return selection;
+ }
+
+ visitNotExpression(ctx: NotExpressionContext) {
+ const selection = this.visit(ctx.expr());
+ return new Set([...this.all_ops].filter((i) => !selection.has(i)));
+ }
+
+ visitAndExpression(ctx: AndExpressionContext) {
+ const left = this.visit(ctx.expr(0));
+ const right = this.visit(ctx.expr(1));
+ return new Set([...left].filter((i) => right.has(i)));
+ }
+
+ visitOrExpression(ctx: OrExpressionContext) {
+ const left = this.visit(ctx.expr(0));
+ const right = this.visit(ctx.expr(1));
+ return new Set([...left, ...right]);
+ }
+
+ visitAllExpression(_ctx: AllExpressionContext) {
+ return this.all_ops;
+ }
+
+ visitAttributeExpression(ctx: AttributeExpressionContext) {
+ return this.visit(ctx.attributeExpr());
+ }
+
+ visitParenthesizedExpression(ctx: ParenthesizedExpressionContext) {
+ return this.visit(ctx.expr());
+ }
+
+ visitNameExpr(ctx: NameExprContext) {
+ const value: string = getValue(ctx.value());
+ const selection = [...this.all_ops].filter((i) => i.name === value);
+ selection.forEach((i) => this.focus_ops.add(i));
+ return new Set(selection);
+ }
+
+ visitNameSubstringExpr(ctx: NameSubstringExprContext) {
+ const value: string = getValue(ctx.value());
+ const selection = [...this.all_ops].filter((i) => i.name.includes(value));
+ selection.forEach((i) => this.focus_ops.add(i));
+ return new Set(selection);
+ }
+}
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4 b/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4
new file mode 100644
index 0000000000000..1edc8b2e04c85
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4
@@ -0,0 +1,63 @@
+grammar OpSelection;
+
+start: expr EOF;
+
+// Root rule for parsing expressions
+expr
+ : traversalAllowedExpr # TraversalAllowedExpression
+ | traversal traversalAllowedExpr traversal # UpAndDownTraversalExpression
+ | traversal traversalAllowedExpr # UpTraversalExpression
+ | traversalAllowedExpr traversal # DownTraversalExpression
+ | NOT expr # NotExpression
+ | expr AND expr # AndExpression
+ | expr OR expr # OrExpression
+ | STAR # AllExpression
+ ;
+
+// Allowed expressions for traversals
+traversalAllowedExpr
+ : attributeExpr # AttributeExpression
+ | LPAREN expr RPAREN # ParenthesizedExpression
+ ;
+
+// Traversal operators
+traversal
+ : STAR
+ | PLUS+
+ ;
+
+// Attribute expressions for specific attributes
+attributeExpr
+ : NAME COLON value # NameExpr
+ | NAME_SUBSTRING COLON value # NameSubstringExpr
+ ;
+
+// Value can be a quoted or unquoted string
+value
+ : QUOTED_STRING
+ | UNQUOTED_STRING
+ ;
+
+// Tokens for operators and keywords
+AND : 'and';
+OR : 'or';
+NOT : 'not';
+
+STAR : '*';
+PLUS : '+';
+
+COLON : ':';
+
+LPAREN : '(';
+RPAREN : ')';
+
+// Tokens for attributes
+NAME : 'name';
+NAME_SUBSTRING : 'name_substring';
+
+// Tokens for strings
+QUOTED_STRING : '"' (~["\\\r\n])* '"' ;
+UNQUOTED_STRING : [a-zA-Z_][a-zA-Z0-9_]*;
+
+// Whitespace
+WS : [ \t\r\n]+ -> skip ;
\ No newline at end of file
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/__tests__/AntlrOpSelection.test.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/__tests__/AntlrOpSelection.test.ts
new file mode 100644
index 0000000000000..8150daefa203d
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/__tests__/AntlrOpSelection.test.ts
@@ -0,0 +1,118 @@
+/* eslint-disable jest/expect-expect */
+
+import {GraphQueryItem} from '../../app/GraphQueryImpl';
+import {parseOpSelectionQuery} from '../AntlrOpSelection';
+
+const TEST_GRAPH: GraphQueryItem[] = [
+ // Top Layer
+ {
+ name: 'A',
+ inputs: [{dependsOn: []}],
+ outputs: [{dependedBy: [{solid: {name: 'B'}}, {solid: {name: 'B2'}}]}],
+ },
+ // Second Layer
+ {
+ name: 'B',
+ inputs: [{dependsOn: [{solid: {name: 'A'}}]}],
+ outputs: [{dependedBy: [{solid: {name: 'C'}}]}],
+ },
+ {
+ name: 'B2',
+ inputs: [{dependsOn: [{solid: {name: 'A'}}]}],
+ outputs: [{dependedBy: [{solid: {name: 'C'}}]}],
+ },
+ // Third Layer
+ {
+ name: 'C',
+ inputs: [{dependsOn: [{solid: {name: 'B'}}, {solid: {name: 'B2'}}]}],
+ outputs: [{dependedBy: []}],
+ },
+];
+
+function assertQueryResult(query: string, expectedNames: string[]) {
+ const result = parseOpSelectionQuery(TEST_GRAPH, query);
+ expect(result).not.toBeInstanceOf(Error);
+ if (result instanceof Error) {
+ throw result;
+ }
+ expect(result.all.length).toBe(expectedNames.length);
+ expect(new Set(result.all.map((op) => op.name))).toEqual(new Set(expectedNames));
+}
+
+// Most tests copied from AntlrAssetSelection.test.ts
+describe('parseOpSelectionQuery', () => {
+ describe('invalid queries', () => {
+ it('should throw on invalid queries', () => {
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'A')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'name:A name:B')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'not')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'and')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'name:A and')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'sinks(*)')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'roots(*)')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'notafunction()')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'tag:foo=')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'owner')).toBeInstanceOf(Error);
+ expect(parseOpSelectionQuery(TEST_GRAPH, 'owner:owner@owner.com')).toBeInstanceOf(Error);
+ });
+ });
+
+ describe('valid queries', () => {
+ it('should parse star query', () => {
+ assertQueryResult('*', ['A', 'B', 'B2', 'C']);
+ });
+
+ it('should parse name query', () => {
+ assertQueryResult('name:A', ['A']);
+ });
+
+ it('should parse name_substring query', () => {
+ assertQueryResult('name_substring:A', ['A']);
+ assertQueryResult('name_substring:B', ['B', 'B2']);
+ });
+
+ it('should parse and query', () => {
+ assertQueryResult('name:A and name:B', []);
+ assertQueryResult('name:A and name:B and name:C', []);
+ });
+
+ it('should parse or query', () => {
+ assertQueryResult('name:A or name:B', ['A', 'B']);
+ assertQueryResult('name:A or name:B or name:C', ['A', 'B', 'C']);
+ assertQueryResult('(name:A or name:B) and (name:B or name:C)', ['B']);
+ });
+
+ it('should parse upstream plus query', () => {
+ assertQueryResult('+name:A', ['A']);
+ assertQueryResult('+name:B', ['A', 'B']);
+ assertQueryResult('+name:C', ['B', 'B2', 'C']);
+ assertQueryResult('++name:C', ['A', 'B', 'B2', 'C']);
+ });
+
+ it('should parse downstream plus query', () => {
+ assertQueryResult('name:A+', ['A', 'B', 'B2']);
+ assertQueryResult('name:A++', ['A', 'B', 'B2', 'C']);
+ assertQueryResult('name:C+', ['C']);
+ assertQueryResult('name:B+', ['B', 'C']);
+ });
+
+ it('should parse upstream star query', () => {
+ assertQueryResult('*name:A', ['A']);
+ assertQueryResult('*name:B', ['A', 'B']);
+ assertQueryResult('*name:C', ['A', 'B', 'B2', 'C']);
+ });
+
+ it('should parse downstream star query', () => {
+ assertQueryResult('name:A*', ['A', 'B', 'B2', 'C']);
+ assertQueryResult('name:B*', ['B', 'C']);
+ assertQueryResult('name:C*', ['C']);
+ });
+
+ it('should parse up and down traversal queries', () => {
+ assertQueryResult('name:A* and *name:C', ['A', 'B', 'B2', 'C']);
+ assertQueryResult('*name:B*', ['A', 'B', 'C']);
+ assertQueryResult('name:A* and *name:C and *name:B*', ['A', 'B', 'C']);
+ assertQueryResult('name:A* and *name:B* and *name:C', ['A', 'B', 'C']);
+ });
+ });
+});
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.interp b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.interp
new file mode 100644
index 0000000000000..88357962b7070
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.interp
@@ -0,0 +1,43 @@
+token literal names:
+null
+'and'
+'or'
+'not'
+'*'
+'+'
+':'
+'('
+')'
+'name'
+'name_substring'
+null
+null
+null
+
+token symbolic names:
+null
+AND
+OR
+NOT
+STAR
+PLUS
+COLON
+LPAREN
+RPAREN
+NAME
+NAME_SUBSTRING
+QUOTED_STRING
+UNQUOTED_STRING
+WS
+
+rule names:
+start
+expr
+traversalAllowedExpr
+traversal
+attributeExpr
+value
+
+
+atn:
+[3, 51485, 51898, 1421, 44986, 20307, 1543, 60043, 49729, 3, 15, 71, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 33, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 41, 10, 3, 12, 3, 14, 3, 44, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 51, 10, 4, 3, 5, 3, 5, 6, 5, 55, 10, 5, 13, 5, 14, 5, 56, 5, 5, 59, 10, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 5, 6, 67, 10, 6, 3, 7, 3, 7, 3, 7, 2, 2, 3, 4, 8, 2, 2, 4, 2, 6, 2, 8, 2, 10, 2, 12, 2, 2, 3, 3, 2, 13, 14, 2, 75, 2, 14, 3, 2, 2, 2, 4, 32, 3, 2, 2, 2, 6, 50, 3, 2, 2, 2, 8, 58, 3, 2, 2, 2, 10, 66, 3, 2, 2, 2, 12, 68, 3, 2, 2, 2, 14, 15, 5, 4, 3, 2, 15, 16, 7, 2, 2, 3, 16, 3, 3, 2, 2, 2, 17, 18, 8, 3, 1, 2, 18, 33, 5, 6, 4, 2, 19, 20, 5, 8, 5, 2, 20, 21, 5, 6, 4, 2, 21, 22, 5, 8, 5, 2, 22, 33, 3, 2, 2, 2, 23, 24, 5, 8, 5, 2, 24, 25, 5, 6, 4, 2, 25, 33, 3, 2, 2, 2, 26, 27, 5, 6, 4, 2, 27, 28, 5, 8, 5, 2, 28, 33, 3, 2, 2, 2, 29, 30, 7, 5, 2, 2, 30, 33, 5, 4, 3, 6, 31, 33, 7, 6, 2, 2, 32, 17, 3, 2, 2, 2, 32, 19, 3, 2, 2, 2, 32, 23, 3, 2, 2, 2, 32, 26, 3, 2, 2, 2, 32, 29, 3, 2, 2, 2, 32, 31, 3, 2, 2, 2, 33, 42, 3, 2, 2, 2, 34, 35, 12, 5, 2, 2, 35, 36, 7, 3, 2, 2, 36, 41, 5, 4, 3, 6, 37, 38, 12, 4, 2, 2, 38, 39, 7, 4, 2, 2, 39, 41, 5, 4, 3, 5, 40, 34, 3, 2, 2, 2, 40, 37, 3, 2, 2, 2, 41, 44, 3, 2, 2, 2, 42, 40, 3, 2, 2, 2, 42, 43, 3, 2, 2, 2, 43, 5, 3, 2, 2, 2, 44, 42, 3, 2, 2, 2, 45, 51, 5, 10, 6, 2, 46, 47, 7, 9, 2, 2, 47, 48, 5, 4, 3, 2, 48, 49, 7, 10, 2, 2, 49, 51, 3, 2, 2, 2, 50, 45, 3, 2, 2, 2, 50, 46, 3, 2, 2, 2, 51, 7, 3, 2, 2, 2, 52, 59, 7, 6, 2, 2, 53, 55, 7, 7, 2, 2, 54, 53, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 54, 3, 2, 2, 2, 56, 57, 3, 2, 2, 2, 57, 59, 3, 2, 2, 2, 58, 52, 3, 2, 2, 2, 58, 54, 3, 2, 2, 2, 59, 9, 3, 2, 2, 2, 60, 61, 7, 11, 2, 2, 61, 62, 7, 8, 2, 2, 62, 67, 5, 12, 7, 2, 63, 64, 7, 12, 2, 2, 64, 65, 7, 8, 2, 2, 65, 67, 5, 12, 7, 2, 66, 60, 3, 2, 2, 2, 66, 63, 3, 2, 2, 2, 67, 11, 3, 2, 2, 2, 68, 69, 9, 2, 2, 2, 69, 13, 3, 2, 2, 2, 9, 32, 40, 42, 50, 56, 58, 66]
\ No newline at end of file
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.tokens b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.tokens
new file mode 100644
index 0000000000000..34097166bd470
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelection.tokens
@@ -0,0 +1,23 @@
+AND=1
+OR=2
+NOT=3
+STAR=4
+PLUS=5
+COLON=6
+LPAREN=7
+RPAREN=8
+NAME=9
+NAME_SUBSTRING=10
+QUOTED_STRING=11
+UNQUOTED_STRING=12
+WS=13
+'and'=1
+'or'=2
+'not'=3
+'*'=4
+'+'=5
+':'=6
+'('=7
+')'=8
+'name'=9
+'name_substring'=10
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.interp b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.interp
new file mode 100644
index 0000000000000..8828f9b625b8b
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.interp
@@ -0,0 +1,56 @@
+token literal names:
+null
+'and'
+'or'
+'not'
+'*'
+'+'
+':'
+'('
+')'
+'name'
+'name_substring'
+null
+null
+null
+
+token symbolic names:
+null
+AND
+OR
+NOT
+STAR
+PLUS
+COLON
+LPAREN
+RPAREN
+NAME
+NAME_SUBSTRING
+QUOTED_STRING
+UNQUOTED_STRING
+WS
+
+rule names:
+AND
+OR
+NOT
+STAR
+PLUS
+COLON
+LPAREN
+RPAREN
+NAME
+NAME_SUBSTRING
+QUOTED_STRING
+UNQUOTED_STRING
+WS
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[3, 51485, 51898, 1421, 44986, 20307, 1543, 60043, 49729, 2, 15, 93, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 11, 3, 12, 3, 12, 7, 12, 73, 10, 12, 12, 12, 14, 12, 76, 11, 12, 3, 12, 3, 12, 3, 13, 3, 13, 7, 13, 82, 10, 13, 12, 13, 14, 13, 85, 11, 13, 3, 14, 6, 14, 88, 10, 14, 13, 14, 14, 14, 89, 3, 14, 3, 14, 2, 2, 2, 15, 3, 2, 3, 5, 2, 4, 7, 2, 5, 9, 2, 6, 11, 2, 7, 13, 2, 8, 15, 2, 9, 17, 2, 10, 19, 2, 11, 21, 2, 12, 23, 2, 13, 25, 2, 14, 27, 2, 15, 3, 2, 6, 6, 2, 12, 12, 15, 15, 36, 36, 94, 94, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 11, 12, 15, 15, 34, 34, 2, 95, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 3, 29, 3, 2, 2, 2, 5, 33, 3, 2, 2, 2, 7, 36, 3, 2, 2, 2, 9, 40, 3, 2, 2, 2, 11, 42, 3, 2, 2, 2, 13, 44, 3, 2, 2, 2, 15, 46, 3, 2, 2, 2, 17, 48, 3, 2, 2, 2, 19, 50, 3, 2, 2, 2, 21, 55, 3, 2, 2, 2, 23, 70, 3, 2, 2, 2, 25, 79, 3, 2, 2, 2, 27, 87, 3, 2, 2, 2, 29, 30, 7, 99, 2, 2, 30, 31, 7, 112, 2, 2, 31, 32, 7, 102, 2, 2, 32, 4, 3, 2, 2, 2, 33, 34, 7, 113, 2, 2, 34, 35, 7, 116, 2, 2, 35, 6, 3, 2, 2, 2, 36, 37, 7, 112, 2, 2, 37, 38, 7, 113, 2, 2, 38, 39, 7, 118, 2, 2, 39, 8, 3, 2, 2, 2, 40, 41, 7, 44, 2, 2, 41, 10, 3, 2, 2, 2, 42, 43, 7, 45, 2, 2, 43, 12, 3, 2, 2, 2, 44, 45, 7, 60, 2, 2, 45, 14, 3, 2, 2, 2, 46, 47, 7, 42, 2, 2, 47, 16, 3, 2, 2, 2, 48, 49, 7, 43, 2, 2, 49, 18, 3, 2, 2, 2, 50, 51, 7, 112, 2, 2, 51, 52, 7, 99, 2, 2, 52, 53, 7, 111, 2, 2, 53, 54, 7, 103, 2, 2, 54, 20, 3, 2, 2, 2, 55, 56, 7, 112, 2, 2, 56, 57, 7, 99, 2, 2, 57, 58, 7, 111, 2, 2, 58, 59, 7, 103, 2, 2, 59, 60, 7, 97, 2, 2, 60, 61, 7, 117, 2, 2, 61, 62, 7, 119, 2, 2, 62, 63, 7, 100, 2, 2, 63, 64, 7, 117, 2, 2, 64, 65, 7, 118, 2, 2, 65, 66, 7, 116, 2, 2, 66, 67, 7, 107, 2, 2, 67, 68, 7, 112, 2, 2, 68, 69, 7, 105, 2, 2, 69, 22, 3, 2, 2, 2, 70, 74, 7, 36, 2, 2, 71, 73, 10, 2, 2, 2, 72, 71, 3, 2, 2, 2, 73, 76, 3, 2, 2, 2, 74, 72, 3, 2, 2, 2, 74, 75, 3, 2, 2, 2, 75, 77, 3, 2, 2, 2, 76, 74, 3, 2, 2, 2, 77, 78, 7, 36, 2, 2, 78, 24, 3, 2, 2, 2, 79, 83, 9, 3, 2, 2, 80, 82, 9, 4, 2, 2, 81, 80, 3, 2, 2, 2, 82, 85, 3, 2, 2, 2, 83, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 26, 3, 2, 2, 2, 85, 83, 3, 2, 2, 2, 86, 88, 9, 5, 2, 2, 87, 86, 3, 2, 2, 2, 88, 89, 3, 2, 2, 2, 89, 87, 3, 2, 2, 2, 89, 90, 3, 2, 2, 2, 90, 91, 3, 2, 2, 2, 91, 92, 8, 14, 2, 2, 92, 28, 3, 2, 2, 2, 6, 2, 74, 83, 89, 3, 8, 2, 2]
\ No newline at end of file
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.tokens b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.tokens
new file mode 100644
index 0000000000000..34097166bd470
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.tokens
@@ -0,0 +1,23 @@
+AND=1
+OR=2
+NOT=3
+STAR=4
+PLUS=5
+COLON=6
+LPAREN=7
+RPAREN=8
+NAME=9
+NAME_SUBSTRING=10
+QUOTED_STRING=11
+UNQUOTED_STRING=12
+WS=13
+'and'=1
+'or'=2
+'not'=3
+'*'=4
+'+'=5
+':'=6
+'('=7
+')'=8
+'name'=9
+'name_substring'=10
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.ts
new file mode 100644
index 0000000000000..3a01b871699a8
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionLexer.ts
@@ -0,0 +1,170 @@
+// Generated from /Users/briantu/repos/dagster/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4 by ANTLR 4.9.0-SNAPSHOT
+
+import {CharStream} from 'antlr4ts/CharStream';
+import {Lexer} from 'antlr4ts/Lexer';
+import {Vocabulary} from 'antlr4ts/Vocabulary';
+import {VocabularyImpl} from 'antlr4ts/VocabularyImpl';
+import {ATN} from 'antlr4ts/atn/ATN';
+import {ATNDeserializer} from 'antlr4ts/atn/ATNDeserializer';
+import {LexerATNSimulator} from 'antlr4ts/atn/LexerATNSimulator';
+import * as Utils from 'antlr4ts/misc/Utils';
+
+export class OpSelectionLexer extends Lexer {
+ public static readonly AND = 1;
+ public static readonly OR = 2;
+ public static readonly NOT = 3;
+ public static readonly STAR = 4;
+ public static readonly PLUS = 5;
+ public static readonly COLON = 6;
+ public static readonly LPAREN = 7;
+ public static readonly RPAREN = 8;
+ public static readonly NAME = 9;
+ public static readonly NAME_SUBSTRING = 10;
+ public static readonly QUOTED_STRING = 11;
+ public static readonly UNQUOTED_STRING = 12;
+ public static readonly WS = 13;
+
+ // tslint:disable:no-trailing-whitespace
+ public static readonly channelNames: string[] = ['DEFAULT_TOKEN_CHANNEL', 'HIDDEN'];
+
+ // tslint:disable:no-trailing-whitespace
+ public static readonly modeNames: string[] = ['DEFAULT_MODE'];
+
+ public static readonly ruleNames: string[] = [
+ 'AND',
+ 'OR',
+ 'NOT',
+ 'STAR',
+ 'PLUS',
+ 'COLON',
+ 'LPAREN',
+ 'RPAREN',
+ 'NAME',
+ 'NAME_SUBSTRING',
+ 'QUOTED_STRING',
+ 'UNQUOTED_STRING',
+ 'WS',
+ ];
+
+ private static readonly _LITERAL_NAMES: Array<string | undefined> = [
+ undefined,
+ "'and'",
+ "'or'",
+ "'not'",
+ "'*'",
+ "'+'",
+ "':'",
+ "'('",
+ "')'",
+ "'name'",
+ "'name_substring'",
+ ];
+ private static readonly _SYMBOLIC_NAMES: Array<string | undefined> = [
+ undefined,
+ 'AND',
+ 'OR',
+ 'NOT',
+ 'STAR',
+ 'PLUS',
+ 'COLON',
+ 'LPAREN',
+ 'RPAREN',
+ 'NAME',
+ 'NAME_SUBSTRING',
+ 'QUOTED_STRING',
+ 'UNQUOTED_STRING',
+ 'WS',
+ ];
+ public static readonly VOCABULARY: Vocabulary = new VocabularyImpl(
+ OpSelectionLexer._LITERAL_NAMES,
+ OpSelectionLexer._SYMBOLIC_NAMES,
+ [],
+ );
+
+ // @Override
+ // @NotNull
+ public get vocabulary(): Vocabulary {
+ return OpSelectionLexer.VOCABULARY;
+ }
+ // tslint:enable:no-trailing-whitespace
+
+ constructor(input: CharStream) {
+ super(input);
+ this._interp = new LexerATNSimulator(OpSelectionLexer._ATN, this);
+ }
+
+ // @Override
+ public get grammarFileName(): string {
+ return 'OpSelection.g4';
+ }
+
+ // @Override
+ public get ruleNames(): string[] {
+ return OpSelectionLexer.ruleNames;
+ }
+
+ // @Override
+ public get serializedATN(): string {
+ return OpSelectionLexer._serializedATN;
+ }
+
+ // @Override
+ public get channelNames(): string[] {
+ return OpSelectionLexer.channelNames;
+ }
+
+ // @Override
+ public get modeNames(): string[] {
+ return OpSelectionLexer.modeNames;
+ }
+
+ public static readonly _serializedATN: string =
+ '\x03\uC91D\uCABA\u058D\uAFBA\u4F53\u0607\uEA8B\uC241\x02\x0F]\b\x01\x04' +
+ '\x02\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04' +
+ '\x07\t\x07\x04\b\t\b\x04\t\t\t\x04\n\t\n\x04\v\t\v\x04\f\t\f\x04\r\t\r' +
+ '\x04\x0E\t\x0E\x03\x02\x03\x02\x03\x02\x03\x02\x03\x03\x03\x03\x03\x03' +
+ '\x03\x04\x03\x04\x03\x04\x03\x04\x03\x05\x03\x05\x03\x06\x03\x06\x03\x07' +
+ '\x03\x07\x03\b\x03\b\x03\t\x03\t\x03\n\x03\n\x03\n\x03\n\x03\n\x03\v\x03' +
+ '\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03\v\x03' +
+ '\v\x03\v\x03\f\x03\f\x07\fI\n\f\f\f\x0E\fL\v\f\x03\f\x03\f\x03\r\x03\r' +
+ '\x07\rR\n\r\f\r\x0E\rU\v\r\x03\x0E\x06\x0EX\n\x0E\r\x0E\x0E\x0EY\x03\x0E' +
+ '\x03\x0E\x02\x02\x02\x0F\x03\x02\x03\x05\x02\x04\x07\x02\x05\t\x02\x06' +
+ '\v\x02\x07\r\x02\b\x0F\x02\t\x11\x02\n\x13\x02\v\x15\x02\f\x17\x02\r\x19' +
+ '\x02\x0E\x1B\x02\x0F\x03\x02\x06\x06\x02\f\f\x0F\x0F$$^^\x05\x02C\\aa' +
+ 'c|\x06\x022;C\\aac|\x05\x02\v\f\x0F\x0F""\x02_\x02\x03\x03\x02\x02\x02' +
+ '\x02\x05\x03\x02\x02\x02\x02\x07\x03\x02\x02\x02\x02\t\x03\x02\x02\x02' +
+ '\x02\v\x03\x02\x02\x02\x02\r\x03\x02\x02\x02\x02\x0F\x03\x02\x02\x02\x02' +
+ '\x11\x03\x02\x02\x02\x02\x13\x03\x02\x02\x02\x02\x15\x03\x02\x02\x02\x02' +
+ '\x17\x03\x02\x02\x02\x02\x19\x03\x02\x02\x02\x02\x1B\x03\x02\x02\x02\x03' +
+ '\x1D\x03\x02\x02\x02\x05!\x03\x02\x02\x02\x07$\x03\x02\x02\x02\t(\x03' +
+ '\x02\x02\x02\v*\x03\x02\x02\x02\r,\x03\x02\x02\x02\x0F.\x03\x02\x02\x02' +
+ '\x110\x03\x02\x02\x02\x132\x03\x02\x02\x02\x157\x03\x02\x02\x02\x17F\x03' +
+ '\x02\x02\x02\x19O\x03\x02\x02\x02\x1BW\x03\x02\x02\x02\x1D\x1E\x07c\x02' +
+ '\x02\x1E\x1F\x07p\x02\x02\x1F \x07f\x02\x02 \x04\x03\x02\x02\x02!"\x07' +
+ 'q\x02\x02"#\x07t\x02\x02#\x06\x03\x02\x02\x02$%\x07p\x02\x02%&\x07q\x02' +
+ "\x02&'\x07v\x02\x02'\b\x03\x02\x02\x02()\x07,\x02\x02)\n\x03\x02\x02" +
+ '\x02*+\x07-\x02\x02+\f\x03\x02\x02\x02,-\x07<\x02\x02-\x0E\x03\x02\x02' +
+ '\x02./\x07*\x02\x02/\x10\x03\x02\x02\x0201\x07+\x02\x021\x12\x03\x02\x02' +
+ '\x0223\x07p\x02\x0234\x07c\x02\x0245\x07o\x02\x0256\x07g\x02\x026\x14' +
+ '\x03\x02\x02\x0278\x07p\x02\x0289\x07c\x02\x029:\x07o\x02\x02:;\x07g\x02' +
+ '\x02;<\x07a\x02\x02<=\x07u\x02\x02=>\x07w\x02\x02>?\x07d\x02\x02?@\x07' +
+ 'u\x02\x02@A\x07v\x02\x02AB\x07t\x02\x02BC\x07k\x02\x02CD\x07p\x02\x02' +
+ 'DE\x07i\x02\x02E\x16\x03\x02\x02\x02FJ\x07$\x02\x02GI\n\x02\x02\x02HG' +
+ '\x03\x02\x02\x02IL\x03\x02\x02\x02JH\x03\x02\x02\x02JK\x03\x02\x02\x02' +
+ 'KM\x03\x02\x02\x02LJ\x03\x02\x02\x02MN\x07$\x02\x02N\x18\x03\x02\x02\x02' +
+ 'OS\t\x03\x02\x02PR\t\x04\x02\x02QP\x03\x02\x02\x02RU\x03\x02\x02\x02S' +
+ 'Q\x03\x02\x02\x02ST\x03\x02\x02\x02T\x1A\x03\x02\x02\x02US\x03\x02\x02' +
+ '\x02VX\t\x05\x02\x02WV\x03\x02\x02\x02XY\x03\x02\x02\x02YW\x03\x02\x02' +
+ '\x02YZ\x03\x02\x02\x02Z[\x03\x02\x02\x02[\\\b\x0E\x02\x02\\\x1C\x03\x02' +
+ '\x02\x02\x06\x02JSY\x03\b\x02\x02';
+ public static __ATN: ATN;
+ public static get _ATN(): ATN {
+ if (!OpSelectionLexer.__ATN) {
+ OpSelectionLexer.__ATN = new ATNDeserializer().deserialize(
+ Utils.toCharArray(OpSelectionLexer._serializedATN),
+ );
+ }
+
+ return OpSelectionLexer.__ATN;
+ }
+}
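The lexer can also be driven on its own when debugging how a selection string tokenizes. A hedged sketch against the antlr4ts 4.9 runtime used above (not part of this PR):

```ts
import {CharStreams, Token} from 'antlr4ts';

import {OpSelectionLexer} from './OpSelectionLexer';

// Print each token's symbolic name and text until EOF.
const lexer = new OpSelectionLexer(CharStreams.fromString('name_substring:comp+'));
for (let t = lexer.nextToken(); t.type !== Token.EOF; t = lexer.nextToken()) {
  console.log(OpSelectionLexer.VOCABULARY.getSymbolicName(t.type), t.text);
}
// -> NAME_SUBSTRING, COLON, UNQUOTED_STRING ("comp"), PLUS
```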
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionListener.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionListener.ts
new file mode 100644
index 0000000000000..5c13bdbb60747
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionListener.ts
@@ -0,0 +1,252 @@
+// Generated from /Users/briantu/repos/dagster/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4 by ANTLR 4.9.0-SNAPSHOT
+
+import {ParseTreeListener} from 'antlr4ts/tree/ParseTreeListener';
+
+import {
+ AllExpressionContext,
+ AndExpressionContext,
+ AttributeExprContext,
+ AttributeExpressionContext,
+ DownTraversalExpressionContext,
+ ExprContext,
+ NameExprContext,
+ NameSubstringExprContext,
+ NotExpressionContext,
+ OrExpressionContext,
+ ParenthesizedExpressionContext,
+ StartContext,
+ TraversalAllowedExprContext,
+ TraversalAllowedExpressionContext,
+ TraversalContext,
+ UpAndDownTraversalExpressionContext,
+ UpTraversalExpressionContext,
+ ValueContext,
+} from './OpSelectionParser';
+
+/**
+ * This interface defines a complete listener for a parse tree produced by
+ * `OpSelectionParser`.
+ */
+export interface OpSelectionListener extends ParseTreeListener {
+ /**
+ * Enter a parse tree produced by the `TraversalAllowedExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterTraversalAllowedExpression?: (ctx: TraversalAllowedExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `TraversalAllowedExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitTraversalAllowedExpression?: (ctx: TraversalAllowedExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `UpAndDownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterUpAndDownTraversalExpression?: (ctx: UpAndDownTraversalExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `UpAndDownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitUpAndDownTraversalExpression?: (ctx: UpAndDownTraversalExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `UpTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterUpTraversalExpression?: (ctx: UpTraversalExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `UpTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitUpTraversalExpression?: (ctx: UpTraversalExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `DownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterDownTraversalExpression?: (ctx: DownTraversalExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `DownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitDownTraversalExpression?: (ctx: DownTraversalExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `NotExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterNotExpression?: (ctx: NotExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `NotExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitNotExpression?: (ctx: NotExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `AndExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterAndExpression?: (ctx: AndExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `AndExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitAndExpression?: (ctx: AndExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `OrExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterOrExpression?: (ctx: OrExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `OrExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitOrExpression?: (ctx: OrExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `AllExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterAllExpression?: (ctx: AllExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `AllExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitAllExpression?: (ctx: AllExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `NameExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ enterNameExpr?: (ctx: NameExprContext) => void;
+ /**
+ * Exit a parse tree produced by the `NameExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ exitNameExpr?: (ctx: NameExprContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `NameSubstringExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ enterNameSubstringExpr?: (ctx: NameSubstringExprContext) => void;
+ /**
+ * Exit a parse tree produced by the `NameSubstringExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ exitNameSubstringExpr?: (ctx: NameSubstringExprContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `AttributeExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ enterAttributeExpression?: (ctx: AttributeExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `AttributeExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ exitAttributeExpression?: (ctx: AttributeExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by the `ParenthesizedExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ enterParenthesizedExpression?: (ctx: ParenthesizedExpressionContext) => void;
+ /**
+ * Exit a parse tree produced by the `ParenthesizedExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ exitParenthesizedExpression?: (ctx: ParenthesizedExpressionContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.start`.
+ * @param ctx the parse tree
+ */
+ enterStart?: (ctx: StartContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.start`.
+ * @param ctx the parse tree
+ */
+ exitStart?: (ctx: StartContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ enterExpr?: (ctx: ExprContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ */
+ exitExpr?: (ctx: ExprContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ enterTraversalAllowedExpr?: (ctx: TraversalAllowedExprContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ */
+ exitTraversalAllowedExpr?: (ctx: TraversalAllowedExprContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.traversal`.
+ * @param ctx the parse tree
+ */
+ enterTraversal?: (ctx: TraversalContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.traversal`.
+ * @param ctx the parse tree
+ */
+ exitTraversal?: (ctx: TraversalContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ enterAttributeExpr?: (ctx: AttributeExprContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ */
+ exitAttributeExpr?: (ctx: AttributeExprContext) => void;
+
+ /**
+ * Enter a parse tree produced by `OpSelectionParser.value`.
+ * @param ctx the parse tree
+ */
+ enterValue?: (ctx: ValueContext) => void;
+ /**
+ * Exit a parse tree produced by `OpSelectionParser.value`.
+ * @param ctx the parse tree
+ */
+ exitValue?: (ctx: ValueContext) => void;
+}
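Every callback on this interface is optional, so a consumer can implement only the rules it cares about and walk the tree with the runtime's `ParseTreeWalker`. A sketch under that assumption (the collector class and query are illustrative):

```ts
import {CharStreams, CommonTokenStream} from 'antlr4ts';
import {ParseTreeWalker} from 'antlr4ts/tree/ParseTreeWalker';

import {OpSelectionLexer} from './OpSelectionLexer';
import {OpSelectionListener} from './OpSelectionListener';
import {NameExprContext, OpSelectionParser} from './OpSelectionParser';

// Collects the value of every `name:` clause in a selection.
class NameCollector implements OpSelectionListener {
  readonly names: string[] = [];
  enterNameExpr = (ctx: NameExprContext) => {
    this.names.push(ctx.value().text);
  };
}

const lexer = new OpSelectionLexer(CharStreams.fromString('name:A* and *name:C'));
const tree = new OpSelectionParser(new CommonTokenStream(lexer)).start();
const collector = new NameCollector();
ParseTreeWalker.DEFAULT.walk(collector, tree);
// collector.names -> ['A', 'C']
```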
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionParser.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionParser.ts
new file mode 100644
index 0000000000000..a9c1c99063b14
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionParser.ts
@@ -0,0 +1,1115 @@
+// Generated from /Users/briantu/repos/dagster/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4 by ANTLR 4.9.0-SNAPSHOT
+
+import {FailedPredicateException} from 'antlr4ts/FailedPredicateException';
+import {NoViableAltException} from 'antlr4ts/NoViableAltException';
+import {Parser} from 'antlr4ts/Parser';
+import {ParserRuleContext} from 'antlr4ts/ParserRuleContext';
+import {RecognitionException} from 'antlr4ts/RecognitionException';
+import {RuleContext} from 'antlr4ts/RuleContext';
+//import { RuleVersion } from "antlr4ts/RuleVersion";
+import {Token} from 'antlr4ts/Token';
+import {TokenStream} from 'antlr4ts/TokenStream';
+import {Vocabulary} from 'antlr4ts/Vocabulary';
+import {VocabularyImpl} from 'antlr4ts/VocabularyImpl';
+import {ATN} from 'antlr4ts/atn/ATN';
+import {ATNDeserializer} from 'antlr4ts/atn/ATNDeserializer';
+import {ParserATNSimulator} from 'antlr4ts/atn/ParserATNSimulator';
+import * as Utils from 'antlr4ts/misc/Utils';
+import {TerminalNode} from 'antlr4ts/tree/TerminalNode';
+
+import {OpSelectionListener} from './OpSelectionListener';
+import {OpSelectionVisitor} from './OpSelectionVisitor';
+
+export class OpSelectionParser extends Parser {
+ public static readonly AND = 1;
+ public static readonly OR = 2;
+ public static readonly NOT = 3;
+ public static readonly STAR = 4;
+ public static readonly PLUS = 5;
+ public static readonly COLON = 6;
+ public static readonly LPAREN = 7;
+ public static readonly RPAREN = 8;
+ public static readonly NAME = 9;
+ public static readonly NAME_SUBSTRING = 10;
+ public static readonly QUOTED_STRING = 11;
+ public static readonly UNQUOTED_STRING = 12;
+ public static readonly WS = 13;
+ public static readonly RULE_start = 0;
+ public static readonly RULE_expr = 1;
+ public static readonly RULE_traversalAllowedExpr = 2;
+ public static readonly RULE_traversal = 3;
+ public static readonly RULE_attributeExpr = 4;
+ public static readonly RULE_value = 5;
+ // tslint:disable:no-trailing-whitespace
+ public static readonly ruleNames: string[] = [
+ 'start',
+ 'expr',
+ 'traversalAllowedExpr',
+ 'traversal',
+ 'attributeExpr',
+ 'value',
+ ];
+
+ private static readonly _LITERAL_NAMES: Array<string | undefined> = [
+ undefined,
+ "'and'",
+ "'or'",
+ "'not'",
+ "'*'",
+ "'+'",
+ "':'",
+ "'('",
+ "')'",
+ "'name'",
+ "'name_substring'",
+ ];
+ private static readonly _SYMBOLIC_NAMES: Array<string | undefined> = [
+ undefined,
+ 'AND',
+ 'OR',
+ 'NOT',
+ 'STAR',
+ 'PLUS',
+ 'COLON',
+ 'LPAREN',
+ 'RPAREN',
+ 'NAME',
+ 'NAME_SUBSTRING',
+ 'QUOTED_STRING',
+ 'UNQUOTED_STRING',
+ 'WS',
+ ];
+ public static readonly VOCABULARY: Vocabulary = new VocabularyImpl(
+ OpSelectionParser._LITERAL_NAMES,
+ OpSelectionParser._SYMBOLIC_NAMES,
+ [],
+ );
+
+ // @Override
+ // @NotNull
+ public get vocabulary(): Vocabulary {
+ return OpSelectionParser.VOCABULARY;
+ }
+ // tslint:enable:no-trailing-whitespace
+
+ // @Override
+ public get grammarFileName(): string {
+ return 'OpSelection.g4';
+ }
+
+ // @Override
+ public get ruleNames(): string[] {
+ return OpSelectionParser.ruleNames;
+ }
+
+ // @Override
+ public get serializedATN(): string {
+ return OpSelectionParser._serializedATN;
+ }
+
+ protected createFailedPredicateException(
+ predicate?: string,
+ message?: string,
+ ): FailedPredicateException {
+ return new FailedPredicateException(this, predicate, message);
+ }
+
+ constructor(input: TokenStream) {
+ super(input);
+ this._interp = new ParserATNSimulator(OpSelectionParser._ATN, this);
+ }
+ // @RuleVersion(0)
+ public start(): StartContext {
+ const _localctx: StartContext = new StartContext(this._ctx, this.state);
+ this.enterRule(_localctx, 0, OpSelectionParser.RULE_start);
+ try {
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 12;
+ this.expr(0);
+ this.state = 13;
+ this.match(OpSelectionParser.EOF);
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.exitRule();
+ }
+ return _localctx;
+ }
+
+ public expr(): ExprContext;
+ public expr(_p: number): ExprContext;
+ // @RuleVersion(0)
+ public expr(_p?: number): ExprContext {
+ if (_p === undefined) {
+ _p = 0;
+ }
+
+ const _parentctx: ParserRuleContext = this._ctx;
+ const _parentState: number = this.state;
+ let _localctx: ExprContext = new ExprContext(this._ctx, _parentState);
+ let _prevctx: ExprContext = _localctx;
+ const _startState: number = 2;
+ this.enterRecursionRule(_localctx, 2, OpSelectionParser.RULE_expr, _p);
+ try {
+ let _alt: number;
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 30;
+ this._errHandler.sync(this);
+ switch (this.interpreter.adaptivePredict(this._input, 0, this._ctx)) {
+ case 1:
+ {
+ _localctx = new TraversalAllowedExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+
+ this.state = 16;
+ this.traversalAllowedExpr();
+ }
+ break;
+
+ case 2:
+ {
+ _localctx = new UpAndDownTraversalExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+ this.state = 17;
+ this.traversal();
+ this.state = 18;
+ this.traversalAllowedExpr();
+ this.state = 19;
+ this.traversal();
+ }
+ break;
+
+ case 3:
+ {
+ _localctx = new UpTraversalExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+ this.state = 21;
+ this.traversal();
+ this.state = 22;
+ this.traversalAllowedExpr();
+ }
+ break;
+
+ case 4:
+ {
+ _localctx = new DownTraversalExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+ this.state = 24;
+ this.traversalAllowedExpr();
+ this.state = 25;
+ this.traversal();
+ }
+ break;
+
+ case 5:
+ {
+ _localctx = new NotExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+ this.state = 27;
+ this.match(OpSelectionParser.NOT);
+ this.state = 28;
+ this.expr(4);
+ }
+ break;
+
+ case 6:
+ {
+ _localctx = new AllExpressionContext(_localctx);
+ this._ctx = _localctx;
+ _prevctx = _localctx;
+ this.state = 29;
+ this.match(OpSelectionParser.STAR);
+ }
+ break;
+ }
+ this._ctx._stop = this._input.tryLT(-1);
+ this.state = 40;
+ this._errHandler.sync(this);
+ _alt = this.interpreter.adaptivePredict(this._input, 2, this._ctx);
+ while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
+ if (_alt === 1) {
+ if (this._parseListeners != null) {
+ this.triggerExitRuleEvent();
+ }
+ _prevctx = _localctx;
+ {
+ this.state = 38;
+ this._errHandler.sync(this);
+ switch (this.interpreter.adaptivePredict(this._input, 1, this._ctx)) {
+ case 1:
+ {
+ _localctx = new AndExpressionContext(new ExprContext(_parentctx, _parentState));
+ this.pushNewRecursionContext(
+ _localctx,
+ _startState,
+ OpSelectionParser.RULE_expr,
+ );
+ this.state = 32;
+ if (!this.precpred(this._ctx, 3)) {
+ throw this.createFailedPredicateException('this.precpred(this._ctx, 3)');
+ }
+ this.state = 33;
+ this.match(OpSelectionParser.AND);
+ this.state = 34;
+ this.expr(4);
+ }
+ break;
+
+ case 2:
+ {
+ _localctx = new OrExpressionContext(new ExprContext(_parentctx, _parentState));
+ this.pushNewRecursionContext(
+ _localctx,
+ _startState,
+ OpSelectionParser.RULE_expr,
+ );
+ this.state = 35;
+ if (!this.precpred(this._ctx, 2)) {
+ throw this.createFailedPredicateException('this.precpred(this._ctx, 2)');
+ }
+ this.state = 36;
+ this.match(OpSelectionParser.OR);
+ this.state = 37;
+ this.expr(3);
+ }
+ break;
+ }
+ }
+ }
+ this.state = 42;
+ this._errHandler.sync(this);
+ _alt = this.interpreter.adaptivePredict(this._input, 2, this._ctx);
+ }
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.unrollRecursionContexts(_parentctx);
+ }
+ return _localctx;
+ }
+ // @RuleVersion(0)
+ public traversalAllowedExpr(): TraversalAllowedExprContext {
+ let _localctx: TraversalAllowedExprContext = new TraversalAllowedExprContext(
+ this._ctx,
+ this.state,
+ );
+ this.enterRule(_localctx, 4, OpSelectionParser.RULE_traversalAllowedExpr);
+ try {
+ this.state = 48;
+ this._errHandler.sync(this);
+ switch (this._input.LA(1)) {
+ case OpSelectionParser.NAME:
+ case OpSelectionParser.NAME_SUBSTRING:
+ _localctx = new AttributeExpressionContext(_localctx);
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 43;
+ this.attributeExpr();
+ }
+ break;
+ case OpSelectionParser.LPAREN:
+ _localctx = new ParenthesizedExpressionContext(_localctx);
+ this.enterOuterAlt(_localctx, 2);
+ {
+ this.state = 44;
+ this.match(OpSelectionParser.LPAREN);
+ this.state = 45;
+ this.expr(0);
+ this.state = 46;
+ this.match(OpSelectionParser.RPAREN);
+ }
+ break;
+ default:
+ throw new NoViableAltException(this);
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.exitRule();
+ }
+ return _localctx;
+ }
+ // @RuleVersion(0)
+ public traversal(): TraversalContext {
+ const _localctx: TraversalContext = new TraversalContext(this._ctx, this.state);
+ this.enterRule(_localctx, 6, OpSelectionParser.RULE_traversal);
+ try {
+ let _alt: number;
+ this.state = 56;
+ this._errHandler.sync(this);
+ switch (this._input.LA(1)) {
+ case OpSelectionParser.STAR:
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 50;
+ this.match(OpSelectionParser.STAR);
+ }
+ break;
+ case OpSelectionParser.PLUS:
+ this.enterOuterAlt(_localctx, 2);
+ {
+ this.state = 52;
+ this._errHandler.sync(this);
+ _alt = 1;
+ do {
+ switch (_alt) {
+ case 1:
+ {
+ {
+ this.state = 51;
+ this.match(OpSelectionParser.PLUS);
+ }
+ }
+ break;
+ default:
+ throw new NoViableAltException(this);
+ }
+ this.state = 54;
+ this._errHandler.sync(this);
+ _alt = this.interpreter.adaptivePredict(this._input, 4, this._ctx);
+ } while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER);
+ }
+ break;
+ default:
+ throw new NoViableAltException(this);
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.exitRule();
+ }
+ return _localctx;
+ }
+ // @RuleVersion(0)
+ public attributeExpr(): AttributeExprContext {
+ let _localctx: AttributeExprContext = new AttributeExprContext(this._ctx, this.state);
+ this.enterRule(_localctx, 8, OpSelectionParser.RULE_attributeExpr);
+ try {
+ this.state = 64;
+ this._errHandler.sync(this);
+ switch (this._input.LA(1)) {
+ case OpSelectionParser.NAME:
+ _localctx = new NameExprContext(_localctx);
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 58;
+ this.match(OpSelectionParser.NAME);
+ this.state = 59;
+ this.match(OpSelectionParser.COLON);
+ this.state = 60;
+ this.value();
+ }
+ break;
+ case OpSelectionParser.NAME_SUBSTRING:
+ _localctx = new NameSubstringExprContext(_localctx);
+ this.enterOuterAlt(_localctx, 2);
+ {
+ this.state = 61;
+ this.match(OpSelectionParser.NAME_SUBSTRING);
+ this.state = 62;
+ this.match(OpSelectionParser.COLON);
+ this.state = 63;
+ this.value();
+ }
+ break;
+ default:
+ throw new NoViableAltException(this);
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.exitRule();
+ }
+ return _localctx;
+ }
+ // @RuleVersion(0)
+ public value(): ValueContext {
+ const _localctx: ValueContext = new ValueContext(this._ctx, this.state);
+ this.enterRule(_localctx, 10, OpSelectionParser.RULE_value);
+ let _la: number;
+ try {
+ this.enterOuterAlt(_localctx, 1);
+ {
+ this.state = 66;
+ _la = this._input.LA(1);
+ if (
+ !(_la === OpSelectionParser.QUOTED_STRING || _la === OpSelectionParser.UNQUOTED_STRING)
+ ) {
+ this._errHandler.recoverInline(this);
+ } else {
+ if (this._input.LA(1) === Token.EOF) {
+ this.matchedEOF = true;
+ }
+
+ this._errHandler.reportMatch(this);
+ this.consume();
+ }
+ }
+ } catch (re) {
+ if (re instanceof RecognitionException) {
+ _localctx.exception = re;
+ this._errHandler.reportError(this, re);
+ this._errHandler.recover(this, re);
+ } else {
+ throw re;
+ }
+ } finally {
+ this.exitRule();
+ }
+ return _localctx;
+ }
+
+ public sempred(_localctx: RuleContext, ruleIndex: number, predIndex: number): boolean {
+ switch (ruleIndex) {
+ case 1:
+ return this.expr_sempred(_localctx as ExprContext, predIndex);
+ }
+ return true;
+ }
+ private expr_sempred(_localctx: ExprContext, predIndex: number): boolean {
+ switch (predIndex) {
+ case 0:
+ return this.precpred(this._ctx, 3);
+
+ case 1:
+ return this.precpred(this._ctx, 2);
+ }
+ return true;
+ }
+
+ public static readonly _serializedATN: string =
+ '\x03\uC91D\uCABA\u058D\uAFBA\u4F53\u0607\uEA8B\uC241\x03\x0FG\x04\x02' +
+ '\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04\x07' +
+ '\t\x07\x03\x02\x03\x02\x03\x02\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03' +
+ '\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03' +
+ '\x03\x03\x05\x03!\n\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03\x03' +
+ '\x07\x03)\n\x03\f\x03\x0E\x03,\v\x03\x03\x04\x03\x04\x03\x04\x03\x04\x03' +
+ '\x04\x05\x043\n\x04\x03\x05\x03\x05\x06\x057\n\x05\r\x05\x0E\x058\x05' +
+ '\x05;\n\x05\x03\x06\x03\x06\x03\x06\x03\x06\x03\x06\x03\x06\x05\x06C\n' +
+ '\x06\x03\x07\x03\x07\x03\x07\x02\x02\x03\x04\b\x02\x02\x04\x02\x06\x02' +
+ '\b\x02\n\x02\f\x02\x02\x03\x03\x02\r\x0E\x02K\x02\x0E\x03\x02\x02\x02' +
+ '\x04 \x03\x02\x02\x02\x062\x03\x02\x02\x02\b:\x03\x02\x02\x02\nB\x03\x02' +
+ '\x02\x02\fD\x03\x02\x02\x02\x0E\x0F\x05\x04\x03\x02\x0F\x10\x07\x02\x02' +
+ '\x03\x10\x03\x03\x02\x02\x02\x11\x12\b\x03\x01\x02\x12!\x05\x06\x04\x02' +
+ '\x13\x14\x05\b\x05\x02\x14\x15\x05\x06\x04\x02\x15\x16\x05\b\x05\x02\x16' +
+ '!\x03\x02\x02\x02\x17\x18\x05\b\x05\x02\x18\x19\x05\x06\x04\x02\x19!\x03' +
+ '\x02\x02\x02\x1A\x1B\x05\x06\x04\x02\x1B\x1C\x05\b\x05\x02\x1C!\x03\x02' +
+ '\x02\x02\x1D\x1E\x07\x05\x02\x02\x1E!\x05\x04\x03\x06\x1F!\x07\x06\x02' +
+ '\x02 \x11\x03\x02\x02\x02 \x13\x03\x02\x02\x02 \x17\x03\x02\x02\x02 \x1A' +
+ '\x03\x02\x02\x02 \x1D\x03\x02\x02\x02 \x1F\x03\x02\x02\x02!*\x03\x02\x02' +
+ '\x02"#\f\x05\x02\x02#$\x07\x03\x02\x02$)\x05\x04\x03\x06%&\f\x04\x02' +
+ "\x02&'\x07\x04\x02\x02')\x05\x04\x03\x05(\"\x03\x02\x02\x02(%\x03\x02" +
+ '\x02\x02),\x03\x02\x02\x02*(\x03\x02\x02\x02*+\x03\x02\x02\x02+\x05\x03' +
+ '\x02\x02\x02,*\x03\x02\x02\x02-3\x05\n\x06\x02./\x07\t\x02\x02/0\x05\x04' +
+ '\x03\x0201\x07\n\x02\x0213\x03\x02\x02\x022-\x03\x02\x02\x022.\x03\x02' +
+ '\x02\x023\x07\x03\x02\x02\x024;\x07\x06\x02\x0257\x07\x07\x02\x0265\x03' +
+ '\x02\x02\x0278\x03\x02\x02\x0286\x03\x02\x02\x0289\x03\x02\x02\x029;\x03' +
+ '\x02\x02\x02:4\x03\x02\x02\x02:6\x03\x02\x02\x02;\t\x03\x02\x02\x02<=' +
+ '\x07\v\x02\x02=>\x07\b\x02\x02>C\x05\f\x07\x02?@\x07\f\x02\x02@A\x07\b' +
+ '\x02\x02AC\x05\f\x07\x02B<\x03\x02\x02\x02B?\x03\x02\x02\x02C\v\x03\x02' +
+ '\x02\x02DE\t\x02\x02\x02E\r\x03\x02\x02\x02\t (*28:B';
+ public static __ATN: ATN;
+ public static get _ATN(): ATN {
+ if (!OpSelectionParser.__ATN) {
+ OpSelectionParser.__ATN = new ATNDeserializer().deserialize(
+ Utils.toCharArray(OpSelectionParser._serializedATN),
+ );
+ }
+
+ return OpSelectionParser.__ATN;
+ }
+}
+
+export class StartContext extends ParserRuleContext {
+ public expr(): ExprContext {
+ return this.getRuleContext(0, ExprContext);
+ }
+ public EOF(): TerminalNode {
+ return this.getToken(OpSelectionParser.EOF, 0);
+ }
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_start;
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterStart) {
+ listener.enterStart(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitStart) {
+ listener.exitStart(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitStart) {
+ return visitor.visitStart(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+
+export class ExprContext extends ParserRuleContext {
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_expr;
+ }
+ public copyFrom(ctx: ExprContext): void {
+ super.copyFrom(ctx);
+ }
+}
+export class TraversalAllowedExpressionContext extends ExprContext {
+ public traversalAllowedExpr(): TraversalAllowedExprContext {
+ return this.getRuleContext(0, TraversalAllowedExprContext);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterTraversalAllowedExpression) {
+ listener.enterTraversalAllowedExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitTraversalAllowedExpression) {
+ listener.exitTraversalAllowedExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitTraversalAllowedExpression) {
+ return visitor.visitTraversalAllowedExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class UpAndDownTraversalExpressionContext extends ExprContext {
+ public traversal(): TraversalContext[];
+ public traversal(i: number): TraversalContext;
+ public traversal(i?: number): TraversalContext | TraversalContext[] {
+ if (i === undefined) {
+ return this.getRuleContexts(TraversalContext);
+ } else {
+ return this.getRuleContext(i, TraversalContext);
+ }
+ }
+ public traversalAllowedExpr(): TraversalAllowedExprContext {
+ return this.getRuleContext(0, TraversalAllowedExprContext);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterUpAndDownTraversalExpression) {
+ listener.enterUpAndDownTraversalExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitUpAndDownTraversalExpression) {
+ listener.exitUpAndDownTraversalExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitUpAndDownTraversalExpression) {
+ return visitor.visitUpAndDownTraversalExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class UpTraversalExpressionContext extends ExprContext {
+ public traversal(): TraversalContext {
+ return this.getRuleContext(0, TraversalContext);
+ }
+ public traversalAllowedExpr(): TraversalAllowedExprContext {
+ return this.getRuleContext(0, TraversalAllowedExprContext);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterUpTraversalExpression) {
+ listener.enterUpTraversalExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitUpTraversalExpression) {
+ listener.exitUpTraversalExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitUpTraversalExpression) {
+ return visitor.visitUpTraversalExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class DownTraversalExpressionContext extends ExprContext {
+ public traversalAllowedExpr(): TraversalAllowedExprContext {
+ return this.getRuleContext(0, TraversalAllowedExprContext);
+ }
+ public traversal(): TraversalContext {
+ return this.getRuleContext(0, TraversalContext);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterDownTraversalExpression) {
+ listener.enterDownTraversalExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitDownTraversalExpression) {
+ listener.exitDownTraversalExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitDownTraversalExpression) {
+ return visitor.visitDownTraversalExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class NotExpressionContext extends ExprContext {
+ public NOT(): TerminalNode {
+ return this.getToken(OpSelectionParser.NOT, 0);
+ }
+ public expr(): ExprContext {
+ return this.getRuleContext(0, ExprContext);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterNotExpression) {
+ listener.enterNotExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitNotExpression) {
+ listener.exitNotExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitNotExpression) {
+ return visitor.visitNotExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class AndExpressionContext extends ExprContext {
+ public expr(): ExprContext[];
+ public expr(i: number): ExprContext;
+ public expr(i?: number): ExprContext | ExprContext[] {
+ if (i === undefined) {
+ return this.getRuleContexts(ExprContext);
+ } else {
+ return this.getRuleContext(i, ExprContext);
+ }
+ }
+ public AND(): TerminalNode {
+ return this.getToken(OpSelectionParser.AND, 0);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterAndExpression) {
+ listener.enterAndExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitAndExpression) {
+ listener.exitAndExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitAndExpression) {
+ return visitor.visitAndExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class OrExpressionContext extends ExprContext {
+ public expr(): ExprContext[];
+ public expr(i: number): ExprContext;
+ public expr(i?: number): ExprContext | ExprContext[] {
+ if (i === undefined) {
+ return this.getRuleContexts(ExprContext);
+ } else {
+ return this.getRuleContext(i, ExprContext);
+ }
+ }
+ public OR(): TerminalNode {
+ return this.getToken(OpSelectionParser.OR, 0);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterOrExpression) {
+ listener.enterOrExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitOrExpression) {
+ listener.exitOrExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitOrExpression) {
+ return visitor.visitOrExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class AllExpressionContext extends ExprContext {
+ public STAR(): TerminalNode {
+ return this.getToken(OpSelectionParser.STAR, 0);
+ }
+ constructor(ctx: ExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterAllExpression) {
+ listener.enterAllExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitAllExpression) {
+ listener.exitAllExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitAllExpression) {
+ return visitor.visitAllExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+
+export class TraversalAllowedExprContext extends ParserRuleContext {
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_traversalAllowedExpr;
+ }
+ public copyFrom(ctx: TraversalAllowedExprContext): void {
+ super.copyFrom(ctx);
+ }
+}
+export class AttributeExpressionContext extends TraversalAllowedExprContext {
+ public attributeExpr(): AttributeExprContext {
+ return this.getRuleContext(0, AttributeExprContext);
+ }
+ constructor(ctx: TraversalAllowedExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterAttributeExpression) {
+ listener.enterAttributeExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitAttributeExpression) {
+ listener.exitAttributeExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitAttributeExpression) {
+ return visitor.visitAttributeExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class ParenthesizedExpressionContext extends TraversalAllowedExprContext {
+ public LPAREN(): TerminalNode {
+ return this.getToken(OpSelectionParser.LPAREN, 0);
+ }
+ public expr(): ExprContext {
+ return this.getRuleContext(0, ExprContext);
+ }
+ public RPAREN(): TerminalNode {
+ return this.getToken(OpSelectionParser.RPAREN, 0);
+ }
+ constructor(ctx: TraversalAllowedExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterParenthesizedExpression) {
+ listener.enterParenthesizedExpression(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitParenthesizedExpression) {
+ listener.exitParenthesizedExpression(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitParenthesizedExpression) {
+ return visitor.visitParenthesizedExpression(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+
+export class TraversalContext extends ParserRuleContext {
+ public STAR(): TerminalNode | undefined {
+ return this.tryGetToken(OpSelectionParser.STAR, 0);
+ }
+ public PLUS(): TerminalNode[];
+ public PLUS(i: number): TerminalNode;
+ public PLUS(i?: number): TerminalNode | TerminalNode[] {
+ if (i === undefined) {
+ return this.getTokens(OpSelectionParser.PLUS);
+ } else {
+ return this.getToken(OpSelectionParser.PLUS, i);
+ }
+ }
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_traversal;
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterTraversal) {
+ listener.enterTraversal(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitTraversal) {
+ listener.exitTraversal(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitTraversal) {
+ return visitor.visitTraversal(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+
+export class AttributeExprContext extends ParserRuleContext {
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_attributeExpr;
+ }
+ public copyFrom(ctx: AttributeExprContext): void {
+ super.copyFrom(ctx);
+ }
+}
+export class NameExprContext extends AttributeExprContext {
+ public NAME(): TerminalNode {
+ return this.getToken(OpSelectionParser.NAME, 0);
+ }
+ public COLON(): TerminalNode {
+ return this.getToken(OpSelectionParser.COLON, 0);
+ }
+ public value(): ValueContext {
+ return this.getRuleContext(0, ValueContext);
+ }
+ constructor(ctx: AttributeExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterNameExpr) {
+ listener.enterNameExpr(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitNameExpr) {
+ listener.exitNameExpr(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitNameExpr) {
+ return visitor.visitNameExpr(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+export class NameSubstringExprContext extends AttributeExprContext {
+ public NAME_SUBSTRING(): TerminalNode {
+ return this.getToken(OpSelectionParser.NAME_SUBSTRING, 0);
+ }
+ public COLON(): TerminalNode {
+ return this.getToken(OpSelectionParser.COLON, 0);
+ }
+ public value(): ValueContext {
+ return this.getRuleContext(0, ValueContext);
+ }
+ constructor(ctx: AttributeExprContext) {
+ super(ctx.parent, ctx.invokingState);
+ this.copyFrom(ctx);
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterNameSubstringExpr) {
+ listener.enterNameSubstringExpr(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitNameSubstringExpr) {
+ listener.exitNameSubstringExpr(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitNameSubstringExpr) {
+ return visitor.visitNameSubstringExpr(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
+
+export class ValueContext extends ParserRuleContext {
+ public QUOTED_STRING(): TerminalNode | undefined {
+ return this.tryGetToken(OpSelectionParser.QUOTED_STRING, 0);
+ }
+ public UNQUOTED_STRING(): TerminalNode | undefined {
+ return this.tryGetToken(OpSelectionParser.UNQUOTED_STRING, 0);
+ }
+ constructor(parent: ParserRuleContext | undefined, invokingState: number) {
+ super(parent, invokingState);
+ }
+ // @Override
+ public get ruleIndex(): number {
+ return OpSelectionParser.RULE_value;
+ }
+ // @Override
+ public enterRule(listener: OpSelectionListener): void {
+ if (listener.enterValue) {
+ listener.enterValue(this);
+ }
+ }
+ // @Override
+ public exitRule(listener: OpSelectionListener): void {
+ if (listener.exitValue) {
+ listener.exitValue(this);
+ }
+ }
+ // @Override
+ public accept<Result>(visitor: OpSelectionVisitor<Result>): Result {
+ if (visitor.visitValue) {
+ return visitor.visitValue(this);
+ } else {
+ return visitor.visitChildren(this);
+ }
+ }
+}
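ANTLR's default error strategy recovers and logs to the console; callers that need to reject malformed selections outright can install their own error listener. A hedged sketch against the antlr4ts runtime (the `parseOpSelection` helper is invented for illustration):

```ts
import {CharStreams, CommonTokenStream} from 'antlr4ts';

import {OpSelectionLexer} from './OpSelectionLexer';
import {OpSelectionParser, StartContext} from './OpSelectionParser';

// Hypothetical helper: parse a selection and collect syntax errors
// instead of letting ANTLR print them to the console.
function parseOpSelection(selection: string): {tree: StartContext; errors: string[]} {
  const lexer = new OpSelectionLexer(CharStreams.fromString(selection));
  const parser = new OpSelectionParser(new CommonTokenStream(lexer));
  const errors: string[] = [];
  parser.removeErrorListeners();
  parser.addErrorListener({
    syntaxError: (_recognizer, _offendingSymbol, line, charPositionInLine, msg) => {
      errors.push(`${line}:${charPositionInLine} ${msg}`);
    },
  });
  return {tree: parser.start(), errors};
}
```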
diff --git a/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionVisitor.ts b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionVisitor.ts
new file mode 100644
index 0000000000000..19e9f3b480257
--- /dev/null
+++ b/js_modules/dagster-ui/packages/ui-core/src/op-selection/generated/OpSelectionVisitor.ts
@@ -0,0 +1,171 @@
+// Generated from /Users/briantu/repos/dagster/js_modules/dagster-ui/packages/ui-core/src/op-selection/OpSelection.g4 by ANTLR 4.9.0-SNAPSHOT
+
+import {ParseTreeVisitor} from 'antlr4ts/tree/ParseTreeVisitor';
+
+import {
+ AllExpressionContext,
+ AndExpressionContext,
+ AttributeExprContext,
+ AttributeExpressionContext,
+ DownTraversalExpressionContext,
+ ExprContext,
+ NameExprContext,
+ NameSubstringExprContext,
+ NotExpressionContext,
+ OrExpressionContext,
+ ParenthesizedExpressionContext,
+ StartContext,
+ TraversalAllowedExprContext,
+ TraversalAllowedExpressionContext,
+ TraversalContext,
+ UpAndDownTraversalExpressionContext,
+ UpTraversalExpressionContext,
+ ValueContext,
+} from './OpSelectionParser';
+
+/**
+ * This interface defines a complete generic visitor for a parse tree produced
+ * by `OpSelectionParser`.
+ *
+ * @param <Result> The return type of the visit operation. Use `void` for
+ * operations with no return type.
+ */
+export interface OpSelectionVisitor<Result> extends ParseTreeVisitor<Result> {
+ /**
+ * Visit a parse tree produced by the `TraversalAllowedExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitTraversalAllowedExpression?: (ctx: TraversalAllowedExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `UpAndDownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitUpAndDownTraversalExpression?: (ctx: UpAndDownTraversalExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `UpTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitUpTraversalExpression?: (ctx: UpTraversalExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `DownTraversalExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitDownTraversalExpression?: (ctx: DownTraversalExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `NotExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitNotExpression?: (ctx: NotExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `AndExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitAndExpression?: (ctx: AndExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `OrExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitOrExpression?: (ctx: OrExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `AllExpression`
+ * labeled alternative in `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitAllExpression?: (ctx: AllExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `NameExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitNameExpr?: (ctx: NameExprContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `NameSubstringExpr`
+ * labeled alternative in `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitNameSubstringExpr?: (ctx: NameSubstringExprContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `AttributeExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitAttributeExpression?: (ctx: AttributeExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by the `ParenthesizedExpression`
+ * labeled alternative in `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitParenthesizedExpression?: (ctx: ParenthesizedExpressionContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.start`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitStart?: (ctx: StartContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.expr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitExpr?: (ctx: ExprContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.traversalAllowedExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitTraversalAllowedExpr?: (ctx: TraversalAllowedExprContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.traversal`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitTraversal?: (ctx: TraversalContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.attributeExpr`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitAttributeExpr?: (ctx: AttributeExprContext) => Result;
+
+ /**
+ * Visit a parse tree produced by `OpSelectionParser.value`.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ visitValue?: (ctx: ValueContext) => Result;
+}
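Because every `visit*` method here is optional, one convenient way to satisfy the interface is to extend the runtime's `AbstractParseTreeVisitor`, which supplies `visit`/`visitChildren` and result aggregation. A sketch (illustrative, not part of this PR):

```ts
import {AbstractParseTreeVisitor} from 'antlr4ts/tree/AbstractParseTreeVisitor';

import {NameExprContext} from './OpSelectionParser';
import {OpSelectionVisitor} from './OpSelectionVisitor';

// Counts how many `name:` clauses appear anywhere in a selection tree.
class NameClauseCounter
  extends AbstractParseTreeVisitor<number>
  implements OpSelectionVisitor<number>
{
  protected defaultResult(): number {
    return 0; // result for nodes that contribute nothing
  }
  protected aggregateResult(aggregate: number, nextResult: number): number {
    return aggregate + nextResult; // sum counts across children
  }
  visitNameExpr(_ctx: NameExprContext): number {
    return 1;
  }
}

// Usage: new NameClauseCounter().visit(tree) -> 2 for 'name:A* and *name:C'
```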
diff --git a/js_modules/dagster-ui/packages/ui-core/src/pipelines/Description.tsx b/js_modules/dagster-ui/packages/ui-core/src/pipelines/Description.tsx
index d44e41b7178ed..ba9834a38808c 100644
--- a/js_modules/dagster-ui/packages/ui-core/src/pipelines/Description.tsx
+++ b/js_modules/dagster-ui/packages/ui-core/src/pipelines/Description.tsx
@@ -91,7 +91,7 @@ export const Description = ({maxHeight, description, fontSize}: IDescriptionProp
)}