diff --git a/docs/docs-beta/.vscode/extensions.json b/docs/docs-beta/.vscode/extensions.json
new file mode 100644
index 0000000000000..ca4a4c709abe9
--- /dev/null
+++ b/docs/docs-beta/.vscode/extensions.json
@@ -0,0 +1,9 @@
+{
+ "recommendations": [
+ "dbaeumer.vscode-eslint",
+ "unifiedjs.vscode-mdx",
+ "esbenp.prettier-vscode",
+ "mrmlnc.vscode-remark",
+ "chrischinchilla.vale-vscode"
+ ]
+}
\ No newline at end of file
diff --git a/docs/docs-beta/content-templates/guide-no-steps.md b/docs/docs-beta/content-templates/guide-no-steps.md
index e49ae9405a926..9ffb954e78ccc 100644
--- a/docs/docs-beta/content-templates/guide-no-steps.md
+++ b/docs/docs-beta/content-templates/guide-no-steps.md
@@ -1,40 +1,38 @@
---
-title: ''
-description: ''
+title: 'Title that briefly describes what the guide is for'
+description: 'Description of the guide, useful for SEO and social media links'
---
-# [Title that briefly describes what the guide is for]
+Provide a brief introduction to the how-to guide. View [this article](https://diataxis.fr/how-to-guides/) for more information on how to write effective how-to guides. The intro should be no more than a few sentences.
+The title from the frontmatter is used as the first heading in the guide, so you don't need to repeat it in the intro.
-
+## What you'll learn
-By the end of this guide, you will [a sentence or list about what will be achieved by the end of the guide].
+- A thing you'll learn, ex: "How to generate a token"
+- Another thing you'll learn, ex: "How to install this library"
+- One more thing you'll learn
----
-
-## Prerequisites
-
-
+
+ Prerequisites
To follow the steps in this guide, you'll need:
-- A prerequisite, ex: "Familiarity with Asset definitions"
-- Another prerequisite, ex: "To install library"
+- A prerequisite, ex: "Familiarity with [Asset definitions](/concepts/assets)"
+- Another prerequisite, ex: "To install this library"
- One more
----
+
## Title that describes this section
-
-
----
-## Related
+## Next steps
-[List of links to related content]
+- Add links to related content
+- Go deeper into [Understanding Automation](/concepts/understanding-automation)
+- Explore [Related Example](/)
diff --git a/docs/docs-beta/content-templates/guide-with-steps.md b/docs/docs-beta/content-templates/guide-with-steps.md
index c23598f5092cc..3e96b770524a0 100644
--- a/docs/docs-beta/content-templates/guide-with-steps.md
+++ b/docs/docs-beta/content-templates/guide-with-steps.md
@@ -1,49 +1,45 @@
---
-title: ''
-description: ''
+title: 'Title that briefly describes what the guide is for'
+description: 'Description of the guide that is useful for SEO and social media links'
---
-# [Title that briefly describes what the guide is for]
+Provide a brief introduction to the how-to guide. View [this article](https://diataxis.fr/how-to-guides/) for more information on how to write effective how-to guides. The intro should be no more than a few sentences.
+The title from the frontmatter is used as the first heading in the guide, so you don't need to repeat it in the intro.
-
+## What you'll learn
-By the end of this guide, you will [a sentence or list about what will be achieved by the end of the guide].
+- A thing you'll learn, ex: "How to generate a token"
+- Another thing you'll learn, ex: "How to install this library"
+- One more thing you'll learn
----
-
-## Prerequisites
-
-
+
+ Prerequisites
To follow the steps in this guide, you'll need:
-- A prerequisite, ex: "Familiarity with Asset definitions"
-- Another prerequisite, ex: "To install library"
+- A prerequisite, ex: "Familiarity with [Asset definitions](/concepts/assets)"
+- Another prerequisite, ex: "To install this library"
- One more
----
+
## Step 1: Title that describes what this step will do {#step-1}
-
+- Each section heading should have an identifier that includes the word 'step' and the number of the step, ex: {#step-1}
### Step 1.1: Title that describes a substep {#step-1-1}
-
-
----
+If a step would benefit from being broken into smaller steps, follow this section's formatting.
+Each substep should get an H3 heading that starts with "Step N.", followed by the number of the substep, ex: "Step 1.1"
## Step 2: Another step {#step-2}
----
-
-## Related
+## Next steps
-[List of links to related content]
+- Add links to related content
+- Go deeper into [Understanding Automation](/concepts/understanding-automation)
+- Explore [Related Example](/)
diff --git a/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-custom-eval.py b/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-custom-eval.py
new file mode 100644
index 0000000000000..8a0f006206861
--- /dev/null
+++ b/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-custom-eval.py
@@ -0,0 +1,51 @@
+from dagster import (
+ AssetExecutionContext,
+ AssetKey,
+ AssetMaterialization,
+ Definitions,
+ MaterializeResult,
+ RunRequest,
+ SensorEvaluationContext,
+ SkipReason,
+ asset,
+ asset_sensor,
+ define_asset_job,
+)
+
+
+@asset
+def daily_sales_data(context: AssetExecutionContext):
+ context.log.info("Asset to watch, perhaps some function sets metadata here")
+ yield MaterializeResult(metadata={"specific_property": "value"})
+
+
+@asset
+def weekly_report(context: AssetExecutionContext):
+ context.log.info("Running weekly report")
+
+
+my_job = define_asset_job("my_job", [weekly_report])
+
+
+@asset_sensor(asset_key=AssetKey("daily_sales_data"), job=my_job)
+def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event):
+ # Provide a type hint on the underlying event
+ materialization: AssetMaterialization = (
+ asset_event.dagster_event.event_specific_data.materialization
+ )
+
+ # Example custom logic: Check if the asset metadata has a specific property
+ # highlight-start
+ if "specific_property" in materialization.metadata:
+ context.log.info("Triggering job based on custom evaluation logic")
+ yield RunRequest(run_key=context.cursor)
+ else:
+ yield SkipReason("Asset materialization does not have the required property")
+ # highlight-end
+
+
+defs = Definitions(
+ assets=[daily_sales_data, weekly_report],
+ jobs=[my_job],
+ sensors=[daily_sales_data_sensor],
+)
diff --git a/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-with-config.py b/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-with-config.py
index b1c01c332b728..3c729f898acc5 100644
--- a/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-with-config.py
+++ b/docs/docs-beta/docs/code_examples/guides/automation/asset-sensor-with-config.py
@@ -1,8 +1,11 @@
from dagster import (
AssetExecutionContext,
AssetKey,
+ AssetMaterialization,
+ Config,
Definitions,
- EventLogEntry,
+ MaterializeResult,
+ RunConfig,
RunRequest,
SensorEvaluationContext,
asset,
@@ -11,38 +14,49 @@
)
+class MyConfig(Config):
+ param1: str
+
+
@asset
def daily_sales_data(context: AssetExecutionContext):
context.log.info("Asset to watch")
+ # highlight-next-line
+ yield MaterializeResult(metadata={"specific_property": "value"})
@asset
-def weekly_report(context: AssetExecutionContext):
- context.log.info("Asset to trigger")
+def weekly_report(context: AssetExecutionContext, config: MyConfig):
+ context.log.info(f"Running weekly report with param1: {config.param1}")
-my_job = define_asset_job("my_job", [weekly_report])
+my_job = define_asset_job(
+ "my_job",
+ [weekly_report],
+ config=RunConfig(ops={"weekly_report": MyConfig(param1="value")}),
+)
-# highlight-start
@asset_sensor(asset_key=AssetKey("daily_sales_data"), job=my_job)
-def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event: EventLogEntry):
- # This satisifies the type checker. Asset events are guaranteed to have a dagster_event and asset_key.
- assert asset_event.dagster_event is not None
- assert asset_event.dagster_event.asset_key is not None
-
- return RunRequest(
- run_key=context.cursor,
- run_config={
- "ops": {
- "read_materialization": {
- "config": {
- "asset_key": asset_event.dagster_event.asset_key.path,
- }
+def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event):
+ materialization: AssetMaterialization = (
+ asset_event.dagster_event.event_specific_data.materialization
+ )
+
+ # Example custom logic: Check if the asset metadata has a specific property
+ # highlight-start
+ if "specific_property" in materialization.metadata:
+ yield RunRequest(
+ run_key=context.cursor,
+ run_config=RunConfig(
+ ops={
+ "weekly_report": MyConfig(
+ param1=str(materialization.metadata.get("specific_property"))
+ )
}
- }
- },
- ) # highlight-end
+ ),
+ )
+ # highlight-end
defs = Definitions(
diff --git a/docs/docs-beta/docs/code_examples/guides/automation/multi-asset-sensor.py b/docs/docs-beta/docs/code_examples/guides/automation/multi-asset-sensor.py
new file mode 100644
index 0000000000000..d3c35ba8b761f
--- /dev/null
+++ b/docs/docs-beta/docs/code_examples/guides/automation/multi-asset-sensor.py
@@ -0,0 +1,32 @@
+from dagster import (
+ AssetKey,
+ MultiAssetSensorEvaluationContext,
+ RunRequest,
+ asset,
+ define_asset_job,
+ multi_asset_sensor,
+)
+
+
+@asset
+def target_asset():
+ pass
+
+
+downstream_job = define_asset_job("downstream_job", [target_asset])
+
+
+@multi_asset_sensor(
+ monitored_assets=[
+ AssetKey("upstream_asset_1"),
+ AssetKey("upstream_asset_2"),
+ ],
+ job=downstream_job,
+)
+def my_multi_asset_sensor(context: MultiAssetSensorEvaluationContext):
+ run_requests = []
+ for asset_key, materialization in context.latest_materialization_records_by_key().items():
+ if materialization:
+ run_requests.append(RunRequest(asset_selection=[asset_key]))
+ context.advance_cursor({asset_key: materialization})
+ return run_requests
diff --git a/docs/docs-beta/docs/code_examples/guides/automation/schedule-with-partition.py b/docs/docs-beta/docs/code_examples/guides/automation/schedule-with-partition.py
new file mode 100644
index 0000000000000..2ec9852fb5151
--- /dev/null
+++ b/docs/docs-beta/docs/code_examples/guides/automation/schedule-with-partition.py
@@ -0,0 +1,22 @@
+from dagster import (
+ DailyPartitionsDefinition,
+ asset,
+ build_schedule_from_partitioned_job,
+ define_asset_job,
+)
+
+daily_partition = DailyPartitionsDefinition(start_date="2024-05-20")
+
+
+@asset(partitions_def=daily_partition)
+def daily_asset(): ...
+
+
+partitioned_asset_job = define_asset_job("partitioned_job", selection=[daily_asset])
+
+# highlight-start
+# This schedule will run daily
+asset_partitioned_schedule = build_schedule_from_partitioned_job(
+ partitioned_asset_job,
+)
+# highlight-end
diff --git a/docs/docs-beta/docs/concepts/automation.md b/docs/docs-beta/docs/concepts/automation.md
index 420e995b0d090..de4a9b3269bf1 100644
--- a/docs/docs-beta/docs/concepts/automation.md
+++ b/docs/docs-beta/docs/concepts/automation.md
@@ -1 +1,66 @@
-# Automation
\ No newline at end of file
+---
+title: About Automation
+---
+
+There are several ways to automate the execution of your data pipelines with Dagster.
+
+The first system, and the most basic, is the [Schedule](/guides/automation/schedules), which responds to time.
+
+[Sensors](/guides/automation/sensors) are like schedules, but they respond to an external event defined by the user.
+
+[Asset Sensors](/guides/automation/asset-sensors) are a special case of sensor that responds to changes in asset materialization
+as reported by the Event Log.
+
+Finally, [Declarative Automation](/concepts/declarative-automation) is a more complex system that uses conditions on assets to determine when to execute.
+
+## Schedules
+
+In Dagster, a schedule is defined by the `ScheduleDefinition` class, or through the `@schedule` decorator. The `@schedule`
+decorator is more flexible than the `ScheduleDefinition` class, allowing you to configure job behavior or emit log messages
+as the schedule is processed.
+
+Schedules were one of the first types of automation in Dagster, created before the introduction of Software-Defined Assets.
+As such, many of the examples may seem foreign if you are used to working only within the asset framework.
+
+For more on how assets and ops relate to each other, read about [Assets and Ops](/concepts/assets#assets-and-ops).
+
+The `dagster-daemon` process is responsible for submitting runs by checking each schedule at a regular interval to determine
+if it's time to execute the underlying job.
+
+A schedule can be thought of as a wrapper around two pieces:
+
+- A `JobDefinition`, which is a set of assets to materialize or ops to execute.
+- A `cron` string, which describes the schedule.
+
+### Define a schedule using `ScheduleDefinition`
+
+```python
+ecommerce_schedule = ScheduleDefinition(
+ job=ecommerce_job,
+ cron_schedule="15 5 * * 1-5",
+)
+```
+
+By default, schedules aren't enabled. You can enable them by visiting the Automation tab in the Dagster UI and toggling the schedule,
+or by setting `default_status` to `DefaultScheduleStatus.RUNNING` when you define the schedule.
+
+```python
+ecommerce_schedule = ScheduleDefinition(
+ job=ecommerce_job,
+ cron_schedule="15 5 * * 1-5",
+ default_status=DefaultScheduleStatus.RUNNING,
+)
+```
+
+### Define a schedule using `@schedule`
+
+If you want more control over the schedule, you can use the `@schedule` decorator. Your decorated function is then responsible for
+emitting either a `RunRequest` or a `SkipReason`. You can also emit logs, which will be visible in the Dagster UI in the schedule's tick history.
+
+```python
+@schedule(cron_schedule="15 5 * * 1-5")
+def ecommerce_schedule(context):
+ context.log.info("This log message will be visible in the Dagster UI.")
+ return RunRequest()
+```
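+
+If the function decides there is nothing to do on a given tick, it can return a `SkipReason` instead. Below is a minimal, self-contained sketch; the `orders` asset and the `has_new_orders()` check are placeholders for your own logic.
+
+```python
+from dagster import RunRequest, SkipReason, asset, define_asset_job, schedule
+
+
+@asset
+def orders(): ...
+
+
+ecommerce_job = define_asset_job("ecommerce_job", selection=[orders])
+
+
+def has_new_orders() -> bool:
+    # Placeholder for a real check against an external system
+    return False
+
+
+@schedule(job=ecommerce_job, cron_schedule="15 5 * * 1-5")
+def ecommerce_conditional_schedule(context):
+    context.log.info("Checking for new orders")
+    if has_new_orders():
+        return RunRequest()
+    return SkipReason("No new orders since the last tick.")
+```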
diff --git a/docs/docs-beta/docs/concepts/automation/schedules.md b/docs/docs-beta/docs/concepts/automation/schedules.md
index 280d14cb0761f..e69de29bb2d1d 100644
--- a/docs/docs-beta/docs/concepts/automation/schedules.md
+++ b/docs/docs-beta/docs/concepts/automation/schedules.md
@@ -1,6 +0,0 @@
----
-title: "Schedules"
-sidebar_position: 10
----
-
-# Schedules
diff --git a/docs/docs-beta/docs/guides/automation.md b/docs/docs-beta/docs/guides/automation.md
index 915eb4e32afe1..cfa58783c771f 100644
--- a/docs/docs-beta/docs/guides/automation.md
+++ b/docs/docs-beta/docs/guides/automation.md
@@ -35,8 +35,8 @@ Dagster offers several ways to automate pipeline execution:
## Schedules
Schedules allow you to run jobs at specified times, like "every Monday at 9 AM" or "daily at midnight."
-A schedule combines a selection of assets, known as a [Job](/concepts/ops-jobs), and a [cron expression](https://en.wikipedia.org/wiki/Cron)
-in order to define when the job should be run.
+A schedule combines a selection of assets, known as a [Job](/concepts/ops-jobs), and a [cron expression](https://en.wikipedia.org/wiki/Cron)
+to define when the job should be run.
To make creating cron expressions easier, you can use an online tool like [Crontab Guru](https://crontab.guru/).
@@ -51,36 +51,38 @@ For more information about how Schedules work, see [About Schedules](/concepts/s
## Sensors
-Sensors allow you to trigger runs based on events or conditions, like a new file arriving or an external system status change.
+Sensors allow you to trigger runs based on events or conditions that you define, like a new file arriving or an external system status change.
-Like schedules, sensors operate on a selection of assets, known as [Jobs](/concepts/ops-jobs) and can either start a pipeline
-through a Run or log a reason for not starting a pipeline using a SkipReason.
-
-However, unlike schedules, sensors are triggered by events that you define.
You must provide a function that the sensor will use to determine if it should trigger a run.
-### When to use Sensors
+Like schedules, sensors operate on a selection of assets, known as [Jobs](/concepts/ops-jobs), and can either start a pipeline through a Run or log a reason for not starting a pipeline using a SkipReason.
+
+
+### When to use sensors
- You need event-driven automation
- You want to react to changes in external systems
For more examples of how to create sensors, see the [How-To Use Sensors](/guides/automation/sensors) guide.
-For more information about how Sensors work, see the [About Sensors](/concepts/sensors) concept page.
+For more information about how sensors work, see the [About Sensors](/concepts/sensors) concept page.
## Asset sensors
Asset Sensors trigger jobs when specified assets are materialized, allowing you to create dependencies between jobs or code locations.
-### When to use Asset Sensors
+### When to use Asset sensors
- You need to trigger jobs based on asset materializations
- You want to create dependencies between different jobs or code locations
For more examples of how to create asset sensors, see the [How-To Use Asset Sensors](/guides/automation/asset-sensors) guide.
+## Declarative automation
+
+TODO: add content
-## Choosing the Right Automation Method
+## How to choose the right automation method
Consider these factors when selecting an automation method:
@@ -91,17 +93,17 @@ Consider these factors when selecting an automation method:
Use this table to help guide your decision:
-| Method | Best For | Works With |
-|--------|----------|------------|
-| Schedules | Regular, time-based job runs | Assets, Ops, Graphs |
-| Sensors | Event-driven automation | Assets, Ops, Graphs |
-| Declarative Automation | Asset-centric, condition-based updates | Assets only |
-| Asset Sensors | Cross-job/location asset dependencies | Assets only |
+| Method | Best For | Works With |
+| ---------------------- | -------------------------------------- | ------------------- |
+| Schedules | Regular, time-based job runs | Assets, Ops, Graphs |
+| Sensors | Event-driven automation | Assets, Ops, Graphs |
+| Declarative Automation | Asset-centric, condition-based updates | Assets only |
+| Asset Sensors | Cross-job/location asset dependencies | Assets only |
-## Next Steps
+## Next steps
- Learn more about [advanced scheduling patterns] - TODO ADD LINK
- Explore [complex sensor examples] - TODO ADD LINK
- Dive into [Declarative Automation best practices] - TODO ADD LINK
-By understanding and effectively using these automation methods, you can build more efficient data pipelines that respond to your specific needs and constraints.
\ No newline at end of file
+By understanding and effectively using these automation methods, you can build more efficient data pipelines that respond to your specific needs and constraints.
diff --git a/docs/docs-beta/docs/guides/automation/asset-sensors.md b/docs/docs-beta/docs/guides/automation/asset-sensors.md
index 1d928c3cc59bc..21d29857980d2 100644
--- a/docs/docs-beta/docs/guides/automation/asset-sensors.md
+++ b/docs/docs-beta/docs/guides/automation/asset-sensors.md
@@ -1,10 +1,91 @@
---
-title: Asset Sensors
-sidebar_position: 50
+title: Triggering jobs with Asset Sensors
+sidebar_label: Triggering jobs with Asset Sensors
+sidebar_position: 30
---
-### Basic Asset Sensor Example
+
+Asset sensors in Dagster provide a powerful mechanism for monitoring asset materializations and triggering downstream computations or notifications based on those events.
+
+This guide covers the most common use cases for asset sensors, such as defining cross-job and cross-code-location dependencies.
+
+
+Prerequisites
+
+- Familiarity with [Assets](/concepts/assets)
+- Familiarity with [Ops and Jobs](/concepts/ops-and-jobs)
+
+
+## Define cross-job and cross-code location dependencies
+
+Asset Sensors enable dependencies across different jobs and even different code locations. This flexibility allows for more modular and decoupled workflows.
+
+```mermaid
+graph LR;
+
+AssetToWatch --> AssetSensor;
+AssetSensor --> Job;
+Job --> Asset1;
+Job --> Asset2;
+
+subgraph CodeLocationA
+ AssetToWatch
+end
+
+subgraph CodeLocationB
+ AssetSensor
+ Job
+ Asset1
+ Asset2
+end
+```
+
+Here's a minimal example of an asset sensor that triggers a job when an asset is materialized. The `daily_sales_data` asset is in the same code location for this example, but the same pattern can be applied to assets in different code locations.
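+
+A minimal sketch of this pattern, reusing the asset and job names from the code samples added alongside this guide, might look like the following:
+
+```python
+from dagster import (
+    AssetExecutionContext,
+    AssetKey,
+    Definitions,
+    RunRequest,
+    SensorEvaluationContext,
+    asset,
+    asset_sensor,
+    define_asset_job,
+)
+
+
+@asset
+def daily_sales_data(context: AssetExecutionContext):
+    context.log.info("Asset to watch")
+
+
+@asset
+def weekly_report(context: AssetExecutionContext):
+    context.log.info("Asset to trigger")
+
+
+my_job = define_asset_job("my_job", [weekly_report])
+
+
+@asset_sensor(asset_key=AssetKey("daily_sales_data"), job=my_job)
+def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event):
+    # Request a run of my_job each time daily_sales_data is materialized
+    return RunRequest(run_key=context.cursor)
+
+
+defs = Definitions(
+    assets=[daily_sales_data, weekly_report],
+    jobs=[my_job],
+    sensors=[daily_sales_data_sensor],
+)
+```
+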
-This Asset Sensor will trigger a run of `my_job` whenever the `asset_to_watch` asset is materialized.
+## Customize evaluation logic
+
+The evaluation function of an asset sensor can be customized with logic that determines when to trigger a run. This allows for fine-grained control over the conditions under which downstream jobs are executed.
+
+```mermaid
+stateDiagram-v2
+ direction LR
+
+
+
+ [*] --> AssetMaterialized
+ AssetMaterialized --> [*]
+
+ AssetMaterialized --> UserEvaluationFunction
+ UserEvaluationFunction --> RunRequest
+ UserEvaluationFunction --> SkipReason
+ SkipReason --> [*]
+ RunRequest --> [*]
+
+ class UserEvaluationFunction userDefined
+ classDef userDefined fill: lightblue
+```
+
+In this example, the `@asset_sensor` decorator allows you to define a custom evaluation function that returns a `RunRequest` object when the asset is materialized and certain metadata is present;
+otherwise, it skips the run.
+
+
+
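+The core of that evaluation logic, trimmed from the custom-evaluation example added alongside this guide, looks roughly like this:
+
+```python
+from dagster import (
+    AssetKey,
+    AssetMaterialization,
+    MaterializeResult,
+    RunRequest,
+    SensorEvaluationContext,
+    SkipReason,
+    asset,
+    asset_sensor,
+    define_asset_job,
+)
+
+
+@asset
+def daily_sales_data():
+    # The watched asset attaches metadata to its materialization
+    yield MaterializeResult(metadata={"specific_property": "value"})
+
+
+@asset
+def weekly_report(): ...
+
+
+my_job = define_asset_job("my_job", [weekly_report])
+
+
+@asset_sensor(asset_key=AssetKey("daily_sales_data"), job=my_job)
+def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event):
+    materialization: AssetMaterialization = (
+        asset_event.dagster_event.event_specific_data.materialization
+    )
+
+    # Only trigger the job when the expected metadata is present
+    if "specific_property" in materialization.metadata:
+        yield RunRequest(run_key=context.cursor)
+    else:
+        yield SkipReason("Asset materialization does not have the required property")
+```
+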
+## Trigger a job with configuration
+
+By providing a configuration to the `RunRequest` object, you can trigger a job with a specific configuration. This is useful when you want to pass custom parameters to a job based on logic you define. For example, you might use a sensor to trigger a job when an asset is materialized and pass metadata about that materialization to the job.
+
+
+
+
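+A sketch of that configuration-passing pattern, trimmed from the with-config example in this change:
+
+```python
+from dagster import (
+    AssetKey,
+    AssetMaterialization,
+    Config,
+    MaterializeResult,
+    RunConfig,
+    RunRequest,
+    SensorEvaluationContext,
+    asset,
+    asset_sensor,
+    define_asset_job,
+)
+
+
+class MyConfig(Config):
+    param1: str
+
+
+@asset
+def daily_sales_data():
+    yield MaterializeResult(metadata={"specific_property": "value"})
+
+
+@asset
+def weekly_report(config: MyConfig): ...
+
+
+my_job = define_asset_job(
+    "my_job",
+    [weekly_report],
+    config=RunConfig(ops={"weekly_report": MyConfig(param1="value")}),
+)
+
+
+@asset_sensor(asset_key=AssetKey("daily_sales_data"), job=my_job)
+def daily_sales_data_sensor(context: SensorEvaluationContext, asset_event):
+    materialization: AssetMaterialization = (
+        asset_event.dagster_event.event_specific_data.materialization
+    )
+
+    if "specific_property" in materialization.metadata:
+        # Forward the metadata value to the job as run config
+        yield RunRequest(
+            run_key=context.cursor,
+            run_config=RunConfig(
+                ops={
+                    "weekly_report": MyConfig(
+                        param1=str(materialization.metadata.get("specific_property"))
+                    )
+                }
+            ),
+        )
+```
+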
+## Monitor multiple assets
+
+The previous examples showed how to use a single asset sensor to monitor a single asset and trigger a job when it's materialized. This example uses a multi-asset sensor to monitor multiple assets and trigger a job when any of the monitored assets are materialized.
+
+
+
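+Below is a sketch of a multi-asset sensor. It triggers `downstream_job` once both monitored assets have new materializations; the example added alongside this guide requests a run per newly materialized asset instead. The upstream assets here are placeholders.
+
+```python
+from dagster import (
+    AssetKey,
+    MultiAssetSensorEvaluationContext,
+    RunRequest,
+    asset,
+    define_asset_job,
+    multi_asset_sensor,
+)
+
+
+@asset
+def upstream_asset_1(): ...
+
+
+@asset
+def upstream_asset_2(): ...
+
+
+@asset
+def target_asset(): ...
+
+
+downstream_job = define_asset_job("downstream_job", [target_asset])
+
+
+@multi_asset_sensor(
+    monitored_assets=[
+        AssetKey("upstream_asset_1"),
+        AssetKey("upstream_asset_2"),
+    ],
+    job=downstream_job,
+)
+def my_multi_asset_sensor(context: MultiAssetSensorEvaluationContext):
+    # Trigger the job only when every monitored asset has a new materialization
+    if all(context.latest_materialization_records_by_key().values()):
+        context.advance_all_cursors()
+        return RunRequest()
+```
+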
+## Next steps
+
+- Learn more about asset sensors in [Understanding Automation](/concepts/automation)
+- Explore [Declarative Automation](/concepts/declarative-automation) as an alternative to asset sensors
\ No newline at end of file
diff --git a/docs/docs-beta/docs/guides/automation/creating-dynamic-pipelines-based-on-external-data.md b/docs/docs-beta/docs/guides/automation/creating-dynamic-pipelines-based-on-external-data.md
deleted file mode 100644
index 98576d9d32794..0000000000000
--- a/docs/docs-beta/docs/guides/automation/creating-dynamic-pipelines-based-on-external-data.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: "Creating dynamic pipelines based on external data"
-sidebar_position: 30
----
-
-# Creating dynamic pipelines based on external data
diff --git a/docs/docs-beta/docs/guides/automation/schedules.md b/docs/docs-beta/docs/guides/automation/schedules.md
index db5105529916e..5cf68d428b14a 100644
--- a/docs/docs-beta/docs/guides/automation/schedules.md
+++ b/docs/docs-beta/docs/guides/automation/schedules.md
@@ -1,57 +1,42 @@
---
-title: "Scheduling pipelines"
+title: "Scheduling cron-based pipelines"
sidebar_label: "Running pipelines on a schedule"
sidebar_position: 10
---
-## Basic Schedule Example
+Schedules enable automated execution of jobs at specified intervals. These intervals can range from common frequencies like hourly, daily, or weekly, to more intricate patterns defined using cron expressions.
-A basic schedule is defined by a `JobDefinition` and a `cron_schedule` using the `ScheduleDefinition` class.
+
+Prerequisites
-
+- Familiarity with [Assets](/concepts/assets)
+- Familiarity with [Ops and Jobs](/concepts/ops-and-jobs)
+
-## How to Set Custom Timezones
+## Basic schedule
-By default, schedules without a timezone will run in UTC. If you want to run a schedule in a different timezone, you can
-set the `timezone` parameter.
+A basic schedule is defined by a `JobDefinition` and a `cron_schedule` using the `ScheduleDefinition` class. A job can be thought of as a selection of assets or operations executed together.
-```python
-ecommerce_schedule = ScheduleDefinition(
- job=ecommerce_job,
- cron_schedule="15 5 * * 1-5",
-timezone="America/Los_Angeles",
-)
-```
+
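+A minimal sketch of a basic schedule (the `customer_data` asset is a placeholder; `daily_refresh_job` and the cron string match the timezone example below):
+
+```python
+from dagster import Definitions, ScheduleDefinition, asset, define_asset_job
+
+
+@asset
+def customer_data(): ...
+
+
+daily_refresh_job = define_asset_job("daily_refresh_job", selection=[customer_data])
+
+# Run daily_refresh_job every day at midnight (UTC by default)
+daily_schedule = ScheduleDefinition(
+    job=daily_refresh_job,
+    cron_schedule="0 0 * * *",
+)
+
+defs = Definitions(
+    assets=[customer_data],
+    jobs=[daily_refresh_job],
+    schedules=[daily_schedule],
+)
+```
+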
-## How to Create Partitioned Schedules
+## Run schedules in a different timezone
-If you have a partitioned asset and job, you can create a schedule from the partition using `build_schedule_from_partitioned_job`.
-The schedule will execute as the same cadence specified by the partition definition.
+By default, schedules without a timezone will run in Coordinated Universal Time (UTC). If you want to run a schedule in a different timezone, you can set the `timezone` parameter.
```python
-from dagster import (
- asset,
- build_schedule_from_partitioned_job,
- define_asset_job,
- DailyPartitionsDefinition,
+daily_schedule = ScheduleDefinition(
+ job=daily_refresh_job,
+ cron_schedule="0 0 * * *",
+ timezone="America/Los_Angeles",
)
+```
-daily_partition = DailyPartitionsDefinition(start_date="2024-05-20")
-
-
-@asset(partitions_def=daily_partition)
-def daily_asset(): ...
-
-partitioned_asset_job = define_asset_job("partitioned_job", selection=[daily_asset])
+## Run schedules on a partitioned asset
-# highlight-start
-# This partition will run daily
-asset_partitioned_schedule = build_schedule_from_partitioned_job(
- partitioned_asset_job,
-)
-# highlight-end
+If you have a partitioned asset and job, you can create a schedule using the partition with `build_schedule_from_partitioned_job`.
+The schedule will execute at the same cadence specified by the partition definition.
-```
+
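+A sketch mirroring the partitioned-schedule example added alongside this guide:
+
+```python
+from dagster import (
+    DailyPartitionsDefinition,
+    asset,
+    build_schedule_from_partitioned_job,
+    define_asset_job,
+)
+
+daily_partition = DailyPartitionsDefinition(start_date="2024-05-20")
+
+
+@asset(partitions_def=daily_partition)
+def daily_asset(): ...
+
+
+partitioned_asset_job = define_asset_job("partitioned_job", selection=[daily_asset])
+
+# The schedule inherits its cadence from the partition definition, so it runs daily
+asset_partitioned_schedule = build_schedule_from_partitioned_job(
+    partitioned_asset_job,
+)
+```
+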
If you have a partitioned job, you can create a schedule from the partition using `build_schedule_from_partitioned_job`.
@@ -69,6 +54,12 @@ partitioned_op_schedule = build_schedule_from_partitioned_job(
# highlight-end
```
----
-For more information about how Schedules work, see the [About Schedules](/concepts/schedules) concept page.
+
+## Next steps
+
+- Learn more about schedules in [Understanding Automation](/concepts/automation)
+- React to events with [sensors](/guides/automation/sensors)
+- Explore [Declarative Automation](/concepts/declarative-automation) as an alternative to schedules
+
+By understanding and effectively using these automation methods, you can build more efficient data pipelines that respond to your specific needs and constraints.
\ No newline at end of file
diff --git a/docs/docs-beta/docs/guides/automation/sensors.md b/docs/docs-beta/docs/guides/automation/sensors.md
index fcaa64aa5be7c..f30e16b94379e 100644
--- a/docs/docs-beta/docs/guides/automation/sensors.md
+++ b/docs/docs-beta/docs/guides/automation/sensors.md
@@ -1,13 +1,28 @@
---
-title: Sensor Examples
+title: Creating event-based pipelines with sensors
+sidebar_label: Creating event-based pipelines
+sidebar_position: 20
+
---
+Sensors are a way to trigger runs in Dagster in response to events. Sensors
+run on a regular interval and can either trigger a run or provide a reason why a run was skipped.
+
+Sensors allow you to react to events in external systems. For example, you can trigger a run when a new file arrives in an S3 bucket, or when a row is updated in a database.
+
+
+Prerequisites
-### Basic Sensor Example
+- Familiarity with [Assets](/concepts/assets)
+- Familiarity with [Ops and Jobs](/concepts/ops-and-jobs)
+
+
+## Basic sensor example
This example includes a `check_for_new_files` function that simulates finding new files. In a real scenario, this function would check an actual system or directory.
The sensor runs every 5 seconds. If it finds new files, it starts a run of `my_job`. If not, it skips the run and logs "No new files found" in the Dagster UI.
+
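+A sketch of what that example can look like (the asset, job, and file names here are placeholders):
+
+```python
+from dagster import (
+    RunRequest,
+    SensorEvaluationContext,
+    SkipReason,
+    asset,
+    define_asset_job,
+    sensor,
+)
+
+
+@asset
+def my_asset(): ...
+
+
+my_job = define_asset_job("my_job", selection=[my_asset])
+
+
+def check_for_new_files():
+    # Simulates finding new files; replace with a check of a real directory or external system
+    return ["file_1", "file_2"]
+
+
+@sensor(job=my_job, minimum_interval_seconds=5)
+def new_file_sensor(context: SensorEvaluationContext):
+    new_files = check_for_new_files()
+    if new_files:
+        for filename in new_files:
+            # One run per new file, keyed by filename to avoid duplicate runs
+            yield RunRequest(run_key=filename)
+    else:
+        yield SkipReason("No new files found")
+```
+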
:::tip
diff --git a/docs/docs-beta/docs/guides/automation/triggering-pipeline-runs-using-events.md b/docs/docs-beta/docs/guides/automation/triggering-pipeline-runs-using-events.md
deleted file mode 100644
index 17826f1c536e4..0000000000000
--- a/docs/docs-beta/docs/guides/automation/triggering-pipeline-runs-using-events.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: "Creating event-based pipelines"
-sidebar_position: 20
----
diff --git a/docs/docs-beta/docs/guides/data-assets.md b/docs/docs-beta/docs/guides/data-modeling.md
similarity index 100%
rename from docs/docs-beta/docs/guides/data-assets.md
rename to docs/docs-beta/docs/guides/data-modeling.md
diff --git a/docs/docs-beta/sidebars.ts b/docs/docs-beta/sidebars.ts
index 9ed1a026afa71..11c337db6fc98 100644
--- a/docs/docs-beta/sidebars.ts
+++ b/docs/docs-beta/sidebars.ts
@@ -30,10 +30,10 @@ const sidebars: SidebarsConfig = {
items: [
{
type: 'category',
- label: 'Data assets',
+ label: 'Data modeling',
link: {
type: 'doc',
- id: 'guides/data-assets',
+ id: 'guides/data-modeling',
},
items: [
{
diff --git a/docs/docs-beta/src/styles/custom.scss b/docs/docs-beta/src/styles/custom.scss
index 4e8f9bc211c6d..8a06dc9f77f5a 100644
--- a/docs/docs-beta/src/styles/custom.scss
+++ b/docs/docs-beta/src/styles/custom.scss
@@ -21,6 +21,10 @@ article {
margin: 0 auto;
}
+hr {
+ height: 1px;
+}
+
.breadcrumbs {
display: flex;
flex-direction: row;
@@ -104,10 +108,20 @@ a.pyobject {
--ifm-h5-font-size: 0.8rem;
}
- // We want this to only apply to inline code, so don't apply
- // this color to ``` code blocks nor any headings.
+ // Emulate horizontal rule above h2 headers
+ h2 {
+ border-top: 1px;
+ border-top-style: solid;
+ border-top-color: var(--theme-color-keyline);
+ margin-top: 0px;
+ padding-top: calc(var(--ifm-heading-vertical-rhythm-bottom) * var(--ifm-leading));
+ }
+
+ // We want this to only apply to inline code
:not(pre):not(h2):not(h3):not(h4):not(h5):not(h6) > code {
- border: none;
+ background-color: var(--theme-color-background-blue);
+ border: 0.5px solid var(--theme-color-keyline);
+ padding: 0.1rem;
}
// don't apply --dagster-inline-code colors to admonitions
@@ -202,10 +216,24 @@ a.pyobject {
border: 1px solid rgba(200, 200, 200, 0.3);
}
-.table-of-contents__link--active {
- font-weight: var(--ifm-font-weight-bold);
+.markdown .table-of-contents {
+ li {
+ list-style: none;
+ padding-top: 4px;
+ line-height: 1;
+ }
+ li a {
+ font-weight: var(--ifm-font-weight-normal);
+ }
+}
+.table-of-contents {
+ &__link:hover {
+ background-color: var(--theme-color-background-gray);
+ }
+ &__link--active {
+ font-weight: 500;
+ }
}
-
.pagination-nav {
&__link {
border: 0;
diff --git a/docs/docs-beta/src/styles/theme-dark.scss b/docs/docs-beta/src/styles/theme-dark.scss
index c07eef110edd6..3622df270594d 100644
--- a/docs/docs-beta/src/styles/theme-dark.scss
+++ b/docs/docs-beta/src/styles/theme-dark.scss
@@ -60,4 +60,7 @@
--ifm-menu-color: var(--theme-color-text-light);
--ifm-hover-overlay: var(--theme-color-background-blue);
--ifm-menu-color-active: var(--theme-color-background-blue);
+
+ //hr
+ --ifm-hr-background-color: var(--theme-color-background-lighter);
}
diff --git a/docs/docs-beta/src/styles/theme-light.scss b/docs/docs-beta/src/styles/theme-light.scss
index 0bcccfaa0917b..ca7c54ea169b3 100644
--- a/docs/docs-beta/src/styles/theme-light.scss
+++ b/docs/docs-beta/src/styles/theme-light.scss
@@ -76,6 +76,9 @@
--ifm-hover-overlay: var(--theme-color-background-blue);
--ifm-menu-color-active: var(--theme-color-background-blue);
+ //hr
+ --ifm-hr-background-color: var(--theme-color-background-light);
+
// docusaurus
--docusaurus-highlighted-code-line-bg: var(--theme-color-background-blue);
diff --git a/docs/docs-beta/src/theme/MDXComponents.tsx b/docs/docs-beta/src/theme/MDXComponents.tsx
index f4b9d9a17a51d..7f4f8361523c5 100644
--- a/docs/docs-beta/src/theme/MDXComponents.tsx
+++ b/docs/docs-beta/src/theme/MDXComponents.tsx
@@ -4,6 +4,8 @@ import {PyObject} from '../components/PyObject';
import CodeExample from '../components/CodeExample';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+import TOCInline from '@theme/TOCInline';
+
export default {
// Re-use the default mapping
...MDXComponents,
@@ -11,4 +13,5 @@ export default {
Tabs,
TabItem,
CodeExample,
+ TOCInline,
};
diff --git a/docs/vale/styles/Dagster/dagster_terms.yml b/docs/vale/styles/Dagster/dagster_terms.yml
index 4204f3d5fc3f3..9359242e2f7aa 100644
--- a/docs/vale/styles/Dagster/dagster_terms.yml
+++ b/docs/vale/styles/Dagster/dagster_terms.yml
@@ -15,5 +15,6 @@ swap:
public preview: Public Preview
run id: Run Id
'\bdagster': Dagster
- dagster cloud: Dagster Plus
+ '[Dd]agster [Cc]loud': Dagster Plus
'\bworker\b': Worker
+ '[Aa]sset Sensor': Asset sensor
\ No newline at end of file
diff --git a/docs/vale/styles/config/vocabularies/Dagster/accept.txt b/docs/vale/styles/config/vocabularies/Dagster/accept.txt
index ca940b3243c3c..0128ab9b0b513 100644
--- a/docs/vale/styles/config/vocabularies/Dagster/accept.txt
+++ b/docs/vale/styles/config/vocabularies/Dagster/accept.txt
@@ -12,4 +12,4 @@ dataframes
DataFrame
cron
materializations
-webserver
\ No newline at end of file
+webserver