[pyright] analyzeUnannotatedFunctions

alangenfeld committed Dec 10, 2024
1 parent 62c3877 commit 425102d
Showing 408 changed files with 2,451 additions and 2,383 deletions.
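
A note on the mechanics, since every hunk below follows the same pattern: the commit title and the wave of new `# pyright: ignore[...]` comments suggest that pyright's `analyzeUnannotatedFunctions` option was switched on for this repo (when enabled, pyright type-checks the bodies of functions that have no parameter or return annotations), and the diagnostics that surfaced in unannotated build and test code were suppressed in place rather than fixed. That reading is an inference from the title and the pattern of changes, not something stated in the hunks themselves. A minimal sketch of the mechanics, using made-up names (`greet`, `main`) that are not from this diff:

    import os

    def greet(who: str) -> str:
        return "hello " + who

    def main():  # unannotated, so its body is only checked when analyzeUnannotatedFunctions is on
        # os.getenv returns Optional[str] but greet expects str, so pyright
        # reports reportArgumentType here; the bracketed rule name limits the
        # suppression to that single diagnostic on this line.
        print(greet(os.getenv("USER")))  # pyright: ignore[reportArgumentType]

Naming the rule (reportArgumentType, reportOptionalMemberAccess, reportAttributeAccessIssue, ...) instead of using a bare `# pyright: ignore` keeps each suppression scoped to the one error it was added for.
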
@@ -232,7 +232,7 @@ def build_steps(self) -> List[BuildkiteTopLevelStep]:
@property
def requirements(self):
# First try to infer requirements from the python package
-package = PythonPackages.get(self.name)
+package = PythonPackages.get(self.name) # pyright: ignore[reportArgumentType]
if package:
return set.union(package.install_requires, *package.extras_require.values())

2 changes: 1 addition & 1 deletion .buildkite/dagster-buildkite/dagster_buildkite/utils.py
@@ -363,7 +363,7 @@ def skip_if_no_docs_changes():
if message_contains("NO_SKIP"):
return None

-if not is_feature_branch(os.getenv("BUILDKITE_BRANCH")):
+if not is_feature_branch(os.getenv("BUILDKITE_BRANCH")): # pyright: ignore[reportArgumentType]
return None

# If anything changes in the docs directory
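
The ignore above leaves runtime behavior untouched. If the aim were to satisfy the checker instead, the usual fix is to bind and narrow the `Optional[str]` before the call — a sketch only, with `is_feature_branch` stubbed out since its real body is not part of this diff, and note that it silently treats a missing BUILDKITE_BRANCH as a non-feature branch:

    import os

    def is_feature_branch(branch: str) -> bool:
        # stand-in for the real helper; the actual logic is not shown in this diff
        return not (branch == "master" or branch.startswith("release"))

    def skip_if_no_docs_changes():
        branch = os.getenv("BUILDKITE_BRANCH")
        # Binding the value lets pyright narrow it from Optional[str] to str
        # after the None check, so no suppression comment is needed.
        if branch is None or not is_feature_branch(branch):
            return None
        ...
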
@@ -71,7 +71,7 @@ def _cluster_provider(request):
f"Found existing image tagged {docker_image}, skipping image build. To rebuild,"
f" first run: docker rmi {docker_image}"
)
-except docker.errors.ImageNotFound:
+except docker.errors.ImageNotFound: # pyright: ignore[reportAttributeAccessIssue]
build_and_tag_test_image(docker_image)
kind_load_images(
cluster_name=cluster_config.name,
@@ -231,7 +231,7 @@ def check_export_runs(instance):

# example PYTEST_CURRENT_TEST: test_user_code_deployments.py::test_execute_on_celery_k8s (teardown)
current_test = (
-os.environ.get("PYTEST_CURRENT_TEST").split()[0].replace("::", "-").replace(".", "-")
+os.environ.get("PYTEST_CURRENT_TEST").split()[0].replace("::", "-").replace(".", "-") # pyright: ignore[reportOptionalMemberAccess]
)

for run in instance.get_runs():
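
Two different suppressions recur in this file. `reportAttributeAccessIssue` on `except docker.errors.ImageNotFound:` most likely means pyright cannot see the `errors` submodule through a bare `import docker` (an inference — the import lines are not in these hunks), and `reportOptionalMemberAccess` comes from `os.environ.get(...)` being typed as `Optional[str]`. A sketch of the comment-free alternatives, assuming the docker SDK is installed:

    import os

    from docker.errors import ImageNotFound  # importing the class directly is typically visible to the checker

    def build_if_missing(docker_image: str) -> None:
        try:
            ...  # e.g. client.images.get(docker_image)
        except ImageNotFound:
            ...  # e.g. build_and_tag_test_image(docker_image)

    def current_test_slug() -> str:
        # os.environ[...] is typed as str (it raises KeyError when unset),
        # unlike os.environ.get(...), which is Optional[str].
        return os.environ["PYTEST_CURRENT_TEST"].split()[0].replace("::", "-").replace(".", "-")
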
@@ -30,7 +30,7 @@ def dagster_docker_image():
f"Found existing image tagged {docker_image}, skipping image build. To rebuild, first run: "
f"docker rmi {docker_image}"
)
-except docker.errors.ImageNotFound:
+except docker.errors.ImageNotFound: # pyright: ignore[reportAttributeAccessIssue]
build_and_tag_test_image(docker_image)

return docker_image
@@ -96,7 +96,7 @@ def test_docker_monitoring(aws_env):
find_local_test_image(docker_image)

run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"ops": {
"multiply_the_word_slow": {
@@ -139,21 +139,21 @@ def test_docker_monitoring(aws_env):

start_time = time.time()
while time.time() - start_time < 60:
-run = instance.get_run_by_id(run.run_id)
-if run.status == DagsterRunStatus.STARTED:
+run = instance.get_run_by_id(run.run_id) # pyright: ignore[reportOptionalMemberAccess]
+if run.status == DagsterRunStatus.STARTED: # pyright: ignore[reportOptionalMemberAccess]
break
-assert run.status == DagsterRunStatus.STARTING
+assert run.status == DagsterRunStatus.STARTING # pyright: ignore[reportOptionalMemberAccess]
time.sleep(1)

time.sleep(3)

-instance.run_launcher._get_container( # noqa: SLF001
-instance.get_run_by_id(run.run_id)
+instance.run_launcher._get_container( # noqa: SLF001 # pyright: ignore[reportAttributeAccessIssue]
+instance.get_run_by_id(run.run_id) # pyright: ignore[reportOptionalMemberAccess]
).stop()

# daemon resumes the run
-poll_for_finished_run(instance, run.run_id, timeout=300)
-assert instance.get_run_by_id(run.run_id).status == DagsterRunStatus.SUCCESS
+poll_for_finished_run(instance, run.run_id, timeout=300) # pyright: ignore[reportOptionalMemberAccess]
+assert instance.get_run_by_id(run.run_id).status == DagsterRunStatus.SUCCESS # pyright: ignore[reportOptionalMemberAccess]


@pytest.fixture
@@ -183,7 +183,7 @@ def test_docker_monitoring_run_out_of_attempts(aws_env):
find_local_test_image(docker_image)

run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"ops": {
"multiply_the_word_slow": {
@@ -230,17 +230,17 @@ def test_docker_monitoring_run_out_of_attempts(aws_env):

start_time = time.time()
while time.time() - start_time < 60:
-run = instance.get_run_by_id(run.run_id)
-if run.status == DagsterRunStatus.STARTED:
+run = instance.get_run_by_id(run.run_id) # pyright: ignore[reportOptionalMemberAccess]
+if run.status == DagsterRunStatus.STARTED: # pyright: ignore[reportOptionalMemberAccess]
break
-assert run.status == DagsterRunStatus.STARTING
+assert run.status == DagsterRunStatus.STARTING # pyright: ignore[reportOptionalMemberAccess]
time.sleep(1)

time.sleep(3)

-instance.run_launcher._get_container( # noqa: SLF001
-instance.get_run_by_id(run.run_id)
+instance.run_launcher._get_container( # noqa: SLF001 # pyright: ignore[reportAttributeAccessIssue]
+instance.get_run_by_id(run.run_id) # pyright: ignore[reportOptionalMemberAccess]
).stop(timeout=0)

-poll_for_finished_run(instance, run.run_id, timeout=60)
-assert instance.get_run_by_id(run.run_id).status == DagsterRunStatus.FAILURE
+poll_for_finished_run(instance, run.run_id, timeout=60) # pyright: ignore[reportOptionalMemberAccess]
+assert instance.get_run_by_id(run.run_id).status == DagsterRunStatus.FAILURE # pyright: ignore[reportOptionalMemberAccess]
@@ -242,14 +242,14 @@ def _get_error_number(error):
)[SensorDaemon.daemon_type()]

# Errors build up until there are > 5, then pull off the last
-if status.healthy is False and len(status.last_heartbeat.errors) >= 5:
-first_error_number = _get_error_number(status.last_heartbeat.errors[0])
+if status.healthy is False and len(status.last_heartbeat.errors) >= 5: # pyright: ignore[reportArgumentType,reportOptionalMemberAccess]
+first_error_number = _get_error_number(status.last_heartbeat.errors[0]) # pyright: ignore[reportOptionalSubscript,reportOptionalMemberAccess]

if first_error_number > 5:
# Verify error numbers decrease consecutively
assert [
_get_error_number(error)
-for error in status.last_heartbeat.errors
+for error in status.last_heartbeat.errors # pyright: ignore[reportOptionalIterable,reportOptionalMemberAccess]
] == list(range(first_error_number, first_error_number - 5, -1))

assert not get_daemon_statuses(
@@ -276,10 +276,10 @@ def _get_error_number(error):
)[SensorDaemon.daemon_type()]

# Error count does not rise above 5, continues to increase
-assert len(status.last_heartbeat.errors) == 5
+assert len(status.last_heartbeat.errors) == 5 # pyright: ignore[reportArgumentType,reportOptionalMemberAccess]

new_first_error_number = _get_error_number(
-status.last_heartbeat.errors[0]
+status.last_heartbeat.errors[0] # pyright: ignore[reportOptionalSubscript,reportOptionalMemberAccess]
)

assert new_first_error_number > first_error_number
@@ -307,7 +307,7 @@ def _get_error_number(error):
)[SensorDaemon.daemon_type()]

# Error count does not rise above 5
-if len(status.last_heartbeat.errors) == 0:
+if len(status.last_heartbeat.errors) == 0: # pyright: ignore[reportArgumentType,reportOptionalMemberAccess]
break

if (now - init_time).total_seconds() > 15:
@@ -322,8 +322,8 @@ def test_multiple_error_daemon(monkeypatch):

def run_loop_error(_, _ctx, _shutdown_event):
# ?message stack cls_name cause"
yield SerializableErrorInfo("foobar", None, None, None)
yield SerializableErrorInfo("bizbuz", None, None, None)
yield SerializableErrorInfo("foobar", None, None, None) # pyright: ignore[reportArgumentType]
yield SerializableErrorInfo("bizbuz", None, None, None) # pyright: ignore[reportArgumentType]

while True:
yield
@@ -354,9 +354,9 @@ def run_loop_error(_, _ctx, _shutdown_event):
instance, [SensorDaemon.daemon_type()], now.timestamp()
)[SensorDaemon.daemon_type()]

-if status.healthy is False and len(status.last_heartbeat.errors) == 2:
-assert status.last_heartbeat.errors[0].message.strip() == "bizbuz"
-assert status.last_heartbeat.errors[1].message.strip() == "foobar"
+if status.healthy is False and len(status.last_heartbeat.errors) == 2: # pyright: ignore[reportArgumentType,reportOptionalMemberAccess]
+assert status.last_heartbeat.errors[0].message.strip() == "bizbuz" # pyright: ignore[reportOptionalSubscript,reportOptionalMemberAccess]
+assert status.last_heartbeat.errors[1].message.strip() == "foobar" # pyright: ignore[reportOptionalSubscript,reportOptionalMemberAccess]
break

if (now - init_time).total_seconds() > 10:
@@ -403,7 +403,7 @@ def test_warn_multiple_daemons(capsys):
now.timestamp(),
heartbeat_interval_seconds=heartbeat_interval_seconds,
)[SensorDaemon.daemon_type()]
-last_heartbeat_time = status.last_heartbeat.timestamp
+last_heartbeat_time = status.last_heartbeat.timestamp # pyright: ignore[reportOptionalMemberAccess]

# No warning when a second controller starts up again
with daemon_controller_from_instance(
@@ -437,7 +437,7 @@ def test_warn_multiple_daemons(capsys):
now.timestamp(),
heartbeat_interval_seconds=heartbeat_interval_seconds,
)[SensorDaemon.daemon_type()]
-last_heartbeat_time = status.last_heartbeat.timestamp
+last_heartbeat_time = status.last_heartbeat.timestamp # pyright: ignore[reportOptionalMemberAccess]

# Starting up a controller while one is running produces the warning though
with daemon_controller_from_instance(
@@ -92,7 +92,7 @@ def test_no_memory_leaks():
growth = objgraph.growth(
limit=10,
filter=lambda obj: inspect.getmodule(obj)
and "dagster" in inspect.getmodule(obj).__name__,
and "dagster" in inspect.getmodule(obj).__name__, # pyright: ignore[reportOptionalMemberAccess]
)
while True:
time.sleep(30)
@@ -103,7 +103,7 @@ def test_no_memory_leaks():
growth = objgraph.growth(
limit=10,
filter=lambda obj: inspect.getmodule(obj)
and "dagster" in inspect.getmodule(obj).__name__,
and "dagster" in inspect.getmodule(obj).__name__, # pyright: ignore[reportOptionalMemberAccess]
)
if not growth:
print( # noqa: T201
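
Both hunks in this file suppress the same thing: the truthiness test on the first `inspect.getmodule(obj)` call does not narrow the result of the second, independent call, so pyright still sees `Optional[ModuleType]` when `.__name__` is read. Binding the module once — a sketch, not what the commit does — removes both the duplicate call and the need for the comment:

    import inspect

    import objgraph  # third-party; mirrors the call in the diff

    def dagster_growth():
        return objgraph.growth(
            limit=10,
            # The walrus binds the module once, so the `is not None` check
            # narrows the same value that __name__ is read from.
            filter=lambda obj: (m := inspect.getmodule(obj)) is not None
            and "dagster" in m.__name__,
        )
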
24 changes: 12 additions & 12 deletions integration_tests/test_suites/k8s-test-suite/tests/test_executor.py
@@ -51,8 +51,8 @@ def test_k8s_run_launcher_default(
webserver_url_for_k8s_run_launcher,
):
run_config = merge_dicts(
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")), # pyright: ignore[reportArgumentType]
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -79,7 +79,7 @@ def test_k8s_run_launcher_volume_mounts(
webserver_url_for_k8s_run_launcher,
):
run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -109,8 +109,8 @@ def test_k8s_executor_get_config_from_run_launcher(
):
# Verify that if you do not specify executor config it is delegated by the run launcher
run_config = merge_dicts(
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")), # pyright: ignore[reportArgumentType]
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {"config": {"job_image": dagster_docker_image}},
},
@@ -134,8 +134,8 @@ def test_k8s_executor_combine_configs(
# from run launcher config and executor config. Also includes each executor secret
# twice to verify that duplicates within the combined config are acceptable
run_config = merge_dicts(
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")), # pyright: ignore[reportArgumentType]
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -242,8 +242,8 @@ def test_k8s_run_launcher_image_from_origin(
check.invariant(not celery_pod_names)

run_config = merge_dicts(
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")), # pyright: ignore[reportArgumentType]
load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -280,7 +280,7 @@ def test_k8s_run_launcher_terminate(
job_name = "slow_job_k8s"

run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -344,7 +344,7 @@ def test_k8s_executor_resource_requirements(
check.invariant(not celery_pod_names)

run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -382,7 +382,7 @@ def test_execute_on_k8s_retry_job(
webserver_url_for_k8s_run_launcher,
):
run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
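
Nearly every suppression in the file above is the same `reportArgumentType` on `load_yaml_from_path(...)` being passed to `merge_dicts`, which suggests the loader is typed with a broader (or Optional) return type than the mapping the merge expects — the actual signatures are not shown in this diff. A hypothetical helper (not from the dagster codebase) that validates the shape once would give every call site a precise type instead of a per-line comment:

    from typing import Any, Mapping

    import yaml

    def load_yaml_mapping(path: str) -> Mapping[str, Any]:
        # Validate once so callers see Mapping[str, Any] rather than a loose
        # object/Optional type that trips reportArgumentType at each call site.
        with open(path) as f:
            data = yaml.safe_load(f)
        if not isinstance(data, dict):
            raise ValueError(f"expected a top-level mapping in {path}, got {type(data)!r}")
        return data

Whether that kind of refactor is worth doing across a 408-file cleanup is a judgment call; the per-line ignores keep this commit mechanical.
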
@@ -69,7 +69,9 @@ def test_k8s_run_launcher_default(
job_name = "demo_job"

run_id = launch_run_over_graphql(
-webserver_url_for_k8s_run_launcher, run_config=run_config, job_name=job_name
+webserver_url_for_k8s_run_launcher,
+run_config=run_config, # pyright: ignore[reportArgumentType]
+job_name=job_name,
)

result = wait_for_job_and_get_raw_logs(
@@ -158,7 +160,9 @@ def test_failing_k8s_run_launcher(
job_name = "always_fail_job"

run_id = launch_run_over_graphql(
-webserver_url_for_k8s_run_launcher, run_config=run_config, job_name=job_name
+webserver_url_for_k8s_run_launcher,
+run_config=run_config, # pyright: ignore[reportArgumentType]
+job_name=job_name,
)

result = wait_for_job_and_get_raw_logs(
@@ -185,7 +189,9 @@ def test_k8s_run_launcher_terminate(
)

run_id = launch_run_over_graphql(
-webserver_url_for_k8s_run_launcher, run_config=run_config, job_name=job_name
+webserver_url_for_k8s_run_launcher,
+run_config=run_config, # pyright: ignore[reportArgumentType]
+job_name=job_name,
)

DagsterKubernetesClient.production_client().wait_for_job(
@@ -225,7 +231,9 @@ def test_k8s_run_launcher_secret_from_deployment(
job_name = "demo_job"

run_id = launch_run_over_graphql(
-webserver_url_for_k8s_run_launcher, run_config=run_config, job_name=job_name
+webserver_url_for_k8s_run_launcher,
+run_config=run_config, # pyright: ignore[reportArgumentType]
+job_name=job_name,
)

result = wait_for_job_and_get_raw_logs(
@@ -25,7 +25,7 @@ def test_k8s_run_monitoring_startup_fail(
webserver_url_for_k8s_run_launcher,
):
run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {
@@ -70,7 +70,7 @@ def test_k8s_run_monitoring_resume(
webserver_url_for_k8s_run_launcher,
):
run_config = merge_dicts(
-load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
+load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")), # pyright: ignore[reportArgumentType]
{
"execution": {
"config": {