Merge branch 'master' into jferg/new-schema
JoshFerge authored Jan 8, 2025
2 parents f9f965e + b3f39c2 commit 72394d5
Showing 85 changed files with 1,006 additions and 1,955 deletions.
1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -68,6 +68,7 @@

## Dev
/devenv/ @getsentry/owners-sentry-dev @getsentry/dev-infra
+/devservices/ @getsentry/owners-sentry-dev @getsentry/dev-infra
/.github/ @getsentry/owners-sentry-dev
/config/hooks/ @getsentry/owners-sentry-dev
/scripts/ @getsentry/owners-sentry-dev
51 changes: 51 additions & 0 deletions bin/benchmark_detectors
@@ -0,0 +1,51 @@
#!/usr/bin/env python
# isort: skip_file

"""
This script benchmarks the performance of issue detectors in Sentry.
NOTE: This currently only supports FileIOMainThreadDetector.
Usage: python benchmark_detectors
"""
from sentry.runner import configure

configure()
import time
import sentry_sdk
from sentry.testutils.performance_issues.event_generators import get_event # noqa: S007
from sentry.utils.performance_issues.detectors import FileIOMainThreadDetector
from sentry.utils.performance_issues.performance_detection import (
    get_detection_settings,
    run_detector_on_data,
)

sentry_sdk.init(None)


def main():
    settings = get_detection_settings()

    # 10 events: 1 ignored via its file path, 1 matching, and 8 that do not block the main thread
    events = [get_event("file-io-on-main-thread") for _ in range(0, 10)]
    events[0]["spans"][0]["data"]["file.path"] = "somethins/stuff/blah/yup/KBLayout_iPhone.dat"
    for i in range(2, 10):
        events[i]["spans"][0]["data"]["blocked_main_thread"] = False

    count = 100_000

    start = time.perf_counter()
    for _ in range(0, count):
        for event in events:
            detector = FileIOMainThreadDetector(settings, event)
            run_detector_on_data(detector, event)
    elapsed = time.perf_counter() - start

    ops = count * len(events)
    print(f"{ops:,} ops")  # noqa
    print(f"{elapsed:.3f} s")  # noqa
    print(f"{ops/elapsed:,.2f} ops/s")  # noqa


if __name__ == "__main__":
    main()
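
The script hard-codes FileIOMainThreadDetector, as its docstring notes. If it grows to cover more detectors, the measurement loop generalizes naturally; a minimal sketch (the detector class and fixture name in the usage comment are illustrative assumptions, not part of this commit):

def benchmark(detector_cls, fixture_name, count=100_000):
    # Same measurement loop as main(), parameterized over the detector.
    settings = get_detection_settings()
    events = [get_event(fixture_name) for _ in range(10)]
    start = time.perf_counter()
    for _ in range(count):
        for event in events:
            run_detector_on_data(detector_cls(settings, event), event)
    elapsed = time.perf_counter() - start
    print(f"{count * len(events) / elapsed:,.2f} ops/s")  # noqa

# Hypothetical usage, assuming such a detector and fixture exist:
# benchmark(SlowDBQueryDetector, "slow-db-query")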
14 changes: 14 additions & 0 deletions devservices/config.yml
@@ -49,12 +49,15 @@ x-sentry-service-config:
        branch: main
        repo_link: https://github.com/getsentry/taskbroker.git
        mode: containerized
+    rabbitmq:
+      description: Messaging and streaming broker
  modes:
    default: [snuba, postgres, relay]
    migrations: [postgres, redis]
    acceptance-ci: [postgres, snuba, chartcuterie]
    taskbroker: [snuba, postgres, relay, taskbroker]
    backend-ci: [snuba, postgres, redis, bigtable, redis-cluster, symbolicator]
+    rabbitmq: [postgres, snuba, rabbitmq]

services:
  postgres:
@@ -111,6 +114,17 @@ services:
      - host.docker.internal:host-gateway
    environment:
      - IP=0.0.0.0
+  rabbitmq:
+    image: ghcr.io/getsentry/image-mirror-library-rabbitmq:3-management
+    ports:
+      - '127.0.0.1:5672:5672'
+      - '127.0.0.1:15672:15672'
+    networks:
+      - devservices
+    extra_hosts:
+      - host.docker.internal:host-gateway
+    environment:
+      - IP=0.0.0.0

networks:
  devservices:
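
Once the rabbitmq mode is running, the broker is reachable on the mapped ports above (AMQP on 5672, the management UI on 15672). A quick connectivity check, assuming pika is installed and the image's default guest/guest credentials (a sketch, not part of this commit):

import pika

# The devservices config maps AMQP to 127.0.0.1:5672; the
# rabbitmq:3-management image ships a default guest/guest user.
params = pika.ConnectionParameters(
    host="127.0.0.1",
    port=5672,
    credentials=pika.PlainCredentials("guest", "guest"),
)
with pika.BlockingConnection(params) as connection:
    channel = connection.channel()
    channel.queue_declare(queue="devservices-healthcheck", auto_delete=True)
    print("rabbitmq is reachable")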
4 changes: 2 additions & 2 deletions migrations_lockfile.txt
@@ -7,15 +7,15 @@ will then be regenerated, and you should be able to merge without conflicts.

feedback: 0004_index_together

-hybridcloud: 0017_add_scoping_organization_apitokenreplica
+hybridcloud: 0018_add_alert_and_member_invite_scopes_to_sentry_apps

nodestore: 0002_nodestore_no_dictfield

remote_subscriptions: 0003_drop_remote_subscription

replays: 0004_index_together

-sentry: 0804_delete_metrics_key_indexer_pt2
+sentry: 0806_remove_monitor_attachment_id_pt1

social_auth: 0002_default_auto_field

3 changes: 2 additions & 1 deletion src/sentry/api/endpoints/organization_events.py
@@ -595,7 +595,8 @@ def _discover_data_fn(
            discover_query.dataset is not DiscoverSavedQueryTypes.DISCOVER
        )
        if does_widget_have_split:
-            return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
+            with handle_query_errors():
+                return _data_fn(scoped_dataset_query, offset, limit, scoped_query)

        dataset_inferred_from_query = dataset_split_decision_inferred_from_query(
            self.get_field_list(organization, request),
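
The fix above routes the widget-split early return through handle_query_errors(), so query failures surface as handled API errors instead of unhandled exceptions. A self-contained sketch of the pattern (the error types here are stand-ins, not Sentry's actual classes):

from contextlib import contextmanager


class QueryExecutionError(Exception):
    """Stand-in for a low-level query failure."""


class APIBadRequest(Exception):
    """Stand-in for an error the API layer renders as a 4xx response."""


@contextmanager
def handle_query_errors_sketch():
    # Translate query-layer failures into API-layer errors at the boundary.
    try:
        yield
    except QueryExecutionError as exc:
        raise APIBadRequest(str(exc)) from exc


try:
    with handle_query_errors_sketch():
        raise QueryExecutionError("query timed out")
except APIBadRequest as exc:
    print(f"400: {exc}")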
1 change: 1 addition & 0 deletions src/sentry/api/serializers/models/dashboard.py
@@ -102,6 +102,7 @@ def serialize(self, obj, attrs, user, **kwargs) -> DashboardWidgetResponse:
                obj.dashboard.organization,
                actor=user,
            )
+            and obj.widget_type == DashboardWidgetTypes.DISCOVER
            and obj.discover_widget_split is not None
        ):
            widget_type = DashboardWidgetTypes.get_type_name(obj.discover_widget_split)
11 changes: 11 additions & 0 deletions src/sentry/api/serializers/rest_framework/dashboard.py
@@ -868,6 +868,17 @@ def update_widget(self, widget, data, order):
        widget.limit = data.get("limit", widget.limit)
        widget.dataset_source = data.get("dataset_source", widget.dataset_source)
        widget.detail = {"layout": data.get("layout", prev_layout)}
+
+        if widget.widget_type not in [
+            DashboardWidgetTypes.DISCOVER,
+            DashboardWidgetTypes.TRANSACTION_LIKE,
+            DashboardWidgetTypes.ERROR_EVENTS,
+        ]:
+            # Reset the discover split fields if the widget type is no longer
+            # a discover/errors/transactions widget
+            widget.discover_widget_split = None
+            widget.dataset_source = DatasetSourcesTypes.UNKNOWN.value

        widget.save()

        if "queries" in data:
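
The new block keeps discover_widget_split from going stale: once a widget is edited into a type outside the discover family, the split decision and its dataset source no longer apply. A standalone sketch of that invariant (the enum values are illustrative, not the real DashboardWidgetTypes):

from enum import IntEnum


class WidgetType(IntEnum):
    # Illustrative stand-ins for DashboardWidgetTypes; real values differ.
    DISCOVER = 0
    ISSUE = 1
    TRANSACTION_LIKE = 2
    ERROR_EVENTS = 3


DISCOVER_FAMILY = {WidgetType.DISCOVER, WidgetType.TRANSACTION_LIKE, WidgetType.ERROR_EVENTS}


def resolve_split(widget_type, split):
    # A split decision only means something for discover-family widgets;
    # for anything else it is dropped, mirroring update_widget above.
    return split if widget_type in DISCOVER_FAMILY else None


assert resolve_split(WidgetType.DISCOVER, WidgetType.ERROR_EVENTS) == WidgetType.ERROR_EVENTS
assert resolve_split(WidgetType.ISSUE, WidgetType.ERROR_EVENTS) is None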
87 changes: 87 additions & 0 deletions src/sentry/hybridcloud/migrations/0018_add_alert_and_member_invite_scopes_to_sentry_apps.py
@@ -0,0 +1,87 @@
# Generated by Django 5.1.4 on 2025-01-06 19:59

from django.db import migrations

import bitfield.models
from sentry.new_migrations.migrations import CheckedMigration


class Migration(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = False

    dependencies = [
        ("hybridcloud", "0017_add_scoping_organization_apitokenreplica"),
    ]

    operations = [
        migrations.AlterField(
            model_name="apikeyreplica",
            name="scopes",
            field=bitfield.models.BitField(
                [
                    "project:read",
                    "project:write",
                    "project:admin",
                    "project:releases",
                    "team:read",
                    "team:write",
                    "team:admin",
                    "event:read",
                    "event:write",
                    "event:admin",
                    "org:read",
                    "org:write",
                    "org:admin",
                    "member:read",
                    "member:write",
                    "member:admin",
                    "org:integrations",
                    "alerts:read",
                    "alerts:write",
                    "member:invite",
                ],
                default=None,
            ),
        ),
        migrations.AlterField(
            model_name="apitokenreplica",
            name="scopes",
            field=bitfield.models.BitField(
                [
                    "project:read",
                    "project:write",
                    "project:admin",
                    "project:releases",
                    "team:read",
                    "team:write",
                    "team:admin",
                    "event:read",
                    "event:write",
                    "event:admin",
                    "org:read",
                    "org:write",
                    "org:admin",
                    "member:read",
                    "member:write",
                    "member:admin",
                    "org:integrations",
                    "alerts:read",
                    "alerts:write",
                    "member:invite",
                ],
                default=None,
            ),
        ),
    ]
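
Both AlterField operations end the flag list with "alerts:read", "alerts:write", and "member:invite", matching the migration's name. In django-bitfield a flag's bit position is its index in this list, so new scopes must be appended, never inserted or reordered, or every existing row's bits would silently remap. The semantics in plain ints (a sketch, not the library's code):

# Flag order == bit position, so appending preserves existing rows' bits.
flags = ["project:read", "project:write", "project:admin", "member:invite"]

def mask(scopes):
    # Each scope contributes 1 << (its index in the flag list).
    return sum(1 << flags.index(scope) for scope in scopes)

old_mask = mask(["project:read", "project:admin"])
assert old_mask == 0b101  # unchanged by appending "member:invite"
assert mask(["member:invite"]) == 0b1000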
68 changes: 2 additions & 66 deletions src/sentry/incidents/models/alert_rule.py
@@ -3,9 +3,8 @@
import abc
import logging
from collections.abc import Callable, Collection, Iterable
-from datetime import timedelta
from enum import Enum, IntEnum, StrEnum
-from typing import TYPE_CHECKING, Any, ClassVar, Protocol, Self
+from typing import TYPE_CHECKING, Any, ClassVar, Self

from django.conf import settings
from django.core.cache import cache
@@ -42,7 +41,7 @@
)
from sentry.seer.anomaly_detection.delete_rule import delete_rule_in_seer
from sentry.snuba.models import QuerySubscription
-from sentry.snuba.subscriptions import bulk_create_snuba_subscriptions, delete_snuba_subscription
+from sentry.snuba.subscriptions import bulk_create_snuba_subscriptions
from sentry.types.actor import Actor
from sentry.users.services.user import RpcUser
from sentry.users.services.user.service import user_service
@@ -55,34 +54,6 @@
logger = logging.getLogger(__name__)


-class SubscriptionCallback(Protocol):
-    def __call__(self, subscription: QuerySubscription, *args: Any, **kwargs: Any) -> bool: ...
-
-
-alert_subscription_callback_registry: dict[AlertRuleMonitorTypeInt, SubscriptionCallback] = {}
-
-
-def register_alert_subscription_callback(
-    monitor_type: AlertRuleMonitorTypeInt,
-) -> Callable[[Callable], Callable]:
-    def decorator(func: Callable) -> Callable:
-        alert_subscription_callback_registry[monitor_type] = func
-        return func
-
-    return decorator
-
-
-def invoke_alert_subscription_callback(
-    monitor_type: AlertRuleMonitorTypeInt, subscription: QuerySubscription, **kwargs: Any
-) -> bool:
-    try:
-        callback = alert_subscription_callback_registry[monitor_type]
-    except KeyError:
-        return False
-
-    return callback(subscription, **kwargs)

class AlertRuleStatus(Enum):
PENDING = 0
SNAPSHOT = 4
@@ -730,41 +701,6 @@ class Meta:
        db_table = "sentry_alertruleactivity"


-@register_alert_subscription_callback(AlertRuleMonitorTypeInt.ACTIVATED)
-def update_alert_activations(
-    subscription: QuerySubscription, alert_rule: AlertRule, value: float
-) -> bool:
-    if subscription.snuba_query is None:
-        return False
-
-    now = timezone.now()
-    subscription_end = subscription.date_added + timedelta(
-        seconds=subscription.snuba_query.time_window
-    )
-
-    if now > subscription_end:
-        logger.info(
-            "alert activation monitor finishing",
-            extra={
-                "subscription_window": subscription.snuba_query.time_window,
-                "date_added": subscription.date_added,
-                "now": now,
-            },
-        )
-
-        alert_rule.activations.filter(finished_at=None, query_subscription=subscription).update(
-            metric_value=value, finished_at=now
-        )
-        # NOTE: QuerySubscription deletion will set fk to null on the activation
-        delete_snuba_subscription(subscription)
-    else:
-        alert_rule.activations.filter(finished_at=None, query_subscription=subscription).update(
-            metric_value=value
-        )
-
-    return True


post_delete.connect(AlertRuleManager.clear_subscription_cache, sender=QuerySubscription)
post_delete.connect(AlertRuleManager.delete_data_in_seer, sender=AlertRule)
post_save.connect(AlertRuleManager.clear_subscription_cache, sender=QuerySubscription)
11 changes: 0 additions & 11 deletions src/sentry/incidents/subscription_processor.py
@@ -29,7 +29,6 @@
    AlertRuleThresholdType,
    AlertRuleTrigger,
    AlertRuleTriggerActionMethod,
-    invoke_alert_subscription_callback,
)
from sentry.incidents.models.alert_rule_activations import AlertRuleActivations
from sentry.incidents.models.incident import (
@@ -461,16 +460,6 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None:
            )
            return

-        # Trigger callbacks for any AlertRules that may need to know about the subscription update
-        # Current callback will update the activation metric values & delete querysubscription on finish
-        # TODO: register over/under triggers as alert rule callbacks as well
-        invoke_alert_subscription_callback(
-            AlertRuleMonitorTypeInt(self.alert_rule.monitor_type),
-            subscription=self.subscription,
-            alert_rule=self.alert_rule,
-            value=aggregation_value,
-        )
-
        if aggregation_value is None:
            metrics.incr("incidents.alert_rules.skipping_update_invalid_aggregation_value")
            return
(The remaining 75 changed files are not shown here.)
