Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: arrow版本升级到1.2.3 --story=121438487 #4631

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
6 changes: 3 additions & 3 deletions bkmonitor/alarm_backends/core/cache/strategy.py
Original file line number Diff line number Diff line change
Expand Up @@ -500,12 +500,12 @@ def handle_strategy(cls, strategy: Dict, invalid_strategy_dict=None) -> bool:
"""
策略预处理
"""
strategy["update_time"] = arrow.get(strategy["update_time"]).timestamp
strategy["create_time"] = arrow.get(strategy["create_time"]).timestamp
strategy["update_time"] = arrow.get(strategy["update_time"]).int_timestamp
strategy["create_time"] = arrow.get(strategy["create_time"]).int_timestamp

for item in strategy["items"]:
# 补充item的更新时间
item["update_time"] = arrow.get(strategy["update_time"]).timestamp
item["update_time"] = arrow.get(strategy["update_time"]).int_timestamp

query_config = item["query_configs"][0]
data_source_label = query_config["data_source_label"]
Expand Down
10 changes: 5 additions & 5 deletions bkmonitor/alarm_backends/core/context/chart.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,17 +71,17 @@ def get_chart_data(item: Item, source_time, title=""):
interval = max(data_source.interval for data_source in unify_query.data_sources)
chart_option = {_("今日"): 0, _("昨日"): -1, _("上周"): -7}

start_time = source_time.replace(hours=-max(interval * 5 // 3600, 1))
end_time = source_time.replace(minutes=max(interval // 60, 5))
start_time = source_time.shift(hours=-max(interval * 5 // 3600, 1))
end_time = source_time.shift(minutes=max(interval // 60, 5))

unit = load_unit(item.unit)

series = []
for name, offset in list(chart_option.items()):
data = []
records = unify_query.query_data(
start_time=start_time.replace(days=offset).timestamp * 1000,
end_time=(end_time.replace(days=offset) if offset != 0 else source_time.replace(seconds=interval)).timestamp
start_time=start_time.shift(days=offset).int_timestamp * 1000,
end_time=(end_time.shift(days=offset) if offset != 0 else source_time.shift(seconds=interval)).int_timestamp
* 1000,
)
for record in records:
Expand All @@ -99,7 +99,7 @@ def get_chart_data(item: Item, source_time, title=""):
"chart_type": "spline",
"title": title or item.name,
"subtitle": item.query_configs[0].get("metric_field", ""),
"source_timestamp": source_time.timestamp * 1000,
"source_timestamp": source_time.int_timestamp * 1000,
"locale": i18n.get_locale().replace("_", "-"),
"timezone": timezone,
"series": series,
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/management/story/kernel_story.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ def check(self):
now_ts = arrow.now()
try:
records = query.query_data(
start_time=now_ts.replace(minutes=-1).timestamp * 1000, end_time=now_ts.timestamp * 1000
start_time=now_ts.shift(minutes=-1).int_timestamp * 1000, end_time=now_ts.int_timestamp * 1000
)
except Exception as e:
return APIERROR("UnifyQuery.query_data Error: %s" % e, self.story)
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/service/access/data/filters.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def filter(self, record):
utctime = record.time
# 丢弃超过max(半个小时 或者 10个周期延迟)的告警
expire_seconds = max([record.items[0].query_configs[0]["agg_interval"] * 10, 30 * constants.CONST_MINUTES])
if arrow.utcnow().timestamp - arrow.get(utctime).timestamp > expire_seconds:
if arrow.utcnow().int_timestamp - arrow.get(utctime).int_timestamp > expire_seconds:
logger.info("Discard the data(%s) because it takes more than 30 minutes" % record.raw_data)
return True
else:
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/service/access/data/processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,7 @@ def pull(self):
if not self.items:
return

now_timestamp = arrow.utcnow().timestamp
now_timestamp = arrow.utcnow().int_timestamp

# 设置查询时间范围
self.get_query_time_range(now_timestamp)
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/service/access/event/filters.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class ExpireFilter(Filter):
def filter(self, event_record):
utctime = event_record.event_time
# 丢弃超过半个小时的告警
if arrow.utcnow().timestamp - arrow.get(utctime).timestamp > 30 * constants.CONST_MINUTES:
if arrow.utcnow().int_timestamp - arrow.get(utctime).int_timestamp > 30 * constants.CONST_MINUTES:
logger.info("Discard the alarm (%s) because " "it takes more than 30 minutes" % event_record.raw_data)
return True
else:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def dimensions(self):

@cached_property
def event_time(self):
return arrow.get(self.raw_data["_time_"]).timestamp
return arrow.get(self.raw_data["_time_"]).int_timestamp

@cached_property
def md5_dimension(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def flat(self):
"%Y-%m-%d %H:%M:%S"
)
if not alarm_time:
alarm_time = datetime.utcfromtimestamp(arrow.utcnow().timestamp).strftime("%Y-%m-%d %H:%M:%S")
alarm_time = datetime.utcfromtimestamp(arrow.utcnow().int_timestamp).strftime("%Y-%m-%d %H:%M:%S")
dimension = data.get("dimension", {})
dimension["event_name"] = data.get("event_name")
new_alarm = {
Expand Down
14 changes: 7 additions & 7 deletions bkmonitor/alarm_backends/service/converge/dimension.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def __init__(
self.condition = condition
self.strategy_id = strategy_id
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp or arrow.utcnow().timestamp
self.end_timestamp = end_timestamp or arrow.utcnow().int_timestamp
self.instance_id = instance_id
self.instance_type = instance_type
self.converged_condition = converged_condition
Expand Down Expand Up @@ -177,7 +177,7 @@ def calc_dimension(self):
"""
收敛维度计算
"""
score = arrow.get(self.related_instance.create_time).replace(tzinfo="utc").timestamp
score = arrow.get(self.related_instance.create_time).replace(tzinfo="utc").int_timestamp
pipeline = FTA_CONVERGE_DIMENSION_KEY.client.pipeline()
for dimension in COMPARED_CONVERGE_DIMENSION.keys():
values = self.converge_ctx.get(dimension)
Expand All @@ -193,8 +193,8 @@ def calc_dimension(self):
# 先清理过期的数据
pipeline.zremrangebyscore(
key,
arrow.utcnow().replace(years=-1).timestamp,
arrow.utcnow().replace(minutes=-self.DimensionExpireMinutes).timestamp,
arrow.utcnow().shift(years=-1).int_timestamp,
arrow.utcnow().shift(minutes=-self.DimensionExpireMinutes).int_timestamp,
)
# 保存的score
kwargs = {"{}_{}".format(self.instance_type, str(self.related_instance.id)): score}
Expand All @@ -208,7 +208,7 @@ def calc_sub_converge_dimension(self):
二级收敛的维度计算
:return:
"""
score = arrow.get(self.related_instance.create_time).replace(tzinfo="utc").timestamp
score = arrow.get(self.related_instance.create_time).replace(tzinfo="utc").int_timestamp

label_info = {}
for dimension in SUB_CONVERGE_DIMENSION.keys():
Expand All @@ -225,8 +225,8 @@ def calc_sub_converge_dimension(self):
# 先清理过期的数据
pipeline.zremrangebyscore(
sub_converge_key,
arrow.utcnow().replace(years=-1).timestamp,
arrow.utcnow().replace(minutes=-self.DimensionExpireMinutes).timestamp,
arrow.utcnow().shift(years=-1).int_timestamp,
arrow.utcnow().shift(minutes=-self.DimensionExpireMinutes).int_timestamp,
)
# 保存的score
kwargs = {"{}_{}".format(self.instance_type, str(self.related_instance.id)): score}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ def can_send_start_notice(self):

# 判断依据:x 分钟后在屏蔽范围
begin_time = self.get_now_datetime()
end_time = begin_time.replace(minutes=notice_time)
end_time = begin_time.shift(minutes=notice_time)
is_time_match = self.time_check.is_match(end_time)

if not is_time_match:
Expand All @@ -263,7 +263,7 @@ def can_send_end_notice(self):

# 判断依据:x 分钟后不在屏蔽范围
begin_time = self.get_now_datetime()
end_time = begin_time.replace(minutes=notice_time + 1)
end_time = begin_time.shift(minutes=notice_time + 1)
is_time_match = not self.time_check.is_match(end_time)

if not is_time_match:
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/service/nodata/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def handle(self):
return

logger.info("[nodata] get leader now")
now_timestamp = arrow.utcnow().timestamp - constants.CONST_MINUTES
now_timestamp = arrow.utcnow().int_timestamp - constants.CONST_MINUTES
strategy_ids = StrategyCacheManager.get_nodata_strategy_ids()
published = []
for strategy_id in strategy_ids:
Expand Down
2 changes: 1 addition & 1 deletion bkmonitor/alarm_backends/service/report/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def operation_data_custom_report_v2():

report_tool = custom_report_tool(bk_data_id)

timestamp = arrow.now().timestamp
timestamp = arrow.now().int_timestamp
# 获取运营数据,更新时间大于1天前的直接忽略
statistics = StatisticsMetric.objects.filter(update_time__gte=timestamp - 24 * 60 * 60)
for stat in statistics:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ async def _fetch(self, loop, target_keys: list):
return

finished, _ = await asyncio.wait(tasks)
timestamp = arrow.now().timestamp * 1000
timestamp = arrow.now().int_timestamp * 1000
for metrics in finished:
result = metrics.result()
if result:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,6 @@


class TestCleanResult(TestCase):

databases = {"monitor_api", "default"}

def setUp(self):
Expand All @@ -89,9 +88,9 @@ def setUp(self):
self.strategy_cache_patcher.start()

self.strategies = STRATEGIES
self.now_timestamp = arrow.utcnow().timestamp
self.three_hours_ago = arrow.utcnow().replace(hours=-3).timestamp
self.two_hours_ago = arrow.utcnow().replace(hours=-2).timestamp
self.now_timestamp = arrow.utcnow().int_timestamp
self.three_hours_ago = arrow.utcnow().shift(hours=-3).int_timestamp
self.two_hours_ago = arrow.utcnow().shift(hours=-2).int_timestamp
check_result_data = {
"{}|{}".format(self.three_hours_ago, "ANOMALY"): self.three_hours_ago,
"{}|{}".format(self.now_timestamp, "ANOMALY"): self.now_timestamp,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,13 +46,13 @@ def test_filter(self, mocker):

arrow_now = arrow.utcnow()
raw_data_1 = copy.deepcopy(RAW_DATA)
raw_data_1["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-28)).timestamp
raw_data_1["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-28)).int_timestamp
raw_data_2 = copy.deepcopy(RAW_DATA)
raw_data_2["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-29)).timestamp
raw_data_2["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-29)).int_timestamp
raw_data_3 = copy.deepcopy(RAW_DATA)
raw_data_3["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-30)).timestamp
raw_data_3["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-30)).int_timestamp
raw_data_4 = copy.deepcopy(RAW_DATA)
raw_data_4["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-31)).timestamp
raw_data_4["_time_"] = arrow.get(arrow_now.datetime + datetime.timedelta(minutes=-31)).int_timestamp

record = DataRecord(strategy.items[0], raw_data_1)
assert f.filter(record) is False
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
utc_now = now.to("utc")
now_str = str(now.to("local").naive)
utc_now_str = str(now.to("utc").naive)
utc_timestamp = now.timestamp
utc_timestamp = now.int_timestamp

AGENT_LOSE_DATA = {
"utctime2": utc_now_str,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ class TestCloseStatusChecker(TestCase):

def setUp(self) -> None:
LAST_CHECKPOINTS_CACHE_KEY.client.flushall()
check_time = arrow.now().replace(seconds=-200).timestamp
check_time = arrow.now().shift(seconds=-200).int_timestamp
LAST_CHECKPOINTS_CACHE_KEY.client.hset(
LAST_CHECKPOINTS_CACHE_KEY.get_key(strategy_id=1, item_id=1),
LAST_CHECKPOINTS_CACHE_KEY.get_field(
Expand Down Expand Up @@ -197,7 +197,7 @@ def test_no_data_in_30_minutes_close(self):
checker.check_all()
self.assertEqual(alert.status, EventStatus.ABNORMAL)

check_time = arrow.now().replace(seconds=-310 - 30 * 60).timestamp
check_time = arrow.now().shift(seconds=-310 - 30 * 60).int_timestamp
LAST_CHECKPOINTS_CACHE_KEY.client.hset(
LAST_CHECKPOINTS_CACHE_KEY.get_key(strategy_id=1, item_id=1),
LAST_CHECKPOINTS_CACHE_KEY.get_field(
Expand All @@ -224,7 +224,7 @@ def test_with_big_window_unit_no_closed(self):
self.assertEqual(alert.status, EventStatus.ABNORMAL)

# 汇聚周期为11分钟,检测无数据时间应该是 5个周期 * 11分钟, 55分钟之内存在数据即表示不关闭
check_time = arrow.now().replace(seconds=-40 * 60).timestamp
check_time = arrow.now().shift(seconds=-40 * 60).int_timestamp
LAST_CHECKPOINTS_CACHE_KEY.client.hset(
LAST_CHECKPOINTS_CACHE_KEY.get_key(strategy_id=1, item_id=1),
LAST_CHECKPOINTS_CACHE_KEY.get_field(
Expand All @@ -247,7 +247,7 @@ def test_no_data_with_big_window_unit_close(self):
alert.top_event["target_type"] = ""

# 汇聚周期为11分钟,检测无数据时间应该是 5个周期 * 11分钟, 55分钟之内不存在数据即表示关闭
check_time = arrow.now().replace(seconds=-56 * 60).timestamp
check_time = arrow.now().shift(seconds=-56 * 60).int_timestamp
LAST_CHECKPOINTS_CACHE_KEY.client.hset(
LAST_CHECKPOINTS_CACHE_KEY.get_key(strategy_id=1, item_id=1),
LAST_CHECKPOINTS_CACHE_KEY.get_field(
Expand Down
Loading