
Commit

Merge pull request #20 from AndrewFarley/bugfix-formatting-description-variables

Bugfix: Fixing variable formatting issue, flooring all variables
dubiety authored Dec 29, 2021
2 parents ea7cbd2 + dad79ff commit c1142ee
Showing 1 changed file with 20 additions and 20 deletions.
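
For context on the fix: the alarm descriptions interpolate periods * period / 60, and when that quotient is not a whole number the rendered text picks up a decimal fraction. A minimal sketch of the difference floor() makes (not part of this commit; the values and output names are illustrative):

# Illustrative only: with 3 periods of 90 seconds, 3 * 90 / 60 = 4.5.
locals {
  example_periods = 3
  example_period  = 90
}

output "without_floor" {
  # Renders as "over last 4.5 minute(s)"
  value = "over last ${local.example_periods * local.example_period / 60} minute(s)"
}

output "with_floor" {
  # floor() truncates to a whole number: "over last 4 minute(s)"
  value = "over last ${floor(local.example_periods * local.example_period / 60)} minute(s)"
}

Wrapping each interpolated expression in floor(), as this commit does throughout alarms.tf, keeps the rendered descriptions to whole minutes.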
alarms.tf — 40 changes: 20 additions & 20 deletions
@@ -1,12 +1,12 @@
 locals {
   thresholds = {
-    FreeStorageSpaceThreshold        = max(var.free_storage_space_threshold, 0)
-    FreeStorageSpaceTotalThreshold   = max(var.free_storage_space_total_threshold, 0)
-    MinimumAvailableNodes            = max(var.min_available_nodes, 0)
-    CPUUtilizationThreshold          = min(max(var.cpu_utilization_threshold, 0), 100)
-    JVMMemoryPressureThreshold       = min(max(var.jvm_memory_pressure_threshold, 0), 100)
-    MasterCPUUtilizationThreshold    = min(max(coalesce(var.master_cpu_utilization_threshold, var.cpu_utilization_threshold), 0), 100)
-    MasterJVMMemoryPressureThreshold = min(max(coalesce(var.master_jvm_memory_pressure_threshold, var.jvm_memory_pressure_threshold), 0), 100)
+    FreeStorageSpaceThreshold        = floor(max(var.free_storage_space_threshold, 0))
+    FreeStorageSpaceTotalThreshold   = floor(max(var.free_storage_space_total_threshold, 0))
+    MinimumAvailableNodes            = floor(max(var.min_available_nodes, 0))
+    CPUUtilizationThreshold          = floor(min(max(var.cpu_utilization_threshold, 0), 100))
+    JVMMemoryPressureThreshold       = floor(min(max(var.jvm_memory_pressure_threshold, 0), 100))
+    MasterCPUUtilizationThreshold    = floor(min(max(coalesce(var.master_cpu_utilization_threshold, var.cpu_utilization_threshold), 0), 100))
+    MasterJVMMemoryPressureThreshold = floor(min(max(coalesce(var.master_jvm_memory_pressure_threshold, var.jvm_memory_pressure_threshold), 0), 100))
   }
 }
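
The clamp-and-floor pattern above can be sanity-checked interactively; for example, with illustrative inputs in terraform console:

$ terraform console
> floor(min(max(82.5, 0), 100))
82
> floor(max(-10, 0))
0

Clamping still bounds each threshold to a valid range, while floor() ensures the stored value is a whole number wherever it is interpolated.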

@@ -21,7 +21,7 @@ resource "aws_cloudwatch_metric_alarm" "cluster_status_is_red" {
   period = var.alarm_cluster_status_is_red_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Average elasticsearch cluster status is in red over last ${var.alarm_cluster_status_is_red_periods * var.alarm_cluster_status_is_red_period / 60} minute(s)"
+  alarm_description = "Average elasticsearch cluster status is in red over last ${floor(var.alarm_cluster_status_is_red_periods * var.alarm_cluster_status_is_red_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -44,7 +44,7 @@ resource "aws_cloudwatch_metric_alarm" "cluster_status_is_yellow" {
   period = var.alarm_cluster_status_is_yellow_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Average elasticsearch cluster status is in yellow over last ${var.alarm_cluster_status_is_yellow_periods * var.alarm_cluster_status_is_yellow_period / 60} minute(s)"
+  alarm_description = "Average elasticsearch cluster status is in yellow over last ${floor(var.alarm_cluster_status_is_yellow_periods * var.alarm_cluster_status_is_yellow_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -67,7 +67,7 @@ resource "aws_cloudwatch_metric_alarm" "free_storage_space_too_low" {
   period = var.alarm_free_storage_space_too_low_period
   statistic = "Minimum"
   threshold = local.thresholds["FreeStorageSpaceThreshold"]
-  alarm_description = "Minimum free disk space on a single node under ${local.thresholds["FreeStorageSpaceThreshold"]}MB for the last ${var.alarm_free_storage_space_too_low_periods * var.alarm_free_storage_space_too_low_period / 60} minute(s)"
+  alarm_description = "Minimum free disk space on a single node under ${local.thresholds["FreeStorageSpaceThreshold"]}MB for the last ${floor(var.alarm_free_storage_space_too_low_periods * var.alarm_free_storage_space_too_low_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -91,7 +91,7 @@ resource "aws_cloudwatch_metric_alarm" "free_storage_space_total_too_low" {
   period = var.alarm_free_storage_space_total_too_low_period
   statistic = "Sum"
   threshold = local.thresholds["FreeStorageSpaceTotalThreshold"]
-  alarm_description = "Total aggregate free disk space under ${local.thresholds["FreeStorageSpaceTotalThreshold"]}MB for the last ${var.alarm_free_storage_space_total_too_low_periods * var.alarm_free_storage_space_total_too_low_period / 60} minute(s)"
+  alarm_description = "Total aggregate free disk space under ${local.thresholds["FreeStorageSpaceTotalThreshold"]}MB for the last ${floor(var.alarm_free_storage_space_total_too_low_periods * var.alarm_free_storage_space_total_too_low_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -116,7 +116,7 @@ resource "aws_cloudwatch_metric_alarm" "cluster_index_writes_blocked" {
   period = var.alarm_cluster_index_writes_blocked_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Elasticsearch index writes being blocker over last ${var.alarm_cluster_index_writes_blocked_periods * var.alarm_cluster_index_writes_blocked_period / 60} minute(s)"
+  alarm_description = "Elasticsearch index writes being blocker over last ${floor(var.alarm_cluster_index_writes_blocked_periods * var.alarm_cluster_index_writes_blocked_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -139,7 +139,7 @@ resource "aws_cloudwatch_metric_alarm" "insufficient_available_nodes" {
   period = var.alarm_min_available_nodes_period
   statistic = "Minimum"
   threshold = local.thresholds["MinimumAvailableNodes"]
-  alarm_description = "Elasticsearch nodes minimum < ${local.thresholds["MinimumAvailableNodes"]} for ${var.alarm_min_available_nodes_periods * var.alarm_min_available_nodes_period / 60} minutes(s)"
+  alarm_description = "Elasticsearch nodes minimum < ${local.thresholds["MinimumAvailableNodes"]} for ${floor(var.alarm_min_available_nodes_periods * var.alarm_min_available_nodes_period / 60)} minutes(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -162,7 +162,7 @@ resource "aws_cloudwatch_metric_alarm" "automated_snapshot_failure" {
   period = var.alarm_automated_snapshot_failure_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Elasticsearch automated snapshot failed over last ${var.alarm_automated_snapshot_failure_periods * var.alarm_automated_snapshot_failure_period / 60} minute(s)"
+  alarm_description = "Elasticsearch automated snapshot failed over last ${floor(var.alarm_automated_snapshot_failure_periods * var.alarm_automated_snapshot_failure_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -185,7 +185,7 @@ resource "aws_cloudwatch_metric_alarm" "cpu_utilization_too_high" {
   period = var.alarm_cpu_utilization_too_high_period
   statistic = "Average"
   threshold = local.thresholds["CPUUtilizationThreshold"]
-  alarm_description = "Average elasticsearch cluster CPU utilization above ${local.thresholds["CPUUtilizationThreshold"]} over last ${var.alarm_cpu_utilization_too_high_periods * var.alarm_cpu_utilization_too_high_period / 60} minute(s) too high"
+  alarm_description = "Average elasticsearch cluster CPU utilization above ${local.thresholds["CPUUtilizationThreshold"]} over last ${floor(var.alarm_cpu_utilization_too_high_periods * var.alarm_cpu_utilization_too_high_period / 60)} minute(s) too high"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   tags = var.tags
@@ -207,7 +207,7 @@ resource "aws_cloudwatch_metric_alarm" "jvm_memory_pressure_too_high" {
   period = var.alarm_jvm_memory_pressure_too_high_period
   statistic = "Maximum"
   threshold = local.thresholds["JVMMemoryPressureThreshold"]
-  alarm_description = "Elasticsearch JVM memory pressure is over ${local.thresholds["JVMMemoryPressureThreshold"]} over the last ${var.alarm_jvm_memory_pressure_too_high_periods * var.alarm_jvm_memory_pressure_too_high_period / 60} minute(s)"
+  alarm_description = "Elasticsearch JVM memory pressure is over ${local.thresholds["JVMMemoryPressureThreshold"]} over the last ${floor(var.alarm_jvm_memory_pressure_too_high_periods * var.alarm_jvm_memory_pressure_too_high_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   tags = var.tags
@@ -229,7 +229,7 @@ resource "aws_cloudwatch_metric_alarm" "master_cpu_utilization_too_high" {
   period = var.alarm_master_cpu_utilization_too_high_period
   statistic = "Average"
   threshold = local.thresholds["MasterCPUUtilizationThreshold"]
-  alarm_description = "Average elasticsearch cluster master CPU utilization above ${local.thresholds["MasterCPUUtilizationThreshold"]} over last ${var.alarm_master_cpu_utilization_too_high_periods * var.alarm_master_cpu_utilization_too_high_period / 60} minute(s) too high"
+  alarm_description = "Average elasticsearch cluster master CPU utilization above ${local.thresholds["MasterCPUUtilizationThreshold"]} over last ${floor(var.alarm_master_cpu_utilization_too_high_periods * var.alarm_master_cpu_utilization_too_high_period / 60)} minute(s) too high"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   tags = var.tags
@@ -251,7 +251,7 @@ resource "aws_cloudwatch_metric_alarm" "master_jvm_memory_pressure_too_high" {
   period = var.alarm_master_jvm_memory_pressure_too_high_period
   statistic = "Maximum"
   threshold = local.thresholds["MasterJVMMemoryPressureThreshold"]
-  alarm_description = "Elasticsearch JVM memory pressure is over ${local.thresholds["MasterJVMMemoryPressureThreshold"]} over the last ${var.alarm_master_jvm_memory_pressure_too_high_periods * var.alarm_master_jvm_memory_pressure_too_high_period / 60} minute(s)"
+  alarm_description = "Elasticsearch JVM memory pressure is over ${local.thresholds["MasterJVMMemoryPressureThreshold"]} over the last ${floor(var.alarm_master_jvm_memory_pressure_too_high_periods * var.alarm_master_jvm_memory_pressure_too_high_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   tags = var.tags
@@ -273,7 +273,7 @@ resource "aws_cloudwatch_metric_alarm" "kms_key_error" {
   period = var.alarm_kms_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Elasticsearch KMS Key Error failed over last ${var.alarm_kms_periods * var.alarm_kms_period / 60} minute(s)"
+  alarm_description = "Elasticsearch KMS Key Error failed over last ${floor(var.alarm_kms_periods * var.alarm_kms_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
@@ -296,7 +296,7 @@ resource "aws_cloudwatch_metric_alarm" "kms_key_inaccessible" {
   period = var.alarm_kms_period
   statistic = "Maximum"
   threshold = "1"
-  alarm_description = "Elasticsearch KMS Key Inaccessible failed over last ${var.alarm_kms_periods * var.alarm_kms_period / 60} minute(s)"
+  alarm_description = "Elasticsearch KMS Key Inaccessible failed over last ${floor(var.alarm_kms_periods * var.alarm_kms_period / 60)} minute(s)"
   alarm_actions = [local.aws_sns_topic_arn]
   ok_actions = [local.aws_sns_topic_arn]
   treat_missing_data = "ignore"
