-
Notifications
You must be signed in to change notification settings - Fork 8
/
variables.tf
1507 lines (1306 loc) · 49.7 KB
/
variables.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
variable "create" {
description = "Controls if kinesis firehose should be created (it affects almost all resources)"
type = bool
default = true
}
variable "name" {
description = "A name to identify the stream. This is unique to the AWS account and region the Stream is created in"
type = string
}
variable "input_source" {
description = "This is the kinesis firehose source"
type = string
default = "direct-put"
validation {
error_message = "Please use a valid source!"
condition = contains(["direct-put", "kinesis", "waf", "msk"], var.input_source)
}
}
variable "destination" {
description = "This is the destination to where the data is delivered"
type = string
validation {
error_message = "Please use a valid destination!"
condition = contains(["s3", "extended_s3", "redshift", "opensearch", "opensearchserverless", "elasticsearch", "splunk", "http_endpoint", "datadog", "coralogix", "newrelic", "dynatrace", "honeycomb", "logicmonitor", "mongodb", "sumologic", "snowflake", "iceberg"], var.destination)
}
}
variable "create_role" {
description = "Controls whether IAM role for Kinesis Firehose Stream should be created"
type = bool
default = true
}
variable "tags" {
description = "A map of tags to assign to resources."
type = map(string)
default = {}
}
######
# All Destinations
######
variable "buffering_size" {
description = "Buffer incoming data to the specified size, in MBs, before delivering it to the destination."
type = number
default = 5
validation {
error_message = "Valid values: minimum: 1 MiB, maximum: 128 MiB."
condition = var.buffering_size >= 1 && var.buffering_size <= 128
}
}
variable "buffering_interval" {
description = "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination"
type = number
default = 300
validation {
error_message = "Valid Values: Minimum: 0 seconds, maximum: 900 seconds."
condition = var.buffering_interval >= 0 && var.buffering_interval <= 900
}
}
variable "enable_lambda_transform" {
description = "Set it to true to enable data transformation with lambda"
type = bool
default = false
}
variable "transform_lambda_arn" {
description = "Lambda ARN to Transform source records"
type = string
default = null
}
variable "transform_lambda_role_arn" {
description = "The ARN of the role to execute the transform lambda. If null use the Firehose Stream role"
type = string
default = null
}
variable "transform_lambda_buffer_size" {
description = "The AWS Lambda function has a 6 MB invocation payload quota. Your data can expand in size after it's processed by the AWS Lambda function. A smaller buffer size allows for more room should the data expand after processing."
type = number
default = null
validation {
error_message = "Valid Values: minimum: 1 MB, maximum: 3 MB."
condition = var.transform_lambda_buffer_size == null || (coalesce(var.transform_lambda_buffer_size, 1) >= 1 && coalesce(var.transform_lambda_buffer_size, 1) <= 3)
}
}
variable "transform_lambda_buffer_interval" {
description = "The period of time during which Kinesis Data Firehose buffers incoming data before invoking the AWS Lambda function. The AWS Lambda function is invoked once the value of the buffer size or the buffer interval is reached."
type = number
default = null
validation {
error_message = "Valid Values: minimum: 60 seconds, maximum: 900 seconds."
condition = var.transform_lambda_buffer_interval == null || (coalesce(var.transform_lambda_buffer_interval, 60) >= 60 && coalesce(var.transform_lambda_buffer_interval, 60) <= 900)
}
}
variable "transform_lambda_number_retries" {
description = "Number of retries for AWS Transformation lambda"
type = number
default = null
validation {
error_message = "Number of retries for lambda must be between 0 and 300."
condition = var.transform_lambda_number_retries == null || (coalesce(var.transform_lambda_number_retries, 3) >= 0 && coalesce(var.transform_lambda_number_retries, 3) <= 300)
}
}
variable "s3_configuration_buffering_size" {
description = "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher."
type = number
default = 5
validation {
error_message = "Valid values: minimum: 1 MiB, maximum: 128 MiB."
condition = var.s3_configuration_buffering_size >= 1 && var.s3_configuration_buffering_size <= 128
}
}
variable "s3_configuration_buffering_interval" {
description = "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination."
type = number
default = 300
validation {
error_message = "Valid Values: Minimum: 60 seconds, maximum: 900 seconds."
condition = var.s3_configuration_buffering_interval >= 60 && var.s3_configuration_buffering_interval <= 900
}
}
variable "enable_s3_backup" {
description = "The Amazon S3 backup mode"
type = bool
default = false
}
variable "s3_backup_bucket_arn" {
description = "The ARN of the S3 backup bucket"
type = string
default = null
}
variable "s3_backup_prefix" {
description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket"
type = string
default = null
}
variable "s3_backup_buffering_size" {
description = "Buffer incoming data to the specified size, in MBs, before delivering it to the destination."
type = number
default = 5
validation {
error_message = "Valid values: minimum: 1 MiB, maximum: 128 MiB."
condition = var.s3_backup_buffering_size >= 1 && var.s3_backup_buffering_size <= 128
}
}
variable "s3_backup_buffering_interval" {
description = "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination."
type = number
default = 300
validation {
error_message = "Valid Values: Minimum: 60 seconds, maximum: 900 seconds."
condition = var.s3_backup_buffering_interval >= 60 && var.s3_backup_buffering_interval <= 900
}
}
variable "s3_backup_compression" {
description = "The compression format"
type = string
default = "UNCOMPRESSED"
validation {
error_message = "Valid values are UNCOMPRESSED, GZIP, ZIP, Snappy and HADOOP_SNAPPY."
condition = contains(["UNCOMPRESSED", "GZIP", "ZIP", "Snappy", "HADOOP_SNAPPY"], var.s3_backup_compression)
}
}
variable "s3_backup_error_output_prefix" {
description = "Prefix added to failed records before writing them to S3"
type = string
default = null
}
variable "s3_backup_enable_encryption" {
description = "Indicates if want enable KMS Encryption in S3 Backup Bucket."
type = bool
default = false
}
variable "s3_backup_kms_key_arn" {
description = "Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used."
type = string
default = null
}
variable "s3_backup_use_existing_role" {
description = "Indicates if want use the kinesis firehose role to s3 backup bucket access."
type = bool
default = true
}
variable "s3_backup_role_arn" {
description = "The role that Kinesis Data Firehose can use to access S3 Backup."
type = string
default = null
}
variable "s3_backup_enable_log" {
description = "Enables or disables the logging"
type = bool
default = true
}
variable "s3_backup_create_cw_log_group" {
description = "Enables or disables the cloudwatch log group creation"
type = bool
default = true
}
variable "s3_backup_log_group_name" {
  # Fixed description typo: "he CloudWatch" -> "The CloudWatch".
  description = "The CloudWatch group name for logging"
  type        = string
  default     = null
}
variable "s3_backup_log_stream_name" {
description = "The CloudWatch log stream name for logging"
type = string
default = null
}
variable "s3_backup_mode" {
description = "Defines how documents should be delivered to Amazon S3. Used to elasticsearch, opensearch, splunk, http configurations. For S3 and Redshift use enable_s3_backup"
type = string
default = "FailedOnly"
validation {
error_message = "Valid values are FailedOnly and All."
condition = contains(["FailedOnly", "All"], var.s3_backup_mode)
}
}
variable "enable_destination_log" {
description = "The CloudWatch Logging Options for the delivery stream"
type = bool
default = true
}
variable "create_destination_cw_log_group" {
description = "Enables or disables the cloudwatch log group creation to destination"
type = bool
default = true
}
variable "destination_log_group_name" {
description = "The CloudWatch group name for destination logs"
type = string
default = null
}
variable "destination_log_stream_name" {
description = "The CloudWatch log stream name for destination logs"
type = string
default = null
}
variable "cw_log_retention_in_days" {
description = "Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653."
type = number
default = null
}
variable "cw_tags" {
description = "A map of tags to assign to the resource."
type = map(string)
default = {}
}
variable "s3_bucket_arn" {
description = "The ARN of the S3 destination bucket"
type = string
default = null
}
variable "s3_prefix" {
description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket"
type = string
default = null
}
variable "s3_error_output_prefix" {
description = "Prefix added to failed records before writing them to S3. This prefix appears immediately following the bucket name."
type = string
default = null
}
variable "enable_s3_encryption" {
description = "Indicates if want use encryption in S3 bucket."
type = bool
default = false
}
variable "s3_kms_key_arn" {
description = "Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used"
type = string
default = null
}
variable "s3_compression_format" {
description = "The compression format"
type = string
default = "UNCOMPRESSED"
validation {
error_message = "Valid values are UNCOMPRESSED, GZIP, ZIP, Snappy and HADOOP_SNAPPY."
condition = contains(["UNCOMPRESSED", "GZIP", "ZIP", "Snappy", "HADOOP_SNAPPY"], var.s3_compression_format)
}
}
variable "s3_custom_time_zone" {
description = "The time zone you prefer. Valid values are UTC or a non-3-letter IANA time zones (for example, America/Los_Angeles). Default value is UTC."
type = string
default = "UTC"
}
variable "s3_file_extension" {
description = "The file extension to override the default file extension (for example, .json)."
type = string
default = null
}
variable "s3_own_bucket" {
description = "Indicates if you own the bucket. If not, will be configure permissions to grants the bucket owner full access to the objects delivered by Kinesis Data Firehose"
type = bool
default = true
}
variable "s3_cross_account" {
description = "Indicates if S3 bucket destination is in a different account"
type = bool
default = false
}
variable "destination_cross_account" {
description = "Indicates if destination is in a different account. Only supported to Elasticsearch and OpenSearch"
type = bool
default = false
}
variable "enable_sse" {
description = "Whether to enable encryption at rest. Only makes sense when source is Direct Put"
type = bool
default = false
}
variable "sse_kms_key_type" {
description = "Type of encryption key."
type = string
default = "AWS_OWNED_CMK"
validation {
error_message = "Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK."
condition = contains(["AWS_OWNED_CMK", "CUSTOMER_MANAGED_CMK"], var.sse_kms_key_type)
}
}
variable "sse_kms_key_arn" {
description = "Amazon Resource Name (ARN) of the encryption key"
type = string
default = null
}
######
# Source Common Variables
######
variable "source_role_arn" {
description = "The ARN of the role that provides access to the source. Only Supported on Kinesis and MSK Sources"
type = string
default = null
}
variable "source_use_existing_role" {
description = "Indicates if want use the kinesis firehose role for sources access. Only Supported on Kinesis and MSK Sources"
type = bool
default = true
}
######
# Kinesis Source
######
variable "kinesis_source_stream_arn" {
description = "The kinesis stream used as the source of the firehose delivery stream"
type = string
default = null
}
variable "kinesis_source_role_arn" { # TODO: Deprecated. Remove Next Major Version
description = "DEPRECATED!! Use variable instead source_role_arn! The ARN of the role that provides access to the source Kinesis stream"
type = string
default = null
}
variable "kinesis_source_use_existing_role" { # TODO: Deprecated. Remove Next Major Version
description = "DEPRECATED!! Use variable source_use_existing_role instead! Indicates if want use the kinesis firehose role to kinesis data stream access."
type = bool
default = true
}
variable "kinesis_source_is_encrypted" {
description = "Indicates if Kinesis data stream source is encrypted"
type = bool
default = false
}
variable "kinesis_source_kms_arn" {
description = "Kinesis Source KMS Key to add Firehose role to decrypt the records."
type = string
default = null
}
######
# MSK Source
######
variable "msk_source_cluster_arn" {
description = "The ARN of the Amazon MSK cluster."
type = string
default = null
}
variable "msk_source_topic_name" {
description = "The topic name within the Amazon MSK cluster."
type = string
default = null
}
variable "msk_source_connectivity_type" {
description = "The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE."
type = string
default = "PUBLIC"
validation {
error_message = "Valid values are PUBLIC and PRIVATE."
condition = contains(["PUBLIC", "PRIVATE"], var.msk_source_connectivity_type)
}
}
######
# S3 Destination Configurations
######
variable "enable_cloudwatch_logs_decompression" {
description = "Enables or disables Cloudwatch Logs decompression"
type = bool
default = false
}
variable "enable_cloudwatch_logs_data_message_extraction" {
description = "Cloudwatch Logs data message extraction"
type = bool
default = false
}
variable "enable_dynamic_partitioning" {
description = "Enables or disables dynamic partitioning"
type = bool
default = false
}
variable "dynamic_partitioning_retry_duration" {
description = "Total amount of seconds Firehose spends on retries"
type = number
default = 300
validation {
error_message = "Valid values between 0 and 7200."
condition = var.dynamic_partitioning_retry_duration >= 0 && var.dynamic_partitioning_retry_duration <= 7200
}
}
variable "dynamic_partition_append_delimiter_to_record" { # TODO Variable Deprecated. Remove on Next Major Version
description = "DEPRECATED!! Use var append_delimiter_to_record instead!! Use To configure your delivery stream to add a new line delimiter between records in objects that are delivered to Amazon S3."
type = bool
default = false
}
variable "dynamic_partition_metadata_extractor_query" {
description = "Dynamic Partition JQ query."
type = string
default = null
}
variable "dynamic_partition_enable_record_deaggregation" {
description = "Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter"
type = bool
default = false
}
variable "dynamic_partition_record_deaggregation_type" {
description = "Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter"
type = string
default = "JSON"
validation {
error_message = "Valid values are JSON and DELIMITED."
condition = contains(["JSON", "DELIMITED"], var.dynamic_partition_record_deaggregation_type)
}
}
variable "dynamic_partition_record_deaggregation_delimiter" {
description = "Specifies the delimiter to be used for parsing through the records in the delivery stream and deaggregating them"
type = string
default = null
}
variable "enable_data_format_conversion" {
  # Fixed inverted description: this flag ENABLES format conversion, the
  # original text said "disable".
  description = "Set it to true if you want to enable format conversion."
  type        = bool
  default     = false
}
variable "data_format_conversion_glue_database" {
description = "Name of the AWS Glue database that contains the schema for the output data."
type = string
default = null
}
variable "data_format_conversion_glue_use_existing_role" {
description = "Indicates if want use the kinesis firehose role to glue access."
type = bool
default = true
}
variable "data_format_conversion_glue_role_arn" {
description = "The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed."
type = string
default = null
}
variable "data_format_conversion_glue_table_name" {
description = "Specifies the AWS Glue table that contains the column information that constitutes your data schema"
type = string
default = null
}
variable "data_format_conversion_glue_catalog_id" {
description = "The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default."
type = string
default = null
}
variable "data_format_conversion_glue_region" {
description = "If you don't specify an AWS Region, the default is the current region."
type = string
default = null
}
variable "data_format_conversion_glue_version_id" {
description = "Specifies the table version for the output data schema."
type = string
default = "LATEST"
}
variable "data_format_conversion_input_format" {
  description = "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe"
  type        = string
  default     = "OpenX"
  validation {
    # Error message must match the values actually accepted by the condition:
    # "OpenX" (mixed case), not "OPENX" — the default itself is "OpenX".
    error_message = "Valid values are HIVE and OpenX."
    condition     = contains(["HIVE", "OpenX"], var.data_format_conversion_input_format)
  }
}
variable "data_format_conversion_openx_case_insensitive" {
description = "When set to true, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them."
type = bool
default = true
}
variable "data_format_conversion_openx_convert_dots_to_underscores" {
description = "Specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names."
type = bool
default = false
}
variable "data_format_conversion_openx_column_to_json_key_mappings" {
description = "A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords."
type = map(string)
default = null
}
variable "data_format_conversion_hive_timestamps" {
description = "A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings."
type = list(string)
default = []
}
variable "data_format_conversion_output_format" {
description = "Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe"
type = string
default = "PARQUET"
validation {
error_message = "Valid values are ORC and PARQUET."
condition = contains(["ORC", "PARQUET"], var.data_format_conversion_output_format)
}
}
variable "data_format_conversion_block_size" {
description = "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The Value is in Bytes."
type = number
default = 268435456
validation {
error_message = "Minimum Value is 64 MiB."
condition = var.data_format_conversion_block_size >= 67108864
}
}
variable "data_format_conversion_parquet_compression" {
description = "The compression code to use over data blocks."
type = string
default = "SNAPPY"
validation {
error_message = "Valid values are UNCOMPRESSED, SNAPPY and GZIP."
condition = contains(["UNCOMPRESSED", "SNAPPY", "GZIP"], var.data_format_conversion_parquet_compression)
}
}
variable "data_format_conversion_parquet_dict_compression" {
description = "Indicates whether to enable dictionary compression."
type = bool
default = false
}
variable "data_format_conversion_parquet_max_padding" {
description = "The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The value is in bytes"
type = number
default = 0
}
variable "data_format_conversion_parquet_page_size" {
description = "Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The value is in bytes"
type = number
default = 1048576
validation {
error_message = "Minimum Value is 64 KiB."
condition = var.data_format_conversion_parquet_page_size >= 65536
}
}
variable "data_format_conversion_parquet_writer_version" {
description = "Indicates the version of row format to output."
type = string
default = "V1"
validation {
error_message = "Valid values are V1 and V2."
condition = contains(["V1", "V2"], var.data_format_conversion_parquet_writer_version)
}
}
variable "data_format_conversion_orc_compression" {
description = "The compression code to use over data blocks."
type = string
default = "SNAPPY"
validation {
error_message = "Valid values are NONE, ZLIB and SNAPPY."
condition = contains(["NONE", "ZLIB", "SNAPPY"], var.data_format_conversion_orc_compression)
}
}
variable "data_format_conversion_orc_format_version" {
description = "The version of the file to write."
type = string
default = "V0_12"
validation {
error_message = "Valid values are V0_11 and V0_12."
condition = contains(["V0_11", "V0_12"], var.data_format_conversion_orc_format_version)
}
}
variable "data_format_conversion_orc_enable_padding" {
description = "Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying."
type = bool
default = false
}
variable "data_format_conversion_orc_padding_tolerance" {
  description = "A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size."
  type        = number
  default     = 0.05
  validation {
    # Fixed copy-pasted error message ("Valid values are V0_11 and V0_12.")
    # that belonged to the ORC format-version variable; this condition
    # checks a 0..1 numeric range.
    error_message = "Valid values are between 0 and 1."
    condition     = var.data_format_conversion_orc_padding_tolerance >= 0 && var.data_format_conversion_orc_padding_tolerance <= 1
  }
}
variable "data_format_conversion_orc_dict_key_threshold" {
description = "A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1."
type = number
default = 0.0
validation {
error_message = "Valid values are between 0 and 1."
condition = var.data_format_conversion_orc_dict_key_threshold >= 0 && var.data_format_conversion_orc_dict_key_threshold <= 1
}
}
variable "data_format_conversion_orc_bloom_filter_columns" {
description = "A list of column names for which you want Kinesis Data Firehose to create bloom filters."
type = list(string)
default = []
}
variable "data_format_conversion_orc_bloom_filter_false_positive_probability" {
description = "The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter."
type = number
default = 0.05
validation {
error_message = "Valid values are between 0 and 1."
condition = var.data_format_conversion_orc_bloom_filter_false_positive_probability >= 0 && var.data_format_conversion_orc_bloom_filter_false_positive_probability <= 1
}
}
variable "data_format_conversion_orc_row_index_stripe" {
description = "The number of rows between index entries."
type = number
default = 10000
validation {
error_message = "Minimum value is 1000."
condition = var.data_format_conversion_orc_row_index_stripe >= 1000
}
}
variable "data_format_conversion_orc_stripe_size" {
  # Fixed description typo: "he number of bytes in each strip." ->
  # "The number of bytes in each stripe."
  description = "The number of bytes in each stripe."
  type        = number
  default     = 67108864
  validation {
    error_message = "Minimum Value is 8 MiB."
    condition     = var.data_format_conversion_orc_stripe_size >= 8388608
  }
}
}
variable "append_delimiter_to_record" {
description = "To configure your delivery stream to add a new line delimiter between records in objects that are delivered to Amazon S3."
type = bool
default = false
}
######
# Redshift Destination Variables
######
variable "redshift_cluster_endpoint" {
description = "The redshift endpoint"
type = string
default = null
}
variable "redshift_username" {
description = "The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions"
type = string
default = null
sensitive = true
}
variable "redshift_password" {
description = "The password for the redshift username above"
type = string
default = null
sensitive = true
}
variable "redshift_database_name" {
description = "The redshift database name"
type = string
default = null
}
variable "redshift_table_name" {
description = "The name of the table in the redshift cluster that the s3 bucket will copy to"
type = string
default = null
}
variable "redshift_copy_options" {
description = "Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter"
type = string
default = null
}
variable "redshift_data_table_columns" {
description = "The data table columns that will be targeted by the copy command"
type = string
default = null
}
variable "redshift_retry_duration" {
  description = "The length of time, in seconds, during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt"
  # Was `string`, but the default is a number literal and the validation
  # performs numeric comparisons; `number` is the correct type. Backward
  # compatible: Terraform automatically converts numeric strings to number.
  type    = number
  default = 3600
  validation {
    error_message = "Minimum: 0 seconds, maximum: 7200 seconds."
    condition     = var.redshift_retry_duration >= 0 && var.redshift_retry_duration <= 7200
  }
}
variable "redshift_cluster_identifier" {
description = "Redshift Cluster identifier. Necessary to associate the iam role to cluster"
type = string
default = null
}
variable "associate_role_to_redshift_cluster" {
description = "Set it to false if don't want the module associate the role to redshift cluster"
type = bool
default = true
}
######
# Elasticsearch Destination Variables
######
variable "elasticsearch_domain_arn" {
description = "The ARN of the Amazon ES domain. The pattern needs to be arn:.*"
type = string
default = null
}
variable "elasticsearch_index_name" {
description = "The Elasticsearch index name"
type = string
default = null
}
variable "elasticsearch_index_rotation_period" {
description = "The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data"
type = string
default = "OneDay"
validation {
error_message = "Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth."
condition = contains(["NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"], var.elasticsearch_index_rotation_period)
}
}
variable "elasticsearch_type_name" {
description = "The Elasticsearch type name with maximum length of 100 characters"
type = string
default = null
}
variable "elasticsearch_retry_duration" {
  description = "The length of time, in seconds, during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt"
  # Was `string`, but the default is a number literal and the validation
  # performs numeric comparisons; `number` is the correct type. Backward
  # compatible: Terraform automatically converts numeric strings to number.
  type    = number
  default = 300
  validation {
    # Message now states the upper bound that the condition actually enforces.
    error_message = "Minimum: 0 seconds, maximum: 7200 seconds."
    condition     = var.elasticsearch_retry_duration >= 0 && var.elasticsearch_retry_duration <= 7200
  }
}
######
# Opensearch Destination Variables
######
# OpenSearch destination: target domain.
variable "opensearch_domain_arn" {
  type        = string
  default     = null
  description = "The ARN of the Amazon Opensearch domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint."
}
# OpenSearch destination: index written to by Firehose.
variable "opensearch_index_name" {
  type        = string
  default     = null
  description = "The Opensearch (And OpenSearch Serverless) index name."
}
# Rotation cadence for the OpenSearch destination index.
variable "opensearch_index_rotation_period" {
  type        = string
  default     = "OneDay"
  description = "The Opensearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data"
  validation {
    condition     = contains(["NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"], var.opensearch_index_rotation_period)
    error_message = "Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth."
  }
}
# OpenSearch destination: document type name (deprecated in OpenSearch >= 1.1).
variable "opensearch_type_name" {
  type        = string
  default     = null
  description = "The opensearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty."
}
variable "opensearch_retry_duration" {
  description = "After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0."
  # Declared as number: the default and the validation are numeric
  # (string was accepted only via implicit conversion).
  type    = number
  default = 300
  validation {
    condition     = var.opensearch_retry_duration >= 0 && var.opensearch_retry_duration <= 7200
    # Message now states both bounds actually enforced by the condition.
    error_message = "Minimum: 0 seconds, maximum: 7200 seconds."
  }
}
variable "opensearch_vpc_create_service_linked_role" {
  description = "Set it to true if you want the module to create the OpenSearch service-linked role required for VPC access"
  type        = bool
  default     = false
}
# Controls how Firehose assigns document IDs in OpenSearch.
variable "opensearch_document_id_options" {
  type        = string
  default     = "FIREHOSE_DEFAULT"
  description = "The method for setting up document ID."
  validation {
    condition     = contains(["FIREHOSE_DEFAULT", "NO_DOCUMENT_ID"], var.opensearch_document_id_options)
    error_message = "Valid values are FIREHOSE_DEFAULT and NO_DOCUMENT_ID."
  }
}
# OpenSearch Serverless destination: collection endpoint.
variable "opensearchserverless_collection_endpoint" {
  type        = string
  default     = null
  description = "The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service."
}
# OpenSearch Serverless destination: collection ARN.
variable "opensearchserverless_collection_arn" {
  type        = string
  default     = null
  description = "The ARN of the Amazon Opensearch Serverless Collection. The pattern needs to be arn:.*."
}
######
# VPC Variables
######
# Master switch for VPC delivery configuration.
variable "enable_vpc" {
  type        = bool
  default     = false
  description = "Indicates if destination is configured in VPC. Supports Elasticsearch and Opensearch destinations."
}
variable "vpc_use_existing_role" {
  description = "Indicates whether to reuse the Kinesis Firehose role for VPC access. Supports Elasticsearch and Opensearch destinations."
  type        = bool
  # When false, vpc_role_arn must be supplied instead.
  default     = true
}
# Dedicated role for the VPC configuration (used when vpc_use_existing_role is false).
variable "vpc_role_arn" {
  type        = string
  default     = null
  description = "The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Supports Elasticsearch and Opensearch destinations."
}
# Subnets where Firehose creates its network interfaces.
variable "vpc_subnet_ids" {
  type        = list(string)
  default     = null
  description = "A list of subnet IDs to associate with Kinesis Firehose. Supports Elasticsearch and Opensearch destinations."
}
# When true, Firehose shares the destination's security group.
variable "vpc_security_group_same_as_destination" {
  type        = bool
  default     = true
  description = "Indicates if the firehose security group is the same as destination."
}
# Existing security groups attached to the Firehose ENIs.
variable "vpc_security_group_firehose_ids" {
  type        = list(string)
  default     = null
  description = "A list of security group IDs to associate with Kinesis Firehose."
}
variable "vpc_create_security_group" {
  description = "Indicates whether the module should create a security group to associate with Kinesis Firehose"
  type        = bool
  default     = false
}
variable "vpc_security_group_firehose_configure_existing" {
  description = "Indicates whether the module should configure an existing firehose security group with the necessary rules"
  type        = bool
  default     = false
}
# Extra tags applied to security groups created by the module.
variable "vpc_security_group_tags" {
  type        = map(string)
  default     = {}
  description = "A map of tags to assign to security group"
}
# Existing security groups on the destination side that should accept firehose traffic.
variable "vpc_security_group_destination_ids" {
  type        = list(string)
  default     = null
  description = "A list of security group IDs associated to destinations to allow firehose traffic"
}
variable "vpc_create_destination_security_group" {
  description = "Indicates whether the module should create a destination security group to associate with firehose destinations"
  type        = bool
  default     = false
}
variable "vpc_security_group_destination_configure_existing" {
  description = "Indicates whether the module should configure an existing destination security group with the necessary rules"
  type        = bool
  default     = false
}
# VPC for the module-created destination security group.
variable "vpc_security_group_destination_vpc_id" {
  type        = string
  default     = null
  description = "VPC ID to create the destination security group. Only supported to Redshift and splunk destinations"
}
######