From f65d56ead300fd59e7fa17f2e3e79f8b0616ff23 Mon Sep 17 00:00:00 2001
From: velotioaastha
Date: Mon, 3 Jun 2024 13:31:27 +0530
Subject: [PATCH] upgrade eks module from v17 to v20

---
 eks_module_upgrade.sh      | 152 +++++++++++++++++++++++++++++++++++++
 modules/app_eks/main.tf    |  67 ++++++++++------
 modules/app_eks/outputs.tf |   9 ++-
 versions.tf                |   4 +-
 4 files changed, 205 insertions(+), 27 deletions(-)
 create mode 100755 eks_module_upgrade.sh

diff --git a/eks_module_upgrade.sh b/eks_module_upgrade.sh
new file mode 100755
index 00000000..a1960075
--- /dev/null
+++ b/eks_module_upgrade.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+
+# Exit immediately if a command exits with a non-zero status
+set -e
+
+# Function to check if Terraform is installed
+check_terraform_installed() {
+    if ! command -v terraform &> /dev/null; then
+        echo "Terraform could not be found. Please install Terraform."
+        exit 1
+    fi
+}
+
+# Function to check if AWS CLI is installed
+check_aws_cli_installed() {
+    if ! command -v aws &> /dev/null; then
+        echo "AWS CLI could not be found. Please install AWS CLI."
+        exit 1
+    fi
+}
+
+# Function to check if tfstate file exists
+check_tfstate_file() {
+    if [ ! -f "terraform.tfstate" ]; then
+        echo "terraform.tfstate file not found. Please ensure you are in the correct directory."
+        exit 1
+    fi
+}
+
+# Function to update kubeconfig
+update_kubeconfig() {
+    if [ -z "$1" ]; then
+        echo "Cluster name is required to update kubeconfig."
+        exit 1
+    fi
+
+    CLUSTER_NAME="$1"
+    REGION="${2:-$AWS_DEFAULT_REGION}"
+
+    if [ -z "$REGION" ]; then
+        echo "Region is required. Please set the AWS_DEFAULT_REGION environment variable or provide it as an argument."
+        exit 1
+    fi
+
+    echo "Updating kubeconfig for cluster ${CLUSTER_NAME} in region ${REGION}..."
+    aws eks update-kubeconfig --name "${CLUSTER_NAME}" --region "${REGION}"
+}
+
+# Function to handle pre-Terraform steps
+pre_terraform_steps() {
+    echo "Running pre-Terraform steps..."
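+
+    # NOTE: the `terraform state mv`/`rm` commands below assume the default
+    # module nesting (module.app_eks.module.eks). If your layout differs,
+    # list the actual v17 addresses first and adjust accordingly, e.g.:
+    #   terraform state list | grep 'module.eks'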
+
+    # Check if Terraform is installed
+    check_terraform_installed
+
+    # Check if tfstate file exists
+    check_tfstate_file
+
+    # Initialize and upgrade Terraform
+    terraform init -upgrade
+
+    # Rename the cluster IAM role TF resource to the new name
+    terraform state mv 'module.app_eks.module.eks.aws_iam_role.cluster[0]' 'module.app_eks.module.eks.aws_iam_role.cluster_new[0]'
+
+    # Remove node_groups from TF state
+    terraform state rm 'module.app_eks.module.eks.module.node_groups'
+
+    # Remove node groups security group from TF state
+    terraform state rm 'module.app_eks.module.eks.aws_security_group.workers[0]'
+
+    # Remove policy attachments for node groups from TF state
+    terraform state rm 'module.app_eks.module.eks.aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy[0]' \
+        'module.app_eks.module.eks.aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy[0]' \
+        'module.app_eks.module.eks.aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly[0]'
+
+    # Remove node groups AWS security group rule resources from TF state
+    for rule in $(terraform state list | grep 'aws_security_group_rule.workers'); do
+        terraform state rm "$rule"
+    done
+
+    # Remove cluster AWS security group rule resources from TF state
+    for rule in $(terraform state list | grep 'aws_security_group_rule.cluster'); do
+        terraform state rm "$rule"
+    done
+
+    # Remove IAM role for node groups
+    terraform state rm 'module.app_eks.module.eks.aws_iam_role.workers[0]'
+
+    # Rename IAM role policy attachments
+    terraform state mv 'module.app_eks.module.eks.aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy[0]' 'module.app_eks.module.eks.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"]'
+    terraform state mv 'module.app_eks.module.eks.aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy[0]' 'module.app_eks.module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"]'
+
+    echo "Pre-Terraform steps completed successfully."
+}
+
+# Function to handle post-Terraform steps
+post_terraform_steps() {
+    echo "Running post-Terraform steps..."
+
+    # Check if AWS CLI is installed
+    check_aws_cli_installed
+
+    # Check if the cluster name is provided
+    if [ -z "$2" ]; then
+        echo "Cluster name is required for post-Terraform steps."
+        echo "Usage: $0 --post <cluster-name> [<region>]"
+        exit 1
+    fi
+
+    CLUSTER_NAME="$2"
+    REGION="${3:-$AWS_DEFAULT_REGION}"
+
+    if [ -z "$REGION" ]; then
+        echo "Region is required. Please set the AWS_DEFAULT_REGION environment variable or provide it as an argument."
+        exit 1
+    fi
+
+    # Update kubeconfig
+    update_kubeconfig "${CLUSTER_NAME}" "${REGION}"
+
+    # Get the list of node groups with the prefix matching the cluster name
+    NODE_GROUPS=$(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" --region "${REGION}" --query 'nodegroups[?starts_with(@, `'${CLUSTER_NAME}'`)]' --output text)
+
+    if [ -z "$NODE_GROUPS" ]; then
+        echo "No node groups found with the prefix '${CLUSTER_NAME}'."
+        exit 1
+    fi
+
+    for NODE_GROUP in $NODE_GROUPS; do
+        echo "Processing node group: ${NODE_GROUP}"
+
+        # Drain all pods from the node group; --delete-emptydir-data is the
+        # current kubectl name for the deprecated --delete-local-data flag
+        for NODE in $(kubectl get nodes --selector="eks.amazonaws.com/nodegroup=${NODE_GROUP}" -o name); do
+            kubectl drain "$NODE" --ignore-daemonsets --delete-emptydir-data --force || true
+        done
+
+        # Delete the node group
+        aws eks delete-nodegroup --cluster-name "${CLUSTER_NAME}" --nodegroup-name "${NODE_GROUP}" --region "${REGION}"
+    done
+
+    echo "Post-Terraform steps completed successfully."
+}
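+
+# Example usage (cluster name and region are illustrative; apply the upgraded
+# Terraform configuration between the two phases):
+#   ./eks_module_upgrade.sh --pre
+#   ./eks_module_upgrade.sh --post my-eks-cluster us-west-2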
+
+# Check the input argument
+if [ "$1" == "--pre" ]; then
+    pre_terraform_steps
+elif [ "$1" == "--post" ]; then
+    post_terraform_steps "$@"
+else
+    echo "Usage: $0 --pre | --post <cluster-name> [<region>]"
+    exit 1
+fi
diff --git a/modules/app_eks/main.tf b/modules/app_eks/main.tf
index 9210ad9d..888008a5 100644
--- a/modules/app_eks/main.tf
+++ b/modules/app_eks/main.tf
@@ -16,50 +16,73 @@ locals {
 module "eks" {
   source  = "terraform-aws-modules/eks/aws"
-  version = "~> 17.23"
+  version = "~> 20.12"
+
+  prefix_separator                   = ""
+  iam_role_name                      = var.namespace
+  cluster_security_group_name        = var.namespace
+  cluster_security_group_description = "EKS cluster security group."
 
   cluster_name    = var.namespace
   cluster_version = var.cluster_version
 
   vpc_id     = var.network_id
-  subnets    = var.network_private_subnets
+  subnet_ids = var.network_private_subnets
 
-  map_accounts = var.map_accounts
-  map_roles    = var.map_roles
-  map_users    = var.map_users
+  enable_irsa = false
+  # aws_auth_accounts = var.map_accounts
+  # aws_auth_roles    = var.map_roles
+  # aws_auth_users    = var.map_users
 
   cluster_enabled_log_types = ["api", "audit", "controllerManager", "scheduler"]
 
-  cluster_endpoint_private_access      = true
   cluster_endpoint_public_access       = var.cluster_endpoint_public_access
   cluster_endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidrs
 
-  cluster_log_retention_in_days = 30
+  cloudwatch_log_group_retention_in_days = 30
 
-  cluster_encryption_config = var.kms_key_arn != "" ? [
-    {
+  create_kms_key = false
+  cluster_encryption_config = var.kms_key_arn != "" ? {
     provider_key_arn = var.kms_key_arn
     resources        = ["secrets"]
-    }
-  ] : null
+  } : null
 
-  worker_additional_security_group_ids = [aws_security_group.primary_workers.id]
+  eks_managed_node_group_defaults = {
+    vpc_security_group_ids = [aws_security_group.primary_workers.id]
+  }
 
-  node_groups = {
+  eks_managed_node_groups = {
     primary = {
-      create_launch_template = local.create_launch_template,
-      desired_capacity       = var.desired_capacity,
-      disk_encrypted         = local.encrypt_ebs_volume,
-      disk_kms_key_id        = var.kms_key_arn,
-      disk_type              = "gp3"
+      create_launch_template = local.create_launch_template,
+      desired_size           = var.desired_capacity,
+      min_size               = var.desired_capacity,
+      max_size               = 5,
 
       enable_monitoring    = true
       force_update_version = local.encrypt_ebs_volume,
       iam_role_arn         = aws_iam_role.node.arn,
       instance_types       = var.instance_types,
-      kubelet_extra_args   = local.system_reserved != "" ? "--system-reserved=${local.system_reserved}" : "",
-      max_capacity         = 5,
+      network_interfaces = [
+        {
+          device_index                = 0
+          associate_public_ip_address = false
+          delete_on_termination       = true
+          security_groups             = [aws_security_group.primary_workers.id]
+        }
+      ]
+
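+      # v20 has no kubelet_extra_args input; kubelet flags are forwarded via
+      # the launch template's bootstrap arguments instead (AL2 bootstrap.sh
+      # passes --kubelet-extra-args through to the kubelet)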
"--kubelet-extra-args '--system-reserved=${local.system_reserved}'" : "", metadata_http_put_response_hop_limit = 2 metadata_http_tokens = "required", - min_capacity = var.desired_capacity, - version = var.cluster_version, + cluster_version = var.cluster_version, + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + delete_on_termination = true + volume_type = "gp3" + volume_size = 100 + encrypted = local.encrypt_ebs_volume + kms_key_id = var.kms_key_arn + } + } + } } } diff --git a/modules/app_eks/outputs.tf b/modules/app_eks/outputs.tf index 304b51db..8112e2c9 100644 --- a/modules/app_eks/outputs.tf +++ b/modules/app_eks/outputs.tf @@ -1,9 +1,12 @@ output "autoscaling_group_names" { - value = { for name, value in module.eks.node_groups : name => lookup(lookup(lookup(value, "resources")[0], "autoscaling_groups")[0], "name") } + value = { + "primary" = module.eks.eks_managed_node_groups_autoscaling_group_names[0] + } } + output "cluster_id" { - value = module.eks.cluster_id - description = "ID of the created EKS cluster" + value = module.eks.cluster_name + description = "Name of the created EKS cluster" } output "efs_id" { diff --git a/versions.tf b/versions.tf index 1ffbef44..735ece88 100644 --- a/versions.tf +++ b/versions.tf @@ -1,9 +1,9 @@ terraform { - required_version = "~> 1.0" + required_version = "~> 1.3" required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = "~> 5.40" } kubernetes = { source = "hashicorp/kubernetes"