I am encountering an error while trying to create an Amazon EKS (Elastic Kubernetes Service) cluster using Terraform. The error message indicates that the specified availability zone, us-east-1e, does not currently have sufficient capacity to support the cluster creation:
Cannot create cluster 'eks_cloud1120' because us-east-1e, the targeted availability zone, does not currently have sufficient capacity to support the cluster. Retry and choose from these availability zones: us-east-1a, us-east-1b, us-east-1c, us-east-1d
This is my main.tf file:
data "aws_iam_policy_document" "assume_role" {
statement {
effect = "Allow"
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
actions = ["sts:AssumeRole"]
}
}
resource "aws_iam_role" "example" {
name = "eks-uber1120-nirmal"
assume_role_policy = data.aws_iam_policy_document.assume_role.json
}
resource "aws_iam_role_policy_attachment" "example-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.example.name
}
#get vpc data
data "aws_vpc" "default" {
default = true
}
#get public subnets for cluster
data "aws_subnets" "public" {
filter {
name = "vpc-id"
values = [data.aws_vpc.default.id]
}
}
#cluster provision
resource "aws_eks_cluster" "example" {
name = "eks_cloud1120"
role_arn = aws_iam_role.example.arn
vpc_config {
subnet_ids = data.aws_subnets.public.ids
}
# Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
# Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups.
depends_on = [
aws_iam_role_policy_attachment.example-AmazonEKSClusterPolicy,
]
}
resource "aws_iam_role" "example1" {
name = "eks-uber1120-nirmal-naveen"
assume_role_policy = jsonencode({
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}]
Version = "2012-10-17"
})
}
resource "aws_iam_role_policy_attachment" "example-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.example1.name
}
resource "aws_iam_role_policy_attachment" "example-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.example1.name
}
resource "aws_iam_role_policy_attachment" "example-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.example1.name
}
#create node group
resource "aws_eks_node_group" "example" {
cluster_name = aws_eks_cluster.example.name
node_group_name = "Node-cloud"
node_role_arn = aws_iam_role.example1.arn
subnet_ids = data.aws_subnets.public.ids
scaling_config {
desired_size = 1
max_size = 2
min_size = 1
}
instance_types = ["t2.medium"]
# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
aws_iam_role_policy_attachment.example-AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.example-AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.example-AmazonEC2ContainerRegistryReadOnly,
]
}
I tried specifying particular availability zones, but I am still getting the same error. How can I replace the list of subnets with only those in availability zones that have sufficient capacity (us-east-1a, us-east-1b, us-east-1c, and us-east-1d)? Please check this code; what I attempted is sketched below.
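For reference, this is roughly what I tried (a sketch, not my exact code: I am assuming an extra availability-zone filter on the existing data.aws_subnets.public data source, with the zone list taken from the error message):

# Sketch of the attempted fix: restrict the subnets data source to the
# availability zones the error message says have capacity.
# (The availability-zone filter below is my assumption of how to express this.)
data "aws_subnets" "public" {
  filter {
    name   = "vpc-id"
    values = [data.aws_vpc.default.id]
  }

  filter {
    name   = "availability-zone"
    values = ["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"]
  }
}

Even with a filter along these lines, cluster creation still failed for me, so I am not sure whether the subnets data source is the right place to handle this.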