Ben Allen 1 mēnesi atpakaļ
revīzija
c0d54bc4ea

+ 8 - 0
.gitignore

@@ -0,0 +1,8 @@
+.terraform/
+*.tfstate
+*.tfstate.*
+crash.log
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json

+ 45 - 0
.terraform.lock.hcl

@@ -0,0 +1,45 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version     = "6.36.0"
+  constraints = "~> 6.0"
+  hashes = [
+    "h1:r9icn1WEZVvEXiy6ZKexLzAPnXkkt+22jJ9WQYPfKB0=",
+    "zh:0eb4481315564aaeec4905a804fd0df22c40f509ad2af63615eeaa90abacf81c",
+    "zh:12c3cddc461a8dbaa04387fe83420b64c4c05cb5479d181674168ca7daefcc38",
+    "zh:1b55a09661e80acf6826faa38dd8fbff24c2ef620d2a0a16918491a222c55370",
+    "zh:269cb1a406d0cac762bce82119247395a0bbf0d4ad2492fb2ea5653b4f44bc05",
+    "zh:3bfb78e3345f0c3846e76578952a09fb5dda05d2d73e19473fb0af0000469a66",
+    "zh:3ead4f4388c7dd78ed198082a981746324da0d7a51460c9b455fd884d86fc82c",
+    "zh:44906654199991b3f1a21c6a984bc5f9f556ff4baa4e5f77e168968e941c2725",
+    "zh:4803d050d581b05b0fd0ae5cce95ec1784d66e2bc9da4b1f7663df0ce7914609",
+    "zh:4cf9fe8fae58b62e83c0672a9c66e0963b7289aaf768a250e9bc44570d82cbd5",
+    "zh:5bfd7a1fb3116164b411777115dd4b272a68984fa949c687e41a3041318c82f1",
+    "zh:77cbcf2db512617f10b81e11c20d40fa534ef07163171cbe35214fa8f74b4e85",
+    "zh:8201cabed01f1434bf9ea7fbcf2a95612a87a0398b870b2643bd1a5119793d2d",
+    "zh:9aaded4cf36ec2abbe35086733a4510e08819698180b21a9387ba4112aee02e0",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:f594ef2683a0d23d3a6f0ad6c84a55ed79368c158ee08c2f3b7c41ec446a701f",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+  version     = "4.2.1"
+  constraints = ">= 4.0.0"
+  hashes = [
+    "h1:F5d6bQY8UlBo0D71Sv7CsV+3aZOFz0yeNF+vufog7h4=",
+    "zh:0d1e7d07ac973b97fa228f46596c800de830820506ee145626f079dd6bbf8d8a",
+    "zh:5c7e3d4348cb4861ab812973ef493814a4b224bdd3e9d534a7c8a7c992382b86",
+    "zh:7c6d4a86cd7a4e9c1025c6b3a3a6a45dea202af85d870cddbab455fb1bd568ad",
+    "zh:7d0864755ba093664c4b2c07c045d3f5e3d7c799dda1a3ef33d17ed1ac563191",
+    "zh:83734f57950ab67c0d6a87babdb3f13c908cbe0a48949333f489698532e1391b",
+    "zh:951e3c285218ebca0cf20eaa4265020b4ef042fea9c6ade115ad1558cfe459e5",
+    "zh:b9543955b4297e1d93b85900854891c0e645d936d8285a190030475379c5c635",
+    "zh:bb1bd9e86c003d08c30c1b00d44118ed5bbbf6b1d2d6f7eaac4fa5c6ebea5933",
+    "zh:c9477bfe00653629cd77ddac3968475f7ad93ac3ca8bc45b56d1d9efb25e4a6e",
+    "zh:d4cfda8687f736d0cba664c22ec49dae1188289e214ef57f5afe6a7217854fed",
+    "zh:dc77ee066cf96532a48f0578c35b1eaf6dc4d8ddd0e3ae8e029a3b10676dd5d3",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}

+ 80 - 0
README.md

@@ -0,0 +1,80 @@
+# EKS Cluster Module
+
+This module creates a new AWS VPC and deploys an Amazon EKS cluster with a single managed node group into private subnets.
+
+It can also reuse an existing VPC and existing private/public subnets instead of creating new networking.
+
+## What it creates
+
+- A new VPC with DNS support enabled
+- Public and private subnets across at least two availability zones
+- An internet gateway and NAT gateway routing for private workloads
+- IAM roles for the EKS control plane and worker nodes
+- An EKS cluster
+- One EKS managed node group
+- Managed EKS addons for CoreDNS, kube-proxy, VPC CNI, and EKS Pod Identity Agent
+
+## Usage
+
+```hcl
+provider "aws" {
+  region = "us-east-1"
+}
+
+module "eks" {
+  source = "path/to/this-module" # this module lives at the repo root; examples/basic uses source = "../.."
+
+  name   = "demo-eks"
+  region = "us-east-1"
+
+  kubernetes_version      = "1.35"
+  availability_zone_count = 2
+
+  node_instance_types = ["t3.large"]
+  node_desired_size   = 2
+  node_min_size       = 2
+  node_max_size       = 4
+
+  cluster_admin_principal_arns = [
+    "arn:aws:iam::123456789012:role/platform-admin",
+    "arn:aws:iam::123456789012:user/cluster-operator",
+  ]
+
+  tags = {
+    Project = "platform"
+    Owner   = "infra"
+  }
+}
+```
+
+To use existing networking instead of creating a new VPC:
+
+```hcl
+module "eks" {
+  source = "path/to/this-module" # this module lives at the repo root; examples/basic uses source = "../.."
+
+  name   = "demo-eks"
+  region = "us-east-1"
+
+  create_vpc                = false
+  existing_vpc_id           = "vpc-0123456789abcdef0"
+  existing_private_subnet_ids = ["subnet-aaa", "subnet-bbb"]
+  existing_public_subnet_ids  = ["subnet-ccc", "subnet-ddd"]
+}
+```
+
+After `terraform apply`, configure `kubectl` with:
+
+```bash
+aws eks update-kubeconfig --region us-east-1 --name demo-eks
+```
+
+## Notes
+
+- Private subnets are used for the cluster and worker nodes.
+- By default, the module creates a single NAT gateway to reduce cost.
+- When `create_vpc = false`, the module skips all VPC, subnet, NAT, IGW, and route table creation and uses the supplied subnet IDs instead.
+- The EKS API endpoint has both public and private access enabled by default. Restrict `cluster_public_access_cidrs` in real environments.
+- Extra cluster admins are created with EKS access entries and the managed `AmazonEKSClusterAdminPolicy` at cluster scope.
+- The Pod Identity addon uses the EKS addon name `eks-pod-identity-agent`.
+- You must configure AWS credentials outside this module.

+ 130 - 0
eks.tf

@@ -0,0 +1,130 @@
+resource "aws_eks_cluster" "this" {
+  name     = var.name
+  role_arn = aws_iam_role.cluster.arn
+  version  = var.kubernetes_version
+
+  access_config {
+    authentication_mode                         = "API_AND_CONFIG_MAP" # honor both EKS access entries and the legacy aws-auth ConfigMap
+    bootstrap_cluster_creator_admin_permissions = true                 # the identity running terraform apply gets cluster-admin
+  }
+
+  vpc_config {
+    endpoint_private_access = var.cluster_endpoint_private_access
+    endpoint_public_access  = var.cluster_endpoint_public_access
+    public_access_cidrs     = var.cluster_public_access_cidrs
+    subnet_ids              = local.private_subnet_ids # control-plane ENIs are placed in private subnets only
+  }
+
+  depends_on = [
+    aws_iam_role_policy_attachment.cluster_policy # make sure the role is usable before EKS tries to assume it
+  ]
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_node_group" "default" {
+  cluster_name    = aws_eks_cluster.this.name
+  node_group_name = "${var.name}-default"
+  node_role_arn   = aws_iam_role.node.arn
+  subnet_ids      = local.private_subnet_ids
+  version         = var.kubernetes_version # keep worker nodes on the same Kubernetes version as the control plane
+  disk_size       = var.node_disk_size
+  capacity_type   = var.node_capacity_type
+  instance_types  = var.node_instance_types
+
+  scaling_config {
+    desired_size = var.node_desired_size
+    min_size     = var.node_min_size
+    max_size     = var.node_max_size
+  }
+
+  update_config {
+    max_unavailable = 1 # roll nodes one at a time during version/AMI updates
+  }
+
+  # Detach-before-delete ordering: nodes need these policies while draining.
+  depends_on = [
+    aws_iam_role_policy_attachment.node_worker_policy,
+    aws_iam_role_policy_attachment.node_cni_policy,
+    aws_iam_role_policy_attachment.node_ecr_policy
+  ]
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_access_entry" "cluster_admins" {
+  for_each = toset(var.cluster_admin_principal_arns) # one access entry per extra admin principal
+
+  cluster_name  = aws_eks_cluster.this.name
+  principal_arn = each.value
+  type          = "STANDARD"
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_access_policy_association" "cluster_admins" {
+  for_each = aws_eks_access_entry.cluster_admins # pair one policy association with each admin entry above
+
+  cluster_name  = aws_eks_cluster.this.name
+  principal_arn = each.value.principal_arn
+  policy_arn    = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+
+  access_scope {
+    type = "cluster" # cluster-wide admin, not namespace-scoped
+  }
+}
+
+resource "aws_eks_access_entry" "node_role" {
+  # NOTE(review): EKS auto-creates an access entry for a managed node group's
+  # node role; confirm this explicit EC2_LINUX entry does not race/conflict
+  # with the one the node group creates.
+  cluster_name  = aws_eks_cluster.this.name
+  principal_arn = aws_iam_role.node.arn
+  type          = "EC2_LINUX"
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_addon" "coredns" {
+  cluster_name                = aws_eks_cluster.this.name
+  addon_name                  = "coredns"
+  addon_version               = var.coredns_addon_version # null (the default) lets EKS pick its default addon version
+  resolve_conflicts_on_create = "OVERWRITE"
+  resolve_conflicts_on_update = "OVERWRITE"
+
+  depends_on = [aws_eks_node_group.default] # CoreDNS pods need worker nodes to schedule onto
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_addon" "kube_proxy" {
+  cluster_name                = aws_eks_cluster.this.name
+  addon_name                  = "kube-proxy"
+  addon_version               = var.kube_proxy_addon_version # null lets EKS pick its default addon version
+  resolve_conflicts_on_create = "OVERWRITE"
+  resolve_conflicts_on_update = "OVERWRITE"
+
+  depends_on = [aws_eks_node_group.default]
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_addon" "vpc_cni" {
+  cluster_name                = aws_eks_cluster.this.name
+  addon_name                  = "vpc-cni"
+  addon_version               = var.vpc_cni_addon_version # null lets EKS pick its default addon version
+  resolve_conflicts_on_create = "OVERWRITE"
+  resolve_conflicts_on_update = "OVERWRITE"
+
+  depends_on = [aws_eks_node_group.default]
+
+  tags = local.common_tags
+}
+
+resource "aws_eks_addon" "pod_identity_agent" {
+  cluster_name                = aws_eks_cluster.this.name
+  addon_name                  = "eks-pod-identity-agent"
+  addon_version               = var.pod_identity_agent_addon_version # null lets EKS pick its default addon version
+  resolve_conflicts_on_create = "OVERWRITE"
+  resolve_conflicts_on_update = "OVERWRITE"
+
+  depends_on = [aws_eks_node_group.default]
+
+  tags = local.common_tags
+}

+ 45 - 0
examples/basic/.terraform.lock.hcl

@@ -0,0 +1,45 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version     = "6.36.0"
+  constraints = "~> 6.0"
+  hashes = [
+    "h1:r9icn1WEZVvEXiy6ZKexLzAPnXkkt+22jJ9WQYPfKB0=",
+    "zh:0eb4481315564aaeec4905a804fd0df22c40f509ad2af63615eeaa90abacf81c",
+    "zh:12c3cddc461a8dbaa04387fe83420b64c4c05cb5479d181674168ca7daefcc38",
+    "zh:1b55a09661e80acf6826faa38dd8fbff24c2ef620d2a0a16918491a222c55370",
+    "zh:269cb1a406d0cac762bce82119247395a0bbf0d4ad2492fb2ea5653b4f44bc05",
+    "zh:3bfb78e3345f0c3846e76578952a09fb5dda05d2d73e19473fb0af0000469a66",
+    "zh:3ead4f4388c7dd78ed198082a981746324da0d7a51460c9b455fd884d86fc82c",
+    "zh:44906654199991b3f1a21c6a984bc5f9f556ff4baa4e5f77e168968e941c2725",
+    "zh:4803d050d581b05b0fd0ae5cce95ec1784d66e2bc9da4b1f7663df0ce7914609",
+    "zh:4cf9fe8fae58b62e83c0672a9c66e0963b7289aaf768a250e9bc44570d82cbd5",
+    "zh:5bfd7a1fb3116164b411777115dd4b272a68984fa949c687e41a3041318c82f1",
+    "zh:77cbcf2db512617f10b81e11c20d40fa534ef07163171cbe35214fa8f74b4e85",
+    "zh:8201cabed01f1434bf9ea7fbcf2a95612a87a0398b870b2643bd1a5119793d2d",
+    "zh:9aaded4cf36ec2abbe35086733a4510e08819698180b21a9387ba4112aee02e0",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:f594ef2683a0d23d3a6f0ad6c84a55ed79368c158ee08c2f3b7c41ec446a701f",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+  version     = "4.2.1"
+  constraints = ">= 4.0.0"
+  hashes = [
+    "h1:F5d6bQY8UlBo0D71Sv7CsV+3aZOFz0yeNF+vufog7h4=",
+    "zh:0d1e7d07ac973b97fa228f46596c800de830820506ee145626f079dd6bbf8d8a",
+    "zh:5c7e3d4348cb4861ab812973ef493814a4b224bdd3e9d534a7c8a7c992382b86",
+    "zh:7c6d4a86cd7a4e9c1025c6b3a3a6a45dea202af85d870cddbab455fb1bd568ad",
+    "zh:7d0864755ba093664c4b2c07c045d3f5e3d7c799dda1a3ef33d17ed1ac563191",
+    "zh:83734f57950ab67c0d6a87babdb3f13c908cbe0a48949333f489698532e1391b",
+    "zh:951e3c285218ebca0cf20eaa4265020b4ef042fea9c6ade115ad1558cfe459e5",
+    "zh:b9543955b4297e1d93b85900854891c0e645d936d8285a190030475379c5c635",
+    "zh:bb1bd9e86c003d08c30c1b00d44118ed5bbbf6b1d2d6f7eaac4fa5c6ebea5933",
+    "zh:c9477bfe00653629cd77ddac3968475f7ad93ac3ca8bc45b56d1d9efb25e4a6e",
+    "zh:d4cfda8687f736d0cba664c22ec49dae1188289e214ef57f5afe6a7217854fed",
+    "zh:dc77ee066cf96532a48f0578c35b1eaf6dc4d8ddd0e3ae8e029a3b10676dd5d3",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}

+ 17 - 0
examples/basic/README.md

@@ -0,0 +1,17 @@
+# Basic Example
+
+This example deploys the local EKS module with a new VPC and a bootstrap managed node group.
+
+## Usage
+
+```bash
+cd examples/basic
+terraform init
+terraform plan
+```
+
+Override values with `-var` arguments or a `.tfvars` file if needed.
+
+## Notes
+
+- The managed node group provides the initial cluster capacity.

+ 32 - 0
examples/basic/main.tf

@@ -0,0 +1,32 @@
+terraform {
+  # Terraform >= 1.9.0 is required: the root module's variables.tf uses
+  # cross-variable validation conditions (e.g. var.create_vpc referenced from
+  # existing_vpc_id's validation), which older releases reject.
+  required_version = ">= 1.9.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 6.0"
+    }
+  }
+}
+
+provider "aws" {
+  region = var.region
+}
+
+# Instantiate the EKS module from the repository root with its default
+# networking (a new VPC) and a single managed node group.
+module "eks" {
+  source = "../.."
+
+  name   = var.name
+  region = var.region
+
+  kubernetes_version = var.kubernetes_version
+
+  node_instance_types = var.node_instance_types
+  node_desired_size   = var.node_desired_size
+  node_min_size       = var.node_min_size
+  node_max_size       = var.node_max_size
+
+  cluster_admin_principal_arns = var.cluster_admin_principal_arns
+
+  tags = var.tags
+}

+ 14 - 0
examples/basic/outputs.tf

@@ -0,0 +1,14 @@
+# Pass-through outputs from the root module for convenient CLI inspection.
+output "cluster_name" {
+  description = "EKS cluster name."
+  value       = module.eks.cluster_name
+}
+
+output "cluster_endpoint" {
+  description = "EKS cluster endpoint."
+  value       = module.eks.cluster_endpoint
+}
+
+output "configure_kubectl" {
+  description = "Command to configure kubectl for this example cluster."
+  value       = module.eks.configure_kubectl # run this after terraform apply
+}

+ 2 - 0
examples/basic/terraform.tfvars

@@ -0,0 +1,2 @@
+name   = "ben"       # NOTE(review): personal placeholder committed to the repo — replace before applying
+region = "us-east-2" # overrides the variable default (us-east-1)

+ 55 - 0
examples/basic/variables.tf

@@ -0,0 +1,55 @@
+variable "name" {
+  description = "Name for the EKS cluster and related resources."
+  type        = string
+  default     = "example-eks"
+}
+
+variable "region" {
+  description = "AWS region to deploy into."
+  type        = string
+  default     = "us-east-1" # terraform.tfvars in this example overrides this to us-east-2
+}
+
+variable "kubernetes_version" {
+  description = "EKS Kubernetes version."
+  type        = string
+  default     = "1.35" # kept in sync with the root module's default
+}
+
+variable "node_instance_types" {
+  description = "Instance types for the default managed node group."
+  type        = list(string)
+  default     = ["t3.large"]
+}
+
+variable "node_desired_size" {
+  description = "Desired node count."
+  type        = number
+  default     = 2
+}
+
+variable "node_min_size" {
+  description = "Minimum node count."
+  type        = number
+  default     = 2
+}
+
+variable "node_max_size" {
+  description = "Maximum node count."
+  type        = number
+  default     = 4
+}
+
+variable "cluster_admin_principal_arns" {
+  description = "Optional IAM principal ARNs to grant cluster-admin access."
+  type        = list(string)
+  default     = [] # empty: only the terraform-apply identity is admin (bootstrap permissions)
+}
+
+variable "tags" {
+  description = "Tags applied to resources."
+  type        = map(string)
+  default = {
+    Example = "basic"
+  }
+}

+ 67 - 0
iam.tf

@@ -0,0 +1,67 @@
+data "aws_iam_policy_document" "eks_cluster_assume_role" {
+  # Trust policy letting the EKS control-plane service assume the cluster role.
+  statement {
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "cluster" {
+  name               = "${var.name}-eks-cluster-role"
+  assume_role_policy = data.aws_iam_policy_document.eks_cluster_assume_role.json
+
+  tags = local.common_tags
+}
+
+resource "aws_iam_role_policy_attachment" "cluster_policy" {
+  role       = aws_iam_role.cluster.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+}
+
+data "aws_iam_policy_document" "eks_node_assume_role" {
+  # Trust policy letting EC2 worker instances assume the node role.
+  statement {
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "node" {
+  name               = "${var.name}-eks-node-role"
+  assume_role_policy = data.aws_iam_policy_document.eks_node_assume_role.json
+
+  tags = local.common_tags
+}
+
+resource "aws_iam_role_policy_attachment" "node_worker_policy" {
+  role       = aws_iam_role.node.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+}
+
+resource "aws_iam_role_policy_attachment" "node_cni_policy" {
+  role       = aws_iam_role.node.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+}
+
+resource "aws_iam_role_policy_attachment" "node_ecr_policy" {
+  role       = aws_iam_role.node.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly" # pull-only: nodes never push images
+}
+
+data "tls_certificate" "eks_oidc" {
+  # Fetch the cluster OIDC issuer's certificate chain so its thumbprint can be pinned below.
+  url = aws_eks_cluster.this.identity[0].oidc[0].issuer
+}
+
+resource "aws_iam_openid_connect_provider" "this" {
+  # IAM OIDC provider for the cluster, enabling IAM Roles for Service Accounts (IRSA).
+  client_id_list  = ["sts.amazonaws.com"]
+  thumbprint_list = [data.tls_certificate.eks_oidc.certificates[0].sha1_fingerprint]
+  url             = aws_eks_cluster.this.identity[0].oidc[0].issuer
+
+  tags = local.common_tags
+}

+ 30 - 0
locals.tf

@@ -0,0 +1,30 @@
+data "aws_availability_zones" "available" {
+  state = "available"
+}
+
+locals {
+  # Use at most the requested number of AZs, capped by what the region offers.
+  az_count = min(var.availability_zone_count, length(data.aws_availability_zones.available.names))
+  azs      = slice(data.aws_availability_zones.available.names, 0, local.az_count)
+
+  # newbits = 4 carves the VPC CIDR into 16 equal child subnets (a /20 each
+  # for the default /16). Public subnets take netnum 0..az_count-1, private
+  # subnets take az_count..2*az_count-1, so az_count above 8 would exhaust
+  # the 16 available netnums and fail at plan time.
+  public_subnet_cidrs = [
+    for index in range(local.az_count) : cidrsubnet(var.vpc_cidr, 4, index)
+  ]
+
+  private_subnet_cidrs = [
+    for index in range(local.az_count) : cidrsubnet(var.vpc_cidr, 4, index + local.az_count)
+  ]
+
+  common_tags = merge(
+    {
+      Terraform   = "true"
+      Module      = "eks-new-vpc"
+      Environment = var.name
+    },
+    var.tags # user tags win on key collisions (merge keeps the later map's value)
+  )
+
+  # Indirection between created and caller-supplied networking: everything
+  # else in the module consumes these locals, never the resources directly.
+  vpc_id = var.create_vpc ? aws_vpc.this[0].id : var.existing_vpc_id
+
+  public_subnet_ids  = var.create_vpc ? values(aws_subnet.public)[*].id : var.existing_public_subnet_ids
+  private_subnet_ids = var.create_vpc ? values(aws_subnet.private)[*].id : var.existing_private_subnet_ids
+}

+ 91 - 0
networking.tf

@@ -0,0 +1,91 @@
+resource "aws_internet_gateway" "this" {
+  count = var.create_vpc ? 1 : 0
+
+  vpc_id = aws_vpc.this[0].id
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-igw"
+    }
+  )
+}
+
+# NAT key scheme: with single_nat_gateway one entry keyed "shared" pinned to
+# the first AZ; otherwise one entry per AZ keyed by the AZ name. The EIP and
+# NAT maps below must use the same keys so aws_nat_gateway can look up its EIP.
+resource "aws_eip" "nat" {
+  for_each = var.create_vpc ? (var.single_nat_gateway ? { shared = local.azs[0] } : { for az in local.azs : az => az }) : {}
+
+  domain = "vpc"
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-nat-eip-${each.key}"
+    }
+  )
+}
+
+resource "aws_nat_gateway" "this" {
+  for_each = var.create_vpc ? (var.single_nat_gateway ? { shared = local.azs[0] } : { for az in local.azs : az => az }) : {}
+
+  allocation_id = aws_eip.nat[each.key].id
+  subnet_id     = aws_subnet.public[each.value].id # NAT gateways live in the public subnet of their AZ
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-nat-${each.key}"
+    }
+  )
+
+  depends_on = [aws_internet_gateway.this] # a NAT gateway is only functional once the VPC has an IGW
+}
+
+resource "aws_route_table" "public" {
+  count = var.create_vpc ? 1 : 0
+
+  vpc_id = aws_vpc.this[0].id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.this[0].id
+  }
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-public-rt"
+    }
+  )
+}
+
+resource "aws_route_table_association" "public" {
+  for_each = var.create_vpc ? aws_subnet.public : {}
+
+  subnet_id      = each.value.id
+  route_table_id = aws_route_table.public[0].id # all public subnets share one route table
+}
+
+# One private route table per AZ so each can point at its AZ-local NAT
+# gateway (or the single "shared" one when single_nat_gateway is true).
+resource "aws_route_table" "private" {
+  for_each = var.create_vpc ? aws_subnet.private : {}
+
+  vpc_id = aws_vpc.this[0].id
+
+  route {
+    cidr_block     = "0.0.0.0/0"
+    nat_gateway_id = aws_nat_gateway.this[var.single_nat_gateway ? "shared" : each.key].id
+  }
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-private-rt-${each.key}"
+    }
+  )
+}
+
+resource "aws_route_table_association" "private" {
+  for_each = var.create_vpc ? aws_subnet.private : {}
+
+  subnet_id      = each.value.id
+  route_table_id = aws_route_table.private[each.key].id
+}

+ 80 - 0
outputs.tf

@@ -0,0 +1,80 @@
+output "cluster_name" {
+  description = "Name of the EKS cluster."
+  value       = aws_eks_cluster.this.name
+}
+
+output "cluster_arn" {
+  description = "ARN of the EKS cluster."
+  value       = aws_eks_cluster.this.arn
+}
+
+output "cluster_version" {
+  description = "Kubernetes version of the EKS cluster."
+  value       = aws_eks_cluster.this.version
+}
+
+output "cluster_endpoint" {
+  description = "Kubernetes API server endpoint."
+  value       = aws_eks_cluster.this.endpoint
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64-encoded certificate data required for Kubernetes clients."
+  value       = aws_eks_cluster.this.certificate_authority[0].data
+  sensitive   = true # keep the CA material out of plain CLI/plan output
+}
+
+output "cluster_oidc_issuer_url" {
+  description = "OIDC issuer URL for the cluster."
+  value       = aws_eks_cluster.this.identity[0].oidc[0].issuer
+}
+
+output "oidc_provider_arn" {
+  description = "ARN of the IAM OIDC provider for the cluster."
+  value       = aws_iam_openid_connect_provider.this.arn
+}
+
+output "vpc_id" {
+  description = "ID of the VPC used by the cluster."
+  value       = local.vpc_id # created VPC or existing_vpc_id, depending on create_vpc
+}
+
+output "public_subnet_ids" {
+  description = "IDs of the public subnets used by the cluster."
+  value       = local.public_subnet_ids
+}
+
+output "private_subnet_ids" {
+  description = "IDs of the private subnets used by the cluster."
+  value       = local.private_subnet_ids
+}
+
+output "node_group_name" {
+  description = "Name of the managed node group."
+  value       = aws_eks_node_group.default.node_group_name
+}
+
+output "node_role_name" {
+  description = "IAM role name used by the default managed node group."
+  value       = aws_iam_role.node.name
+}
+
+output "installed_addons" {
+  description = "Managed EKS addons configured by this module."
+  value = compact([ # compact() is defensive: it drops any empty-string names
+    aws_eks_addon.coredns.addon_name,
+    aws_eks_addon.kube_proxy.addon_name,
+    aws_eks_addon.vpc_cni.addon_name,
+    aws_eks_addon.pod_identity_agent.addon_name,
+  ])
+}
+
+output "cluster_admin_principal_arns" {
+  description = "IAM principals granted cluster-admin access through EKS access entries."
+  value       = [for entry in aws_eks_access_entry.cluster_admins : entry.principal_arn]
+}
+
+output "configure_kubectl" {
+  description = "Command to update local kubeconfig for the cluster."
+  value       = "aws eks update-kubeconfig --region ${var.region} --name ${aws_eks_cluster.this.name}"
+}

+ 172 - 0
variables.tf

@@ -0,0 +1,172 @@
+variable "name" {
+  description = "Base name used for created resources."
+  type        = string
+}
+
+variable "region" {
+  description = "AWS region for informational outputs and examples."
+  type        = string
+}
+
+variable "kubernetes_version" {
+  description = "EKS Kubernetes version."
+  type        = string
+  default     = "1.35"
+}
+
+variable "vpc_cidr" {
+  description = "CIDR block for the new VPC."
+  type        = string
+  default     = "10.0.0.0/16"
+}
+
+variable "create_vpc" {
+  description = "Whether to create a new VPC and subnets. Set to false to use existing networking."
+  type        = bool
+  default     = true
+}
+
+variable "existing_vpc_id" {
+  description = "Existing VPC ID to use when create_vpc is false."
+  type        = string
+  default     = null
+  nullable    = true
+
+  # Cross-variable validation (references var.create_vpc): supported only on
+  # Terraform >= 1.9.0.
+  validation {
+    condition     = var.create_vpc || var.existing_vpc_id != null
+    error_message = "existing_vpc_id must be set when create_vpc is false."
+  }
+}
+
+variable "existing_public_subnet_ids" {
+  description = "Existing public subnet IDs to use when create_vpc is false."
+  type        = list(string)
+  default     = []
+}
+
+variable "existing_private_subnet_ids" {
+  description = "Existing private subnet IDs to use when create_vpc is false."
+  type        = list(string)
+  default     = []
+
+  # Cross-variable validation: also requires Terraform >= 1.9.0.
+  validation {
+    condition     = var.create_vpc || length(var.existing_private_subnet_ids) >= 2
+    error_message = "At least two existing_private_subnet_ids must be provided when create_vpc is false."
+  }
+}
+
+variable "availability_zone_count" {
+  description = "How many availability zones to spread the cluster across."
+  type        = number
+  default     = 2
+
+  # NOTE(review): locals.tf carves subnets with cidrsubnet(newbits = 4), which
+  # yields 16 netnums total; values above 8 would fail at plan time.
+  validation {
+    condition     = var.availability_zone_count >= 2
+    error_message = "availability_zone_count must be at least 2."
+  }
+}
+
+variable "single_nat_gateway" {
+  description = "Whether to create one shared NAT gateway instead of one per private subnet AZ."
+  type        = bool
+  default     = true # cost optimization; trades away AZ fault isolation
+}
+
+variable "cluster_endpoint_public_access" {
+  description = "Whether the EKS API server endpoint is publicly accessible."
+  type        = bool
+  default     = true
+}
+
+variable "cluster_endpoint_private_access" {
+  description = "Whether the EKS API server endpoint is privately accessible."
+  type        = bool
+  default     = true
+}
+
+variable "cluster_public_access_cidrs" {
+  description = "CIDR ranges allowed to access the public EKS API endpoint."
+  type        = list(string)
+  default     = ["0.0.0.0/0"] # open by default; tighten in real environments
+}
+
+variable "node_instance_types" {
+  description = "EC2 instance types for the managed node group."
+  type        = list(string)
+  default     = ["t3.medium"]
+}
+
+variable "node_capacity_type" {
+  description = "Capacity type for the managed node group."
+  type        = string
+  default     = "ON_DEMAND"
+
+  validation {
+    condition     = contains(["ON_DEMAND", "SPOT"], var.node_capacity_type)
+    error_message = "node_capacity_type must be ON_DEMAND or SPOT."
+  }
+}
+
+variable "node_disk_size" {
+  description = "Disk size in GiB for worker nodes."
+  type        = number
+  default     = 20
+}
+
+variable "node_desired_size" {
+  description = "Desired node count for the managed node group."
+  type        = number
+  default     = 2
+}
+
+variable "node_min_size" {
+  description = "Minimum node count for the managed node group."
+  type        = number
+  default     = 2
+}
+
+variable "node_max_size" {
+  description = "Maximum node count for the managed node group."
+  type        = number
+  default     = 4
+}
+
+variable "tags" {
+  description = "Additional tags to apply to all supported resources."
+  type        = map(string)
+  default     = {}
+}
+
+variable "cluster_admin_principal_arns" {
+  description = "Additional IAM principal ARNs to grant EKS cluster-admin access."
+  type        = list(string)
+  default     = []
+}
+
+# For all addon version variables below, null (the default) lets EKS select
+# its default addon version for the cluster's Kubernetes version.
+variable "coredns_addon_version" {
+  description = "Optional explicit version for the CoreDNS EKS addon."
+  type        = string
+  default     = null
+  nullable    = true
+}
+
+variable "kube_proxy_addon_version" {
+  description = "Optional explicit version for the kube-proxy EKS addon."
+  type        = string
+  default     = null
+  nullable    = true
+}
+
+variable "vpc_cni_addon_version" {
+  description = "Optional explicit version for the VPC CNI EKS addon."
+  type        = string
+  default     = null
+  nullable    = true
+}
+
+variable "pod_identity_agent_addon_version" {
+  description = "Optional explicit version for the EKS Pod Identity Agent addon."
+  type        = string
+  default     = null
+  nullable    = true
+}

+ 15 - 0
versions.tf

@@ -0,0 +1,15 @@
+terraform {
+  # Module-wide version constraints.
+  #
+  # Terraform >= 1.9.0 is required (not 1.5): the validation blocks in
+  # variables.tf reference other variables (e.g. existing_vpc_id's condition
+  # reads var.create_vpc), and cross-object references in variable validation
+  # were only introduced in Terraform v1.9.0. On 1.5-1.8 `terraform init`
+  # would succeed but validation would fail.
+  required_version = ">= 1.9.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 6.0" # matches the committed .terraform.lock.hcl (6.36.0)
+    }
+
+    tls = {
+      source  = "hashicorp/tls"
+      version = ">= 4.0" # used by data "tls_certificate" for the OIDC thumbprint
+    }
+  }
+}

+ 59 - 0
vpc.tf

@@ -0,0 +1,59 @@
+resource "aws_vpc" "this" {
+  count = var.create_vpc ? 1 : 0
+
+  cidr_block           = var.vpc_cidr
+  enable_dns_hostnames = true # required for EKS nodes/endpoints to resolve in-VPC names
+  enable_dns_support   = true
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name = "${var.name}-vpc"
+    }
+  )
+}
+
+resource "aws_subnet" "public" {
+  # Keyed by AZ name; CIDRs come from locals.public_subnet_cidrs by index.
+  for_each = var.create_vpc ? {
+    for index, az in local.azs : az => {
+      cidr_block = local.public_subnet_cidrs[index]
+      az         = az
+    }
+  } : {}
+
+  vpc_id                  = aws_vpc.this[0].id
+  availability_zone       = each.value.az
+  cidr_block              = each.value.cidr_block
+  map_public_ip_on_launch = true # instances launched here get public IPs automatically
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name                                = "${var.name}-public-${each.value.az}"
+      "kubernetes.io/cluster/${var.name}" = "shared"
+      "kubernetes.io/role/elb"            = "1" # subnet discovery tag for internet-facing load balancers
+    }
+  )
+}
+
+resource "aws_subnet" "private" {
+  for_each = var.create_vpc ? {
+    for index, az in local.azs : az => {
+      cidr_block = local.private_subnet_cidrs[index]
+      az         = az
+    }
+  } : {}
+
+  vpc_id            = aws_vpc.this[0].id
+  availability_zone = each.value.az
+  cidr_block        = each.value.cidr_block
+
+  tags = merge(
+    local.common_tags,
+    {
+      Name                                = "${var.name}-private-${each.value.az}"
+      "kubernetes.io/cluster/${var.name}" = "shared"
+      "kubernetes.io/role/internal-elb"   = "1" # subnet discovery tag for internal load balancers
+    }
+  )
+}