From 73902869d5cf37f24127f57a6f4239541823b647 Mon Sep 17 00:00:00 2001
From: icurfer
Date: Tue, 18 Nov 2025 21:20:47 +0900
Subject: [PATCH] init

---
 README.md    |   4 +
 main.tf      | 767 +++++++++++++++++++++++++++++++++++++++++++++++++++
 outputs.tf   |  39 +++
 terraform.tf |  11 +
 4 files changed, 821 insertions(+)
 create mode 100644 main.tf
 create mode 100644 outputs.tf
 create mode 100644 terraform.tf

diff --git a/README.md b/README.md
index 9098d07..fe424d5 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,6 @@
 # assignment03
+- Code for Assignment 3.
+- Covers the steps from provisioning EKS through registering the Karpenter IAM roles.
+- After deployment, register the Karpenter RBAC entry in the aws-auth ConfigMap.
+- Karpenter itself is deployed from inside the EKS cluster.
\ No newline at end of file
diff --git a/main.tf b/main.tf
new file mode 100644
index 0000000..187c765
--- /dev/null
+++ b/main.tf
@@ -0,0 +1,767 @@
+/*
+  Provider information.
+  Uses the default account credentials.
+*/
+provider "aws" {
+  region = "ap-northeast-2" # The provider must be configured per region.
+}
+
+/*
+  Read back the EKS cluster info and
+  configure the Kubernetes provider from it.
+*/
+data "aws_eks_cluster" "eks" {
+  # name = aws_eks_cluster.eks-cluster.name
+  name       = module.eks_cluster.cluster_name
+  depends_on = [module.eks_cluster]
+}
+
+data "aws_eks_cluster_auth" "eks" {
+  # name = aws_eks_cluster.eks-cluster.name
+  name       = module.eks_cluster.cluster_name
+  depends_on = [module.eks_cluster]
+}
+
+data "tls_certificate" "oidc" {
+  url        = data.aws_eks_cluster.eks.identity[0].oidc[0].issuer
+  depends_on = [module.eks_cluster]
+}
+
+provider "kubernetes" {
+  alias = "eks"
+
+  host                   = data.aws_eks_cluster.eks.endpoint
+  token                  = data.aws_eks_cluster_auth.eks.token
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
+}
+
+#################
+###  Infra   ###
+#################
+// Local variables
+locals {
+  account_id = data.aws_caller_identity.this.account_id
+  region     = "ap-northeast-2"
+  common_tags = {
+    project = "icurfer-demo"
+    owner   = "icurfer"
+  }
+  cidr = {
+    vpc            = "10.3.0.0/16"
+    zone_a         = "10.3.1.0/24"
+    zone_c         = "10.3.3.0/24"
+    zone_a_private = "10.3.2.0/24"
+    zone_c_private = "10.3.4.0/24"
+  }
+  udp_port = {
+    dns_port = 53
+  }
+  any_protocol  = "-1"
+  tcp_protocol  = "tcp"
+  icmp_protocol = "icmp"
+  all_ips       = ["0.0.0.0/0"]
+  admin_ip      = ["118.222.2.22/32"]
+
+  node_group_scaling_config = {
+    desired_size = 2
+    max_size     = 4
+    min_size     = 1
+  }
+}
+
+// Get the current account identity
+data "aws_caller_identity" "this" {}
+
+##################################
+###  Create Infra - Network   ###
+##################################
+
+// Create the VPC
+module "vpc" {
+  source     = "./modules/vpc"
+  tag_name   = local.common_tags.project
+  cidr_block = "10.3.0.0/16"
+}
+
+// Internet gateway
+module "igw" {
+  source = "./modules/igw"
+
+  vpc_id = module.vpc.vpc_id
+
+  tag_name = local.common_tags.project
+
+  depends_on = [
+    module.vpc
+  ]
+}
+
+// Create the public subnets
+module "subnet_ext" {
+  source = "./modules/vpc-subnet"
+
+  // set variables, ./modules/vpc-subnet/valiables.tf
+  vpc_id = module.vpc.vpc_id
+  subnet-az-list = {
+    "zone-a" = {
+      name = "${local.region}a"
+      cidr = local.cidr.zone_a
+    }
+    "zone-c" = {
+      name = "${local.region}c"
+      cidr = local.cidr.zone_c
+    }
+  }
+  public_ip_on = true
+
+  k8s_ingress = true # lets EKS create load balancers in the public subnets
+
+  tag_name = local.common_tags.project
+
+  depends_on = [
+    module.vpc
+  ]
+}
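+
+/*
+  Note: the vpc-subnet module is assumed to translate these flags into the
+  standard Kubernetes discovery tags (a sketch, not verified against the
+  module source):
+    k8s_ingress = true -> "kubernetes.io/role/elb" = "1"  (public load balancers)
+    karpenter   = true -> "karpenter.sh/discovery" = <cluster name>, applied to
+                          the private subnets below, which Karpenter node
+                          templates use for subnet discovery.
+*/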
+
+// NAT gateway for outbound traffic from the private subnets
+module "ngw" {
+  source    = "./modules/nat-gateway"
+  subnet_id = module.subnet_ext.subnet.zone-a.id
+  # subnet_id = module.subnet_public.subnet.zone-a.id
+
+  tag_name = local.common_tags.project
+
+  depends_on = [
+    module.subnet_ext
+  ]
+}
+
+// Create the public route table
+module "route_public" {
+  source   = "./modules/route-table"
+  vpc_id   = module.vpc.vpc_id
+  tag_name = "${local.common_tags.project}-ext"
+}
+
+// Add a rule to the route table
+module "route_add" {
+  source           = "./modules/route-add"
+  route_id         = module.route_public.route_id
+  igw_id           = module.igw.igw_id
+  gw_type          = "igw"
+  destination_cidr = "0.0.0.0/0"
+}
+
+// Associate the subnets with the route table
+module "route_association" {
+  source         = "./modules/route-association"
+  route_table_id = module.route_public.route_id
+
+  association_count = 2
+  subnet_ids        = [module.subnet_ext.subnet.zone-a.id, module.subnet_ext.subnet.zone-c.id]
+}
+
+// Create the private subnets
+module "subnet_int" {
+  source = "./modules/vpc-subnet"
+
+  // set variables, ./modules/vpc-subnet/valiables.tf
+  vpc_id = module.vpc.vpc_id
+  subnet-az-list = {
+    "zone-a" = {
+      name = "${local.region}a"
+      cidr = local.cidr.zone_a_private
+    }
+    "zone-c" = {
+      name = "${local.region}c"
+      cidr = local.cidr.zone_c_private
+    }
+  }
+  public_ip_on = false
+
+  k8s_ingress = false # no EKS load balancers in these subnets
+
+  karpenter        = true # applied when using Karpenter
+  eks_cluster_name = local.common_tags.project
+
+  tag_name = local.common_tags.project
+
+  depends_on = [
+    module.vpc
+  ]
+}
+
+// Create the private route table
+module "route_private" {
+  source   = "./modules/route-table"
+  tag_name = "${local.common_tags.project}-int"
+  vpc_id   = module.vpc.vpc_id
+}
+
+module "route_add_nat" {
+  source           = "./modules/route-add"
+  route_id         = module.route_private.route_id
+  nat_id           = module.ngw.nat_id
+  gw_type          = "nat"
+  destination_cidr = "0.0.0.0/0"
+}
+
+module "route_association_nat" {
+  source         = "./modules/route-association"
+  route_table_id = module.route_private.route_id
+
+  association_count = 2
+  subnet_ids        = [module.subnet_int.subnet.zone-a.id, module.subnet_int.subnet.zone-c.id]
+}
+
+##################################
+###  Create Infra - Bastion   ###
+##################################
+module "bastion" {
+  source              = "./modules/ec2"
+  ami_name            = "ami-010be25c3775061c9" // Ubuntu 22.04 LTS
+  instance_type       = "t2.micro"
+  tag_name            = "bastion"
+  public_ip_associate = true
+  key_name            = "icurfer-demo"
+  public_subnet       = module.subnet_ext.subnet.zone-a.id
+  private_subnet      = module.subnet_int.subnet.zone-a.id
+  sg_list             = [module.bastion_sg.sg_id]
+  user_data_file      = null
+  # user_data_file = "${path.module}/assignments.sh"
+
+  depends_on = [
+    module.bastion_sg
+  ]
+}
+
+module "bastion_sg" {
+  source   = "./modules/sg"
+  sg_name  = "${local.common_tags.project}-bastion-sg"
+  vpc_id   = module.vpc.vpc_id
+  tag_name = local.common_tags.project
+}
+
+module "bastion_sg_ingress" {
+  source = "./modules/sg-rule-add"
+  type   = "ingress"
+  rules = {
+    "ssh" = {
+      from_port   = "22"
+      to_port     = "22"
+      protocol    = "tcp"
+      cidr_blocks = "118.222.2.22/32"
+    }
+  }
+
+  security_group_id = module.bastion_sg.sg_id
+
+  tag_name = local.common_tags.project
+}
+
+module "bastion_sg_egress" {
+  source = "./modules/sg-rule-add"
+  type   = "egress"
+  rules = {
+    "all" = {
+      from_port   = "-1"
+      to_port     = "-1"
+      protocol    = "-1"
+      cidr_blocks = "0.0.0.0/0"
+    }
+  }
+
+  security_group_id = module.bastion_sg.sg_id
+
+  tag_name = local.common_tags.project
+}
+
+############################
+###  Get AWS EKS Role   ###
+############################
+
+// EKS control-plane trust policy
+data "aws_iam_policy_document" "eks-assume-role-policy" {
+  statement {
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
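+
+/*
+  Sanity check after apply, assuming the iam module registers the role under
+  its iam_name input (see module "eks_cluster_iam" below):
+    aws iam get-role --role-name eks-cluster-demo --query 'Role.AssumeRolePolicyDocument'
+  should print the eks.amazonaws.com trust relationship rendered above.
+*/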
"eks_node_group_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} +// karpenter ploicy +data "aws_iam_policy_document" "kerpenter_ng_role" { + statement { + effect = "Allow" + + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} +#### karpenter iam str #### +data "aws_iam_policy_document" "karpenter_controller_trust_policy" { + statement { + effect = "Allow" + + actions = [ + "sts:AssumeRoleWithWebIdentity" + ] + + principals { + type = "Federated" + identifiers = [ + aws_iam_openid_connect_provider.oidc_provider.arn + ] + } + + condition { + test = "StringEquals" + variable = "${replace(data.aws_eks_cluster.eks.identity[0].oidc[0].issuer, "https://", "")}:aud" + values = ["sts.amazonaws.com"] + } + + condition { + test = "StringEquals" + variable = "${replace(data.aws_eks_cluster.eks.identity[0].oidc[0].issuer, "https://", "")}:sub" + values = ["system:serviceaccount:karpenter:karpenter"] + } + } +} + +data "aws_iam_policy_document" "karpenter_controller_permission_policy" { + statement { + sid = "Karpenter" + effect = "Allow" + + actions = [ + "ssm:GetParameter", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", + "ec2:DeleteLaunchTemplate", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceStatus", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeCapacityReservations", + "pricing:GetProducts", + "iam:CreateInstanceProfile", + "iam:GetInstanceProfile", + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:PassRole", + "iam:TagInstanceProfile", + "iam:UntagInstanceProfile", + "iam:TagRole", + "iam:UntagRole", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:CreateServiceLinkedRole" + ] + + resources = ["*"] + } + + statement { + sid = "ConditionalEC2Termination" + effect = "Allow" + + actions = [ + "ec2:TerminateInstances" + ] + + resources = ["*"] + + condition { + test = "StringLike" + variable = "ec2:ResourceTag/karpenter.sh/provisioner-name" + values = ["*"] + } + } + + statement { + sid = "PassNodeIAMRole" + effect = "Allow" + + actions = [ + "iam:PassRole" + ] + + resources = [ + "arn:aws:iam::${local.account_id}:role/KarpenterNodeRole" + ] + } + + statement { + sid = "EKSClusterEndpointLookup" + effect = "Allow" + + actions = [ + "eks:DescribeCluster" + ] + + resources = [ + "arn:aws:eks:${local.region}:${local.account_id}:cluster/${local.common_tags.project}" + ] + } +} +#### karpenter iam end #### + +// eks controle-plane 역할 생성 +module "eks_cluster_iam" { + source = "./modules/iam" + iam_name = "eks-cluster-demo" + policy = data.aws_iam_policy_document.eks-assume-role-policy.json + tag_name = "${local.common_tags.project}" +} + +// eks controle 역할 정책 추가 +module "eks_cluster_iam_att" { + source = "./modules/iam-policy-attach" + iam_name = "eks-cluster-att1" + role_name = module.eks_cluster_iam.iam_name + arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" + + depends_on = [ + module.eks_cluster_iam + ] +} +module "eks_cluster_iam_att2" { + source = "./modules/iam-policy-attach" + iam_name = "eks-cluster-att2" + role_name = 
+
+// Create the EKS control-plane role
+module "eks_cluster_iam" {
+  source   = "./modules/iam"
+  iam_name = "eks-cluster-demo"
+  policy   = data.aws_iam_policy_document.eks-assume-role-policy.json
+  tag_name = local.common_tags.project
+}
+
+// Attach policies to the EKS control-plane role
+module "eks_cluster_iam_att" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "eks-cluster-att1"
+  role_name = module.eks_cluster_iam.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+
+  depends_on = [
+    module.eks_cluster_iam
+  ]
+}
+
+module "eks_cluster_iam_att2" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "eks-cluster-att2"
+  role_name = module.eks_cluster_iam.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
+
+  depends_on = [
+    module.eks_cluster_iam
+  ]
+}
+
+// Create the EKS node group role and attach its policies
+module "eks_nodegroup_iam" {
+  source   = "./modules/iam"
+  iam_name = "eks-nodegroup-test"
+  policy   = data.aws_iam_policy_document.eks_node_group_role.json
+  tag_name = local.common_tags.project
+}
+
+module "eks_nodegroup_iam_att_1" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "eks-nodegroup-att1"
+  role_name = module.eks_nodegroup_iam.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+
+  depends_on = [
+    module.eks_nodegroup_iam
+  ]
+}
+
+module "eks_nodegroup_iam_att_2" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "eks-nodegroup-att2"
+  role_name = module.eks_nodegroup_iam.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+
+  depends_on = [
+    module.eks_nodegroup_iam
+  ]
+}
+
+module "eks_nodegroup_iam_att_3" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "eks-nodegroup-att3"
+  role_name = module.eks_nodegroup_iam.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+
+  depends_on = [
+    module.eks_nodegroup_iam
+  ]
+}
+
+module "eks_sg" {
+  source  = "./modules/sg"
+  sg_name = "${local.common_tags.project}-eks-sg"
+  vpc_id  = module.vpc.vpc_id
+
+  karpenter        = true # applied when using Karpenter
+  eks_cluster_name = local.common_tags.project
+
+  tag_name = local.common_tags.project
+}
+
+module "eks_sg_ingress" {
+  source = "./modules/sg-rule-add"
+  type   = "ingress"
+  rules = {
+    "ssh" = {
+      from_port   = "22"
+      to_port     = "22"
+      protocol    = "tcp"
+      cidr_blocks = "${module.bastion.private_ip}/32"
+    }
+  }
+
+  security_group_id = module.eks_sg.sg_id
+
+  tag_name = local.common_tags.project
+}
+
+module "eks_sg_egress" {
+  source = "./modules/sg-rule-add"
+  type   = "egress"
+  rules = {
+    "all" = {
+      from_port   = "-1"
+      to_port     = "-1"
+      protocol    = "-1"
+      cidr_blocks = "0.0.0.0/0"
+    }
+  }
+
+  security_group_id = module.eks_sg.sg_id
+
+  tag_name = local.common_tags.project
+}
+
+module "eks_cluster" {
+  source       = "./modules/eks-cluster"
+  name         = local.common_tags.project
+  iam_role_arn = module.eks_cluster_iam.iam_arn
+  sg_list      = [module.eks_sg.sg_id]
+  # private subnets
+  subnet_list = [module.subnet_int.subnet.zone-a.id, module.subnet_int.subnet.zone-c.id]
+
+  depends_on = [
+    module.eks_cluster_iam,
+    module.eks_sg,
+  ]
+
+  client_id = data.aws_caller_identity.this.id
+}
+
+######################
+###  Kubernetes   ###
+######################
+# terraform import kubernetes_config_map.aws_auth kube-system/aws-auth
+resource "kubernetes_config_map" "aws_auth" {
+  provider = kubernetes.eks
+
+  metadata {
+    name      = "aws-auth"
+    namespace = "kube-system"
+  }
+
+  data = {
+    mapRoles = yamlencode([
+      {
+        groups   = ["system:bootstrappers", "system:nodes"]
+        rolearn  = "arn:aws:iam::${local.account_id}:role/${module.eks_nodegroup_iam.iam_name}"
+        username = "system:node:{{EC2PrivateDNSName}}"
+      }
+    ])
+
+    mapUsers = yamlencode([
+      {
+        groups   = ["system:masters"]
+        userarn  = "arn:aws:iam::${local.account_id}:user/${local.common_tags.project}"
+        username = "admin"
+      },
+      {
+        groups   = ["system:masters"]
+        userarn  = "arn:aws:iam::${local.account_id}:root"
+        username = "admin"
+      }
+    ])
+  }
+
+  depends_on = [
+    module.eks_nodegroup_iam,
+    module.eks_cluster,
+  ]
+}
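+
+/*
+  Per the README, the Karpenter node role must also be mapped here after
+  deployment so that nodes Karpenter launches can join the cluster. A sketch
+  of the additional mapRoles entry (KarpenterNodeRole is created further
+  below; substitute the real account ID):
+
+    - groups:
+        - system:bootstrappers
+        - system:nodes
+      rolearn: arn:aws:iam::<ACCOUNT_ID>:role/KarpenterNodeRole
+      username: system:node:{{EC2PrivateDNSName}}
+*/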
"${local.common_tags.project}-ondemand-ng" + cluster_name = module.eks_cluster.cluster_name + # iam_role_arn = module.eks_nodegroup_iam.iam_arn + iam_role_arn = "arn:aws:iam::${local.account_id}:role/eks-nodegroup-test" + # private subnet + subnet_list = [module.subnet_int.subnet.zone-a.id, module.subnet_int.subnet.zone-c.id] + + min_size = 1 + desired_size = 2 + max_size = 4 + + depends_on = [ + module.eks_nodegroup_iam, + module.eks_cluster, + ] +} + +module "eks_spot_ng" { + source = "./modules/eks-node-group" + + ng_type = "spot" + + node_group_name = "${local.common_tags.project}-spot-ng" + cluster_name = module.eks_cluster.cluster_name + # iam_role_arn = module.eks_nodegroup_iam.iam_arn + iam_role_arn = "arn:aws:iam::${local.account_id}:role/eks-nodegroup-test" + # private subnet + subnet_list = [module.subnet_int.subnet.zone-a.id, module.subnet_int.subnet.zone-c.id] + + min_size = 0 + desired_size = 0 + max_size = 3 + + depends_on = [ + module.eks_nodegroup_iam, + module.eks_cluster, + ] +} + +# https://developer.hashicorp.com/terraform/language/resources/terraform-data +resource "terraform_data" "kubeconfig" { + triggers_replace = [ + module.eks_cluster.cluster_id + ] + + provisioner "local-exec" { + command = "aws eks update-kubeconfig --name ${module.eks_cluster.cluster_name} --region ap-northeast-2 --kubeconfig ./kubeconfig" + } +} + +data "local_file" "kubeconfig" { + filename = "./kubeconfig" + + depends_on = [ + terraform_data.kubeconfig + ] +} + +output "kubeconfig_content" { + value = data.local_file.kubeconfig.content +} + + +########################## +### Karpenter Role ### +########################## +// eks 노드그룹 역할 생성 및 추가 +module "karpenter_node_role" { + source = "./modules/iam" + iam_name = "KarpenterNodeRole" + policy = data.aws_iam_policy_document.kerpenter_ng_role.json + tag_name = local.common_tags.project +} + +module "karpenter_node_role_att1" { + source = "./modules/iam-policy-attach" + iam_name = "karpenter-att1" + role_name = module.karpenter_node_role.iam_name + arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + depends_on = [ + module.karpenter_node_role + ] +} + +module "karpenter_node_role_att2" { + source = "./modules/iam-policy-attach" + iam_name = "karpenter-att2" + role_name = module.karpenter_node_role.iam_name + arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + depends_on = [ + module.karpenter_node_role + ] +} + +module "karpenter_node_role_att3" { + source = "./modules/iam-policy-attach" + iam_name = "karpenter-att3" + role_name = module.karpenter_node_role.iam_name + arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + depends_on = [ + module.karpenter_node_role + ] +} + +module "karpenter_node_role_att4" { + source = "./modules/iam-policy-attach" + iam_name = "karpenter-att4" + role_name = module.karpenter_node_role.iam_name + arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + depends_on = [ + module.karpenter_node_role + ] +} + + +/* + instance profile은 console에서 확인이 불가능. 
+
+##########################
+###  Karpenter Role   ###
+##########################
+// Create the Karpenter node role and attach its policies
+module "karpenter_node_role" {
+  source   = "./modules/iam"
+  iam_name = "KarpenterNodeRole"
+  policy   = data.aws_iam_policy_document.karpenter_ng_role.json
+  tag_name = local.common_tags.project
+}
+
+module "karpenter_node_role_att1" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "karpenter-att1"
+  role_name = module.karpenter_node_role.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+  depends_on = [
+    module.karpenter_node_role
+  ]
+}
+
+module "karpenter_node_role_att2" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "karpenter-att2"
+  role_name = module.karpenter_node_role.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+  depends_on = [
+    module.karpenter_node_role
+  ]
+}
+
+module "karpenter_node_role_att3" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "karpenter-att3"
+  role_name = module.karpenter_node_role.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+  depends_on = [
+    module.karpenter_node_role
+  ]
+}
+
+module "karpenter_node_role_att4" {
+  source    = "./modules/iam-policy-attach"
+  iam_name  = "karpenter-att4"
+  role_name = module.karpenter_node_role.iam_name
+  arn       = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+  depends_on = [
+    module.karpenter_node_role
+  ]
+}
+
+/*
+  Instance profiles are not visible in the IAM console; verify with:
+    aws iam list-instance-profiles | grep Karpenter
+*/
+resource "aws_iam_instance_profile" "karpenter_profile" {
+  name = "KarpenterNodeInstanceProfile"
+  role = module.karpenter_node_role.iam_name
+}
+
+/* Create the OIDC provider for IRSA */
+// https://docs.aws.amazon.com/ko_kr/eks/latest/userguide/enable-iam-roles-for-service-accounts.html
+// https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate
+resource "aws_iam_openid_connect_provider" "oidc_provider" {
+  url             = data.aws_eks_cluster.eks.identity[0].oidc[0].issuer
+  client_id_list  = ["sts.amazonaws.com"]
+  thumbprint_list = [data.tls_certificate.oidc.certificates[0].sha1_fingerprint]
+}
+
+// Trust policy goes on the role; the permission policy is attached below.
+module "karpenter_controller_role" {
+  source   = "./modules/iam"
+  iam_name = "KarpenterControllerRole"
+  policy   = data.aws_iam_policy_document.karpenter_controller_trust_policy.json
+  tag_name = local.common_tags.project
+}
+
+resource "aws_iam_policy" "karpenter_controller_permission_policy" {
+  name   = "KarpenterControllerPermission"
+  policy = data.aws_iam_policy_document.karpenter_controller_permission_policy.json
+}
+
+resource "aws_iam_policy_attachment" "karpenter_controller_attach" {
+  name       = "karpenter-controller-attach"
+  roles      = [module.karpenter_controller_role.iam_name]
+  policy_arn = aws_iam_policy.karpenter_controller_permission_policy.arn
+}
\ No newline at end of file
diff --git a/outputs.tf b/outputs.tf
new file mode 100644
index 0000000..f515cd6
--- /dev/null
+++ b/outputs.tf
@@ -0,0 +1,39 @@
+// main outputs
+output "aws_id" {
+  description = "The AWS Account ID."
+  value       = data.aws_caller_identity.this.account_id
+}
+
+output "info_vpc" {
+  description = "vpc_id & vpc_name"
+  value       = module.vpc
+}
+
+output "info_igw" {
+  description = "igw info"
+  value       = module.igw
+}
+
+output "info_subnet_ext" {
+  description = "public subnet info"
+  value       = module.subnet_ext
+}
+
+output "info_ngw" {
+  description = "ngw_id"
+  value       = module.ngw.nat_id
+}
+
+output "bastion" {
+  description = "bastion"
+  value       = module.bastion
+}
+
+output "info_eks_id" {
+  description = "eks_cluster_id"
+  value       = module.eks_cluster.cluster_id
+}
+
+output "info_eks_cluster" {
+  value = data.aws_eks_cluster.eks
+}
\ No newline at end of file
diff --git a/terraform.tf b/terraform.tf
new file mode 100644
index 0000000..d30e8e5
--- /dev/null
+++ b/terraform.tf
@@ -0,0 +1,11 @@
+// Terraform backend
+terraform {
+  cloud {
+
+    organization = "icurfer-demo"
+
+    workspaces {
+      name = "tf-cloud-backend"
+    }
+  }
+}
\ No newline at end of file