Creating an EKS Cluster


(Diagram drawn with Pluralith)

I want to build roughly what the diagram above shows, and I'd also like a kubeconfig to land on my local machine automatically, so I started writing the code...

 

################################################################################
# EKS Cluster
################################################################################

resource "aws_iam_role" "eks_cluster" {
  name = "EKSClusterRole_${var.cluster_name}"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow",
        Action = "sts:AssumeRole",
        Principal = {
          Service = "eks.amazonaws.com"
        }
      }
    ]
  })
}

resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.eks_cluster.name
}

resource "aws_cloudwatch_log_group" "this" {
  # The log group name format is /aws/eks/<cluster-name>/cluster
  # Reference: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
  name              = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = 1
}

resource "aws_eks_cluster" "this" {
  name     = var.cluster_name
  role_arn = aws_iam_role.eks_cluster.arn
  version  = var.cluster_version

  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

  vpc_config {
    subnet_ids = var.subnet_ids
  }

  depends_on = [
    aws_cloudwatch_log_group.this,
    aws_iam_role_policy_attachment.AmazonEKSClusterPolicy
  ]
}

################################################################################
# EKS Node Group
################################################################################

resource "aws_iam_role" "eks_node" {
  name = "EKSNodeRole_${var.cluster_name}"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow",
        Action = "sts:AssumeRole",
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      }
    ]
  })
}

resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.eks_node.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.eks_node.name
}

resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.eks_node.name
}

resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
  role       = aws_iam_role.eks_node.name
}

resource "aws_eks_node_group" "default" {
  cluster_name    = aws_eks_cluster.this.name
  node_group_name = "default"
  node_role_arn   = aws_iam_role.eks_node.arn
  subnet_ids      = var.subnet_ids
  instance_types  = var.node_group_instance_types

  scaling_config {
    desired_size = var.node_group_desired_size
    max_size     = var.node_group_max_size
    min_size     = var.node_group_min_size
  }

  update_config {
    max_unavailable = var.node_group_max_unavailable
  }

  depends_on = [
    aws_iam_role_policy_attachment.AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
    aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore
  ]
}


################################################################################
# Authentication
################################################################################

data "aws_eks_cluster_auth" "this" {
  name   = var.cluster_name
}

resource "local_file" "kubeconfig" {
  filename        = "kubeconfig.${var.cluster_name}"
  file_permission = "0600"
  content = yamlencode({
    apiVersion = "v1"
    clusters = [
      {
        cluster = {
          "certificate-authority-data" = aws_eks_cluster.this.certificate_authority[0].data
          server                       = aws_eks_cluster.this.endpoint
        }
        name = aws_eks_cluster.this.arn
      }
    ]
    contexts = [
      {
        context = {
          cluster = aws_eks_cluster.this.arn
          user    = aws_eks_cluster.this.arn
        }
        name = aws_eks_cluster.this.arn
      }
    ]
    "current-context" = aws_eks_cluster.this.arn
    kind              = "Config"
    preferences       = {}
    users = [
      {
        name = aws_eks_cluster.this.arn
        user = {
          exec = {
            apiVersion = "client.authentication.k8s.io/v1beta1"
            command    = "aws"
            args       = ["--region", var.region, "eks", "get-token", "--cluster-name", var.cluster_name]
          }
        }
      }
    ]
  })
}

If you use the local_file resource type there, a kubeconfig file lands on whatever machine ran Terraform. Just make sure to adjust the file name so it doesn't clobber an existing kubeconfig.
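
One small convenience worth adding (my addition, not in the original code): expose the generated file's location as an output, so you can find it without guessing the name.

output "kubeconfig_path" {
  # Absolute path of the file written by local_file.kubeconfig above.
  value = abspath(local_file.kubeconfig.filename)
}

After an apply, terraform output kubeconfig_path prints exactly where the file landed.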

 

The VPC everything is installed into is inherited from one created in a separate module, passed in through the module's variables.
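
For reference, the module's variables come out roughly like this. The names and types follow from the var.* references in the code above; the defaults are my own assumptions.

variable "region" {
  type = string
}

variable "cluster_name" {
  type = string
}

variable "cluster_version" {
  type    = string
  default = null # leaving this null lets EKS pick its current default version
}

variable "subnet_ids" {
  type = list(string)
}

variable "node_group_instance_types" {
  type    = list(string)
  default = ["t3.medium"] # assumption; size this to your workload
}

variable "node_group_desired_size" {
  type    = number
  default = 2
}

variable "node_group_max_size" {
  type    = number
  default = 3
}

variable "node_group_min_size" {
  type    = number
  default = 1
}

variable "node_group_max_unavailable" {
  type    = number
  default = 1
}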

Right after that, I want to use the Kubernetes and Helm providers to install things like Istio in one shot.

 

terraform {
  required_providers {
    # Declaring sources explicitly lets this module accept the aliased
    # provider configurations passed in from the root module.
    kubernetes = {
      source = "hashicorp/kubernetes"
    }
    helm = {
      source = "hashicorp/helm"
    }
  }
}

resource "kubernetes_labels" "cluster1_default" {
  api_version = "v1"
  kind        = "Namespace"
  metadata {
    name = "default"
  }
  labels = {
    istio-injection = "enabled"
  }
}


resource "kubernetes_namespace" "istio" {
  metadata {
    name = "istio-system"

    labels = {
      name = "istio-system"
    }
  }
}

resource "helm_release" "istio" {
  chart      = "base"
  name       = "istio-base"
  namespace  = "istio-system"
  repository = "https://istio-release.storage.googleapis.com/charts"
  depends_on = [
    kubernetes_namespace.istio
  ]
}

resource "helm_release" "istiod" {
  chart      = "istiod"
  name       = "istiod"
  namespace  = "istio-system"
  repository = "https://istio-release.storage.googleapis.com/charts"
  depends_on = [
    kubernetes_namespace.istio
  ]
}

resource "helm_release" "istio_ingress2" {
  chart      = "gateway"
  name       = "istio-ingress"
  namespace  = "default"
  repository = "https://istio-release.storage.googleapis.com/charts"
  depends_on = [
    helm_release.istio,
    helm_release.istiod
  ]
}

resource "helm_release" "telemetry" {
  chart     = "./helm/telemetry"
  name      = "telemetry"
  namespace = "istio-system"
}

This is all it takes. The important part is depends_on: because these resources are applied through different providers, the dependencies between them have to be stated explicitly. istiod needs the CRDs that the base chart installs, and the ingress gateway can't come up until istiod is running.

 

module "eks_cluster" {
  source = "./module/eks"

  providers = {
    aws = aws.sso-org-root
  }

  region                  = var.region
  cluster_name            = "cluster"
  subnet_ids              = [module.vpc.public_subnet_id_0, module.vpc.public_subnet_id_1]
  node_group_desired_size = 3
  node_group_max_size     = 3
  node_group_min_size     = 3
  depends_on = [
    module.vpc
  ]
}

provider "kubernetes" {
  alias                  = "eks-cluster"
  host                   = module.eks_cluster.host
  cluster_ca_certificate = module.eks_cluster.cluster_ca_certificate
  token                  = module.eks_cluster.token
}

provider "helm" {
  alias = "eks-cluster"
  kubernetes {
    host                   = module.eks_cluster.host
    cluster_ca_certificate = module.eks_cluster.cluster_ca_certificate
    token                  = module.eks_cluster.token
  }
}
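
For those two provider blocks to work, the eks module has to expose the connection details as outputs. A minimal sketch of what they need to contain, with the output names matching the references above:

output "host" {
  value = aws_eks_cluster.this.endpoint
}

output "cluster_ca_certificate" {
  # The providers expect the decoded PEM, not the raw base64 string.
  value = base64decode(aws_eks_cluster.this.certificate_authority[0].data)
}

output "token" {
  value     = data.aws_eks_cluster_auth.this.token
  sensitive = true
}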

module "cluster_istio" {
  source = "./module/eks_istio"
  providers = {
    kubernetes = kubernetes.eks-cluster
    helm       = helm.eks-cluster
  }
  depends_on = [
    module.eks_cluster
  ]
}

Create the VPC -> create the cluster -> connect to Kubernetes -> deploy the Helm releases

 

This ordering matters a lot, and it's exactly why a single deploy-and-test cycle took over 30 minutes...

IaC is great in every other way, but there's no real way to debug it, which is incredibly frustrating for someone coming from application development...
