@NassK
Last active October 11, 2020 22:37

Revisions

  1. NassK revised this gist Oct 11, 2020. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion variables.tf
    @@ -1,6 +1,6 @@
    variable "region" {
    description = "The AWS region"
    default "eu-west-1"
    default = "eu-west-1"
    }

    variable "cluster_name" {
  2. NassK revised this gist Oct 11, 2020. 1 changed file with 5 additions and 1 deletion.
    6 changes: 5 additions & 1 deletion variables.tf
    @@ -1,5 +1,9 @@
    variable "region" {
    description = "The AWS region"
    default "eu-west-1"
    }

    variable "cluster_name" {
    description = "The name of the Amazon EKS cluster."
    default = "my-eks-cluster"
    }

  3. NassK revised this gist Oct 11, 2020. 1 changed file with 0 additions and 101 deletions.
    101 changes: 0 additions & 101 deletions step_5.tf
    @@ -1,104 +1,3 @@
    provider "aws" {
    version = "~> 2.0"
    region = "eu-west-1"
    }

    ## Step 1: Configuring the VPC
    module "vpc" {
    source = "terraform-aws-modules/vpc/aws"

    name = "my-eks-vpc"
    cidr = "10.0.0.0/16"

    azs = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
    private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
    public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

    enable_nat_gateway = true
    enable_vpn_gateway = false

    vpc_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    private_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    public_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    enable_dns_hostnames = true # Needed for worker nodes in private subnets
    enable_dns_support = true

    /**
    * Needed for workers node in public subnets.
    * map_public_ip_on_launch = true
    */
    }

    ## Step 2: Configuring the EKS cluster
    resource "aws_eks_cluster" "cluster" { # Here we create the EKS cluster itself.
    name = var.cluster_name
    role_arn = aws_iam_role.eks_cluster.arn # The cluster needs an IAM role to gain some permission over your AWS account

    vpc_config {
    subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets) # We pass all 6 subnets (public and private ones). Retrieved from the AWS module before.
    endpoint_public_access = true # The cluster will have a public endpoint. We will be able to call it from the public internet.
    endpoint_private_access = true # STEP 3: The cluster will have a private endpoint too. Worker nodes will be able to call the control plane without leaving the VPC.
    }

    enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] # We enable control plane components logging against Amazon Cloudwatch log group.

    # Ensure that IAM Role permissions are handled before the EKS Cluster.
    depends_on = [
    aws_iam_role_policy_attachment.policy-AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.policy-AmazonEKSVPCResourceController,
    aws_cloudwatch_log_group.eks_cluster_control_plane_components
    ]
    }

    resource "aws_iam_role" "eks_cluster" {
    name = "${var.cluster_name}_role"

    assume_role_policy = jsonencode({
    Statement = [{
    Action = "sts:AssumeRole"
    Effect = "Allow"
    Principal = {
    Service = "eks.amazonaws.com"
    }
    }]
    Version = "2012-10-17"
    })
    }

    resource "aws_iam_role_policy_attachment" "policy-AmazonEKSClusterPolicy" {
    policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
    role = aws_iam_role.eks_cluster.name
    }

    resource "aws_iam_role_policy_attachment" "policy-AmazonEKSVPCResourceController" {
    policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
    role = aws_iam_role.eks_cluster.name
    }

    resource "aws_cloudwatch_log_group" "eks_cluster_control_plane_components" { # To log control plane components
    name = "/aws/eks/${var.cluster_name}/cluster"
    retention_in_days = 7
    }

    # Step 4: Configuring the Kubectl CLI
    resource "null_resource" "generate_kubeconfig" { # Generate a kubeconfig (needs aws cli >=1.62 and kubectl)

    provisioner "local-exec" {
    command = "aws eks update-kubeconfig --name ${var.cluster_name} --region eu-west-1"
    }

    depends_on = [aws_eks_cluster.cluster]
    }

    # Step 5: Integrating Service Accounts with IAM role
    data "tls_certificate" "cluster" {
    url = aws_eks_cluster.cluster.identity.0.oidc.0.issuer
  4. NassK created this gist Oct 11, 2020.
    111 changes: 111 additions & 0 deletions step_5.tf
    @@ -0,0 +1,111 @@
    provider "aws" {
    version = "~> 2.0"
    region = "eu-west-1"
    }

    ## Step 1: Configuring the VPC
    module "vpc" {
    source = "terraform-aws-modules/vpc/aws"

    name = "my-eks-vpc"
    cidr = "10.0.0.0/16"

    azs = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
    private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
    public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

    enable_nat_gateway = true
    enable_vpn_gateway = false

    vpc_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    private_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    public_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    }

    enable_dns_hostnames = true # Needed for worker nodes in private subnets
    enable_dns_support = true

    /**
    * Needed for workers node in public subnets.
    * map_public_ip_on_launch = true
    */
    }

    ## Step 2: Configuring the EKS cluster
    resource "aws_eks_cluster" "cluster" { # Here we create the EKS cluster itself.
    name = var.cluster_name
    role_arn = aws_iam_role.eks_cluster.arn # The cluster needs an IAM role to gain some permission over your AWS account

    vpc_config {
    subnet_ids = concat(module.vpc.public_subnets, module.vpc.private_subnets) # We pass all 6 subnets (public and private ones). Retrieved from the AWS module before.
    endpoint_public_access = true # The cluster will have a public endpoint. We will be able to call it from the public internet.
    endpoint_private_access = true # STEP 3: The cluster will have a private endpoint too. Worker nodes will be able to call the control plane without leaving the VPC.
    }

    enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] # We enable control plane components logging against Amazon Cloudwatch log group.

    # Ensure that IAM Role permissions are handled before the EKS Cluster.
    depends_on = [
    aws_iam_role_policy_attachment.policy-AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.policy-AmazonEKSVPCResourceController,
    aws_cloudwatch_log_group.eks_cluster_control_plane_components
    ]
    }

    resource "aws_iam_role" "eks_cluster" {
    name = "${var.cluster_name}_role"

    assume_role_policy = jsonencode({
    Statement = [{
    Action = "sts:AssumeRole"
    Effect = "Allow"
    Principal = {
    Service = "eks.amazonaws.com"
    }
    }]
    Version = "2012-10-17"
    })
    }

    resource "aws_iam_role_policy_attachment" "policy-AmazonEKSClusterPolicy" {
    policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
    role = aws_iam_role.eks_cluster.name
    }

    resource "aws_iam_role_policy_attachment" "policy-AmazonEKSVPCResourceController" {
    policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
    role = aws_iam_role.eks_cluster.name
    }

    resource "aws_cloudwatch_log_group" "eks_cluster_control_plane_components" { # To log control plane components
    name = "/aws/eks/${var.cluster_name}/cluster"
    retention_in_days = 7
    }

    # Step 4: Configuring the Kubectl CLI
    resource "null_resource" "generate_kubeconfig" { # Generate a kubeconfig (needs aws cli >=1.62 and kubectl)

    provisioner "local-exec" {
    command = "aws eks update-kubeconfig --name ${var.cluster_name} --region eu-west-1"
    }

    depends_on = [aws_eks_cluster.cluster]
    }

    # Step 5: Integrating Service Accounts with IAM role
    data "tls_certificate" "cluster" {
    url = aws_eks_cluster.cluster.identity.0.oidc.0.issuer
    }

    resource "aws_iam_openid_connect_provider" "cluster" { # We need an open id connector to allow our service account to assume an IAM role
    client_id_list = ["sts.amazonaws.com"]
    thumbprint_list = concat([data.tls_certificate.cluster.certificates.0.sha1_fingerprint], [])
    url = aws_eks_cluster.cluster.identity.0.oidc.0.issuer
    }
    5 changes: 5 additions & 0 deletions variables.tf
    @@ -0,0 +1,5 @@
    variable "cluster_name" {
    description = "The name of the Amazon EKS cluster."
    default = "my-eks-cluster"
    }
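
The later revisions introduce a region variable in variables.tf, while step_5.tf still hard-codes "eu-west-1" in the provider block and in the update-kubeconfig command. Below is a minimal sketch of how that variable could be wired in; it is not part of the gist and assumes the rest of step_5.tf stays unchanged:

    provider "aws" {
      version = "~> 2.0"
      region  = var.region # Replaces the hard-coded "eu-west-1"
    }

    resource "null_resource" "generate_kubeconfig" { # Generate a kubeconfig (needs aws cli and kubectl)
      provisioner "local-exec" {
        # Reuse the same variable so the kubeconfig targets the same region as the provider
        command = "aws eks update-kubeconfig --name ${var.cluster_name} --region ${var.region}"
      }

      depends_on = [aws_eks_cluster.cluster]
    }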
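
step_5.tf stops after creating the OIDC provider. To finish the "Service Accounts with IAM role" integration, an IAM role whose trust policy is scoped to a specific Kubernetes service account is still needed. A minimal sketch of that pattern follows; the namespace default, the service account name my-app, and the role names are hypothetical placeholders, not part of the gist:

    # Hypothetical example: a role that the service account "my-app" in the "default"
    # namespace could assume through the OIDC provider created in step_5.tf.
    data "aws_iam_policy_document" "my_app_assume_role" {
      statement {
        actions = ["sts:AssumeRoleWithWebIdentity"]
        effect  = "Allow"

        principals {
          type        = "Federated"
          identifiers = [aws_iam_openid_connect_provider.cluster.arn]
        }

        condition {
          test     = "StringEquals"
          # The issuer URL without its "https://" prefix, suffixed with ":sub"
          variable = "${replace(aws_eks_cluster.cluster.identity.0.oidc.0.issuer, "https://", "")}:sub"
          values   = ["system:serviceaccount:default:my-app"]
        }
      }
    }

    resource "aws_iam_role" "my_app" {
      name               = "${var.cluster_name}_my_app_role"
      assume_role_policy = data.aws_iam_policy_document.my_app_assume_role.json
    }

The Kubernetes service account would then reference this role through its eks.amazonaws.com/role-arn annotation.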