Skip to content

Instantly share code, notes, and snippets.

@codyborders
Forked from jamesmishra/README.md
Created February 12, 2022 15:02
Show Gist options
  • Save codyborders/c1e958e4c99659c4738d783f49322533 to your computer and use it in GitHub Desktop.
Save codyborders/c1e958e4c99659c4738d783f49322533 to your computer and use it in GitHub Desktop.

Revisions

  1. @jamesmishra jamesmishra revised this gist Feb 3, 2020. 3 changed files with 38 additions and 29 deletions.
    49 changes: 28 additions & 21 deletions README.md
    Original file line number Diff line number Diff line change
    @@ -4,6 +4,22 @@ for the purpose of running a given `docker-compose.yml` file.

    # Usage
    ```terraform
    # ===== OUR MAGIC DOCKER-COMPOSE.YML FILE HERE =====
    # It is also possible to get Terraform to read an external `docker-compose.yml`
    # file and load it into this variable.
    # We'll be showing off a demo nginx page.
    variable "example_docker_compose" {
    type = string
    default = <<EOF
    version: "3.1"
    services:
    hello:
    image: nginxdemos/hello
    restart: always
    ports:
    - 80:80
    EOF
    }
    # Configure the VPC that we will use.
    resource "aws_vpc" "prod" {
    @@ -15,30 +31,20 @@ resource "aws_internet_gateway" "prod" {
    vpc_id = aws_vpc.prod.id
    }
    resource "aws_route" "prod__to_internet" {
    route_table_id = aws_vpc.prod.main_route_table_id
    destination_cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.prod.id
    }
    resource "aws_subnet" "prod" {
    vpc_id = aws_vpc.prod.id
    availability_zone = "us-east-1a"
    cidr_block = "10.0.0.0/18"
    map_public_ip_on_launch = true
    depends_on = [aws_internet_gateway.prod.id]
    depends_on = [aws_internet_gateway.prod]
    }
    # ===== OUR MAGIC DOCKER-COMPOSE.YML FILE HERE =====
    # It is also possible to get Terraform to read an external `docker-compose.yml`
    # file and load it into this variable.
    # We'll be showing off a demo nginx page.
    variable "example_docker_compose" {
    type = string
    default = <<EOF
    version: "3.1"
    services:
    hello:
    image: nginxdemos/hello
    restart: always
    ports:
    - 80:80
    EOF
    }
    # Allow port 80 so we can connect to the container.
    resource "aws_security_group" "allow_http" {
    name = "allow_http"
    @@ -53,12 +59,12 @@ resource "aws_security_group" "allow_http" {
    egress {
    from_port = 0
    to_port = 0
    protocol = "tcp"
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]
    }
    }
    # Make sure to download the other files into the `modules/one_dokcer_instance_on_ec2`
    # Make sure to download the other files into the `modules/one_docker_instance_on_ec2`
    # directory
    module "run_docker_example" {
    source = "./modules/one_docker_instance_on_ec2"
    @@ -67,10 +73,11 @@ module "run_docker_example" {
    instance_type = "t3.nano"
    docker_compose_str = var.example_docker_compose
    subnet_id = aws_subnet.prod.id
    availability_zone = aws_subnet.prod.availability_zone
    vpc_security_group_ids = [aws_security_group.allow_http.id]
    associate_public_ip_address = true
    persistent_volume_size_gb = 1
    }
    ```

    # TODO
    11 changes: 4 additions & 7 deletions main.tf
    Original file line number Diff line number Diff line change
    @@ -1,8 +1,8 @@

    locals {
    block_device_path = "/dev/sdh"
    user_data = <<EOF
    #!/bin/bash
    set -Eeuxo pipefail
    # Filesystem code is adapted from:
    # https://github.com/GSA/devsecops-example/blob/03067f68ee2765f8477ae84235f7faa1d2f2cb70/terraform/files/attach-data-volume.sh
    @@ -29,11 +29,12 @@ chmod 0755 $DEST
    # Now we install docker and docker-compose.
    # Adapted from:
    # https://gist.github.com/npearce/6f3c7826c7499587f00957fee62f8ee9
    yum update -y
    amazon-linux-extras install docker
    systemctl start docker.service
    usermod -a -G docker ec2-user
    chkconfig docker on
    yum install python3-pip
    yum install -y python3-pip
    python3 -m pip install docker-compose
    # Put the docker-compose.yml file at the root of our persistent volume
    @@ -61,8 +62,6 @@ systemctl start my_custom_service
    EOF
    }

    # This should pull the latest Amazon Linux 2, which comes with docker
    # but not docker-compose...
    data "aws_ami" "latest_amazon_linux" {
    most_recent = true
    owners = ["amazon"]
    @@ -72,9 +71,7 @@ data "aws_ami" "latest_amazon_linux" {
    }
    }


    resource "aws_ebs_volume" "persistent" {
    # TODO: Terraform tries to delete the volume when the AZ of the AWS instance changes.
    availability_zone = aws_instance.this.availability_zone
    size = var.persistent_volume_size_gb
    }
    @@ -83,11 +80,11 @@ resource "aws_volume_attachment" "persistent" {
    device_name = local.block_device_path
    volume_id = aws_ebs_volume.persistent.id
    instance_id = aws_instance.this.id
    skip_destroy = true
    }

    resource "aws_instance" "this" {
    ami = data.aws_ami.latest_amazon_linux.id
    availability_zone = var.availability_zone
    instance_type = var.instance_type
    key_name = var.key_name
    associate_public_ip_address = var.associate_public_ip_address
    7 changes: 6 additions & 1 deletion variables.tf
    Original file line number Diff line number Diff line change
    @@ -9,6 +9,11 @@ variable "description" {
    default = ""
    }

    variable "availability_zone" {
    description = "The availability zone for both the AWS instance and the EBS volume."
    type = string
    }

    variable "systemd_after_stage" {
    description = "When to run our container. This usually does not need to change."
    type = string
    @@ -74,4 +79,4 @@ variable "persistent_volume_mount_path" {
    description = "Where on the filesystem to mount our persistent volume"
    type = string
    default = "/persistent"
    }
    }
  2. @jamesmishra jamesmishra revised this gist Feb 2, 2020. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion README.md
    Original file line number Diff line number Diff line change
    @@ -62,7 +62,7 @@ resource "aws_security_group" "allow_http" {
    # directory
    module "run_docker_example" {
    source = "./modules/one_docker_instance_on_ec2"
    name = "neocrym_demo"
    name = "one_docker_instance_on_ec2_demo"
    key_name = "name_of_your_ssh_key_here"
    instance_type = "t3.nano"
    docker_compose_str = var.example_docker_compose
  3. @jamesmishra jamesmishra revised this gist Feb 2, 2020. 1 changed file with 68 additions and 2 deletions.
    70 changes: 68 additions & 2 deletions README.md
    Original file line number Diff line number Diff line change
    @@ -4,14 +4,80 @@ for the purpose of running a given `docker-compose.yml` file.

    # Usage
    ```terraform
    resource "aws_vpc" prod {
    # Configure the VPC that we will use.
    resource "aws_vpc" "prod" {
    cidr_block = "10.0.0.0/16"
    enable_dns_hostnames = true
    }
    resource "aws_internet_gateway" "prod" {
    vpc_id = aws_vpc.prod.id
    }
    resource "aws_subnet" "prod" {
    vpc_id = aws_vpc.prod.id
    cidr_block = "10.0.0.0/18"
    map_public_ip_on_launch = true
    depends_on = [aws_internet_gateway.prod.id]
    }
    # ===== OUR MAGIC DOCKER-COMPOSE.YML FILE HERE =====
    # It is also possible to get Terraform to read an external `docker-compose.yml`
    # file and load it into this variable.
    # We'll be showing off a demo nginx page.
    variable "example_docker_compose" {
    type = string
    default = <<EOF
    version: "3.1"
    services:
    hello:
    image: nginxdemos/hello
    restart: always
    ports:
    - 80:80
    EOF
    }
    # Allow port 80 so we can connect to the container.
    resource "aws_security_group" "allow_http" {
    name = "allow_http"
    description = "Show off how we run a docker-compose file."
    ingress {
    from_port = 80
    to_port = 80
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    }
    egress {
    from_port = 0
    to_port = 0
    protocol = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    }
    }
    # Make sure to download the other files into the `modules/one_dokcer_instance_on_ec2`
    # directory
    module "run_docker_example" {
    source = "./modules/one_docker_instance_on_ec2"
    name = "neocrym_demo"
    key_name = "name_of_your_ssh_key_here"
    instance_type = "t3.nano"
    docker_compose_str = var.example_docker_compose
    subnet_id = aws_subnet.prod.id
    vpc_security_group_ids = [aws_security_group.allow_http.id]
    persistent_volume_size_gb = 1
    }
    ```

    # TODO
    I want to configure the AWS instance's permissions to allow pulling
    containers from private AWS Elastic Container Registry repositories.
    It should just involve adding the relevant permissions to the EC2 IAM
    instance profile, and then making the corresponding change with the registry.
    instance profile, and then making the corresponding change with the registry.

    I would also like to set up the AWS EC2 Client VPN so I can access the
    containers over the Internet without having to expose them to the Internet.
  4. @jamesmishra jamesmishra revised this gist Feb 2, 2020. 1 changed file with 14 additions and 14 deletions.
    28 changes: 14 additions & 14 deletions README.md
    Original file line number Diff line number Diff line change
    @@ -1,17 +1,17 @@
    There are a lot of ways to run Docker containers on Amazon Web Services.
    You can run your containers directly on EC2 with Docker Swarm or Kubernetes.
    Or, for a simpler alternative, you could use Dokku. Or you could use
    one of the AWS managed services like Elastic Beanstalk,
    Elastic Container Service (EC2 type), Elastic Container Service (Fargate type),
    Elastic Kubernetes Service, and maybe some other ways I'm forgetting.
    # Introduction
    This is a Hashicorp Terraform module that provisions an AWS EC2 instance
    for the purpose of running a given `docker-compose.yml` file.

    I use docker-compose to manage my development environment on my laptop, and
    I wrote this Terraform module as a way to automatically spin up EC2 instances
    that run any docker-compose.yml file I give it.
    # Usage
    ```terraform
    resource "aws_vpc" prod {
    cidr_block = "10.0.0.0/16"
    enable_dns_hostnames = true
    }
    ```

    # TODO

    I want to configure the AWS instance to pull container images from private
    AWS Elastic Container Registry repositories. It should just involve
    adding the relevant permissions to the EC2 IAM instance profile, and then
    making the corresponding change with the registry.
    I want to configure the AWS instance's permissions to allow pulling
    containers from private AWS Elastic Container Registry repositories.
    It should just involve adding the relevant permissions to the EC2 IAM
    instance profile, and then making the corresponding change with the registry.
  5. @jamesmishra jamesmishra created this gist Feb 2, 2020.
    17 changes: 17 additions & 0 deletions README.md
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,17 @@
    There are a lot of ways to run Docker containers on Amazon Web Services.
    You can run your containers directly on EC2 with Docker Swarm or Kubernetes.
    Or, for a simpler alternative, you could use Dokku. Or you could use
    one of the AWS managed services like Elastic Beanstalk,
    Elastic Container Service (EC2 type), Elastic Container Service (Fargate type),
    Elastic Kubernetes Service, and maybe some other ways I'm forgetting.

    I use docker-compose to manage my development environment on my laptop, and
    I wrote this Terraform module as a way to automatically spin up EC2 instances
    that run any docker-compose.yml file I give it.

    # TODO

    I want to configure the AWS instance to pull container images from private
    AWS Elastic Container Registry repositories. It should just involve
    adding the relevant permissions to the EC2 IAM instance profile, and then
    making the corresponding change with the registry.
    104 changes: 104 additions & 0 deletions main.tf
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,104 @@

    locals {
    block_device_path = "/dev/sdh"
    user_data = <<EOF
    #!/bin/bash
    # Filesystem code is adapted from:
    # https://github.com/GSA/devsecops-example/blob/03067f68ee2765f8477ae84235f7faa1d2f2cb70/terraform/files/attach-data-volume.sh
    DEVICE=${local.block_device_path}
    DEST=${var.persistent_volume_mount_path}
    devpath=$(readlink -f $DEVICE)
    if [[ $(file -s $devpath) != *ext4* && -b $devpath ]]; then
    # Filesystem has not been created. Create it!
    mkfs -t ext4 $devpath
    fi
    # add to fstab if not present
    if ! egrep "^$devpath" /etc/fstab; then
    echo "$devpath $DEST ext4 defaults,nofail,noatime,nodiratime,barrier=0,data=writeback 0 2" | tee -a /etc/fstab > /dev/null
    fi
    mkdir -p $DEST
    mount $DEST
    chown ec2-user:ec2-user $DEST
    chmod 0755 $DEST
    # Filesystem code is over
    # Now we install docker and docker-compose.
    # Adapted from:
    # https://gist.github.com/npearce/6f3c7826c7499587f00957fee62f8ee9
    amazon-linux-extras install docker
    systemctl start docker.service
    usermod -a -G docker ec2-user
    chkconfig docker on
    yum install python3-pip
    python3 -m pip install docker-compose
    # Put the docker-compose.yml file at the root of our persistent volume
    cat > $DEST/docker-compose.yml <<-TEMPLATE
    ${var.docker_compose_str}
    TEMPLATE
    # Write the systemd service that manages us bringing up the service
    cat > /etc/systemd/system/my_custom_service.service <<-TEMPLATE
    [Unit]
    Description=${var.description}
    After=${var.systemd_after_stage}
    [Service]
    Type=simple
    User=${var.user}
    ExecStart=/usr/local/bin/docker-compose -f $DEST/docker-compose.yml up
    Restart=on-failure
    [Install]
    WantedBy=multi-user.target
    TEMPLATE
    # Start the service.
    systemctl start my_custom_service
    EOF
    }

    # This should pull the latest Amazon Linux 2, which comes with docker
    # but not docker-compose...
    data "aws_ami" "latest_amazon_linux" {
    most_recent = true
    owners = ["amazon"]
    filter {
    name = "name"
    values = ["amzn*"]
    }
    }


    resource "aws_ebs_volume" "persistent" {
    # TODO: Terraform tries to delete the volume when the AZ of the AWS instance changes.
    availability_zone = aws_instance.this.availability_zone
    size = var.persistent_volume_size_gb
    }

    resource "aws_volume_attachment" "persistent" {
    device_name = local.block_device_path
    volume_id = aws_ebs_volume.persistent.id
    instance_id = aws_instance.this.id
    skip_destroy = true
    }

    resource "aws_instance" "this" {
    ami = data.aws_ami.latest_amazon_linux.id
    instance_type = var.instance_type
    key_name = var.key_name
    associate_public_ip_address = var.associate_public_ip_address
    vpc_security_group_ids = var.vpc_security_group_ids
    subnet_id = var.subnet_id
    iam_instance_profile = var.iam_instance_profile
    user_data = local.user_data
    tags = merge (
    {
    Name = var.name
    },
    var.tags
    )
    }
    84 changes: 84 additions & 0 deletions outputs.tf
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,84 @@
    # We try to match the API contract that `aws_instance` has.
    # Descriptions for these outputs are copied from:
    # https://www.terraform.io/docs/providers/aws/r/instance.html
    output "id" {
    description = "The instance ID"
    value = aws_instance.this.id
    }

    output "arn" {
    description = "The ARN of the instance"
    value = aws_instance.this.arn
    }

    output "availability_zone" {
    description = "The availability zone of the instance"
    value = aws_instance.this.availability_zone
    }

    output "placement_group" {
    description = "The placement group of the instance"
    value = aws_instance.this.placement_group
    }

    output "public_dns" {
    description = "The public DNS name assigned to the instance. For EC2-VPC, this is only available if you've enabled DNS hostnames for your VPC"
    value = aws_instance.this.public_dns
    }

    output "public_ip" {
    description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use public_ip, as this field will change after the EIP is attached."
    value = aws_instance.this.public_ip
    }

    output "ipv6_addresses" {
    description = "A list of assigned IPv6 addresses, if any"
    value = aws_instance.this.ipv6_addresses
    }

    output "primary_network_interface_id" {
    description = "The ID of the instance's primary network interface"
    value = aws_instance.this.primary_network_interface_id
    }

    output "private_dns" {
    description = " The private DNS name assigned to the instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC"
    value = aws_instance.this.private_dns
    }

    output "private_ip" {
    description = "The private IP address assigned to the instance"
    value = aws_instance.this.private_ip
    }

    output "security_groups" {
    description = " The associated security groups."
    value = aws_instance.this.security_groups
    }

    output "vpc_security_group_ids" {
    description = "The associated security groups in non-default VPC."
    value = aws_instance.this.vpc_security_group_ids
    }

    output "subnet_id" {
    description = "The VPC subnet ID."
    value = aws_instance.this.subnet_id
    }

    output "credit_specification" {
    description = " Credit specification of instance."
    value = aws_instance.this.credit_specification
    }

    output "instance_state" {
    description = "The state of the instance. One of: pending, running, shutting-down, terminated, stopping, stopped. See Instance Lifecycle for more information."
    value = aws_instance.this.instance_state
    }

    # TODO: This is a list with the `aws_instance` resource and we are just
    # returning a string. I know there is an obvious solution for this...
    output "ebs_block_device_id" {
    description = "The persistent block device that we are storing information on."
    value = aws_ebs_volume.persistent.id
    }
    77 changes: 77 additions & 0 deletions variables.tf
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,77 @@
    variable "name" {
    description = "Name to be used on all resources"
    type = string
    }

    variable "description" {
    description = "Description of the service for systemd"
    type = string
    default = ""
    }

    variable "systemd_after_stage" {
    description = "When to run our container. This usually does not need to change."
    type = string
    default = "network.target"
    }

    variable "user" {
    description = "What user to run as. You will need to run as root to use one of the lower ports."
    type = string
    default = "root"
    }

    variable "tags" {
    description = "Tags to put on the EC2 instance"
    type = map(string)
    default = {}
    }

    variable "key_name" {
    description = "Name of the SSH key to log in with"
    type = string
    }

    variable "instance_type" {
    description = "The default AWS instance size to run these containers on"
    type = string
    }

    variable "docker_compose_str" {
    description = "The entire docker compose file to write."
    type = string
    }

    variable "subnet_id" {
    description = "The VPC subnet to launch the instance in"
    type = string
    }

    variable "vpc_security_group_ids" {
    description = "The security groups that the instance should have"
    type = list(string)
    default = []
    }

    variable "iam_instance_profile" {
    description = "The name of the IAM instance profile to give to the EC2 instance"
    type = string
    default = ""
    }

    variable "associate_public_ip_address" {
    description = "Whether to associate a public IP address in the VPC"
    type = bool
    default = false
    }

    variable "persistent_volume_size_gb" {
    description = "The size of the volume mounted"
    type = number
    }

    variable "persistent_volume_mount_path" {
    description = "Where on the filesystem to mount our persistent volume"
    type = string
    default = "/persistent"
    }