Initial port from terraform-aws-sombra
dmattia committed Mar 23, 2020
1 parent 2c0fae2 commit 143b594
Showing 3 changed files with 209 additions and 0 deletions.
39 changes: 39 additions & 0 deletions execution_role.tf
@@ -0,0 +1,39 @@
data "aws_iam_policy_document" "ecs_cloudwatch_doc" {
statement {
actions = ["sts:AssumeRole"]
effect = "Allow"
principals {
type = "Service"
identifiers = ["ecs-tasks.amazonaws.com"]
}
}
}

resource "aws_iam_role" "execution_role" {
name_prefix = "${var.deploy_env}-ecs-execution-role"
assume_role_policy = data.aws_iam_policy_document.ecs_cloudwatch_doc.json
}

/**
* This resource code seems pretty gross, but 'tis the way it has to be.
*
* If you were to use a for_each loop, the code would work cleanly like:
* for_each = setunion(var.additional_task_policy_arns, ["arn:..."])
*
* The issue with that is that terraform doesn't then know how many resources
* to create until the variable additional_task_policy_arns is fully realized,
* which may be a computed value from the calling module. In that case, you
* would have to use `-target other_targets.that.determine.the.var` before
* you could create this resource, which is buggy and messes up CI.
*
* By forcing the calling module to pass an explicit count, we can always know how
* many resources will be created at plan time.
*/
resource "aws_iam_role_policy_attachment" "ecs_role_policy" {
count = var.additional_task_policy_arns_count + 1
role = aws_iam_role.execution_role.name
policy_arn = concat(
var.additional_task_policy_arns,
["arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"]
)[count.index]
}
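
To illustrate the explicit-count workaround described in the comment above, here is a minimal caller-side sketch. The module source path and the aws_iam_policy.read_secrets resource are assumptions for illustration only; the point is that the count is a plain literal, so Terraform can size aws_iam_role_policy_attachment.ecs_role_policy at plan time even when the ARN itself is computed.

# Hypothetical caller (illustrative only, not part of this commit)
module "service" {
  source = "./modules/fargate-service" # illustrative path

  # This ARN may be unknown until apply...
  additional_task_policy_arns = [aws_iam_policy.read_secrets.arn]
  # ...but the count is a literal, so the plan always knows how many
  # aws_iam_role_policy_attachment resources will be created.
  additional_task_policy_arns_count = 1

  # (remaining module variables omitted for brevity)
}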
80 changes: 80 additions & 0 deletions main.tf
@@ -0,0 +1,80 @@
resource "aws_ecs_service" "service" {
name = var.name
cluster = var.cluster_id
desired_count = var.desired_count

task_definition = aws_ecs_task_definition.task.arn
launch_type = "FARGATE"

network_configuration {
security_groups = [aws_security_group.service_security_group.id]
subnets = var.subnet_ids
assign_public_ip = false
}

deployment_controller {
type = "ECS"
}

health_check_grace_period_seconds = length(var.load_balancers) > 0 ? 60 : 0

dynamic "load_balancer" {
for_each = var.load_balancers
content {
target_group_arn = load_balancer.value.target_group_arn
container_name = load_balancer.value.container_name
container_port = load_balancer.value.container_port
}
}

# Allow external changes to service count without Terraform plan difference
lifecycle {
ignore_changes = [desired_count]
}

propagate_tags = "SERVICE"
tags = var.tags
}

resource "aws_ecs_task_definition" "task" {
family = "${var.name}-task"
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = var.cpu
memory = var.memory
execution_role_arn = aws_iam_role.execution_role.arn
task_role_arn = aws_iam_role.execution_role.arn
container_definitions = var.container_definitions
tags = var.tags
}

resource "aws_security_group" "service_security_group" {
name = "${var.name}-ecs-security-group"
description = "Allows inbound access to an ECS service only through its alb"
vpc_id = var.vpc_id

dynamic "ingress" {
for_each = var.load_balancers
content {
from_port = ingress.value.container_port
to_port = ingress.value.container_port
security_groups = var.alb_security_group_ids
protocol = "tcp"
}
}

# Allow all outgoing access
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

timeouts {
create = "45m"
delete = "45m"
}

tags = var.tags
}
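
For reference, a hedged sketch of the kind of value a caller might build for var.container_definitions, which is passed straight through to aws_ecs_task_definition.task above. The container name, image, port, and log group below are illustrative assumptions, not anything defined in this commit; the name and port just need to match what the caller passes in load_balancers.

# Hypothetical caller-side value for var.container_definitions (illustrative only)
locals {
  container_definitions = jsonencode([
    {
      name      = "app"        # must match load_balancers[*].container_name
      image     = "nginx:1.17" # example image
      essential = true
      portMappings = [
        {
          containerPort = 8080 # must match load_balancers[*].container_port
          protocol      = "tcp"
        }
      ]
      logConfiguration = {
        logDriver = "awslogs"
        options = {
          "awslogs-group"         = "/ecs/example" # example log group
          "awslogs-region"        = "eu-west-1"
          "awslogs-stream-prefix" = "app"
        }
      }
    }
  ])
}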
90 changes: 90 additions & 0 deletions variables.tf
@@ -0,0 +1,90 @@
variable "name" {
  description = "The name of the service. Used as a prefix for other resource names"
}

variable "cluster_id" {
  description = <<EOF
The id of the ECS cluster this service belongs to.
Having multiple related services in one cluster can decrease cost
by more efficiently using a shared pool of resources.
EOF
}

variable "desired_count" {
  type        = number
  description = "The number of tasks to keep alive at all times"
}

variable "vpc_id" {
  description = "ID of the VPC the ALB is in"
}

variable "subnet_ids" {
  type        = list(string)
  description = "List of subnets tasks can be run in."
}

variable "load_balancers" {
  type = list(object({
    target_group_arn = string
    container_name   = string
    container_port   = string
  }))

  description = <<EOF
When using ECS services, the service will ensure that at least
{@variable desired_count} tasks are running at all times. Because
there can be multiple tasks running at once, we set up a load
balancer to distribute traffic.
`target_group_arn` is the arn of the target group on that ALB that will
be set to watch over the tasks managed by this service.
EOF
}

variable "tags" {
  type        = map(string)
  description = "Tags to set on all resources that support them"
}

variable "cpu" {
  default     = 512
  description = "How much CPU should be allocated to each app instance?"
}

variable "memory" {
  default     = 1024
  description = "How much memory should be allocated to each app instance?"
}

variable "container_definitions" {
  type        = string
  description = "JSON encoded list of container definitions"
}

variable "additional_task_policy_arns" {
  type        = list(string)
  description = "IAM policy ARNs to attach to the tasks' role"
}

variable "additional_task_policy_arns_count" {
  type        = number
  description = "The number of items in var.additional_task_policy_arns. Terraform is not quite smart enough to figure this out on its own."
}

variable "alb_security_group_ids" {
  type        = list(string)
  description = "The ids of all security groups set on the ALB. Used so that tasks only accept inbound traffic from the ALB"
}

variable "deploy_env" {
  type        = string
  description = "The environment resources are to be created in. Usually dev, staging, or prod"
}

variable "aws_region" {
  type        = string
  description = "The AWS region to create resources in."
  default     = "eu-west-1"
}
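
Putting the variables together, a hedged sketch of how this module might be instantiated. The module source path and the cluster, VPC, target group, and ALB security group references are assumptions for illustration and do not appear in this commit.

# Hypothetical usage of this module (illustrative only)
module "api_service" {
  source = "./modules/fargate-service" # illustrative path

  name          = "api"
  deploy_env    = "staging"
  aws_region    = "eu-west-1"
  cluster_id    = aws_ecs_cluster.main.id # assumed cluster resource
  desired_count = 2

  vpc_id     = module.vpc.vpc_id # assumed VPC module
  subnet_ids = module.vpc.private_subnet_ids

  cpu    = 512
  memory = 1024

  container_definitions = local.container_definitions # e.g. the jsonencode sketch above

  load_balancers = [{
    target_group_arn = aws_lb_target_group.api.arn # assumed target group
    container_name   = "app"
    container_port   = "8080"
  }]
  alb_security_group_ids = [aws_security_group.alb.id] # assumed ALB security group

  additional_task_policy_arns       = []
  additional_task_policy_arns_count = 0

  tags = {
    environment = "staging"
  }
}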
