Skip to content

Large refactoring + Adapting to new concept #89

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 10 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions cicd.tpl.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,16 +19,16 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout
uses: actions/checkout@v2.3.3
uses: actions/checkout@v4
- name: AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: $${{ secrets.AWS_ECR_DEV_ACCESS_KEY }}
aws-secret-access-key: $${{ secrets.AWS_ECR_DEV_SECRET_KEY }}
aws-region: $${{ env.REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
uses: aws-actions/amazon-ecr-login@v2
- name: Build and push Docker image
env:
ECR_REGISTRY_DEV: $${{ steps.login-ecr.outputs.registry }}
Expand Down
2 changes: 1 addition & 1 deletion cluster.tf
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# ---------------------------------------------------
module "ecs_fargate" {
source = "terraform-aws-modules/ecs/aws"
version = "4.1.3"
version = "~> 5.0"
cluster_name = "${local.name_prefix}-${var.clp_zenv}"
tags = var.standard_tags

Expand Down
2 changes: 1 addition & 1 deletion config.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
terraform {
required_version = ">= 1.0"
required_version = "= 1.5.5"
required_providers {
github = {
source = "integrations/github"
Expand Down
113 changes: 61 additions & 52 deletions dbs.tf
Original file line number Diff line number Diff line change
@@ -1,57 +1,66 @@
locals {
database_default_properties = {
postgres = {
username = var.database_username != "" ? var.database_username : "postgres"
port = 5432
}
mysql = {
username = var.database_username != "" ? var.database_username : "root"
port = 3306
}
}
}
# locals {
# database_default_properties = {
# postgres = {
# username = var.database_username != "" ? var.database_username : "postgres"
# port = 5432
# }
# mysql = {
# username = var.database_username != "" ? var.database_username : "root"
# port = 3306
# }
# }
# }

module "database" {
source = "terraform-aws-modules/rds/aws"
version = "~> 5.0"
# module "database" {
# source = "terraform-aws-modules/rds/aws"
# version = "~> 5.0"

for_each = {
for datastore_name, datastore_config in var.datastores :
datastore_name => datastore_config if contains(["sql"], tostring(datastore_config["type"]))
}
# for_each = {
# for datastore_name, datastore_config in var.datastores :
# datastore_name => can(datastore_config["main"], "type") && contains(["dynamodb"], tostring(datastore_config["main"]["type"]))
# }

identifier = "${local.name_prefix}-${var.clp_zenv}-${each.value.engine}-${each.key}"
db_name = each.value.name
engine = each.value.engine
engine_version = each.value.version
instance_class = each.value.instance
allocated_storage = coalesce(try(each.value.database_allocated_storage, var.database_allocated_storage), var.database_allocated_storage)
max_allocated_storage = each.value.autoscaling == "enabled" ? try(each.value.database_max_allocated_storage, var.database_max_allocated_storage) : 0
storage_encrypted = true
username = coalesce(try(each.value.database_username, local.database_default_properties[each.value.engine].username), local.database_default_properties[each.value.engine].username)
password = random_password.database[each.key].result
port = coalesce(try(each.value.database_port, local.database_default_properties[each.value.engine].port), local.database_default_properties[each.value.engine].port)
create_db_option_group = false
create_db_parameter_group = false
create_db_subnet_group = true
subnet_ids = var.private_subnets
vpc_security_group_ids = var.security_groups
maintenance_window = "Mon:00:00-Mon:03:00"
backup_window = "04:00-06:00"
backup_retention_period = 0
tags = merge(try(each.value.tags, {}), var.standard_tags)
}
# identifier = "${local.name_prefix}-${var.clp_zenv}-${each.value.engine}-${each.key}"
# db_name = each.value.name
# engine = each.value.engine
# engine_version = each.value.version
# instance_class = each.value.instance
# allocated_storage = coalesce(try(each.value.database_allocated_storage, var.database_allocated_storage), var.database_allocated_storage)
# max_allocated_storage = each.value.autoscaling == "enabled" ? try(each.value.database_max_allocated_storage, var.database_max_allocated_storage) : 0
# storage_encrypted = true
# username = coalesce(try(each.value.database_username, local.database_default_properties[each.value.engine].username), local.database_default_properties[each.value.engine].username)
# password = random_password.database[each.key].result
# port = coalesce(try(each.value.database_port, local.database_default_properties[each.value.engine].port), local.database_default_properties[each.value.engine].port)
# create_db_option_group = false
# create_db_parameter_group = false
# create_db_subnet_group = true
# subnet_ids = var.private_subnets
# vpc_security_group_ids = var.security_groups
# maintenance_window = "Sun:00:00-Mon:03:00"
# backup_window = "04:00-06:00"
# backup_retention_period = 0
# tags = merge(try(each.value.tags, {}), var.standard_tags)
# }

resource "random_password" "database" {
for_each = { for datastore_name, datastore_config in var.datastores : datastore_name => datastore_config if datastore_config.type == "sql" }
length = 24
special = false
}

resource "aws_ssm_parameter" "database_connection_string" {
for_each = { for datastore_name, datastore_config in var.datastores : datastore_name => datastore_config if datastore_config.type == "sql" }
name = "/${local.name_prefix}/${var.clp_zenv}/${each.value.engine}_connection_string-${each.key}"
type = "SecureString"
value = "${each.value.engine}://${module.database[each.key].db_instance_username}:${random_password.database[each.key].result}@${module.database[each.key].db_instance_endpoint}/${module.database[each.key].db_instance_name}"
tags = merge(try(each.value.tags, {}), var.standard_tags)
}
# resource "random_password" "database" {
# for_each = {
# for datastore_name, datastore_config in var.datastores :
# datastore_name => can(datastore_config["main"], "type") && contains(["dynamodb"], tostring(datastore_config["main"]["type"]))
# }

# length = 24
# special = false
# }

# resource "aws_ssm_parameter" "database_connection_string" {
# for_each = {
# for datastore_name, datastore_config in var.datastores :
# datastore_name => can(datastore_config["main"], "type") && contains(["dynamodb"], tostring(datastore_config["main"]["type"]))
# }

# name = "/${local.name_prefix}/${var.clp_zenv}/${each.value.engine}_connection_string-${each.key}"
# type = "SecureString"
# value = "${each.value.engine}://${module.database[each.key].db_instance_username}:${random_password.database[each.key].result}@${module.database[each.key].db_instance_endpoint}/${module.database[each.key].db_instance_name}"
# tags = merge(try(each.value.tags, {}), var.standard_tags)
# }
48 changes: 0 additions & 48 deletions iam.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@ resource "aws_iam_role" "main" {
tags = var.standard_tags

managed_policy_arns = [
aws_iam_policy.sqs.arn,
aws_iam_policy.s3.arn,
aws_iam_policy.ecs.arn,
aws_iam_policy.rds.arn,
aws_iam_policy.pricing.arn,
Expand All @@ -30,52 +28,6 @@ resource "aws_iam_role" "main" {
})
}

resource "aws_iam_policy" "sqs" {
name = "${local.name_prefix}-${var.clp_zenv}-sqs"

policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "stmt1617103351726",
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": [
"${aws_sqs_queue.main.arn}",
"${aws_sqs_queue.reversed.arn}"
]
}
]
}
POLICY
}

resource "aws_iam_policy" "s3" {
name = "${local.name_prefix}-${var.clp_zenv}-s3"

policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "stmt1617103351726",
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"${var.s3_tf_artefacts}",
"${var.s3_tf_artefacts}/*"
]
}
]
}
POLICY
}

resource "aws_iam_policy" "ecs" {
name = "${local.name_prefix}-${var.clp_zenv}-ecs"

Expand Down
23 changes: 7 additions & 16 deletions logdna.tf
Original file line number Diff line number Diff line change
Expand Up @@ -8,22 +8,13 @@ resource "logdna_view" "main" {
tags = ["${local.name_prefix}-${var.clp_zenv}"]
}

# resource "logdna_view" "errors" {
# levels = ["error"]
# name = "${var.clp_zenv}-${local.short_region_name} - errors"
# query = "-health"
# categories = [upper(var.clp_account)]
# tags = ["${local.name_prefix}-${var.clp_zenv}"]

# # slack_channel {
# # immediate = "true"
# # operator = "presence"
# # terminal = "false"
# # triggerinterval = "30"
# # triggerlimit = 1
# # url = var.logdna_slack_non_prod_alerts
# # }
# }
resource "logdna_view" "errors" {
levels = ["error"]
name = "${var.clp_zenv}-${local.short_region_name} - errors"
query = "-health"
categories = [upper(var.clp_account)]
tags = ["${local.name_prefix}-${var.clp_zenv}"]
}

# ---------------------------------------------------
# Mezmo (LogDNA) pushing logs from CloudWatch
Expand Down
12 changes: 2 additions & 10 deletions services.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,8 @@
locals {
added_env = [
{
name = "QUEUE_URL"
value = aws_sqs_queue.main.url
},
{
name = "QUEUE_URL_REVERSED"
value = aws_sqs_queue.reversed.url
},
{
name = "S3_TERRAFORM_ARTEFACTS"
value = var.s3_tf_artefacts
name = "PLACEHOLDER"
value = "PLACEHOLDER"
},
]
}
Expand Down
13 changes: 0 additions & 13 deletions sqs.tf

This file was deleted.

77 changes: 25 additions & 52 deletions variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ variable "ecr_account_id" {}
variable "public_subnets" {}
variable "private_subnets" {}
variable "security_groups" {}
variable "s3_tf_artefacts" {}

variable "ipwhitelist" {
type = list(string)
default = ["0.0.0.0/1", "128.0.0.0/1"]
Expand All @@ -25,24 +25,6 @@ variable "ipwhitelist" {
}
variable "mezmo_account_id" {}

# variable "datastores" {
# type = map(object({
# name = string
# type = string
# engine = string
# version = string
# class = string
# instance = string
# autoscaling = string
# database_allocated_storage = optional(number)
# database_max_allocated_storage = optional(number)
# database_username = optional(string)
# database_port = optional(number)
# tags = optional(map(string))
# }))
# default = {}
# }

variable "datastores" {
type = object(
{
Expand Down Expand Up @@ -165,46 +147,37 @@ variable "services" {
})
}))
default = {
frontend = {
public = true
name = "frontend"
cpu = 256
memory = 512
endpoint = ""
environment = []
deploy = {
gitrepo = "kuttleio/frontend"
dockerfilepath = "Dockerfile"
method = "from_branch"
branch = "master"
}
}
backend = {
public = true
name = "backend"
cpu = 256
memory = 512
endpoint = "backend"
api = {
public = true
name = "api"
endpoint = "api"
cpu = 256
memory = 512
environment = []
deploy = {
gitrepo = "kuttleio/backend"
gitrepo = "kuttleio/api"
dockerfilepath = "Dockerfile"
method = "from_branch"
branch = "master"
method = "from_branch"
branch = "master"
}
}
runner = {
public = false
name = "runner"
cpu = 256
memory = 512
endpoint = ""
environment = []
back = {
public = false
name = "back"
endpoint = "back"
cpu = 256
memory = 512
environment = [
{
name = "SERVICE_ENV_VAR"
value = "yeeee"
},
]
deploy = {
gitrepo = "kuttleio/runner"
gitrepo = "kuttleio/back"
dockerfilepath = "Dockerfile"
method = "from_branch"
branch = "master"
method = "from_branch"
branch = "master"
}
}
}
Expand Down
Loading