diff --git a/.github/workflows/pr-title.yaml b/.github/workflows/pr-title.yaml new file mode 100644 index 0000000..d105f80 --- /dev/null +++ b/.github/workflows/pr-title.yaml @@ -0,0 +1,25 @@ +name: lint-pr-title + +on: + pull_request: + types: [opened, edited, reopened, synchronize] + +jobs: + pr-lint: + runs-on: ubuntu-latest + steps: + - uses: morrisoncole/pr-lint-action@v1.7.1 + with: + # Note: if you have branch protection rules enabled, the `GITHUB_TOKEN` permissions + # won't cover dismissing reviews. Your options are to pass in a custom token + # (perhaps by creating some sort of 'service' user and creating a personal access + # token with the correct permissions) or to turn off `on-failed-regex-request-changes` + # and use action failure to prevent merges instead (with + # `on-failed-regex-fail-action: true`). See: + # https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token + # https://docs.github.com/en/rest/pulls/reviews#dismiss-a-review-for-a-pull-request + repo-token: "${{ secrets.GITHUB_TOKEN }}" + title-regex: '^(build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w ])+([\s\S]*)' + on-failed-regex-fail-action: true + on-failed-regex-create-review: false + on-failed-regex-request-changes: false diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml new file mode 100644 index 0000000..97f5447 --- /dev/null +++ b/.github/workflows/pre-commit.yaml @@ -0,0 +1,40 @@ +name: pre-commit +on: + push: + branches: + - main + pull_request: + branches: + - main +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: install pre-commit + run: pip install pre-commit + + - name: Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.5.7" + + - name: install hcledit + run: | + curl -sSLo ./hcledit.tar.gz 
https://github.com/minamijoyo/hcledit/releases/download/v0.2.10/hcledit_0.2.10_linux_amd64.tar.gz + tar -xzf hcledit.tar.gz + chmod +x hcledit + mv hcledit /usr/local/bin/hcledit + + - name: install tf-lint + run: curl -s https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash + + - name: install tf-docs + run: | + curl -sSLo ./terraform-docs.tar.gz https://terraform-docs.io/dl/v0.17.0/terraform-docs-v0.17.0-linux-amd64.tar.gz + tar -xzf terraform-docs.tar.gz + chmod +x terraform-docs + mv terraform-docs /usr/local/bin/terraform-docs + + - name: pre-commit + run: pre-commit run --all-files --show-diff-on-failure diff --git a/.github/workflows/releases.yaml b/.github/workflows/releases.yaml new file mode 100644 index 0000000..e0532b7 --- /dev/null +++ b/.github/workflows/releases.yaml @@ -0,0 +1,30 @@ +name: release-notes + +on: + push: + tags: + - v[0-9]+.[0-9]+.[0-9]+ +jobs: + deploy: + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + + - name: Update CHANGELOG + id: changelog + uses: requarks/changelog-action@v1 + with: + token: ${{ github.token }} + tag: ${{ github.ref_name }} + + - name: Create Release + uses: ncipollo/release-action@v1.14.0 + with: + allowUpdates: true + draft: false + makeLatest: true + name: ${{ github.ref_name }} + body: ${{ steps.changelog.outputs.changes }} + token: ${{ github.token }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..33bc250 --- /dev/null +++ b/.gitignore @@ -0,0 +1,41 @@ +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# password, private keys, and other secrets. These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. 
+*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc + +.idea/* + +# Devenv +.devenv* +devenv.local.nix +.direnv diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..a5f44e5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,38 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-merge-conflict + - id: end-of-file-fixer + - id: trailing-whitespace + - id: forbid-submodules +- repo: https://github.com/adrienverge/yamllint + rev: v1.35.1 + hooks: + - id: yamllint + args: [--strict, -c=.yamllint] +- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook + rev: v9.18.0 + hooks: + - id: commitlint + stages: [commit-msg] + additional_dependencies: ['@commitlint/config-conventional'] +- repo: https://github.com/antonbabenko/pre-commit-terraform + rev: v1.96.1 + hooks: + - id: terraform_wrapper_module_for_each + - id: terraform_tflint + args: + - '--args=--only=terraform_deprecated_interpolation' + - '--args=--only=terraform_deprecated_index' + - '--args=--only=terraform_unused_declarations' + - '--args=--only=terraform_comment_syntax' + - '--args=--only=terraform_documented_outputs' + - '--args=--only=terraform_documented_variables' + - '--args=--only=terraform_typed_variables' + - '--args=--only=terraform_module_pinned_source' + - '--args=--only=terraform_naming_convention' + - '--args=--only=terraform_required_version' + - '--args=--only=terraform_required_providers' + - '--args=--only=terraform_standard_module_structure' + - 
'--args=--only=terraform_workspace_remote' diff --git a/.tflint.hcl b/.tflint.hcl new file mode 100644 index 0000000..524c47c --- /dev/null +++ b/.tflint.hcl @@ -0,0 +1,5 @@ +plugin "aws" { + enabled = true + version = "0.36.0" + source = "github.com/terraform-linters/tflint-ruleset-aws" +} diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000..7306079 --- /dev/null +++ b/.yamllint @@ -0,0 +1,12 @@ +extends: relaxed + +rules: + line-length: disable + commas: disable + indentation: + spaces: 2 + indent-sequences: whatever + +ignore: | + *.terraform/ + gitops/base-install/cert-manager/create-issuer/templates/create-issuer.yaml diff --git a/README.md b/README.md new file mode 100644 index 0000000..c03cab9 --- /dev/null +++ b/README.md @@ -0,0 +1,63 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.5.7 | +| [aws](#requirement\_aws) | >= 5.45.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 5.45.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [cert\_manager\_irsa\_role](#module\_cert\_manager\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.51.0 | +| [ebs\_csi\_driver\_irsa\_role](#module\_ebs\_csi\_driver\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.51.0 | +| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 20.31.6 | +| [external\_dns\_irsa\_role](#module\_external\_dns\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.51.0 | +| [karpenter](#module\_karpenter) | terraform-aws-modules/eks/aws//modules/karpenter | 20.31.6 | +| [load\_balancer\_controller\_irsa\_role](#module\_load\_balancer\_controller\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.51.0 | +| [s3\_csi](#module\_s3\_csi) | terraform-aws-modules/s3-bucket/aws | 4.3.0 | +| 
[s3\_driver\_irsa\_role](#module\_s3\_driver\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.51.0 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | 5.17.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_vpc_endpoint.eks_vpc_endpoints](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) | resource | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [extra\_access\_entries](#input\_extra\_access\_entries) | EKS access entries needed by IAM roles interacting with this cluster |
list(object({
principal_arn = string
kubernetes_groups = optional(list(string))
policy_arn = string
access_scope_type = string
access_scope_namespaces = optional(list(string))
}))
| `[]` | no | +| [initial\_instance\_types](#input\_initial\_instance\_types) | instance types of the initial managed node group | `list(string)` | n/a | yes | +| [initial\_node\_desired\_size](#input\_initial\_node\_desired\_size) | desired size of the initial managed node group | `number` | `3` | no | +| [initial\_node\_labels](#input\_initial\_node\_labels) | labels for the initial managed node group | `map(string)` |
{
"kube-ovn/role": "master"
}
| no | +| [initial\_node\_max\_size](#input\_initial\_node\_max\_size) | max size of the initial managed node group | `number` | `6` | no | +| [initial\_node\_min\_size](#input\_initial\_node\_min\_size) | minimum size of the initial managed node group | `number` | `2` | no | +| [initial\_node\_taints](#input\_initial\_node\_taints) | taints for the initial managed node group | `list(object({ key = string, value = string, effect = string }))` |
[
{
"effect": "NO_SCHEDULE",
"key": "CriticalAddonsOnly",
"value": "true"
},
{
"effect": "NO_SCHEDULE",
"key": "nidhogg.uswitch.com/kube-system.kube-multus-ds",
"value": "true"
}
]
| no | +| [s3\_csi\_driver\_bucket\_arns](#input\_s3\_csi\_driver\_bucket\_arns) | existing buckets the s3 CSI driver should have access to | `list(string)` | `[]` | no | +| [s3\_csi\_driver\_create\_bucket](#input\_s3\_csi\_driver\_create\_bucket) | create a new bucket for use with the s3 CSI driver | `bool` | `true` | no | +| [stack\_admin\_arns](#input\_stack\_admin\_arns) | arn to the roles for the cluster admins role | `list(string)` | `[]` | no | +| [stack\_ci\_admin\_arn](#input\_stack\_ci\_admin\_arn) | arn to the ci role | `string` | n/a | yes | +| [stack\_ci\_ro\_arn](#input\_stack\_ci\_ro\_arn) | arn to the ci role for planning on PRs | `string` | n/a | yes | +| [stack\_create](#input\_stack\_create) | should resources be created | `bool` | `true` | no | +| [stack\_name](#input\_stack\_name) | Name of the stack | `string` | `"foundation-stack"` | no | +| [stack\_ro\_arns](#input\_stack\_ro\_arns) | arn to the roles for the cluster read only role | `list(string)` | `[]` | no | +| [stack\_tags](#input\_stack\_tags) | tags to be added to the stack, should at least have Owner and Environment | `map(any)` |
{
"Environment": "prod",
"Owner": "pelotech"
}
| no | +| [stack\_vpc\_block](#input\_stack\_vpc\_block) | Variables for defining the vpc for the stack |
object({
cidr = string
azs = list(string)
private_subnets = list(string)
public_subnets = list(string)
database_subnets = list(string)
})
|
{
"azs": [
"us-west-2a",
"us-west-2b",
"us-west-2c"
],
"cidr": "172.16.0.0/16",
"database_subnets": [
"172.16.200.0/24",
"172.16.201.0/24",
"172.16.202.0/24"
],
"private_subnets": [
"172.16.0.0/24",
"172.16.1.0/24",
"172.16.2.0/24"
],
"public_subnets": [
"172.16.100.0/24",
"172.16.101.0/24",
"172.16.102.0/24"
]
}
| no | +| [vpc\_endpoints](#input\_vpc\_endpoints) | vpc endpoints within the cluster vpc network | `list(string)` | `[]` | no | + +## Outputs + +No outputs. + diff --git a/catalog-info.yaml b/catalog-info.yaml new file mode 100644 index 0000000..ab32a12 --- /dev/null +++ b/catalog-info.yaml @@ -0,0 +1,15 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: terraform-foundation-aws-stack + description: Terraform modules to bootstrap k8s cluster in aws + tags: + - terraform + - k8s + annotations: + github.com/project-slug: pelotech/terraform-foundation-aws-stack + backstage.io/techdocs-ref: dir:. +spec: + type: infrastructure + lifecycle: production + owner: group:pelotech/leads diff --git a/commitlint.config.js b/commitlint.config.js new file mode 100644 index 0000000..3347cb9 --- /dev/null +++ b/commitlint.config.js @@ -0,0 +1 @@ +module.exports = {extends: ['@commitlint/config-conventional']}; diff --git a/docs/CLEANUP.md b/docs/CLEANUP.md new file mode 100644 index 0000000..0b7a481 --- /dev/null +++ b/docs/CLEANUP.md @@ -0,0 +1,21 @@ +# Cleanup + +This is the location to be able to hunt down most of the various resources which are created by the clusters components + +## Karpenter +* Search for tags with `karpenter.sh/managed-by` = `{cluster-name}` + +## ALB/Cluster - +### Security groups +* Tags `elbv2.k8s.aws/cluster` = `{cluster-name}` +* Tags `aws:eks:cluster-name` = `{cluster-name}` +### Load Balancers +* Tags `elbv2.k8s.aws/cluster` = `{cluster-name}` +### Target Groups +* Tags `elbv2.k8s.aws/cluster` = `{cluster-name}` + +## EBS +### Volumes +* Tags `ebs.csi.aws.com/cluster` = `true` +* Tags `kubernetes.io/cluster/{cluster-name}` = `owned` +* Tags `KubernetesCluster` = `{cluster-name}` diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..2e2d771 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,4 @@ 
+![pre-commit](https://github.com/pelotech/terraform-foundation-aws-stack/actions/workflows/pre-commit.yaml/badge.svg) + +# Foundation - Pelotech's GitOps K8s Cluster +This is the terraform module that helps bootstrap foundation in AWS diff --git a/main.tf b/main.tf new file mode 100644 index 0000000..b13ef95 --- /dev/null +++ b/main.tf @@ -0,0 +1,289 @@ +terraform { + required_version = ">= 1.5.7" + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.45.0" + } + } +} +data "aws_partition" "current" {} + +locals { + admin_access_entries = { + for index, item in concat(var.stack_admin_arns, [var.stack_ci_admin_arn]) : "admin_${index}" => { + principal_arn = item + policy_associations = { + cluster_admin = { + policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + access_scope = { + type = "cluster" + } + } + } + } + } + ro_access_entries = { + for index, item in concat(var.stack_ro_arns, [var.stack_ci_ro_arn]) : "ro_${index}" => { + principal_arn = item + policy_associations = { + view_only = { + policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + type = "cluster" + } + } + } + } + } + extra_access_entries = { + for index, item in var.extra_access_entries : "extra_${index}" => { + principal_arn = item.principal_arn + kubernetes_groups = item.kubernetes_groups + policy_associations = { + extra_association = { + policy_arn = item.policy_arn + access_scope = { + type = item.access_scope_type + namespaces = item.access_scope_namespaces + } + } + } + } + } + s3_csi_arns = compact(concat([module.s3_csi.s3_bucket_arn], var.s3_csi_driver_bucket_arns)) +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "5.17.0" + name = var.stack_name + enable_dns_hostnames = "true" + enable_dns_support = "true" + enable_nat_gateway = "true" + one_nat_gateway_per_az = "true" + cidr = 
var.stack_vpc_block.cidr + azs = var.stack_vpc_block.azs + private_subnets = var.stack_vpc_block.private_subnets + public_subnets = var.stack_vpc_block.public_subnets + database_subnets = var.stack_vpc_block.database_subnets + create_database_subnet_group = true + create_database_subnet_route_table = true + create_database_internet_gateway_route = true + + public_subnet_tags = { + "kubernetes.io/role/elb" = 1 + } + private_subnet_tags = { + "karpenter.sh/discovery" = var.stack_name + "kubernetes.io/role/internal-elb" = 1 + } + tags = merge(var.stack_tags, { + }) +} + +data "aws_region" "current" {} + +# https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/using-govcloud-vpc-endpoints.html +resource "aws_vpc_endpoint" "eks_vpc_endpoints" { + for_each = toset(var.vpc_endpoints) + vpc_id = module.vpc.vpc_id + service_name = "com.amazonaws.${data.aws_region.current.name}.${each.value}" + tags = var.stack_tags +} + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "20.31.6" + cluster_name = var.stack_name + cluster_version = "1.29" + create = var.stack_create + # TODO: resume usage of node security group; see: https://linear.app/pelotech/issue/PEL-97 + create_node_security_group = false + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + cluster_enabled_log_types = [] + + subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + create_kms_key = true + enable_irsa = true + # cluster_encryption_config = [{ + # resources = ["secrets"] + # }] + kms_key_administrators = concat(var.stack_admin_arns, [var.stack_ci_admin_arn, var.stack_ci_ro_arn]) + eks_managed_node_groups = { + "initial-${var.stack_name}" = { + iam_role_use_name_prefix = false + instance_types = var.initial_instance_types + min_size = var.initial_node_min_size + max_size = var.initial_node_max_size + desired_size = var.initial_node_desired_size + capacity_type = "ON_DEMAND" + labels = var.initial_node_labels + bootstrap_extra_args = 
"--use-max-pods false" + block_device_mappings = { + xvda = { + device_name = "/dev/xvda" + ebs = { + volume_size = 100 + volume_type = "gp3" + encrypted = true + delete_on_termination = true + } + } + } + taints = var.initial_node_taints + } + } + access_entries = merge(local.admin_access_entries, local.ro_access_entries, local.extra_access_entries) + tags = merge(var.stack_tags, { + # NOTE - if creating multiple security groups with this module, only tag the + # security group that Karpenter should utilize with the following tag + # (i.e. - at most, only one security group should have this tag in your account) + "karpenter.sh/discovery" = var.stack_name + }) +} + +module "karpenter" { + count = var.stack_create ? 1 : 0 + source = "terraform-aws-modules/eks/aws//modules/karpenter" + version = "20.31.6" + cluster_name = module.eks.cluster_name + enable_irsa = true + enable_pod_identity = false # TODO: PR because it doesn't work in govcloud (-> it works now since 8/24) + enable_v1_permissions = true + queue_name = var.stack_name + irsa_oidc_provider_arn = module.eks.oidc_provider_arn + irsa_namespace_service_accounts = ["karpenter:karpenter"] + # TODO: get a better naming conventions for roles + node_iam_role_name = "KarpenterNodeRole-${var.stack_name}" + iam_role_name = "${var.stack_name}-karpenter-role" + iam_role_use_name_prefix = false + node_iam_role_use_name_prefix = false + tags = merge(var.stack_tags, { + }) +} + +# IAM roles and policies for the cluster +module "load_balancer_controller_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "5.51.0" + + create_role = var.stack_create + + role_name = "${var.stack_name}-alb-role" + attach_load_balancer_controller_policy = true + + oidc_providers = { + cluster = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["alb:aws-load-balancer-controller"] + } + } + tags = merge(var.stack_tags, { + }) +} + +module 
"ebs_csi_driver_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "5.51.0" + + create_role = var.stack_create + + role_name = "${var.stack_name}-ebs-csi-driver-role" + attach_ebs_csi_policy = true + + oidc_providers = { + cluster = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:ebs-csi-driver"] + } + } + tags = merge(var.stack_tags, { + }) +} + +module "s3_csi" { + source = "terraform-aws-modules/s3-bucket/aws" + version = "4.3.0" + bucket = "${var.stack_tags.Owner}-${var.stack_name}-csi-bucket" + + create_bucket = var.s3_csi_driver_create_bucket + attach_deny_insecure_transport_policy = true + attach_require_latest_tls_policy = true + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + server_side_encryption_configuration = { + rule = { + apply_server_side_encryption_by_default = { + sse_algorithm = "AES256" + } + } + } + tags = merge(var.stack_tags, { + }) +} + +module "s3_driver_irsa_role" { + count = var.stack_create ? 1 : 0 + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "5.51.0" + create_role = var.stack_create + + role_name = "${var.stack_name}-s3-csi-driver-role" + attach_mountpoint_s3_csi_policy = true + mountpoint_s3_csi_bucket_arns = local.s3_csi_arns + mountpoint_s3_csi_path_arns = [for arn in local.s3_csi_arns : "${arn}/*"] + oidc_providers = { + cluster = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:s3-csi-driver"] + } + } + tags = merge(var.stack_tags, { + }) +} + +module "external_dns_irsa_role" { + count = var.stack_create ? 
1 : 0 +  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" +  version = "5.51.0" + +  create_role = var.stack_create + +  role_name = "${var.stack_name}-external-dns-role" +  attach_external_dns_policy = true +  external_dns_hosted_zone_arns = ["*"] + +  oidc_providers = { +    cluster = { +      provider_arn = module.eks.oidc_provider_arn +      namespace_service_accounts = ["external-dns:external-dns-controller"] +    } +  } +  tags = merge(var.stack_tags, { +  }) +} + +module "cert_manager_irsa_role" { +  count = var.stack_create ? 1 : 0 +  source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" +  version = "5.51.0" + +  create_role = var.stack_create + +  role_name = "${var.stack_name}-cert-manager-role" +  attach_cert_manager_policy = true +  cert_manager_hosted_zone_arns = ["*"] + +  oidc_providers = { +    cluster = { +      provider_arn = module.eks.oidc_provider_arn +      namespace_service_accounts = ["cert-manager:cert-manager"] +    } +  } +  tags = merge(var.stack_tags, { +  }) +} diff --git a/mkdocs.yaml b/mkdocs.yaml new file mode 100644 index 0000000..2931a68 --- /dev/null +++ b/mkdocs.yaml @@ -0,0 +1,7 @@ +site_name: terraform-foundation-aws-stack +site_description: Terraform modules for bootstrapping aws stack +nav: + - Home: README.md + +plugins: + - techdocs-core diff --git a/outputs.tf b/outputs.tf new file mode 100644 index 0000000..e69de29 diff --git a/renovate.json5 b/renovate.json5 new file mode 100644 index 0000000..2938d18 --- /dev/null +++ b/renovate.json5 @@ -0,0 +1,4 @@ +{ + extends: ["config:recommended",], + dependencyDashboard: true, +} diff --git a/variables.tf b/variables.tf new file mode 100644 index 0000000..2f6b70f --- /dev/null +++ b/variables.tf @@ -0,0 +1,148 @@ +variable "stack_name" { + type = string + default = "foundation-stack" + description = "Name of the stack" +} +variable "stack_create" { + type = bool + default = true + description = "should resources be created" +} +variable "stack_tags" { + type = 
map(any) + default = { + Owner = "pelotech" + Environment = "prod" + } + description = "tags to be added to the stack, should at least have Owner and Environment" +} +variable "stack_vpc_block" { + type = object({ + cidr = string + azs = list(string) + private_subnets = list(string) + public_subnets = list(string) + database_subnets = list(string) + }) + default = { + cidr = "172.16.0.0/16" + azs = ["us-west-2a", "us-west-2b", "us-west-2c"] + private_subnets = ["172.16.0.0/24", "172.16.1.0/24", "172.16.2.0/24"] + public_subnets = ["172.16.100.0/24", "172.16.101.0/24", "172.16.102.0/24"] + database_subnets = ["172.16.200.0/24", "172.16.201.0/24", "172.16.202.0/24"] + } + description = "Variables for defining the vpc for the stack" +} + +variable "extra_access_entries" { + type = list(object({ + principal_arn = string + kubernetes_groups = optional(list(string)) + policy_arn = string + access_scope_type = string + access_scope_namespaces = optional(list(string)) + })) + description = "EKS access entries needed by IAM roles interacting with this cluster" + default = [] + + validation { + error_message = "Access scope type can only be 'namespace' or 'cluster'" + condition = alltrue([ + for v in var.extra_access_entries : contains(["namespace", "cluster"], v.access_scope_type) + ]) + } + + validation { + error_message = "The access scope type 'namespace' requires 'access_scope_namespaces', namespaces can't be set otherwise." 
+ condition = alltrue([ + for v in var.extra_access_entries : ((v.access_scope_type == "namespace" && v.access_scope_namespaces != null) || (v.access_scope_type != "namespace" && v.access_scope_namespaces == null)) + ]) + } +} + +variable "stack_ci_admin_arn" { + type = string + description = "arn to the ci role" +} + +# TODO: find a cleaner way for KMS access to be able to run plans on the module +variable "stack_ci_ro_arn" { + type = string + description = "arn to the ci role for planning on PRs" +} + +variable "stack_admin_arns" { + type = list(string) + default = [] + description = "arn to the roles for the cluster admins role" +} + +variable "stack_ro_arns" { + type = list(string) + default = [] + description = "arn to the roles for the cluster read only role" +} + +variable "initial_node_taints" { + type = list(object({ key = string, value = string, effect = string })) + default = [ + { + key = "CriticalAddonsOnly" + value = "true" + effect = "NO_SCHEDULE" + }, + { + key = "nidhogg.uswitch.com/kube-system.kube-multus-ds" + value = "true" + effect = "NO_SCHEDULE" + } + ] + description = "taints for the initial managed node group" +} +variable "initial_node_labels" { + type = map(string) + default = { + "kube-ovn/role" = "master" + } + description = "labels for the initial managed node group" +} + +variable "initial_instance_types" { + type = list(string) + description = "instance types of the initial managed node group" +} + +variable "initial_node_min_size" { + type = number + default = 2 + description = "minimum size of the initial managed node group" +} + +variable "initial_node_max_size" { + type = number + default = 6 + description = "max size of the initial managed node group" +} + +variable "initial_node_desired_size" { + type = number + default = 3 + description = "desired size of the initial managed node group" +} + +variable "s3_csi_driver_create_bucket" { + type = bool + default = true + description = "create a new bucket for use with the s3 CSI 
driver" +} + +variable "s3_csi_driver_bucket_arns" { + type = list(string) + default = [] + description = "existing buckets the s3 CSI driver should have access to" +} +variable "vpc_endpoints" { + type = list(string) + description = "vpc endpoints within the cluster vpc network" + default = [] +}