From 4e2f0bca6dfa72500208ce89388f4773ffdb0080 Mon Sep 17 00:00:00 2001 From: Nitin Aggarwal Date: Wed, 13 Apr 2022 09:25:43 -0700 Subject: [PATCH] all (#178) --- content/en/Reference/aws/eks_access.md | 106 ++++++++++++++++++ content/en/Reference/aws/modules/aws-dns.md | 1 + content/en/Reference/aws/modules/aws-eks.md | 1 + .../en/Reference/aws/modules/aws-k8s-base.md | 8 +- .../Reference/aws/modules/aws-k8s-service.md | 2 + content/en/Reference/aws/modules/aws-mysql.md | 11 +- .../en/Reference/aws/modules/aws-nodegroup.md | 5 +- .../en/Reference/aws/modules/aws-postgres.md | 22 +++- .../aws/modules/cloudfront-distribution.md | 74 +++++------- .../aws/modules/global-accelerator.md | 74 ++++++++++++ .../azurerm/modules/azure-k8s-service.md | 2 + .../google/modules/gcp-k8s-service.md | 2 + 12 files changed, 254 insertions(+), 54 deletions(-) create mode 100644 content/en/Reference/aws/eks_access.md create mode 100644 content/en/Reference/aws/modules/global-accelerator.md diff --git a/content/en/Reference/aws/eks_access.md b/content/en/Reference/aws/eks_access.md new file mode 100644 index 0000000..7f7afb8 --- /dev/null +++ b/content/en/Reference/aws/eks_access.md @@ -0,0 +1,106 @@ +--- +title: "EKS Access" +linkTitle: "EKS Access" +date: 2022-01-03 +draft: false +weight: 1 +description: How to access your Opta EKS Cluster +--- + +## EKS Access +As each Kubernetes cluster maintains its own cloud-agnostic rbac system to govern its own usage, extra steps +must be taken on each cloud provider to reconcile the given cloud's IAM with the cluster's. For EKS, this is done +via the `aws-auth` [configmap](https://kubernetes.io/docs/concepts/configuration/configmap/) stored in the `kube-system` +namespace (see [here](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html) for the official documentation). +This configmap is essentially a mapping stating "AWS IAM user/role X is in group/has permissions A, B, C" in this cluster. +An admin can view this configmap via this command `kubectl get cm -n kube-system aws-auth -o yaml` and these configmaps +typically look like so: +```yaml +apiVersion: v1 +data: # NOTE there are separate sections for AWS IAM Users and AWS IAM roles. + mapRoles: | + - groups: ['system:bootstrappers', 'system:nodes'] + rolearn: arn:aws:iam::ACCOUNT_ID:role/opta-live-example-dev-eks-default-node-group + username: system:node:{{EC2PrivateDNSName}} + - groups: ['system:bootstrappers', 'system:nodes'] + rolearn: arn:aws:iam::ACCOUNT_ID:role/opta-live-example-dev-eks-nodegroup1-node-group + username: system:node:{{EC2PrivateDNSName}} + - groups: ['system:masters'] + rolearn: arn:aws:iam::ACCOUNT_ID:role/live-example-dev-live-example-dev-deployerrole + username: opta-managed + mapUsers: | + - groups: ['system:masters'] + userarn: arn:aws:iam::ACCOUNT_ID:user/live-example-dev-live-example-dev-deployeruser + username: opta-managed +``` + +> Note: the IAM user/role who created the cluster is always considered root/admin and does not appear + +As you can see, each entry has the following fields: +* rolearn/userarn: the arn of the AWS IAM user/role to link. +* username: the human-friendly distinct name/alias to recognize the rbac request from. +* groups: the list of Kubernetes rbac groups to give the role/user access to. + +Please refer to the [official docs](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for full details, but +note that if you want admin privileges, you simply need the `system:masters` group. 
For convenience, Opta has exposed a
+field in the `k8s-base` module for AWS known as `admin_arns`, which is where users can quickly add IAM users/roles to
+add as admins without dealing with Kubernetes directly.
+
+```yaml
+name: staging
+org_name: my-org
+providers:
+  aws:
+    region: us-east-1
+    account_id: XXXX # Your 12 digit AWS account id
+modules:
+  - type: base
+  - type: dns
+    domain: staging.startup.com
+    subdomains:
+      - hello
+  - type: k8s-cluster
+  - type: k8s-base
+    admin_arns:
+      - "arn:aws:iam::XXXX:user/my-user"
+      - "arn:aws:iam::XXXX:role/my-role"
+```
+
+## K8s RBAC Groups
+Admittedly, Kubernetes rbac groups are
+[currently difficult to view](https://stackoverflow.com/questions/51612976/how-to-view-members-of-subject-with-group-kind),
+but you should be able to see details of the current ones with the following commands (you will need `jq` installed):
+`kubectl get clusterrolebindings -o json | jq -r '.items[] | select(.subjects[0].kind=="Group")'` and
+`kubectl get rolebindings -A -o json | jq -r '.items[] | select(.subjects[0].kind=="Group")'` (there are none of the latter by default).
+
+Essentially, an rbac group is created by creating a ClusterRoleBinding (or RoleBinding for namespace-limited permissions)
+between the ClusterRole/Role whose permissions you want to give and a new or pre-existing Group to give it to. Take the
+following yaml for instance:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: my-cluster-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:discovery
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: my-group
+```
+
+In this case, the ClusterRoleBinding says "give all members of the Group named my-group all the permissions of the
+ClusterRole named system:discovery on all namespaces" (you can bind to ServiceAccounts as well, please see the docs for
+more details).
+
+## Conclusion
+So, to summarize:
+
+* If you wish to add an IAM role/user to be an admin in the K8s cluster, go ahead and use the `admin_arns` field for the
+  AWS `k8s-base` module
+* If you wish to add an IAM role/user to a different set of K8s permissions already found in a pre-existing group, go
+  ahead and manually add them to the `aws-auth` configmap in the `kube-system` namespace
+* If you wish to create a new K8s group to capture a new set of permissions, go ahead and do so with RoleBindings/ClusterRoleBindings.
diff --git a/content/en/Reference/aws/modules/aws-dns.md b/content/en/Reference/aws/modules/aws-dns.md
index 82cd79f..4ab7c99 100644
--- a/content/en/Reference/aws/modules/aws-dns.md
+++ b/content/en/Reference/aws/modules/aws-dns.md
@@ -91,6 +91,7 @@ new apply.
 | `domain` | The domain you want (you will also get the subdomains for your use) | `None` | True |
 | `delegated` | Set to true once the extra [dns setup is complete](/features/dns-and-cert/dns/) and it will add the ssl certs. | `False` | False |
 | `upload_cert` | Deprecated | `False` | False |
+| `linked_module` | The module type (or name if given) to automatically add root dns records for. 
| `` | False | ## Outputs diff --git a/content/en/Reference/aws/modules/aws-eks.md b/content/en/Reference/aws/modules/aws-eks.md index 67c42e6..6c78784 100644 --- a/content/en/Reference/aws/modules/aws-eks.md +++ b/content/en/Reference/aws/modules/aws-eks.md @@ -29,6 +29,7 @@ For information about the default IAM permissions given to the node group please | `spot_instances` | A boolean specifying whether to use [spot instances](https://aws.amazon.com/ec2/spot/) for the default nodegroup or not. The spot instances will be configured to have the max price equal to the on-demand price (so no danger of overcharging). _WARNING_: By using spot instances you must accept the real risk of frequent abrupt node terminations and possibly (although extremely rarely) even full blackouts (all nodes die). The former is a small risk as containers of Opta services will be automatically restarted on surviving nodes. So just make sure to specify a minimum of more than 1 containers -- Opta by default attempts to spread them out amongst many nodes. The former is a graver concern which can be addressed by having multiple node groups of different instance types (see aws nodegroup module) and ideally at least one non-spot. | `False` | False | | `enable_metrics` | Enable autoscaling group cloudwatch metrics collection for the default nodegroup. | `False` | False | | `node_launch_template` | Custom launch template for the underlying ec2s. | `{}` | False | +| `ami_type` | The AMI type to use for the nodes. For more information about this, please visit [here](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) Note: Currently, "CUSTOM" ami type is not supported. | `AL2_x86_64` | False | ## Outputs diff --git a/content/en/Reference/aws/modules/aws-k8s-base.md b/content/en/Reference/aws/modules/aws-k8s-base.md index c978005..8d98d80 100644 --- a/content/en/Reference/aws/modules/aws-k8s-base.md +++ b/content/en/Reference/aws/modules/aws-k8s-base.md @@ -12,7 +12,6 @@ description: Creates base infrastructure for k8s environments This module is responsible for all the base infrastructure we package into the Opta K8s environments. This includes: - [Autoscaler](https://github.com/kubernetes/autoscaler) for scaling up and down the ec2s as needed -- [External DNS](https://github.com/kubernetes-sigs/external-dns) to automatically hook up the ingress to the hosted zone and its domain - [Ingress Nginx](https://github.com/kubernetes/ingress-nginx) to expose services to the public - [Metrics server](https://github.com/kubernetes-sigs/metrics-server) for scaling different deployments based on cpu/memory usage - [Linkerd](https://linkerd.io/) as our service mesh. @@ -24,7 +23,6 @@ This module is responsible for all the base infrastructure we package into the O | Name | Description | Default | Required | | ----------- | ----------- | ------- | -------- | -| `cert_arn` | The arn of the ACM certificate to use for SSL. By default uses the one created by the DNS module if the module is found and delegation enabled. | `` | False | | `nginx_high_availability` | Deploy the nginx ingress in a high-availability configuration. | `False` | False | | `linkerd_high_availability` | Deploy the linkerd service mesh in a high-availability configuration for its control plane. | `False` | False | | `linkerd_enabled` | Enable the linkerd service mesh installation. 
| `True` | False |
@@ -36,10 +34,14 @@ This module is responsible for all the base infrastructure we package into the O
 | `cert_manager_values` | Certificate Manager helm chart additional values. [Available options](https://artifacthub.io/packages/helm/cert-manager/cert-manager?modal=values) | `{}` | False |
 | `linkerd_values` | Linkerd helm chart additional values. [Available options](https://artifacthub.io/packages/helm/linkerd2/linkerd2/2.10.2?modal=values) | `{}` | False |
 | `ingress_nginx_values` | Ingress Nginx helm chart additional values. [Available options](https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx/4.0.17?modal=values) | `{}` | False |
+| `domain` | Domain to set up the ingress with. By default uses the one specified in the DNS module if the module is found. | `` | False |
+| `zone_id` | ID of Route53 hosted zone to add a record for. By default uses the one created by the DNS module if the module is found. | `` | False |
+| `cert_arn` | The arn of the ACM certificate to use for SSL. By default uses the one created by the DNS module if the module is found and delegation enabled. | `` | False |
 
 ## Outputs
 
 
 | Name | Description |
 | ----------- | ----------- |
-| `load_balancer_raw_dns` | The dns of the network load balancer provisioned to handle ingress to your environment |
\ No newline at end of file
+| `load_balancer_raw_dns` | The dns of the network load balancer provisioned to handle ingress to your environment |
+| `load_balancer_arn` | The arn of the network load balancer provisioned to handle ingress to your environment |
\ No newline at end of file
diff --git a/content/en/Reference/aws/modules/aws-k8s-service.md b/content/en/Reference/aws/modules/aws-k8s-service.md
index 33888e8..06a4d1d 100644
--- a/content/en/Reference/aws/modules/aws-k8s-service.md
+++ b/content/en/Reference/aws/modules/aws-k8s-service.md
@@ -182,6 +182,8 @@ Cron Jobs are currently created outside the default linkerd service mesh.
 | `ingress_extra_annotations` | These are extra annotations to add to ingress objects | `{}` | False |
 | `tolerations` | Taint tolerations to add to the pods. | `[]` | False |
 | `cron_jobs` | A list of cronjobs to execute as part of this service | `[]` | False |
+| `pod_annotations` | These are extra annotations to add to k8s-service pod objects | `{}` | False |
+| `timeout` | Time in seconds to wait for deployment. | `300` | False |
 
 ## Outputs
 
diff --git a/content/en/Reference/aws/modules/aws-mysql.md b/content/en/Reference/aws/modules/aws-mysql.md
index 4ec30ba..1dd8c6f 100644
--- a/content/en/Reference/aws/modules/aws-mysql.md
+++ b/content/en/Reference/aws/modules/aws-mysql.md
@@ -17,6 +17,14 @@ Opta will provision your database with 7 days of automatic daily backups in the
 You can find them either programmatically via the aws cli, or through the AWS web console (they will be called system
 snapshots, and they have a different tab than the manual ones).
 
+### Performance and Scaling
+
+You can modify the DB instance class with the field `instance_class` in the module configuration (see the sketch below).
+
+Storage scaling is automatically managed by AWS Aurora, see the [official documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Performance.html).
+
+To add replicas to an existing cluster, follow the [official guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-replicas-adding.html).
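+
+For example, a minimal sketch of what setting the instance class could look like in an Opta yaml (the module name
+`mydb` and the `db.r5.large` value here are illustrative placeholders, not recommendations from this page):
+
+```yaml
+modules:
+  - type: aws-mysql
+    name: mydb                  # hypothetical name for this database module
+    instance_class: db.r5.large # assumed example; use any instance class supported by Aurora MySQL
+```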
+ ### Linking When linked to a k8s-service, it adds connection credentials to your container's environment variables as: @@ -63,4 +71,5 @@ To those with the permissions, you can view it via the following command (MANIFE | `engine_version` | The version of the database to use. | `5.7.mysql_aurora.2.04.2` | False | | `multi_az` | Enable read-write replication across different availability zones on the same reason (doubles the cost, but needed for compliance). Can be added and updated at a later date without need to recreate. | `False` | False | | `backup_retention_days` | How many days to keep the backup retention | `7` | False | -| `safety` | Add deletion protection to stop accidental db deletions | `False` | False | \ No newline at end of file +| `safety` | Add deletion protection to stop accidental db deletions | `False` | False | +| `db_name` | The name of the database to create. Follow naming conventions [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) | `app` | False | \ No newline at end of file diff --git a/content/en/Reference/aws/modules/aws-nodegroup.md b/content/en/Reference/aws/modules/aws-nodegroup.md index 57cf620..00e8da8 100644 --- a/content/en/Reference/aws/modules/aws-nodegroup.md +++ b/content/en/Reference/aws/modules/aws-nodegroup.md @@ -70,6 +70,7 @@ daemonsets to run their agents in each node, so please be careful and read their | `min_nodes` | Min number of nodes to allow via autoscaling | `3` | False | | `node_disk_size` | The size of disk to give the nodes' ec2s in GB. | `20` | False | | `node_instance_type` | The [ec2 instance type](https://aws.amazon.com/ec2/instance-types/) for the nodes. | `t3.medium` | False | -| `use_gpu` | Should we expect and use the gpus present in the ec2? | `False` | False | | `spot_instances` | A boolean specifying whether to use [spot instances](https://aws.amazon.com/ec2/spot/) for the default nodegroup or not. The spot instances will be configured to have the max price equal to the on-demand price (so no danger of overcharging). _WARNING_: By using spot instances you must accept the real risk of frequent abrupt node terminations and possibly (although extremely rarely) even full blackouts (all nodes die). The former is a small risk as containers of Opta services will be automatically restarted on surviving nodes. So just make sure to specify a minimum of more than 1 containers -- Opta by default attempts to spread them out amongst many nodes. The former is a graver concern which can be addressed by having multiple node groups of different instance types (see aws nodegroup module) and ideally at least one non-spot. | `False` | False | -| `taints` | Taints to add to the nodes in this nodegroup. | `[]` | False | \ No newline at end of file +| `taints` | Taints to add to the nodes in this nodegroup. | `[]` | False | +| `use_gpu` | Should we expect and use the gpus present in the ec2? Note: This input would be deprecated in the coming releases. Please switch to using `ami_type`. Usage: If using, `use_gpu: false`, just remove it. If using `use_gpu: true` replace it with `ami_type: AL2_x86_64_GPU` | `False` | False | +| `ami_type` | The AMI type to use for the nodes. For more information about this, please visit [here](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) Note: Currently, "CUSTOM" ami type is not supported. 
| `AL2_x86_64` | False |
\ No newline at end of file
diff --git a/content/en/Reference/aws/modules/aws-postgres.md b/content/en/Reference/aws/modules/aws-postgres.md
index 592cbe9..87abb1c 100644
--- a/content/en/Reference/aws/modules/aws-postgres.md
+++ b/content/en/Reference/aws/modules/aws-postgres.md
@@ -17,6 +17,15 @@ Opta will provision your database with 7 days of automatic daily backups in the
 You can find them either programmatically via the aws cli, or through the AWS web console (they will be called system
 snapshots, and they have a different tab than the manual ones).
 
+### Performance and Scaling
+
+You can modify the DB instance class with the field `instance_class` in the module configuration.
+
+Storage scaling is automatically managed by AWS Aurora, see the [official documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Performance.html).
+
+To add replicas to an existing cluster, follow the [official guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-replicas-adding.html).
+
+
 ### Linking
 
 When linked to a k8s-service, it adds connection credentials to your container's environment variables as:
@@ -63,4 +72,15 @@ To those with the permissions, you can view it via the following command (MANIFE
 | `engine_version` | The version of the database to use. | `11.9` | False |
 | `multi_az` | Enable read-write replication across different availability zones on the same reason (doubles the cost, but needed for compliance). Can be added and updated at a later date without need to recreate. | `False` | False |
 | `safety` | Add deletion protection to stop accidental db deletions | `False` | False |
-| `backup_retention_days` | How many days to keep the backup retention | `7` | False |
\ No newline at end of file
+| `backup_retention_days` | How many days to keep the backup retention | `7` | False |
+| `extra_security_groups_ids` | IDs of extra AWS security groups to add to the database | `[]` | False |
+| `create_global_database` | Create an Aurora Global database with this db as the master/writer | `False` | False |
+| `existing_global_database_id` | ID of the Aurora global database to attach | `None` | False |
+| `database_name` | The name of the database to create. 
Follow naming conventions [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) | `app` | False | + +## Outputs + + +| Name | Description | +| ----------- | ----------- | +| `global_database_id` | The id of the global database, if created | \ No newline at end of file diff --git a/content/en/Reference/aws/modules/cloudfront-distribution.md b/content/en/Reference/aws/modules/cloudfront-distribution.md index 5eaf73e..8a17194 100644 --- a/content/en/Reference/aws/modules/cloudfront-distribution.md +++ b/content/en/Reference/aws/modules/cloudfront-distribution.md @@ -22,14 +22,14 @@ providers: account_id: XXXXXXXXXX modules: - type: aws-s3 - name: testmodule bucket_name: "a-unique-s3-bucket-name" - files: "./my-site-files" # See S3 module for more info about uploading your files t S3 + files: "./my-site-files" # See S3 module for more info about uploading your files to S3 + name: testmodule + - type: dns + domain: staging.startup.com # Fill in with your desired domain, or remove this whole entry if handling dns outside of Opta + delegated: false # Set to true when ready -- see the "Configure DNS" page + linked_module: cloudfront-distribution - type: cloudfront-distribution - # Uncomment the following and fill in to support your domain with ssl -# acm_cert_arn: "arn:aws:acm:us-east-1:XXXXXXXXXX:certificate/cert-id" -# domains: -# - "your.domain.com" links: - testmodule ``` @@ -45,14 +45,15 @@ providers: account_id: XXXXXXXXXX modules: - type: base + - type: dns + domain: staging.startup.com # Fill in with your desired domain, or remove this whole entry if handling dns outside of Opta + delegated: false # Set to true when ready -- see the "Configure DNS" page + linked_module: cloudfront-distribution - type: k8s-cluster - type: k8s-base name: testbase + expose_self_signed_ssl: true - type: cloudfront-distribution -# Uncomment the following and fill in to support your domain with ssl -# acm_cert_arn: "arn:aws:acm:us-east-1:XXXXXXXXXX:certificate/cert-id" -# domains: -# - "your.domain.com" links: - testbase ``` @@ -68,45 +69,23 @@ caching capabilities. That means that while delivery speeds are significantly fa (~1hr) to reflect changes into your static site deployment. Please keep this in mind when deploying such changes. You may immediately verify the latest copy by downloading from your S3 bucket directly. -### Using your own domain -If you are ready to start hosting your site with your domain via the cloudfront distribution, then proceed as follows: -1. Get an [AWS ACM certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) for your site. +### Domain / DNS +If you are ready to start hosting your site with your domain via the cloudfront distribution, then go ahead and follow +the [configuring dns guide](/features/dns-and-cert/dns), which will also set up your SSL. Traffic should +start flowing from your domain to your cloudfront distribution and on towards your S3 bucket / K8s cluster. You could +also manually configure DNS / SSL from outside of Opta using the following steps: +1. Remove the dns module entirely from your yaml, if you haven't already. +2. Get an [AWS ACM certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) for your site. Make sure that you get it in region us-east-1. If you already have one at hand in your account (e.g. from another active Opta deployment), then feel free to reuse that. -2. 
[Validate](https://docs.aws.amazon.com/acm/latest/userguide/dns-validation.html) the certificate by adding the correct CNAME entries in your domain's DNS settings. Specific instructions for popular domain providers are [explained here](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html). -3. Create a new separate CNAME record for the domain you wish to use for cloudfront and point it at the `cloudfront_domain` gotten above. -3. Set the acm_cert_arn and domains fields in opta accordingly -4. Opta apply and you're done! - -### AWS WAF with Cloudfront - -[AWS WAF](https://aws.amazon.com/waf/) is a web application firewall that helps protect your web applications or APIs against common web exploits and bots that may affect availability, compromise security, or consume excessive resources. In this section we explain how to configure AWS WAF with your Cloudfront distribution. - -As a pre-requisite, follow the steps in the previous section (__Using your own domain__) to create a and validate a certificate for the custom domain. After completing those steps, users have the ability to access your services at `https://your-custom-domain`; and because your CNAME record for your custom domain points to the cloudfront distribution URL, traffic will be directed through your cloud-front distribution. - -Next, we need to create an AWS WAF to protect our service and cloudfront CDN cache. We do this via the [AWS WAF GUI](https://console.aws.amazon.com/wafv2/homev2). - -Here are a few screen shots showing how the WAF GUI values can be configured for a "passthrough" WAF to start with. - -We start at the WAF landing page in the AWS Console: - - - - - -We configure the WAF to use the cloudfront distribution we created with Opta; this can be selected by selecting the `Cloudfront distribution` radio button and then clicking on the `Add AWS Resources` button to select the cloudfront distribution; you should then end up with something like so: - - - - - -The initial configuration of the WAF allows all traffic: - - - - +3. [Validate](https://docs.aws.amazon.com/acm/latest/userguide/dns-validation.html) the certificate by adding the correct CNAME entries in your domain's DNS settings. +4. Fill in the `acm_cert_arn` field for the cloudfront module with the arn of your cert. +5. In your hosted zone, create either an A record (if it's on the same AWS account) or a CNAME pointing to the cloudfront + distribution url (the `cloudfront_domain` output). Alternatively, if it's a hosted zone on the same AWS account you could pass the `zone_id` to the + cloudfront module to have Opta automatically take care of this for you. +6. Fill in the `domains` field to include the domains for which you have the certificate for (no need to include wildcard repetition, that's automatic). +7. Opta apply and you're done! -Finally, please [configure AWS WAF rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) for your specific application protection needs. ## Fields @@ -119,11 +98,12 @@ Finally, please [configure AWS WAF rules](https://docs.aws.amazon.com/waf/latest | `status_404_page_file` | The name of the existing s3 object in your bucket which will serve as the 404 page. | `None` | False | | `status_500_page_file` | The name of the existing s3 object in your bucket which will serve as the 500 page. | `None` | False | | `price_class` | The cloudfront price class for this distribution. 
Can be PriceClass_All, PriceClass_200, or PriceClass_100 | `PriceClass_200` | False |
-| `acm_cert_arn` | The ACM certificate arn you wish to use to handle ssl (needed if you want https for your site) | `None` | False |
+| `acm_cert_arn` | The ACM certificate arn you wish to use to handle ssl (needed if you want https for your site) | `` | False |
 | `domains` | The domains which you want your cloudfront distribution to support. | `[]` | False |
 | `links` | The linked s3 buckets to attach to your cloudfront distribution (currently only supports one). | `[]` | False |
 | `allowed_methods` | HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. | `['GET', 'HEAD', 'OPTIONS']` | False |
 | `cached_methods` | CloudFront caches the response to the specified HTTP method requests. | `['GET', 'HEAD', 'OPTIONS']` | False |
+| `zone_id` | ID of Route53 hosted zone to add a record for. By default uses the one created by the DNS module if the module is found. | `` | False |
 
 ## Outputs
 
diff --git a/content/en/Reference/aws/modules/global-accelerator.md b/content/en/Reference/aws/modules/global-accelerator.md
new file mode 100644
index 0000000..3488bf1
--- /dev/null
+++ b/content/en/Reference/aws/modules/global-accelerator.md
@@ -0,0 +1,74 @@
+---
+title: "global-accelerator"
+linkTitle: "global-accelerator"
+date: 2021-07-21
+draft: false
+weight: 1
+description: Add an AWS Global Accelerator to your env.
+---
+
+This module sets up an [AWS Global Accelerator](https://aws.amazon.com/global-accelerator/) for you. For those new to
+this service, a Global Accelerator can be used as an alternative (or helper) to multi region deployments, "fast
+forwarding" requests across AWS' underlying networks to drastically decrease long-distance network request latencies.
+The Global Accelerator is meant to be deployed in front of a load balancer, and exposes a domain and public ip addresses
+to which to send public traffic. In Opta, it can be set up like so:
+
+```yaml
+name: testing-global-accelerator
+org_name: runx
+providers:
+  aws:
+    region: us-east-1
+    account_id: XXXXXXXXXX
+modules:
+  - type: base
+  - type: dns
+    name: dns
+    domain: staging.startup.com
+    delegated: false # Set to true when ready -- see the "Configure DNS" page
+    linked_module: global-accelerator
+  - type: k8s-cluster
+  - type: k8s-base
+    # Uncomment when enabling dns to get ssl
+#    cert_arn: "${{module.dns.cert_arn}}" # Or add your own cert if not using Opta's dns module
+  - type: global-accelerator
+```
+
+### Domain / DNS
+If you are ready to start hosting your site with your domain via the global accelerator, then go ahead and follow
+the [configuring dns guide](/features/dns-and-cert/dns), which will also set up your SSL. Traffic should
+start flowing from your domain to your global accelerator and on towards your K8s cluster. You could
+also manually configure DNS / SSL from outside of Opta using the following steps:
+1. Remove the dns module entirely from your yaml, if you haven't already.
+2. Get an [AWS ACM certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) for your site.
+   Make sure that you get it in region us-east-1. If you already have one at hand in your account (e.g. from another
+   active Opta deployment), then feel free to reuse that.
+3. [Validate](https://docs.aws.amazon.com/acm/latest/userguide/dns-validation.html) the certificate by adding the correct CNAME entries in your domain's DNS settings.
+4. Fill in the `cert_arn` field for the k8s-base module with the arn of your cert. 
+5. In your hosted zone, create either an A record (if it's on the same AWS account) or a CNAME pointing to the Global Accelerator + dns name (the `global_accelerator_dns_name` output). Alternatively, if it's a hosted zone on the same AWS account you could pass the `zone_id` to the + global accelerator module to have Opta automatically take care of this for you. +6. Fill in the `domains` field to include the domains for which you have the certificate for (no need to include wildcard repetition, that's automatic). +7. Opta apply and you're done! + +## Fields + + +| Name | Description | Default | Required | +| ----------- | ----------- | ------- | -------- | +| `flow_logs_enabled` | Enable flow logs? | `False` | False | +| `flow_logs_bucket` | Flow logs bucket | `` | False | +| `flow_logs_prefix` | Flow logs prefix | `global-accelerator-flow-logs/` | False | +| `endpoint_id` | The id of the endpoint to direct traffic to. If it's an NLB or ALB, then it's the arn. If it's an EIP, then it's the allocation id. | `None` | False | +| `domain` | Domain to setup the ingress with. By default uses the one specified in the DNS module if the module is found. | `` | False | +| `zone_id` | ID of Route53 hosted zone to add a record for. By default uses the one created by the DNS module if the module is found. | `` | False | + +## Outputs + + +| Name | Description | +| ----------- | ----------- | +| `global_accelerator_arn` | The arn of the global accelerator created. | +| `global_accelerator_dns_name` | The public dns name of the global accelerator created. | +| `global_accelerator_ip_addresses` | The public ip addresses of the global accelerator created. | +| `global_accelerator_endpoint_arns` | The arns of the global accelerator endpoint groups created. | \ No newline at end of file diff --git a/content/en/Reference/azurerm/modules/azure-k8s-service.md b/content/en/Reference/azurerm/modules/azure-k8s-service.md index 668735c..0397b32 100644 --- a/content/en/Reference/azurerm/modules/azure-k8s-service.md +++ b/content/en/Reference/azurerm/modules/azure-k8s-service.md @@ -116,6 +116,8 @@ deleting the kubernetes persistent volume claims. | `links` | A list of extra IAM role policies not captured by Opta which you wish to give to your service. | `[]` | False | | `persistent_storage` | A list persistent storages to add to each instance of your service (need to give a `size` which is the size in GB for the storage volume to be, and `path` which is the path in the filesystem of each instance to place it under) | `[]` | False | | `ingress_extra_annotations` | These are extra annotations to add to ingress objects | `{}` | False | +| `pod_annotations` | These are extra annotations to add to k8s-service pod objects | `{}` | False | +| `timeout` | Time in seconds to wait for deployment. | `300` | False | ## Outputs diff --git a/content/en/Reference/google/modules/gcp-k8s-service.md b/content/en/Reference/google/modules/gcp-k8s-service.md index 3b68485..efe5906 100644 --- a/content/en/Reference/google/modules/gcp-k8s-service.md +++ b/content/en/Reference/google/modules/gcp-k8s-service.md @@ -181,6 +181,8 @@ Cron Jobs are currently created outside the default linkerd service mesh. | `additional_iam_roles` | A list of extra project-level iam roles to grant to the service account created for this k8s service | `[]` | False | | `tolerations` | Taint tolerations to add to the pods. 
| `[]` | False | | `cron_jobs` | A list of cronjobs to execute as part of this service | `[]` | False | +| `pod_annotations` | These are extra annotations to add to k8s-service pod objects | `{}` | False | +| `timeout` | Time in seconds to wait for deployment. | `300` | False | ## Outputs