diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 0a6ab3baac..900414c22f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -23,7 +23,7 @@ jobs:
       run: |
         if [[ -n "${GITHUB_HEAD_REF}" ]]; then
           # This is a PR, use the source branch name
-          echo "REF_NAME=${GITHUB_HEAD_REF}" >> $GITHUB_ENV
+          echo "REF_NAME=${GITHUB_HEAD_REF//\//-}" >> $GITHUB_ENV
         else
           # This is a push, use the branch or tag name from GITHUB_REF
           echo "REF_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV
diff --git a/automation/config_pgcluster.yml b/automation/config_pgcluster.yml
index ebf322dabd..248eece994 100644
--- a/automation/config_pgcluster.yml
+++ b/automation/config_pgcluster.yml
@@ -200,6 +200,9 @@
     - role: postgresql_privs
       when: inventory_hostname in groups['primary']
 
+    - role: backup
+      when: wal_g_install | bool or pgbackrest_install | bool
+
     - role: wal_g
       when: wal_g_install | bool
 
diff --git a/automation/deploy_pgcluster.yml b/automation/deploy_pgcluster.yml
index 1ca8282a61..c9041c58ed 100644
--- a/automation/deploy_pgcluster.yml
+++ b/automation/deploy_pgcluster.yml
@@ -284,6 +284,8 @@
     - role: ntp
     - role: ssh_keys
     - role: copy
+    - role: backup
+      when: wal_g_install | bool or pgbackrest_install | bool
 
 - name: deploy_pgcluster.yml | Deploy balancers
   ansible.builtin.import_playbook: balancers.yml
@@ -317,6 +319,8 @@
     - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings
   tags: always
   roles:
+    - role: backup
+      when: pgbackrest_install | bool
     - role: pgbackrest
       when: pgbackrest_install | bool
 
@@ -354,6 +358,9 @@
   tags: always
   roles:
+    - role: backup
+      when: (wal_g_install | bool) or (pgbackrest_install | bool)
+
     - role: wal_g
       when: wal_g_install|bool
 
diff --git a/automation/roles/backup/defaults/main.yml b/automation/roles/backup/defaults/main.yml
new file mode 100644
index 0000000000..87b99a0204
--- /dev/null
+++ b/automation/roles/backup/defaults/main.yml
@@ -0,0 +1,40 @@
+---
+# Backups (if 'pgbackrest_install' or 'wal_g_install' is 'true')
+aws_s3_bucket_create: true # if 'cloud_provider=aws'
+aws_s3_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the S3 bucket.
+aws_s3_bucket_region: "{{ server_location }}" # The AWS region to use.
+aws_s3_bucket_object_lock_enabled: false # Whether S3 Object Lock is enabled.
+aws_s3_bucket_encryption: "AES256" # Describes the default server-side encryption to apply to new objects in the bucket. Choices: "AES256", "aws:kms"
+aws_s3_bucket_block_public_acls: true # Sets BlockPublicAcls value.
+aws_s3_bucket_ignore_public_acls: true # Sets IgnorePublicAcls value.
+aws_s3_bucket_absent: false # Allows deleting the S3 bucket when removing cluster servers using the 'state=absent' variable.
+
+gcp_bucket_create: true # if 'cloud_provider=gcp'
+gcp_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the GCS bucket.
+gcp_bucket_storage_class: "MULTI_REGIONAL" # The bucket’s default storage class.
+gcp_bucket_default_object_acl: "projectPrivate" # Apply a predefined set of default object access controls to this bucket.
+gcp_bucket_absent: false # Allows deleting the GCS bucket when removing cluster servers using the 'state=absent' variable.
+
+azure_blob_storage_create: true # if 'cloud_provider=azure'
+azure_blob_storage_name: "{{ patroni_cluster_name }}-backup" # Name of a blob container within the storage account.
+azure_blob_storage_blob_type: "block" # Type of blob object. Values include: block, page.
+azure_blob_storage_account_name: "{{ patroni_cluster_name | lower | replace('-', '') | truncate(24, true, '') }}" # 3-24 characters; numbers and lower-case letters only.
+azure_blob_storage_account_type: "Standard_RAGRS" # Type of storage account.
+azure_blob_storage_account_kind: "BlobStorage" # The kind of storage. Values include: Storage, StorageV2, BlobStorage, BlockBlobStorage, FileStorage.
+azure_blob_storage_account_access_tier: "Hot" # The access tier for this storage account. Required when kind=BlobStorage.
+azure_blob_storage_account_public_network_access: "Enabled" # Allow public network access to the Storage Account to create the Blob Storage container.
+azure_blob_storage_account_allow_blob_public_access: false # Disallow public anonymous access.
+azure_blob_storage_absent: false # Allows deleting the Azure Blob Storage when removing cluster servers using the 'state=absent' variable.
+
+digital_ocean_spaces_create: true # if 'cloud_provider=digitalocean'
+digital_ocean_spaces_name: "{{ patroni_cluster_name }}-backup" # Name of the Spaces Object Storage (S3 bucket).
+digital_ocean_spaces_region: "nyc3" # The region to create the Space in.
+digital_ocean_spaces_absent: false # Allows deleting the Spaces Object Storage when removing cluster servers using the 'state=absent' variable.
+
+hetzner_object_storage_create: true # if 'cloud_provider=hetzner'
+hetzner_object_storage_name: "{{ patroni_cluster_name }}-backup" # Name of the Object Storage (S3 bucket).
+hetzner_object_storage_region: "{{ server_location }}" # The region where the Object Storage (S3 bucket) will be created.
+hetzner_object_storage_endpoint: "https://{{ hetzner_object_storage_region }}.your-objectstorage.com"
+hetzner_object_storage_access_key: "" # (required) Object Storage ACCESS KEY
+hetzner_object_storage_secret_key: "" # (required) Object Storage SECRET KEY
+hetzner_object_storage_absent: false # Allows deleting the Object Storage when removing cluster servers using the 'state=absent' variable.
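For reference, here is a minimal sketch of how these role defaults might be overridden from the inventory (for example, in `group_vars/all`). The variable names come from the defaults file above; the values are illustrative only, not part of this PR:

```yaml
# Hypothetical group_vars/all override (example values only).
backup_provider: "aws" # dispatches roles/backup/tasks/aws.yml (see main.yml below)
aws_s3_bucket_name: "my-cluster-backup" # must follow S3 bucket naming rules
aws_s3_bucket_region: "eu-central-1" # instead of the '{{ server_location }}' default
aws_s3_bucket_encryption: "aws:kms" # instead of the default "AES256"
```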
diff --git a/automation/roles/backup/tasks/aws.yml b/automation/roles/backup/tasks/aws.yml
new file mode 100644
index 0000000000..5d32f64143
--- /dev/null
+++ b/automation/roles/backup/tasks/aws.yml
@@ -0,0 +1,17 @@
+---
+# S3 bucket (Backups)
+- name: "AWS: Create S3 bucket '{{ aws_s3_bucket_name }}'"
+  amazon.aws.s3_bucket:
+    access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}"
+    secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}"
+    name: "{{ aws_s3_bucket_name }}"
+    region: "{{ aws_s3_bucket_region }}"
+    object_lock_enabled: "{{ aws_s3_bucket_object_lock_enabled }}"
+    encryption: "{{ aws_s3_bucket_encryption }}"
+    public_access:
+      block_public_acls: "{{ aws_s3_bucket_block_public_acls }}"
+      ignore_public_acls: "{{ aws_s3_bucket_ignore_public_acls }}"
+    state: present
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - aws_s3_bucket_create | bool
diff --git a/automation/roles/backup/tasks/azure.yml b/automation/roles/backup/tasks/azure.yml
new file mode 100644
index 0000000000..fc5510151b
--- /dev/null
+++ b/automation/roles/backup/tasks/azure.yml
@@ -0,0 +1,37 @@
+---
+# Azure Blob Storage (Backups)
+- block:
+    - name: "Azure: Create Storage Account '{{ azure_blob_storage_account_name }}'"
+      azure.azcollection.azure_rm_storageaccount:
+        resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
+        name: "{{ azure_blob_storage_account_name }}"
+        account_type: "{{ azure_blob_storage_account_type }}"
+        kind: "{{ azure_blob_storage_account_kind }}"
+        access_tier: "{{ azure_blob_storage_account_access_tier }}"
+        public_network_access: "{{ azure_blob_storage_account_public_network_access }}"
+        allow_blob_public_access: "{{ azure_blob_storage_account_allow_blob_public_access }}"
+        state: present
+
+    - name: "Azure: Get Storage Account info"
+      azure.azcollection.azure_rm_storageaccount_info:
+        resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
+        name: "{{ azure_blob_storage_account_name }}"
+        show_connection_string: true
+      no_log: true # do not output storage account contents to the ansible log
+      register: azure_storage_account_info
+
+    - name: "Set variable: azure_storage_account_key"
+      ansible.builtin.set_fact:
+        azure_storage_account_key: "{{ azure_storage_account_info.storageaccounts[0].primary_endpoints.key }}"
+      no_log: true # do not output storage account contents to the ansible log
+
+    - name: "Azure: Create Blob Storage container '{{ azure_blob_storage_name }}'"
+      azure.azcollection.azure_rm_storageblob:
+        resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
+        account_name: "{{ azure_blob_storage_account_name }}"
+        container: "{{ azure_blob_storage_name }}"
+        blob_type: "{{ azure_blob_storage_blob_type }}"
+        state: present
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - azure_blob_storage_create | bool
diff --git a/automation/roles/backup/tasks/digitalocean.yml b/automation/roles/backup/tasks/digitalocean.yml
new file mode 100644
index 0000000000..afecad1a97
--- /dev/null
+++ b/automation/roles/backup/tasks/digitalocean.yml
@@ -0,0 +1,13 @@
+---
+# Spaces Object Storage (Backups)
+- name: "DigitalOcean: Create Spaces Bucket '{{ digital_ocean_spaces_name }}'"
+  community.digitalocean.digital_ocean_spaces:
+    oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}"
+    name: "{{ digital_ocean_spaces_name }}"
+    region: "{{ digital_ocean_spaces_region }}"
+    aws_access_key_id: "{{ AWS_ACCESS_KEY_ID }}"
+    aws_secret_access_key: "{{ AWS_SECRET_ACCESS_KEY }}"
+    state: present
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - digital_ocean_spaces_create | bool
diff --git a/automation/roles/backup/tasks/gcp.yml b/automation/roles/backup/tasks/gcp.yml
new file mode 100644
index 0000000000..f09e8cd0a1
--- /dev/null
+++ b/automation/roles/backup/tasks/gcp.yml
@@ -0,0 +1,14 @@
+---
+# GCS Bucket (Backups)
+- name: "GCP: Create bucket '{{ gcp_bucket_name }}'"
+  google.cloud.gcp_storage_bucket:
+    auth_kind: "serviceaccount"
+    service_account_contents: "{{ gcp_service_account_contents }}"
+    project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}"
+    name: "{{ gcp_bucket_name }}"
+    storage_class: "{{ gcp_bucket_storage_class }}"
+    predefined_default_object_acl: "{{ gcp_bucket_default_object_acl }}"
+    state: present
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - gcp_bucket_create | bool
diff --git a/automation/roles/backup/tasks/hetzner.yml b/automation/roles/backup/tasks/hetzner.yml
new file mode 100644
index 0000000000..a1ece8b480
--- /dev/null
+++ b/automation/roles/backup/tasks/hetzner.yml
@@ -0,0 +1,115 @@
+---
+# Object Storage (S3 bucket for backups)
+- name: Ensure that the 'boto3' dependency is present on the controlling host
+  ansible.builtin.pip:
+    name: boto3
+    executable: pip3
+    extra_args: --user
+  become: false
+  vars:
+    ansible_become: false
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin"
+    PIP_BREAK_SYSTEM_PACKAGES: "1"
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - hetzner_object_storage_create | bool
+- name: "Hetzner Cloud: Create Object Storage (S3 bucket) '{{ hetzner_object_storage_name }}'"
+  amazon.aws.s3_bucket:
+    endpoint_url: "{{ hetzner_object_storage_endpoint }}"
+    ceph: true
+    aws_access_key: "{{ hetzner_object_storage_access_key }}"
+    aws_secret_key: "{{ hetzner_object_storage_secret_key }}"
+    name: "{{ hetzner_object_storage_name }}"
+    region: "{{ hetzner_object_storage_region }}"
+    requester_pays: false
+    state: present
+  register: s3_bucket_result
+  failed_when: s3_bucket_result.failed and not "GetBucketRequestPayment" in s3_bucket_result.msg
+  # TODO: https://github.com/ansible-collections/amazon.aws/issues/2447
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - hetzner_object_storage_create | bool
+    - hetzner_object_storage_access_key | length > 0
+    - hetzner_object_storage_secret_key | length > 0
+
+- name: "Hetzner Cloud: Delete Object Storage (S3 bucket) '{{ hetzner_object_storage_name }}'"
+  amazon.aws.s3_bucket:
+    endpoint_url: "{{ hetzner_object_storage_endpoint }}"
+    ceph: true
+    access_key: "{{ hetzner_object_storage_access_key }}"
+    secret_key: "{{ hetzner_object_storage_secret_key }}"
+    name: "{{ hetzner_object_storage_name }}"
+    region: "{{ hetzner_object_storage_region }}"
+    requester_pays: false
+    state: absent
+    force: true
+  when:
+    - (pgbackrest_install | bool or wal_g_install | bool)
+    - hetzner_object_storage_absent | bool
+    - hetzner_object_storage_access_key | length > 0
+    - hetzner_object_storage_secret_key | length > 0
+
+- name: "Set variable 'pgbackrest_conf' for backup in Hetzner Object Storage (S3 bucket)"
+  ansible.builtin.set_fact:
+    pgbackrest_conf:
+      global:
+        - { option: "log-level-file", value: "detail" }
+        - { option: "log-path", value: "/var/log/pgbackrest" }
+        - { option: "repo1-type", value: "s3" }
+        - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" }
+        - { option: "repo1-s3-key", value: "{{ PGBACKREST_S3_KEY | default(hetzner_object_storage_access_key | default('')) }}" }
+        - { option: "repo1-s3-key-secret", value: "{{ PGBACKREST_S3_KEY_SECRET | default(hetzner_object_storage_secret_key | default('')) }}" }
+        - { option: "repo1-s3-bucket", value: "{{ PGBACKREST_S3_BUCKET | default(hetzner_object_storage_name | default(patroni_cluster_name + '-backup')) }}" }
+        - { option: "repo1-s3-endpoint", value: "{{ PGBACKREST_S3_ENDPOINT | default(hetzner_object_storage_endpoint |
+          default('https://' + (hetzner_object_storage_region | default(server_location)) + '.your-objectstorage.com')) }}" }
+        - { option: "repo1-s3-region", value: "{{ PGBACKREST_S3_REGION | default(hetzner_object_storage_region | default(server_location)) }}" }
+        - { option: "repo1-s3-uri-style", value: "{{ PGBACKREST_S3_URI_STYLE | default('path') }}" }
+        - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" }
+        - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" }
+        - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" }
+        - { option: "repo1-bundle", value: "y" }
+        - { option: "repo1-block", value: "y" }
+        - { option: "start-fast", value: "y" }
+        - { option: "stop-auto", value: "y" }
+        - { option: "link-all", value: "y" }
+        - { option: "resume", value: "n" }
+        - { option: "archive-async", value: "y" }
+        - { option: "archive-get-queue-max", value: "1GiB" }
+        - { option: "spool-path", value: "/var/spool/pgbackrest" }
+        - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" }
+        - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" }
+      stanza:
+        - { option: "log-level-console", value: "info" }
+        - { option: "recovery-option", value: "recovery_target_action=promote" }
+        - { option: "pg1-path", value: "{{ postgresql_data_dir }}" }
+  # delegate_to: localhost
+  run_once: true # noqa run-once
+  no_log: true # do not output contents to the ansible log
+
+# WAL-G (if 'wal_g_install' is 'true')
+- name: "Set variable 'wal_g_json' for backup in Hetzner Object Storage (S3 bucket)"
+  ansible.builtin.set_fact:
+    wal_g_json:
+      - { option: "AWS_ACCESS_KEY_ID", value: "{{ WALG_AWS_ACCESS_KEY_ID | default(hetzner_object_storage_access_key | default('')) }}" }
+      - { option: "AWS_SECRET_ACCESS_KEY", value: "{{ WALG_AWS_SECRET_ACCESS_KEY | default(hetzner_object_storage_secret_key | default('')) }}" }
+      - { option: "AWS_ENDPOINT", value: "{{ WALG_S3_ENDPOINT | default(hetzner_object_storage_endpoint |
+        default('https://' + (hetzner_object_storage_region | default(server_location)) + '.your-objectstorage.com')) }}" }
+      - { option: "AWS_S3_FORCE_PATH_STYLE", value: "{{ AWS_S3_FORCE_PATH_STYLE | default(true) }}" }
+      - { option: "AWS_REGION", value: "{{ WALG_S3_REGION | default(hetzner_object_storage_region | default(server_location)) }}" }
+      - {
+          option: "WALG_S3_PREFIX",
+          value: "{{ WALG_S3_PREFIX | default('s3://' + (hetzner_object_storage_name | default(patroni_cluster_name + '-backup'))) }}",
+        }
+      - { option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" }
+      - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" }
+      - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" }
+      - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" }
+      - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" }
+      - { option: "PGDATA", value: "{{ postgresql_data_dir }}" }
+      - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" }
+      - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" }
+      - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" }
+  delegate_to: localhost
+  run_once: true # noqa run-once
+  no_log: true # do not output contents to the ansible log
diff --git a/automation/roles/backup/tasks/main.yml b/automation/roles/backup/tasks/main.yml
new file mode 100644
index 0000000000..a08fdce894
--- /dev/null
+++ b/automation/roles/backup/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Import tasks for aws
+  ansible.builtin.import_tasks: aws.yml
+  when: backup_provider == 'aws'
+  tags: pgbackrest, wal-g, wal_g
+
+- name: Import tasks for azure
+  ansible.builtin.import_tasks: azure.yml
+  when: backup_provider == 'azure'
+  tags: pgbackrest, wal-g, wal_g
+
+- name: Import tasks for digitalocean
+  ansible.builtin.import_tasks: digitalocean.yml
+  when: backup_provider == 'digitalocean'
+  tags: pgbackrest, wal-g, wal_g
+
+- name: Import tasks for gcp
+  ansible.builtin.import_tasks: gcp.yml
+  when: backup_provider == 'gcp'
+  tags: pgbackrest, wal-g, wal_g
+
+- name: Import tasks for hetzner
+  ansible.builtin.import_tasks: hetzner.yml
+  when: backup_provider == 'hetzner'
+  tags: pgbackrest, wal-g, wal_g
diff --git a/automation/roles/cloud_resources/defaults/main.yml b/automation/roles/cloud_resources/defaults/main.yml
index 9cf5ad95f6..3a83ca8866 100644
--- a/automation/roles/cloud_resources/defaults/main.yml
+++ b/automation/roles/cloud_resources/defaults/main.yml
@@ -32,43 +32,3 @@ database_public_allowed_ips: "" # (comma-separated list of IP addresses in CIDR
 
 # Load balancer
 cloud_load_balancer: true # Create a Load Balancer in the Cloud.
-
-# Backups (if 'pgbackrest_install' or 'wal_g_install' is 'true')
-aws_s3_bucket_create: true # if 'cloud_provider=aws'
-aws_s3_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the S3 bucket. Bucket naming rules: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
-aws_s3_bucket_region: "{{ server_location }}" # The AWS region to use.
-aws_s3_bucket_object_lock_enabled: false # Whether S3 Object Lock to be enabled.
-aws_s3_bucket_encryption: "AES256" # Describes the default server-side encryption to apply to new objects in the bucket. Choices: "AES256", "aws:kms"
-aws_s3_bucket_block_public_acls: true # Sets BlockPublicAcls value.
-aws_s3_bucket_ignore_public_acls: true # Sets IgnorePublicAcls value.
-aws_s3_bucket_absent: false # Allow to delete S3 bucket when deleting a cluster servers using the 'state=absent' variable.
-
-gcp_bucket_create: true # if 'cloud_provider=gcp'
-gcp_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the GCS bucket.
-gcp_bucket_storage_class: "MULTI_REGIONAL" # The bucket’s default storage class. Values include: MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, DURABLE_REDUCED_AVAILABILITY.
-gcp_bucket_default_object_acl: "projectPrivate" # Apply a predefined set of default object access controls to this bucket.
-gcp_bucket_absent: false # Allow to delete GCS bucket when deleting a cluster servers using the 'state=absent' variable.
-
-azure_blob_storage_create: true # if 'cloud_provider=azure'
-azure_blob_storage_name: "{{ patroni_cluster_name }}-backup" # Name of a blob container within the storage account.
-azure_blob_storage_blob_type: "block" # Type of blob object. Values include: block, page.
-azure_blob_storage_account_name: "{{ patroni_cluster_name | lower | replace('-', '') | truncate(24, true, '') }}" # Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
-azure_blob_storage_account_type: "Standard_RAGRS" # Type of storage account. Values include: Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_ZRS, Standard_RAGZRS, Standard_GZRS, Premium_LRS, Premium_ZRS.
-azure_blob_storage_account_kind: "BlobStorage" # The kind of storage. Values include: Storage, StorageV2, BlobStorage, BlockBlobStorage, FileStorage.
-azure_blob_storage_account_access_tier: "Hot" # The access tier for this storage account. Required when kind=BlobStorage.
-azure_blob_storage_account_public_network_access: "Enabled" # Allow public network access to Storage Account to create Blob Storage container.
-azure_blob_storage_account_allow_blob_public_access: false # Disallow public anonymous access.
-azure_blob_storage_absent: false # Allow to delete Azure Blob Storage when deleting a cluster servers using the 'state=absent' variable.
-
-digital_ocean_spaces_create: true # if 'cloud_provider=digitalocean'
-digital_ocean_spaces_name: "{{ patroni_cluster_name }}-backup" # Name of the Spaces Object Storage (S3 bucket).
-digital_ocean_spaces_region: "nyc3" # The region to create the Space in.
-digital_ocean_spaces_absent: false # Allow to delete Spaces Object Storage when deleting a cluster servers using the 'state=absent' variable.
-
-hetzner_object_storage_create: true # if 'cloud_provider=hetzner'
-hetzner_object_storage_name: "{{ patroni_cluster_name }}-backup" # Name of the Object Storage (S3 bucket).
-hetzner_object_storage_region: "{{ server_location }}" # The region where the Object Storage (S3 bucket) will be created.
-hetzner_object_storage_endpoint: "https://{{ hetzner_object_storage_region }}.your-objectstorage.com"
-hetzner_object_storage_access_key: "" # (required) Object Storage ACCESS KEY
-hetzner_object_storage_secret_key: "" # (required) Object Storage SECRET KEY
-hetzner_object_storage_absent: false # Allow to delete Object Storage when deleting a cluster servers using the 'state=absent' variable.
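With bucket creation moved out of `cloud_resources` and into the standalone `backup` role dispatched on `backup_provider` (see `automation/roles/backup/tasks/main.yml` above), the storage can now also be created without a full cluster deploy. A minimal sketch of such an invocation, assuming credentials are supplied the same way the tasks above expect them (e.g. the `DO_API_TOKEN` environment variable and `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` vars for DigitalOcean); the play and its values are hypothetical:

```yaml
# Hypothetical playbook: create only the backup storage, no cluster deploy.
- name: Create backup storage
  hosts: localhost
  gather_facts: false
  vars:
    backup_provider: "digitalocean" # selects roles/backup/tasks/digitalocean.yml
    wal_g_install: true # at least one of wal_g_install / pgbackrest_install must be true
    pgbackrest_install: false
    patroni_cluster_name: "postgres-cluster-01" # feeds the '{{ patroni_cluster_name }}-backup' default name
  roles:
    - role: backup
```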
diff --git a/automation/roles/cloud_resources/tasks/aws.yml b/automation/roles/cloud_resources/tasks/aws.yml
index 2ed5326a07..8821bf63c7 100644
--- a/automation/roles/cloud_resources/tasks/aws.yml
+++ b/automation/roles/cloud_resources/tasks/aws.yml
@@ -402,22 +402,6 @@
             (item == 'replica' and server_count | int > 1) or
             (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool))
 
-      # S3 bucket (Backups)
-      - name: "AWS: Create S3 bucket '{{ aws_s3_bucket_name }}'"
-        amazon.aws.s3_bucket:
-          access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}"
-          secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}"
-          name: "{{ aws_s3_bucket_name }}"
-          region: "{{ aws_s3_bucket_region }}"
-          object_lock_enabled: "{{ aws_s3_bucket_object_lock_enabled }}"
-          encryption: "{{ aws_s3_bucket_encryption }}"
-          public_access:
-            block_public_acls: "{{ aws_s3_bucket_block_public_acls }}"
-            ignore_public_acls: "{{ aws_s3_bucket_ignore_public_acls }}"
-          state: present
-        when:
-          - (pgbackrest_install | bool or wal_g_install | bool)
-          - aws_s3_bucket_create | bool
     when: state == 'present'
 
 - name: Wait for host to be available via SSH
diff --git a/automation/roles/cloud_resources/tasks/azure.yml b/automation/roles/cloud_resources/tasks/azure.yml
index 24db509570..178b31364f 100644
--- a/automation/roles/cloud_resources/tasks/azure.yml
+++ b/automation/roles/cloud_resources/tasks/azure.yml
@@ -400,42 +400,6 @@
             (item == 'replica' and server_count | int > 1) or
             (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool))
 
-      # Azure Blob Storage (Backups)
-      - block:
-          - name: "Azure: Create Storage Account '{{ azure_blob_storage_account_name }}'"
-            azure.azcollection.azure_rm_storageaccount:
-              resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
-              name: "{{ azure_blob_storage_account_name }}"
-              account_type: "{{ azure_blob_storage_account_type }}"
-              kind: "{{ azure_blob_storage_account_kind }}"
-              access_tier: "{{ azure_blob_storage_account_access_tier }}"
-              public_network_access: "{{ azure_blob_storage_account_public_network_access }}"
-              allow_blob_public_access: "{{ azure_blob_storage_account_allow_blob_public_access }}"
-              state: present
-
-          - name: "Azure: Get Storage Account info"
-            azure.azcollection.azure_rm_storageaccount_info:
-              resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
-              name: "{{ azure_blob_storage_account_name }}"
-              show_connection_string: true
-            no_log: true # do not output storage account contents to the ansible log
-            register: azure_storage_account_info
-
-          - name: "Set variable: azure_storage_account_key"
-            ansible.builtin.set_fact:
-              azure_storage_account_key: "{{ azure_storage_account_info.storageaccounts[0].primary_endpoints.key }}"
-            no_log: true # do not output storage account contents to the ansible log
-
-          - name: "Azure: Create Blob Storage container '{{ azure_blob_storage_name }}'"
-            azure.azcollection.azure_rm_storageblob:
-              resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}"
-              account_name: "{{ azure_blob_storage_account_name }}"
-              container: "{{ azure_blob_storage_name }}"
-              blob_type: "{{ azure_blob_storage_blob_type }}"
-              state: present
-        when:
-          - (pgbackrest_install | bool or wal_g_install | bool)
-          - azure_blob_storage_create | bool
     when: state == 'present'
 
 - name: "Wait for host to be available via SSH"
diff --git a/automation/roles/cloud_resources/tasks/digitalocean.yml b/automation/roles/cloud_resources/tasks/digitalocean.yml
index a778cfd54e..8f8222a352 100644
--- a/automation/roles/cloud_resources/tasks/digitalocean.yml
+++ b/automation/roles/cloud_resources/tasks/digitalocean.yml
@@ -601,18 +601,6 @@
         register: digitalocean_load_balancer
       when: cloud_load_balancer | bool
 
-    # Spaces Object Storage (Backups)
-    - name: "DigitalOcean: Create Spaces Bucket '{{ digital_ocean_spaces_name }}'"
-      community.digitalocean.digital_ocean_spaces:
-        oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}"
-        name: "{{ digital_ocean_spaces_name }}"
-        region: "{{ digital_ocean_spaces_region }}"
-        aws_access_key_id: "{{ AWS_ACCESS_KEY_ID }}"
-        aws_secret_access_key: "{{ AWS_SECRET_ACCESS_KEY }}"
-        state: present
-      when:
-        - (pgbackrest_install | bool or wal_g_install | bool)
-        - digital_ocean_spaces_create | bool
     when: state == 'present'
 
 - name: Wait for host to be available via SSH
diff --git a/automation/roles/cloud_resources/tasks/gcp.yml b/automation/roles/cloud_resources/tasks/gcp.yml
index ce5b8ba3d9..afee1bf067 100644
--- a/automation/roles/cloud_resources/tasks/gcp.yml
+++ b/automation/roles/cloud_resources/tasks/gcp.yml
@@ -474,19 +474,6 @@
             (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)
       when: cloud_load_balancer | bool
 
-    # GCS Bucket (Backups)
-    - name: "GCP: Create bucket '{{ gcp_bucket_name }}'"
-      google.cloud.gcp_storage_bucket:
-        auth_kind: "serviceaccount"
-        service_account_contents: "{{ gcp_service_account_contents }}"
-        project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}"
-        name: "{{ gcp_bucket_name }}"
-        storage_class: "{{ gcp_bucket_storage_class }}"
-        predefined_default_object_acl: "{{ gcp_bucket_default_object_acl }}"
-        state: present
-      when:
-        - (pgbackrest_install | bool or wal_g_install | bool)
-        - gcp_bucket_create | bool
     when: state == 'present'
 
 - name: Wait for host to be available via SSH
diff --git a/automation/roles/cloud_resources/tasks/hetzner.yml b/automation/roles/cloud_resources/tasks/hetzner.yml
index 6b8ad792aa..afb4086611 100644
--- a/automation/roles/cloud_resources/tasks/hetzner.yml
+++ b/automation/roles/cloud_resources/tasks/hetzner.yml
@@ -443,26 +443,6 @@
       when:
        - cloud_firewall | bool
 
-    # Object Storage (S3 bucket for backups)
-    - name: "Hetzner Cloud: Create Object Storage (S3 bucket) '{{ hetzner_object_storage_name }}'"
-      amazon.aws.s3_bucket:
-        endpoint_url: "{{ hetzner_object_storage_endpoint }}"
-        ceph: true
-        aws_access_key: "{{ hetzner_object_storage_access_key }}"
-        aws_secret_key: "{{ hetzner_object_storage_secret_key }}"
-        name: "{{ hetzner_object_storage_name }}"
-        region: "{{ hetzner_object_storage_region }}"
-        requester_pays: false
-        state: present
-      register: s3_bucket_result
-      failed_when: s3_bucket_result.failed and not "GetBucketRequestPayment" in s3_bucket_result.msg
-      # TODO: https://github.com/ansible-collections/amazon.aws/issues/2447
-      when:
-        - (pgbackrest_install | bool or wal_g_install | bool)
-        - hetzner_object_storage_create | bool
-        - hetzner_object_storage_access_key | length > 0
-        - hetzner_object_storage_secret_key | length > 0
-
     # Server and volume
     - name: "Hetzner Cloud: Create or modify server"
       hetzner.hcloud.server:
diff --git a/automation/roles/common/defaults/main.yml b/automation/roles/common/defaults/main.yml
index 78d76d946e..9e727b9f00 100644
--- a/automation/roles/common/defaults/main.yml
+++ b/automation/roles/common/defaults/main.yml
@@ -716,3 +716,7 @@ netdata_conf:
 # You can fine-tune retention for each tier by setting a time limit or size limit. Setting a limit to 0 disables it.
 # More options you can specify in the roles/netdata/templates/netdata.conf.j2
 # https://learn.netdata.cloud/docs/netdata-agent/configuration
+
+# Backup Provider
+# Options: aws, azure, digitalocean, gcp, hetzner
+backup_provider: ''
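One caveat worth noting: `backup_provider` defaults to an empty string, so none of the imports in `roles/backup/tasks/main.yml` match and the role is a no-op until the variable is set explicitly. A possible convenience wiring (an assumption, not something this PR implements) is to derive it from the existing `cloud_provider` variable:

```yaml
# Hypothetical convenience default, e.g. in group_vars/all:
# reuse the cloud provider as the backup provider unless explicitly overridden.
backup_provider: "{{ cloud_provider | default('') }}"
```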