---
apiVersion: postgresql.cnpg.io/v1

# Represents a PostgreSQL cluster made up of a single primary and an optional number of replicas
# that co-exist in the same Kubernetes namespace for High Availability and offloading of read-only
# queries.
kind: Cluster

metadata:
  name: main
  namespace: cloudnative-pg

spec:
  # Single instance: one primary, no replicas. Raise to add standby servers.
  instances: 1

  storage:
    # Full PVC template (instead of the shorthand `size`/`storageClass` fields)
    # so accessModes and volumeMode can be pinned explicitly.
    pvcTemplate:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      storageClassName: upcloud-block-storage-hdd
      volumeMode: Filesystem

  # Requests equal limits, giving each instance pod the Guaranteed QoS class.
  resources:
    requests:
      memory: 256Mi
      cpu: 200m
    limits:
      memory: 256Mi
      cpu: 200m

  # Let the operator restart/switch over the primary automatically during
  # rolling updates, without waiting for manual intervention.
  primaryUpdateStrategy: unsupervised

  postgresql:
    parameters:
      # Feature to manage the retention of WAL files on the primary server. When a standby server
      # connects to a primary server for replication, the primary server keeps track of the WAL
      # files that the standby server has successfully replicated. Replication slots (named
      # positions in the WAL stream) ensure that the primary server retains the necessary WAL files
      # until all standby servers have consumed them. This prevents the primary server from
      # removing WAL files that are still needed by the standby servers for replication, thus
      # avoiding potential data loss scenarios.
      max_replication_slots: "4"

      # WAL senders are PostgreSQL server processes responsible for streaming WAL data from the
      # primary server to standby servers for replication. When a standby server establishes a
      # streaming replication connection to the primary server, the primary server creates a WAL
      # sender process dedicated to streaming WAL data to that specific standby server.
      #
      # Determines how many maximum standby servers can simultaneously replicate from the primary
      # server.
      max_wal_senders: "4"

      # How much memory is dedicated to the PostgreSQL server for caching data.
      shared_buffers: "64MB"

  bootstrap:
    initdb:
      database: instagram_clone

      # Owner role for the application database; its password comes from the
      # referenced Secret rather than being stored in this manifest.
      owner: admin
      secret:
        name: admin-credentials

      # SQL executed once against the application database after initdb,
      # sourced from a Secret (here: creating the Debezium replication user).
      postInitApplicationSQLRefs:
        secretRefs:
          - name: main-cluster-init-sql
            key: create-debezium-user.sql

  # Enabling integration with Prometheus and Grafana.
  monitoring:
    enablePodMonitor: true