From 0f884e113a735e8e94db97258afdfa945ca10340 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?= Date: Thu, 8 Feb 2024 22:13:06 +0100 Subject: [PATCH 1/5] chore: add chainsaw e2e tests support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Charles-Edouard Brétéché --- .chainsaw.yaml | 13 + .github/workflows/test.yaml | 87 ++++ Makefile | 7 + testing/chainsaw/README.md | 92 ++++ .../01--non-crunchy-cluster.yaml | 193 ++++++++ .../e2e-other/cluster-migrate/01-assert.yaml | 8 + .../cluster-migrate/02--create-data.yaml | 30 ++ .../e2e-other/cluster-migrate/02-assert.yaml | 7 + .../e2e-other/cluster-migrate/04-errors.yaml | 4 + .../cluster-migrate/05--cluster.yaml | 30 ++ .../e2e-other/cluster-migrate/06-assert.yaml | 21 + .../e2e-other/cluster-migrate/README.md | 45 ++ .../cluster-migrate/chainsaw-test.yaml | 133 ++++++ .../10--cluster.yaml | 29 ++ .../10-assert.yaml | 36 ++ .../12-assert.yaml | 32 ++ .../14-errors.yaml | 42 ++ .../README.md | 7 + .../chainsaw-test.yaml | 59 +++ .../exporter-append-custom-queries/README.md | 5 + .../chainsaw-test.yaml | 62 +++ ...xporter-append-queries-cluster-checks.yaml | 29 ++ .../exporter-append-queries-cluster.yaml | 21 + .../exporter-append-queries-configmap.yaml | 6 + .../exporter-replica/chainsaw-test.yaml | 55 +++ .../exporter-replica-cluster-checks.yaml | 24 + .../files/exporter-replica-cluster.yaml | 19 + .../e2e-other/exporter-standby/README.md | 9 + .../exporter-standby/chainsaw-test.yaml | 102 ++++ .../exporter-standby/files/cluster-certs.yaml | 19 + .../files/primary-cluster-checks.yaml | 20 + .../files/primary-cluster.yaml | 22 + .../files/standby-cluster-checks.yaml | 21 + .../files/standby-cluster.yaml | 25 + .../files/update-primary-password-checks.yaml | 18 + .../files/update-primary-password.yaml | 11 + .../files/update-standby-password-checks.yaml | 18 + .../files/update-standby-password.yaml | 11 + 
.../exporter-upgrade/00--cluster.yaml | 30 ++ .../e2e-other/exporter-upgrade/00-assert.yaml | 10 + .../exporter-upgrade/02--update-cluster.yaml | 7 + .../e2e-other/exporter-upgrade/02-assert.yaml | 24 + .../e2e-other/exporter-upgrade/README.md | 31 ++ .../exporter-upgrade/chainsaw-test.yaml | 70 +++ .../chainsaw/e2e-other/gssapi/00-assert.yaml | 9 + .../chainsaw/e2e-other/gssapi/01-assert.yaml | 15 + .../chainsaw/e2e-other/gssapi/01-cluster.yaml | 41 ++ .../chainsaw/e2e-other/gssapi/02-assert.yaml | 6 + .../e2e-other/gssapi/02-psql-connect.yaml | 47 ++ testing/chainsaw/e2e-other/gssapi/README.md | 14 + .../e2e-other/gssapi/chainsaw-test.yaml | 35 ++ .../01--valid-upgrade.yaml | 11 + .../01-assert.yaml | 10 + .../10--cluster.yaml | 23 + .../10-assert.yaml | 12 + .../11--shutdown-cluster.yaml | 8 + .../11-assert.yaml | 11 + .../12--start-and-update-version.yaml | 17 + .../12-assert.yaml | 31 ++ .../13--shutdown-cluster.yaml | 8 + .../13-assert.yaml | 11 + .../14--annotate-cluster.yaml | 8 + .../14-assert.yaml | 22 + .../15--start-cluster.yaml | 10 + .../15-assert.yaml | 18 + .../17--check-version.yaml | 39 ++ .../17-assert.yaml | 7 + .../major-upgrade-missing-image/README.md | 36 ++ .../chainsaw-test.yaml | 61 +++ .../postgis-cluster/00--cluster.yaml | 26 + .../e2e-other/postgis-cluster/00-assert.yaml | 24 + .../postgis-cluster/01--psql-connect.yaml | 132 ++++++ .../e2e-other/postgis-cluster/01-assert.yaml | 6 + .../postgis-cluster/chainsaw-test.yaml | 20 + .../e2e-other/resize-volume/00-assert.yaml | 7 + .../e2e-other/resize-volume/01--cluster.yaml | 25 + .../e2e-other/resize-volume/01-assert.yaml | 59 +++ .../resize-volume/02--create-data.yaml | 31 ++ .../e2e-other/resize-volume/02-assert.yaml | 7 + .../e2e-other/resize-volume/03--resize.yaml | 25 + .../e2e-other/resize-volume/03-assert.yaml | 37 ++ .../resize-volume/06--check-data.yaml | 40 ++ .../e2e-other/resize-volume/06-assert.yaml | 7 + .../e2e-other/resize-volume/11--cluster.yaml | 25 + 
.../e2e-other/resize-volume/11-assert.yaml | 59 +++ .../e2e-other/resize-volume/13--resize.yaml | 25 + .../e2e-other/resize-volume/13-assert.yaml | 43 ++ .../resize-volume/chainsaw-test.yaml | 48 ++ .../e2e/cluster-pause/chainsaw-test.yaml | 44 ++ .../files/00-cluster-created.yaml | 23 + .../files/00-create-cluster.yaml | 25 + .../files/01-cluster-paused.yaml | 34 ++ .../cluster-pause/files/01-pause-cluster.yaml | 17 + .../files/02-cluster-resumed.yaml | 30 ++ .../files/02-resume-cluster.yaml | 6 + .../cluster-pause/files/chainsaw-test.yaml | 26 + .../e2e/cluster-start/chainsaw-test.yaml | 32 ++ .../files/00-cluster-created.yaml | 24 + .../files/00-create-cluster.yaml | 25 + .../cluster-start/files/01-connect-psql.yaml | 29 ++ .../files/01-psql-connected.yaml | 6 + .../cluster-start/files/chainsaw-test.yaml | 20 + .../e2e/delete-namespace/00--namespace.yaml | 5 + .../e2e/delete-namespace/01--cluster.yaml | 18 + .../e2e/delete-namespace/01-assert.yaml | 22 + .../e2e/delete-namespace/02-errors.yaml | 49 ++ .../chainsaw/e2e/delete-namespace/README.md | 11 + .../e2e/delete-namespace/chainsaw-test.yaml | 27 ++ testing/chainsaw/e2e/delete/00--cluster.yaml | 27 ++ testing/chainsaw/e2e/delete/00-assert.yaml | 20 + testing/chainsaw/e2e/delete/02-errors.yaml | 42 ++ testing/chainsaw/e2e/delete/10--cluster.yaml | 29 ++ testing/chainsaw/e2e/delete/10-assert.yaml | 36 ++ testing/chainsaw/e2e/delete/12-errors.yaml | 42 ++ testing/chainsaw/e2e/delete/20--cluster.yaml | 27 ++ testing/chainsaw/e2e/delete/20-errors.yaml | 10 + testing/chainsaw/e2e/delete/22-errors.yaml | 42 ++ testing/chainsaw/e2e/delete/README.md | 19 + .../chainsaw/e2e/delete/chainsaw-test.yaml | 59 +++ .../e2e/exporter-custom-queries/README.md | 3 + .../chainsaw-test.yaml | 100 ++++ ...xporter-custom-queries-cluster-checks.yaml | 31 ++ .../exporter-custom-queries-cluster.yaml | 21 + ...ustom-queries-configmap-update-checks.yaml | 6 + ...orter-custom-queries-configmap-update.yaml | 6 + 
.../exporter-custom-queries-configmap.yaml | 6 + .../e2e/exporter-no-tls/chainsaw-test.yaml | 56 +++ .../files/exporter-no-tls-cluster-checks.yaml | 24 + .../files/exporter-no-tls-cluster.yaml | 18 + .../e2e/exporter-password-change/README.md | 36 ++ .../chainsaw-test.yaml | 92 ++++ .../files/check-restarted-pod.yaml | 8 + .../files/initial-postgrescluster-checks.yaml | 33 ++ .../files/initial-postgrescluster.yaml | 18 + .../update-monitoring-password-checks.yaml | 16 + .../files/update-monitoring-password.yaml | 11 + .../e2e/exporter-tls/chainsaw-test.yaml | 59 +++ .../files/exporter-tls-certs.yaml | 12 + .../files/exporter-tls-cluster-checks.yaml | 29 ++ .../files/exporter-tls-cluster.yaml | 20 + .../01--valid-upgrade.yaml | 11 + .../01-assert.yaml | 10 + .../10--cluster.yaml | 23 + .../10-assert.yaml | 12 + .../11--shutdown-cluster.yaml | 8 + .../11-assert.yaml | 11 + .../12--start-and-update-version.yaml | 17 + .../12-assert.yaml | 31 ++ .../13--shutdown-cluster.yaml | 8 + .../13-assert.yaml | 11 + .../14--annotate-cluster.yaml | 8 + .../14-assert.yaml | 22 + .../15--start-cluster.yaml | 10 + .../15-assert.yaml | 18 + .../17--check-version.yaml | 39 ++ .../17-assert.yaml | 7 + .../e2e/major-upgrade-missing-image/README.md | 36 ++ .../chainsaw-test.yaml | 61 +++ .../major-upgrade/01--invalid-pgupgrade.yaml | 10 + .../chainsaw/e2e/major-upgrade/01-assert.yaml | 10 + .../e2e/major-upgrade/02--valid-upgrade.yaml | 10 + .../chainsaw/e2e/major-upgrade/02-assert.yaml | 10 + .../10--already-updated-cluster.yaml | 16 + .../chainsaw/e2e/major-upgrade/10-assert.yaml | 11 + .../e2e/major-upgrade/11-delete-cluster.yaml | 8 + .../20--cluster-with-invalid-version.yaml | 18 + .../chainsaw/e2e/major-upgrade/20-assert.yaml | 11 + .../e2e/major-upgrade/21-delete-cluster.yaml | 8 + .../e2e/major-upgrade/30--cluster.yaml | 22 + .../chainsaw/e2e/major-upgrade/30-assert.yaml | 31 ++ .../e2e/major-upgrade/31--create-data.yaml | 94 ++++ .../chainsaw/e2e/major-upgrade/31-assert.yaml | 
14 + .../major-upgrade/32--shutdown-cluster.yaml | 8 + .../chainsaw/e2e/major-upgrade/32-assert.yaml | 11 + .../major-upgrade/33--annotate-cluster.yaml | 8 + .../chainsaw/e2e/major-upgrade/33-assert.yaml | 22 + .../major-upgrade/34--restart-cluster.yaml | 10 + .../chainsaw/e2e/major-upgrade/34-assert.yaml | 18 + .../35-check-pgbackrest-and-replica.yaml | 11 + .../36--check-data-and-version.yaml | 108 +++++ .../chainsaw/e2e/major-upgrade/36-assert.yaml | 14 + .../e2e/password-change/00--cluster.yaml | 25 + .../e2e/password-change/00-assert.yaml | 15 + .../password-change/01--psql-connect-uri.yaml | 23 + .../e2e/password-change/01--psql-connect.yaml | 30 ++ .../e2e/password-change/01-assert.yaml | 13 + .../e2e/password-change/02--secret.yaml | 8 + .../e2e/password-change/02-errors.yaml | 10 + .../password-change/03--psql-connect-uri.yaml | 26 + .../e2e/password-change/03--psql-connect.yaml | 34 ++ .../e2e/password-change/03-assert.yaml | 13 + .../e2e/password-change/04--secret.yaml | 9 + .../e2e/password-change/04-errors.yaml | 10 + .../password-change/05--psql-connect-uri.yaml | 26 + .../e2e/password-change/05--psql-connect.yaml | 34 ++ .../e2e/password-change/05-assert.yaml | 13 + .../e2e/password-change/06--cluster.yaml | 10 + .../e2e/password-change/06-assert.yaml | 15 + .../password-change/07--psql-connect-uri.yaml | 23 + .../e2e/password-change/07--psql-connect.yaml | 30 ++ .../e2e/password-change/07-assert.yaml | 13 + .../e2e/password-change/08--secret.yaml | 8 + .../e2e/password-change/08-errors.yaml | 10 + .../password-change/09--psql-connect-uri.yaml | 26 + .../e2e/password-change/09--psql-connect.yaml | 34 ++ .../e2e/password-change/09-assert.yaml | 13 + .../e2e/password-change/10--secret.yaml | 9 + .../e2e/password-change/10-errors.yaml | 10 + .../password-change/11--psql-connect-uri.yaml | 26 + .../e2e/password-change/11--psql-connect.yaml | 34 ++ .../e2e/password-change/11-assert.yaml | 13 + .../chainsaw/e2e/password-change/README.md | 27 ++ 
.../e2e/password-change/chainsaw-test.yaml | 92 ++++ testing/chainsaw/e2e/pgadmin/01--cluster.yaml | 46 ++ testing/chainsaw/e2e/pgadmin/01-assert.yaml | 32 ++ .../chainsaw/e2e/pgadmin/chainsaw-test.yaml | 67 +++ .../e2e/pgbackrest-init/00--cluster.yaml | 38 ++ .../e2e/pgbackrest-init/00-assert.yaml | 68 +++ .../e2e/pgbackrest-init/02-assert.yaml | 10 + .../e2e/pgbackrest-init/04--cluster.yaml | 40 ++ .../e2e/pgbackrest-init/04-assert.yaml | 34 ++ .../chainsaw/e2e/pgbackrest-init/README.md | 6 + .../e2e/pgbackrest-init/chainsaw-test.yaml | 68 +++ .../01--create-cluster.yaml | 26 + .../e2e/pgbackrest-restore/01-assert.yaml | 12 + .../pgbackrest-restore/02--create-data.yaml | 32 ++ .../e2e/pgbackrest-restore/02-assert.yaml | 7 + .../e2e/pgbackrest-restore/03-assert.yaml | 13 + .../pgbackrest-restore/04--clone-cluster.yaml | 22 + .../e2e/pgbackrest-restore/04-assert.yaml | 12 + .../pgbackrest-restore/05--check-data.yaml | 49 ++ .../e2e/pgbackrest-restore/05-assert.yaml | 7 + .../07--update-cluster.yaml | 25 + .../e2e/pgbackrest-restore/09--add-data.yaml | 31 ++ .../e2e/pgbackrest-restore/09-assert.yaml | 7 + .../pgbackrest-restore/11--clone-cluster.yaml | 22 + .../e2e/pgbackrest-restore/11-assert.yaml | 12 + .../pgbackrest-restore/12--check-data.yaml | 51 ++ .../e2e/pgbackrest-restore/12-assert.yaml | 7 + .../e2e/pgbackrest-restore/15-assert.yaml | 16 + .../pgbackrest-restore/16--check-data.yaml | 100 ++++ .../e2e/pgbackrest-restore/16-assert.yaml | 15 + .../e2e/pgbackrest-restore/chainsaw-test.yaml | 260 ++++++++++ .../chainsaw/e2e/pgbouncer/00--cluster.yaml | 25 + testing/chainsaw/e2e/pgbouncer/00-assert.yaml | 15 + .../e2e/pgbouncer/01--psql-connect.yaml | 41 ++ testing/chainsaw/e2e/pgbouncer/01-assert.yaml | 6 + .../e2e/pgbouncer/10--read-certificate.yaml | 28 ++ testing/chainsaw/e2e/pgbouncer/10-assert.yaml | 8 + .../e2e/pgbouncer/11--open-connection.yaml | 43 ++ testing/chainsaw/e2e/pgbouncer/11-assert.yaml | 18 + .../e2e/pgbouncer/13--read-certificate.yaml | 28 
++ testing/chainsaw/e2e/pgbouncer/13-assert.yaml | 8 + .../chainsaw/e2e/pgbouncer/16--reconnect.yaml | 46 ++ testing/chainsaw/e2e/pgbouncer/16-assert.yaml | 8 + .../chainsaw/e2e/pgbouncer/chainsaw-test.yaml | 107 +++++ .../e2e/replica-read/00--cluster.yaml | 26 + .../chainsaw/e2e/replica-read/00-assert.yaml | 15 + .../replica-read/01--psql-replica-read.yaml | 44 ++ .../chainsaw/e2e/replica-read/01-assert.yaml | 6 + .../e2e/replica-read/chainsaw-test.yaml | 20 + .../e2e/root-cert-ownership/00--cluster.yaml | 35 ++ .../e2e/root-cert-ownership/00-assert.yaml | 26 + .../e2e/root-cert-ownership/02-assert.yaml | 9 + .../e2e/root-cert-ownership/02-errors.yaml | 4 + .../e2e/root-cert-ownership/04-errors.yaml | 9 + .../e2e/root-cert-ownership/README.md | 23 + .../root-cert-ownership/chainsaw-test.yaml | 93 ++++ .../e2e/scaledown/00--create-cluster.yaml | 32 ++ testing/chainsaw/e2e/scaledown/00-assert.yaml | 14 + .../e2e/scaledown/01--update-cluster.yaml | 14 + testing/chainsaw/e2e/scaledown/01-assert.yaml | 10 + .../e2e/scaledown/10--create-cluster.yaml | 26 + testing/chainsaw/e2e/scaledown/10-assert.yaml | 30 ++ .../e2e/scaledown/12--update-cluster.yaml | 15 + testing/chainsaw/e2e/scaledown/12-assert.yaml | 21 + .../e2e/scaledown/20--create-cluster.yaml | 33 ++ testing/chainsaw/e2e/scaledown/20-assert.yaml | 14 + .../e2e/scaledown/21--update-cluster.yaml | 21 + testing/chainsaw/e2e/scaledown/21-assert.yaml | 14 + .../chainsaw/e2e/scaledown/chainsaw-test.yaml | 70 +++ testing/chainsaw/e2e/scaledown/readme.MD | 31 ++ .../e2e/security-context/00--cluster.yaml | 26 + .../e2e/security-context/00-assert.yaml | 186 ++++++++ .../e2e/security-context/chainsaw-test.yaml | 71 +++ .../chainsaw/e2e/standalone-pgadmin/README.md | 49 ++ .../e2e/standalone-pgadmin/chainsaw-test.yaml | 445 ++++++++++++++++++ .../files/00-pgadmin-check.yaml | 42 ++ .../standalone-pgadmin/files/00-pgadmin.yaml | 12 + .../files/02-cluster-check.yaml | 6 + .../standalone-pgadmin/files/02-cluster.yaml | 17 + 
.../standalone-pgadmin/files/02-pgadmin.yaml | 17 + .../files/04-cluster-check.yaml | 6 + .../standalone-pgadmin/files/04-cluster.yaml | 17 + .../files/06-cluster-check.yaml | 6 + .../standalone-pgadmin/files/06-cluster.yaml | 17 + .../standalone-pgadmin/files/06-pgadmin.yaml | 20 + .../files/chainsaw-test.yaml | 36 ++ .../e2e/streaming-standby/00--secrets.yaml | 19 + .../01--primary-cluster.yaml | 19 + .../e2e/streaming-standby/01-assert.yaml | 16 + .../streaming-standby/02--create-data.yaml | 32 ++ .../e2e/streaming-standby/02-assert.yaml | 7 + .../03--standby-cluster.yaml | 22 + .../e2e/streaming-standby/03-assert.yaml | 16 + .../e2e/streaming-standby/04--check-data.yaml | 49 ++ .../e2e/streaming-standby/04-assert.yaml | 7 + .../chainsaw/e2e/streaming-standby/README.md | 9 + .../e2e/streaming-standby/chainsaw-test.yaml | 36 ++ .../chainsaw/e2e/switchover/01--cluster.yaml | 20 + .../chainsaw/e2e/switchover/01-assert.yaml | 27 ++ .../chainsaw/e2e/switchover/03-assert.yaml | 36 ++ .../e2e/switchover/chainsaw-test.yaml | 34 ++ .../e2e/tablespace-enabled/00--cluster.yaml | 52 ++ .../e2e/tablespace-enabled/00-assert.yaml | 24 + .../tablespace-enabled/01--psql-connect.yaml | 45 ++ .../e2e/tablespace-enabled/01-assert.yaml | 6 + .../chainsaw/e2e/tablespace-enabled/README.md | 6 + .../e2e/tablespace-enabled/chainsaw-test.yaml | 20 + .../00--create-resources.yaml | 28 ++ .../e2e/wal-pvc-pgupgrade/00-assert.yaml | 31 ++ .../wal-pvc-pgupgrade/01--create-data.yaml | 94 ++++ .../e2e/wal-pvc-pgupgrade/01-assert.yaml | 14 + .../02--shutdown-cluster.yaml | 8 + .../e2e/wal-pvc-pgupgrade/02-assert.yaml | 11 + .../03--annotate-cluster.yaml | 8 + .../e2e/wal-pvc-pgupgrade/03-assert.yaml | 22 + .../04--restart-cluster.yaml | 10 + .../e2e/wal-pvc-pgupgrade/04-assert.yaml | 18 + .../06--check-data-and-version.yaml | 108 +++++ .../e2e/wal-pvc-pgupgrade/06-assert.yaml | 14 + .../e2e/wal-pvc-pgupgrade/chainsaw-test.yaml | 54 +++ .../scripts/pgbackrest-initialization.sh | 24 + 333 files 
changed, 9729 insertions(+) create mode 100755 .chainsaw.yaml create mode 100644 testing/chainsaw/README.md create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/01-assert.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/02--create-data.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/02-assert.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/04-errors.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/05--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/06-assert.yaml create mode 100644 testing/chainsaw/e2e-other/cluster-migrate/README.md create mode 100755 testing/chainsaw/e2e-other/cluster-migrate/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml create mode 100644 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml create mode 100644 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml create mode 100644 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/README.md create mode 100755 testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-append-custom-queries/README.md create mode 100755 testing/chainsaw/e2e-other/exporter-append-custom-queries/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml create mode 
100755 testing/chainsaw/e2e-other/exporter-replica/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/README.md create mode 100755 testing/chainsaw/e2e-other/exporter-standby/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/cluster-certs.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password-checks.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-upgrade/00--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-upgrade/00-assert.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-upgrade/02--update-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-upgrade/02-assert.yaml create mode 100644 testing/chainsaw/e2e-other/exporter-upgrade/README.md create mode 100755 testing/chainsaw/e2e-other/exporter-upgrade/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/gssapi/00-assert.yaml create mode 100644 testing/chainsaw/e2e-other/gssapi/01-assert.yaml create mode 100644 testing/chainsaw/e2e-other/gssapi/01-cluster.yaml create mode 100644 
testing/chainsaw/e2e-other/gssapi/02-assert.yaml create mode 100644 testing/chainsaw/e2e-other/gssapi/02-psql-connect.yaml create mode 100644 testing/chainsaw/e2e-other/gssapi/README.md create mode 100755 testing/chainsaw/e2e-other/gssapi/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/01-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/10--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/10-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/11-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/12-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/13-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/14-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/15-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/17--check-version.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/17-assert.yaml create mode 100644 testing/chainsaw/e2e-other/major-upgrade-missing-image/README.md create mode 100755 testing/chainsaw/e2e-other/major-upgrade-missing-image/chainsaw-test.yaml create mode 100644 
testing/chainsaw/e2e-other/postgis-cluster/00--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/postgis-cluster/00-assert.yaml create mode 100644 testing/chainsaw/e2e-other/postgis-cluster/01--psql-connect.yaml create mode 100644 testing/chainsaw/e2e-other/postgis-cluster/01-assert.yaml create mode 100755 testing/chainsaw/e2e-other/postgis-cluster/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/00-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/01--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/01-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/02--create-data.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/02-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/03--resize.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/03-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/06--check-data.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/06-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/11--cluster.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/11-assert.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/13--resize.yaml create mode 100644 testing/chainsaw/e2e-other/resize-volume/13-assert.yaml create mode 100755 testing/chainsaw/e2e-other/resize-volume/chainsaw-test.yaml create mode 100755 testing/chainsaw/e2e/cluster-pause/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/cluster-pause/files/00-cluster-created.yaml create mode 100644 testing/chainsaw/e2e/cluster-pause/files/00-create-cluster.yaml create mode 100644 testing/chainsaw/e2e/cluster-pause/files/01-cluster-paused.yaml create mode 100644 testing/chainsaw/e2e/cluster-pause/files/01-pause-cluster.yaml create mode 100644 testing/chainsaw/e2e/cluster-pause/files/02-cluster-resumed.yaml create mode 100644 
testing/chainsaw/e2e/cluster-pause/files/02-resume-cluster.yaml create mode 100755 testing/chainsaw/e2e/cluster-pause/files/chainsaw-test.yaml create mode 100755 testing/chainsaw/e2e/cluster-start/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/cluster-start/files/00-cluster-created.yaml create mode 100644 testing/chainsaw/e2e/cluster-start/files/00-create-cluster.yaml create mode 100644 testing/chainsaw/e2e/cluster-start/files/01-connect-psql.yaml create mode 100644 testing/chainsaw/e2e/cluster-start/files/01-psql-connected.yaml create mode 100755 testing/chainsaw/e2e/cluster-start/files/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/delete-namespace/00--namespace.yaml create mode 100644 testing/chainsaw/e2e/delete-namespace/01--cluster.yaml create mode 100644 testing/chainsaw/e2e/delete-namespace/01-assert.yaml create mode 100644 testing/chainsaw/e2e/delete-namespace/02-errors.yaml create mode 100644 testing/chainsaw/e2e/delete-namespace/README.md create mode 100755 testing/chainsaw/e2e/delete-namespace/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/delete/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/delete/00-assert.yaml create mode 100644 testing/chainsaw/e2e/delete/02-errors.yaml create mode 100644 testing/chainsaw/e2e/delete/10--cluster.yaml create mode 100644 testing/chainsaw/e2e/delete/10-assert.yaml create mode 100644 testing/chainsaw/e2e/delete/12-errors.yaml create mode 100644 testing/chainsaw/e2e/delete/20--cluster.yaml create mode 100644 testing/chainsaw/e2e/delete/20-errors.yaml create mode 100644 testing/chainsaw/e2e/delete/22-errors.yaml create mode 100644 testing/chainsaw/e2e/delete/README.md create mode 100755 testing/chainsaw/e2e/delete/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/exporter-custom-queries/README.md create mode 100755 testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml create mode 100644 
testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml create mode 100644 testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml create mode 100644 testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml create mode 100755 testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/README.md create mode 100755 testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/files/check-restarted-pod.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password.yaml create mode 100755 testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/exporter-tls/files/exporter-tls-certs.yaml create mode 100644 testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml create mode 100644 testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/01-assert.yaml 
create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/10--cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/10-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/11-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/12-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/13-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/14-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/15--start-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/15-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/17--check-version.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/17-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade-missing-image/README.md create mode 100755 testing/chainsaw/e2e/major-upgrade-missing-image/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/01--invalid-pgupgrade.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/01-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/02--valid-upgrade.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/02-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/10--already-updated-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/10-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml create mode 100644 
testing/chainsaw/e2e/major-upgrade/20--cluster-with-invalid-version.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/20-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/21-delete-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/30--cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/30-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/31--create-data.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/31-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/32--shutdown-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/32-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/33--annotate-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/33-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/34--restart-cluster.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/34-assert.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/36--check-data-and-version.yaml create mode 100644 testing/chainsaw/e2e/major-upgrade/36-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/password-change/00-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/01--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/01--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/01-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/02--secret.yaml create mode 100644 testing/chainsaw/e2e/password-change/02-errors.yaml create mode 100644 testing/chainsaw/e2e/password-change/03--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/03--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/03-assert.yaml create mode 100644 
testing/chainsaw/e2e/password-change/04--secret.yaml create mode 100644 testing/chainsaw/e2e/password-change/04-errors.yaml create mode 100644 testing/chainsaw/e2e/password-change/05--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/05--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/05-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/06--cluster.yaml create mode 100644 testing/chainsaw/e2e/password-change/06-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/07--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/07--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/07-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/08--secret.yaml create mode 100644 testing/chainsaw/e2e/password-change/08-errors.yaml create mode 100644 testing/chainsaw/e2e/password-change/09--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/09--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/09-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/10--secret.yaml create mode 100644 testing/chainsaw/e2e/password-change/10-errors.yaml create mode 100644 testing/chainsaw/e2e/password-change/11--psql-connect-uri.yaml create mode 100644 testing/chainsaw/e2e/password-change/11--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/password-change/11-assert.yaml create mode 100644 testing/chainsaw/e2e/password-change/README.md create mode 100755 testing/chainsaw/e2e/password-change/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/pgadmin/01--cluster.yaml create mode 100644 testing/chainsaw/e2e/pgadmin/01-assert.yaml create mode 100755 testing/chainsaw/e2e/pgadmin/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-init/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-init/00-assert.yaml create mode 100644 
testing/chainsaw/e2e/pgbackrest-init/02-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-init/04--cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-init/04-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-init/README.md create mode 100755 testing/chainsaw/e2e/pgbackrest-init/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/01--create-cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/01-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/02--create-data.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/02-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/03-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/04--clone-cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/04-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/05--check-data.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/05-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/07--update-cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/09--add-data.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/09-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/11--clone-cluster.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/11-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/12--check-data.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/12-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/15-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/16--check-data.yaml create mode 100644 testing/chainsaw/e2e/pgbackrest-restore/16-assert.yaml create mode 100755 testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/00--cluster.yaml create mode 100644 
testing/chainsaw/e2e/pgbouncer/00-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/01--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/01-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/10--read-certificate.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/10-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/11--open-connection.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/11-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/13--read-certificate.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/13-assert.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/16--reconnect.yaml create mode 100644 testing/chainsaw/e2e/pgbouncer/16-assert.yaml create mode 100755 testing/chainsaw/e2e/pgbouncer/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/replica-read/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/replica-read/00-assert.yaml create mode 100644 testing/chainsaw/e2e/replica-read/01--psql-replica-read.yaml create mode 100644 testing/chainsaw/e2e/replica-read/01-assert.yaml create mode 100755 testing/chainsaw/e2e/replica-read/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/00-assert.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/02-assert.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/02-errors.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/04-errors.yaml create mode 100644 testing/chainsaw/e2e/root-cert-ownership/README.md create mode 100755 testing/chainsaw/e2e/root-cert-ownership/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/scaledown/00--create-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/00-assert.yaml create mode 100644 testing/chainsaw/e2e/scaledown/01--update-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/01-assert.yaml create mode 100644 
testing/chainsaw/e2e/scaledown/10--create-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/10-assert.yaml create mode 100644 testing/chainsaw/e2e/scaledown/12--update-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/12-assert.yaml create mode 100644 testing/chainsaw/e2e/scaledown/20--create-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/20-assert.yaml create mode 100644 testing/chainsaw/e2e/scaledown/21--update-cluster.yaml create mode 100644 testing/chainsaw/e2e/scaledown/21-assert.yaml create mode 100755 testing/chainsaw/e2e/scaledown/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/scaledown/readme.MD create mode 100644 testing/chainsaw/e2e/security-context/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/security-context/00-assert.yaml create mode 100755 testing/chainsaw/e2e/security-context/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/README.md create mode 100755 testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster-check.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/02-pgadmin.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster-check.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster-check.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster.yaml create mode 100644 testing/chainsaw/e2e/standalone-pgadmin/files/06-pgadmin.yaml create mode 100755 testing/chainsaw/e2e/standalone-pgadmin/files/chainsaw-test.yaml create mode 100644 
testing/chainsaw/e2e/streaming-standby/00--secrets.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/01--primary-cluster.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/01-assert.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/02--create-data.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/02-assert.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/03--standby-cluster.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/03-assert.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/04--check-data.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/04-assert.yaml create mode 100644 testing/chainsaw/e2e/streaming-standby/README.md create mode 100755 testing/chainsaw/e2e/streaming-standby/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/switchover/01--cluster.yaml create mode 100644 testing/chainsaw/e2e/switchover/01-assert.yaml create mode 100644 testing/chainsaw/e2e/switchover/03-assert.yaml create mode 100755 testing/chainsaw/e2e/switchover/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/tablespace-enabled/00--cluster.yaml create mode 100644 testing/chainsaw/e2e/tablespace-enabled/00-assert.yaml create mode 100644 testing/chainsaw/e2e/tablespace-enabled/01--psql-connect.yaml create mode 100644 testing/chainsaw/e2e/tablespace-enabled/01-assert.yaml create mode 100644 testing/chainsaw/e2e/tablespace-enabled/README.md create mode 100755 testing/chainsaw/e2e/tablespace-enabled/chainsaw-test.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/00--create-resources.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/00-assert.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/01--create-data.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/01-assert.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml create mode 100644 
testing/chainsaw/e2e/wal-pvc-pgupgrade/02-assert.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/03-assert.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/04-assert.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/06--check-data-and-version.yaml create mode 100644 testing/chainsaw/e2e/wal-pvc-pgupgrade/06-assert.yaml create mode 100755 testing/chainsaw/e2e/wal-pvc-pgupgrade/chainsaw-test.yaml create mode 100755 testing/chainsaw/scripts/pgbackrest-initialization.sh diff --git a/.chainsaw.yaml b/.chainsaw.yaml new file mode 100755 index 0000000000..6c635bf0ed --- /dev/null +++ b/.chainsaw.yaml @@ -0,0 +1,13 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Configuration +metadata: + name: configuration +spec: + parallel: 2 + timeouts: + assert: 5m0s + cleanup: 5m0s + delete: 5m0s + error: 5m0s + exec: 5m0s diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 34faf14a59..23d2921b80 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -170,6 +170,93 @@ jobs: - name: Stop PGO run: docker stop 'postgres-operator' || true + chainsaw-k3d: + runs-on: ubuntu-20.04 + needs: [go-test] + strategy: + fail-fast: false + matrix: + kubernetes: [v1.28, v1.27, v1.26, v1.25] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: { go-version: 1.x } + + - name: Start k3s + uses: ./.github/actions/k3d + with: + k3s-channel: "${{ matrix.kubernetes }}" + prefetch-images: | + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-19 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-2 + 
registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.21-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.10-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.10-3.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.5-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.5-3.3-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.1-3.3-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.1-3.4-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-7.8-0 + - run: go mod download + - name: Build executable + run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator + + - name: Get pgMonitor files. + run: make get-pgmonitor + env: + PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + + # Start a Docker container with the working directory mounted. 
+ - name: Start PGO + run: | + kubectl apply --server-side -k ./config/namespace + kubectl apply --server-side -k ./config/dev + hack/create-kubeconfig.sh postgres-operator pgo + docker run --detach --network host --read-only \ + --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ + --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ + --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-19' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-2' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.21-0' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest' \ + --env 'RELATED_IMAGE_POSTGRES_14=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.10-0' \ + --env 'RELATED_IMAGE_POSTGRES_14_GIS_3.1=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.10-3.1-0' \ + --env 'RELATED_IMAGE_POSTGRES_15=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.5-0' \ + --env 'RELATED_IMAGE_POSTGRES_15_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.5-3.3-0' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.1-3.3-0' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.1-3.4-0' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-7.8-0' \ + --env 
'PGO_FEATURE_GATES=TablespaceVolumes=true' \ + --name 'postgres-operator' ubuntu \ + postgres-operator + + - name: Install chainsaw + uses: kyverno/action-install-chainsaw@07b6c986572f2abaf6647c85d37cbecfddc4a6ab # v0.1.3 + + # - run: make generate-kuttl + # env: + # KUTTL_PG_UPGRADE_FROM_VERSION: '15' + # KUTTL_PG_UPGRADE_TO_VERSION: '16' + # KUTTL_PG_VERSION: '15' + # KUTTL_POSTGIS_VERSION: '3.4' + # KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' + + - run: | + make check-chainsaw && exit + failed=$? + echo '::group::PGO logs'; docker logs 'postgres-operator'; echo '::endgroup::' + exit $failed + + - name: Stop PGO + run: docker stop 'postgres-operator' || true + coverage-report: if: ${{ success() || contains(needs.*.result, 'success') }} runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index d71e002a1d..dc118d9a3a 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,7 @@ GO_CMD = $(GO_ENV) $(GO) GO_TEST ?= $(GO) test KUTTL ?= kubectl-kuttl KUTTL_TEST ?= $(KUTTL) test +CHAINSAW ?= chainsaw # Disable optimizations if creating a debug build ifeq ("$(DEBUG_BUILD)", "true") @@ -214,6 +215,12 @@ check-envtest-existing: createnamespaces $(GO_TEST) -count=1 -cover -p=1 -tags=envtest ./... 
kubectl delete -k ./config/dev +# Expects operator to be running +.PHONY: check-chainsaw +check-chainsaw: ## Run chainsaw end-to-end tests +check-chainsaw: + $(CHAINSAW) test --test-dir ./testing/chainsaw + # Expects operator to be running .PHONY: check-kuttl check-kuttl: ## Run kuttl end-to-end tests diff --git a/testing/chainsaw/README.md b/testing/chainsaw/README.md new file mode 100644 index 0000000000..555ce9a26d --- /dev/null +++ b/testing/chainsaw/README.md @@ -0,0 +1,92 @@ +# KUTTL + +## Installing + +Docs for install: https://kuttl.dev/docs/cli.html#setup-the-kuttl-kubectl-plugin + +Options: + - Download and install the binary + - Install the `kubectl krew` [plugin manager](https://github.com/kubernetes-sigs/krew) + and `kubectl krew install kuttl` + +## Cheat sheet + +### Suppressing Noisy Logs + +KUTTL gives you the option to suppress events from the test logging output. To enable this feature +update the `kuttl` parameter when calling the `make` target + +``` +KUTTL_TEST='kuttl test --suppress-log=events' make check-kuttl +``` + +To suppress the events permanently, you can add the following to the KUTTL config (kuttl-test.yaml) +``` +suppress: +- events +``` + +### Run test suite + +Make sure that the operator is running in your Kubernetes environment and that your `kubeconfig` is +set up. Then run the make targets: + +``` +make generate-kuttl check-kuttl +``` + +### Running a single test + +A single test is considered to be one directory under `kuttl/e2e-generated`, for example +`kuttl/e2e-generated/restore` is the `restore` test. + +There are two ways to run a single test in isolation: +- using an env var with the make target: `KUTTL_TEST='kuttl test --test ' make check-kuttl` +- using `kubectl kuttl --test` flag: `kubectl kuttl test testing/kuttl/e2e-generated --test ` + +### Writing additional tests + +To make it easier to read tests, we want to put our `assert.yaml`/`errors.yaml` files after the +files that create/update the objects for a step. 
To achieve this, infix an extra `-` between the +step number and the object/step name. + +For example, if the `00` test step wants to create a cluster and then assert that the cluster is ready, +the files would be named + +```yaml +00--cluster.yaml # note the extra `-` to ensure that it sorts above the following file +00-assert.yaml +``` + +### Generating tests + +KUTTL is good at setting up K8s objects for testing, but does not have a native way to dynamically +change those K8s objects before applying them. That means that, if we wanted to write a cluster +connection test for PG 13 and PG 14, we would end up writing two nearly identical tests. + +Rather than write those multiple tests, we are using `envsubst` to replace some common variables +and writing those files to the `testing/kuttl/e2e-generated*` directories. + +These templated test files can be generated by setting some variables in the command line and +calling the `make generate-kuttl` target: + +```console +KUTTL_PG_VERSION=13 KUTTL_POSTGIS_VERSION=3.0 make generate-kuttl +``` + +This will loop through the files under the `e2e` and `e2e-other` directories and create matching +files under the `e2e-generated` and `e2e-generated-other` directories that can be checked for +correctness before running the tests. + +Please note, `make check-kuttl` does not run the `e2e-other` tests. To run the `postgis-cluster` +test, you can use: + +``` +kubectl kuttl test testing/kuttl/e2e-generated-other/ --timeout=180 --test postgis-cluster +``` + +To run the `gssapi` test, please see testing/kuttl/e2e-other/gssapi/README.md. + +To prevent errors, we want to set defaults for all the environment variables used in the source +YAML files; so if you add a new test with a new variable, please update the Makefile with a +reasonable/preferred default. 
diff --git a/testing/chainsaw/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml b/testing/chainsaw/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml new file mode 100644 index 0000000000..1ccceb7098 --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml @@ -0,0 +1,193 @@ +apiVersion: v1 +kind: Secret +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster +type: Opaque +stringData: + postgres-password: "SR6kNAFXvX" +--- +apiVersion: v1 +kind: Service +metadata: + name: non-crunchy-cluster-hl + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +spec: + type: ClusterIP + sessionAffinity: None + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + nodePort: null + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +spec: + replicas: 1 + serviceName: 
non-crunchy-cluster-hl + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + selector: + matchLabels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + template: + metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + spec: + serviceAccountName: default + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + namespaces: + - "default" + topologyKey: kubernetes.io/hostname + weight: 1 + securityContext: + fsGroup: 1001 + hostNetwork: false + hostIPC: false + containers: + - name: postgresql + image: docker.io/bitnami/postgresql:${KUTTL_BITNAMI_IMAGE_TAG} + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "false" + - name: POSTGRESQL_PORT_NUMBER + value: "5432" + - name: POSTGRESQL_VOLUME_DIR + value: "/bitnami/postgresql" + - name: PGDATA + value: "/bitnami/postgresql/data" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: non-crunchy-cluster + key: postgres-password + - name: POSTGRESQL_ENABLE_LDAP + value: "no" + - name: POSTGRESQL_ENABLE_TLS + value: "no" + - name: POSTGRESQL_LOG_HOSTNAME + value: "false" + - name: POSTGRESQL_LOG_CONNECTIONS + value: "false" + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: "false" + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: "off" + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: "error" + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: "pgaudit" + ports: + - name: tcp-postgresql + containerPort: 5432 + livenessProbe: + failureThreshold: 6 + 
initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + exec: + command: + - /bin/sh + - -c + - exec pg_isready -U "postgres" -h localhost -p 5432 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + exec: + command: + - /bin/sh + - -c + - -e + - | + exec pg_isready -U "postgres" -h localhost -p 5432 + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] + resources: + limits: {} + requests: + cpu: 250m + memory: 256Mi + volumeMounts: + - name: dshm + mountPath: /dev/shm + - name: data + mountPath: /bitnami/postgresql + volumes: + - name: dshm + emptyDir: + medium: Memory + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "1Gi" diff --git a/testing/chainsaw/e2e-other/cluster-migrate/01-assert.yaml b/testing/chainsaw/e2e-other/cluster-migrate/01-assert.yaml new file mode 100644 index 0000000000..c45fe79261 --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/01-assert.yaml @@ -0,0 +1,8 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: non-crunchy-cluster +status: + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e-other/cluster-migrate/02--create-data.yaml b/testing/chainsaw/e2e-other/cluster-migrate/02--create-data.yaml new file mode 100644 index 0000000000..a9b7ebf152 --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/02--create-data.yaml @@ -0,0 +1,30 @@ +--- +# Create some data that will be preserved after migration. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: original-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - { name: PGHOST, value: "non-crunchy-cluster" } + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + - { name: PGPASSWORD, valueFrom: { secretKeyRef: { name: non-crunchy-cluster, key: postgres-password } } } + command: + - psql + - --username=postgres + - --dbname=postgres + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE TABLE IF NOT EXISTS important (data) AS VALUES ('treasure'); diff --git a/testing/chainsaw/e2e-other/cluster-migrate/02-assert.yaml b/testing/chainsaw/e2e-other/cluster-migrate/02-assert.yaml new file mode 100644 index 0000000000..5115ba97c9 --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/cluster-migrate/04-errors.yaml b/testing/chainsaw/e2e-other/cluster-migrate/04-errors.yaml new file mode 100644 index 0000000000..1767e8040f --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/04-errors.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Pod +metadata: + name: non-crunchy-cluster-0 diff --git a/testing/chainsaw/e2e-other/cluster-migrate/05--cluster.yaml b/testing/chainsaw/e2e-other/cluster-migrate/05--cluster.yaml new file mode 100644 index 0000000000..a81666ed01 --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/05--cluster.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-migrate +spec: + dataSource: + volumes: + pgDataVolume: + pvcName: data-non-crunchy-cluster-0 + directory: data + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 
+ dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/cluster-migrate/06-assert.yaml b/testing/chainsaw/e2e-other/cluster-migrate/06-assert.yaml new file mode 100644 index 0000000000..1a25966abb --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/06-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-migrate +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: cluster-migrate + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running diff --git a/testing/chainsaw/e2e-other/cluster-migrate/README.md b/testing/chainsaw/e2e-other/cluster-migrate/README.md new file mode 100644 index 0000000000..b2becc9ffb --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/README.md @@ -0,0 +1,45 @@ +## Cluster Migrate + +This test was developed to check that users could bypass some known problems when +migrating from a non-Crunchy PostgreSQL image to a Crunchy PostgreSQL image: + +1) it changes the ownership of the data directory (which depends on fsGroup +behavior to change group ownership which is not available in all providers); +2) it makes sure a postgresql.conf file is available, as required by Patroni. + +Important note on *environment*: +As noted above, this work relies on fsGroup, so this test will not work in the current +form in all environments. For instance, this creates a PG cluster with fsGroup set, +which will result in an error in OpenShift. 
+ +Important note on *PV permissions*: +This test involves changing permissions on PersistentVolumes, which may not be available +in all environments to all users (since this is a cluster-wide permission). + +Important note on migrating between different builds of *Postgres 15*: +PG 15 introduced new behavior around database collation versions, which result in errors like: + +``` +WARNING: database \"postgres\" has a collation version mismatch +DETAIL: The database was created using collation version 2.31, but the operating system provides version 2.28 +``` + +This error occurred in `reconcilePostgresDatabases` and prevented PGO from finishing the reconcile +loop. For _testing purposes_, this problem is worked around in steps 06 and 07, which wait for +the PG pod to be ready and then send a command to `REFRESH COLLATION VERSION` on the `postgres` +and `template1` databases (which were the only databases where this error was observed during +testing). + +This solution is fine for testing purposes, but is not a solution that should be done in production +as an automatic step. User intervention and supervision are recommended in that case. 
+ +### Steps + +* 01: Create a non-Crunchy PostgreSQL cluster and wait for it to be ready +* 02: Create data on that cluster +* 03: Alter the Reclaim policy of the PV so that it will survive deletion of the cluster +* 04: Delete the original cluster, leaving the PV +* 05: Create a PGO-managed `postgrescluster` with the remaining PV as the datasource +* 06-07: Wait for the PG pod to be ready and alter the collation (PG 15 only, see above) +* 08: Alter the PV to the original Reclaim policy +* 09: Check that the data successfully migrated diff --git a/testing/chainsaw/e2e-other/cluster-migrate/chainsaw-test.yaml b/testing/chainsaw/e2e-other/cluster-migrate/chainsaw-test.yaml new file mode 100755 index 0000000000..770075407b --- /dev/null +++ b/testing/chainsaw/e2e-other/cluster-migrate/chainsaw-test.yaml @@ -0,0 +1,133 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: cluster-migrate +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--non-crunchy-cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--create-data.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - script: + content: | + set -e + VOLUME_NAME=$( + kubectl get pvc --namespace "${NAMESPACE}" \ + --output=jsonpath={.items..spec.volumeName} + ) + + ORIGINAL_POLICY=$( + kubectl get pv "${VOLUME_NAME}" \ + --output=jsonpath={.spec.persistentVolumeReclaimPolicy} + ) + + kubectl create configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ + --from-literal=ORIGINAL_POLICY="${ORIGINAL_POLICY}" \ + --from-literal=VOLUME_NAME="${VOLUME_NAME}" + + kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + + kubectl label pv "${VOLUME_NAME}" postgres-operator-test=kuttl app.kubernetes.io/name=postgresql 
app.kubernetes.io/instance=non-crunchy-cluster test-namespace="${NAMESPACE}" + - name: step-04 + try: + - delete: + ref: + apiVersion: apps/v1 + kind: StatefulSet + name: non-crunchy-cluster + - delete: + ref: + apiVersion: v1 + kind: Service + name: non-crunchy-cluster + - delete: + ref: + apiVersion: v1 + kind: Service + name: non-crunchy-cluster-hl + - delete: + ref: + apiVersion: v1 + kind: Secret + name: non-crunchy-cluster + - error: + file: 04-errors.yaml + - name: step-05 + try: + - apply: + file: 05--cluster.yaml + - name: step-06 + try: + - assert: + file: 06-assert.yaml + - name: step-07 + try: + - script: + content: | + set -e + if [[ ${KUTTL_PG_VERSION} -ge 15 ]]; then + PRIMARY= + while [[ -z "${PRIMARY}" ]]; do + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=cluster-migrate, + postgres-operator.crunchydata.com/role=master' + ) + done + + # Ignore warnings about collation changes. This is DANGEROUS on real data! + # Only do this automatic step in test conditions; with real data, this may cause + # more problems as you may need to reindex. 
+ kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c database \ + -- psql -qAt --command \ + 'ALTER DATABASE postgres REFRESH COLLATION VERSION; ALTER DATABASE template1 REFRESH COLLATION VERSION;' + fi + - name: step-08 + try: + - script: + content: | + set -e + SAVED_DATA=$( + kubectl get configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ + --output=jsonpath="{.data..['ORIGINAL_POLICY','VOLUME_NAME']}" + ) + + IFS=' ' + read ORIGINAL_POLICY VOLUME_NAME <<< "${SAVED_DATA}" + + kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"'${ORIGINAL_POLICY}'"}}' + - name: step-09 + try: + - script: + content: | + set -e + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=cluster-migrate, + postgres-operator.crunchydata.com/role=master' + ) + + TREASURE=$( + kubectl exec "${PRIMARY}" --namespace "${NAMESPACE}" \ + --container database \ + -- psql -U postgres -qt -c "select data from important" + ) + + if [[ "${TREASURE}" != " treasure" ]]; then + echo "Migration from 3rd-party PG pod failed, result from query: ${TREASURE}" + exit 1 + fi diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml new file mode 100644 index 0000000000..a3236da358 --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + switchover: + enabled: true + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - 
"ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml new file mode 100644 index 0000000000..d77e27e307 --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml new file mode 100644 index 0000000000..76f0f8dff6 --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml @@ -0,0 +1,32 @@ +--- +# Wait for switchover to finish. A former replica should now be the primary. 
+apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/role: master + testing/role-before: replica +--- +# The former primary should now be a replica. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/role: replica + testing/role-before: master +--- +# All instances should be healthy. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +status: + instances: + - name: instance1 + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml new file mode 100644 index 0000000000..2a1015824b --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/README.md b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/README.md new file mode 100644 index 0000000000..bf914aa6cf --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/README.md @@ -0,0 +1,7 @@ +This test originally existed as the second test-case in the `delete` KUTTL test. +The test as written was prone to occasional flakes, sometimes due to missing events +(which were being used to check the timestamp of the container delete event). + +After discussion, we decided that this behavior (replica deleting before the primary) +was no longer required in v5, and the decision was made to sequester this test-case for +further testing and refinement. 
\ No newline at end of file diff --git a/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/chainsaw-test.yaml b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/chainsaw-test.yaml new file mode 100755 index 0000000000..b8829e50d1 --- /dev/null +++ b/testing/chainsaw/e2e-other/delete-with-replica-and-check-timestamps/chainsaw-test.yaml @@ -0,0 +1,59 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: delete-with-replica-and-check-timestamps +spec: + steps: + - name: step-10 + try: + - apply: + file: 10--cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=master' \ + 'testing/role-before=master' + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + 'testing/role-before=replica' + - script: + content: | + kubectl annotate --namespace="${NAMESPACE}" postgrescluster/delete-switchover-with-timestamp \ + "postgres-operator.crunchydata.com/trigger-switchover=$(date)" + - name: step-12 + try: + - assert: + file: 12-assert.yaml + - name: step-13 + try: + - script: + content: "PRIMARY=$(\n kubectl get pods --namespace=\"${NAMESPACE}\" \\\n + \ --selector='postgres-operator.crunchydata.com/role=master' \\\n --output=jsonpath={.items..metadata.name}\n)\n\nREPLICA=$(\n + \ kubectl get pods --namespace=\"${NAMESPACE}\" \\\n --selector='postgres-operator.crunchydata.com/role=replica' + \\\n --output=jsonpath={.items..metadata.name}\n)\n\necho \"DELETE: Found + primary ${PRIMARY} and replica ${REPLICA} pods\"\n\nif [ -z \"$PRIMARY\" + ]; then exit 1; fi \nif [ -z \"$REPLICA\" ]; then exit 1; fi\n\nkubectl + delete postgrescluster -n 
\"${NAMESPACE}\" delete-switchover-with-timestamp\n\nkubectl + wait \"pod/${REPLICA}\" --namespace \"${NAMESPACE}\" --for=delete --timeout=180s\n\nKILLING_REPLICA_TIMESTAMP=$(\n + \ kubectl get events --namespace=\"${NAMESPACE}\" \\\n --field-selector + reason=\"Killing\",involvedObject.fieldPath=\"spec.containers{database}\",involvedObject.name=\"${REPLICA}\" + \\\n --output=jsonpath={.items..firstTimestamp}\n)\n\nkubectl wait \"pod/${PRIMARY}\" + --namespace \"${NAMESPACE}\" --for=delete --timeout=180s\n\nKILLING_PRIMARY_TIMESTAMP=$(\n + \ kubectl get events --namespace=\"${NAMESPACE}\" \\\n --field-selector + reason=\"Killing\",involvedObject.fieldPath=\"spec.containers{database}\",involvedObject.name=\"${PRIMARY}\" + \\\n --output=jsonpath={.items..firstTimestamp}\n)\n\necho \"DELETE: + Found primary ${KILLING_PRIMARY_TIMESTAMP} and replica ${KILLING_REPLICA_TIMESTAMP} + timestamps\"\n\nif [[ \"${KILLING_PRIMARY_TIMESTAMP}\" < \"${KILLING_REPLICA_TIMESTAMP}\" + ]]; then exit 1; fi\n" + - name: step-14 + try: + - error: + file: 14-errors.yaml diff --git a/testing/chainsaw/e2e-other/exporter-append-custom-queries/README.md b/testing/chainsaw/e2e-other/exporter-append-custom-queries/README.md new file mode 100644 index 0000000000..a24aa444c7 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-append-custom-queries/README.md @@ -0,0 +1,5 @@ +Exporter - AppendCustomQueries Enabled + +Note: This series of tests depends on PGO being deployed with the AppendCustomQueries feature gate ON. There is a separate set of tests in e2e that tests exporter functionality without the AppendCustomQueries feature. + +When running this test, make sure that the PGO_FEATURE_GATES environment variable is set to "AppendCustomQueries=true" on the PGO Deployment. 
diff --git a/testing/chainsaw/e2e-other/exporter-append-custom-queries/chainsaw-test.yaml b/testing/chainsaw/e2e-other/exporter-append-custom-queries/chainsaw-test.yaml new file mode 100755 index 0000000000..e1d830489f --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-append-custom-queries/chainsaw-test.yaml @@ -0,0 +1,62 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-append-custom-queries +spec: + steps: + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-append-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + queries_files=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- ls /conf + ) + + { + contains "${queries_files}" "queries.yml" && + contains "${queries_files}" "defaultQueries.yml" + } || { + echo >&2 'The /conf directory should contain queries.yml and defaultQueries.yml. Instead it has:' + echo "${queries_files}" + exit 1 + } + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a test." 
&& + contains "${master_queries_contents}" "ccp_postgresql_version" + } || { + echo >&2 'The master queries.yml file should contain the contents of both defaultQueries.yml and the custom queries.yml file. Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } + name: step-00 + try: + - apply: + file: files/exporter-append-queries-configmap.yaml + - apply: + file: files/exporter-append-queries-cluster.yaml + - assert: + file: files/exporter-append-queries-cluster-checks.yaml diff --git a/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml new file mode 100644 index 0000000000..459356ddfc --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-append-queries +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-append-queries + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-append-queries-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test diff --git a/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml new file mode 100644 index 0000000000..c4f75771aa --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 
+kind: PostgresCluster +metadata: + name: exporter-append-queries +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + configuration: + - configMap: + name: custom-queries-test diff --git a/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml new file mode 100644 index 0000000000..9964d6bc1e --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." 
diff --git a/testing/chainsaw/e2e-other/exporter-replica/chainsaw-test.yaml b/testing/chainsaw/e2e-other/exporter-replica/chainsaw-test.yaml new file mode 100755 index 0000000000..1b9de7af88 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-replica/chainsaw-test.yaml @@ -0,0 +1,55 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-replica +spec: + steps: + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + replica=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-replica \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true \ + -l postgres-operator.crunchydata.com/role=replica) + [ "$replica" = "" ] && retry "Replica Pod not found" && exit 1 + + replica_condition_json=$(kubectl get "${replica}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$replica_condition_json" = "" ] && retry "Replica conditions not found" && exit 1 + { + check_containers_ready "$replica_condition_json" + } || { + retry "containers not ready" + exit 1 + } + + scrape_metrics=$(kubectl exec ${replica} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { + contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; + } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${replica}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 
'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL + name: step-00 + try: + - apply: + file: files/exporter-replica-cluster.yaml + - assert: + file: files/exporter-replica-cluster-checks.yaml diff --git a/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml b/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml new file mode 100644 index 0000000000..7c775b47b1 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-replica + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-replica-exporter-queries-config diff --git a/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml b/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml new file mode 100644 index 0000000000..504d33bc3a --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml @@ -0,0 +1,19 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-replica +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git 
a/testing/chainsaw/e2e-other/exporter-standby/README.md b/testing/chainsaw/e2e-other/exporter-standby/README.md new file mode 100644 index 0000000000..34df4e5b7a --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/README.md @@ -0,0 +1,9 @@ +# Exporter connection on standby cluster + +The exporter standby test will deploy two clusters, one primary and one standby. +Both clusters have monitoring enabled and are created in the same namespace to +allow for easy connections over the network. + +The `ccp_monitoring` password for both clusters is updated to match, allowing +the exporter on the standby cluster to query postgres using the proper `ccp_monitoring` +password. diff --git a/testing/chainsaw/e2e-other/exporter-standby/chainsaw-test.yaml b/testing/chainsaw/e2e-other/exporter-standby/chainsaw-test.yaml new file mode 100755 index 0000000000..b5dd349309 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/chainsaw-test.yaml @@ -0,0 +1,102 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-standby +spec: + steps: + - name: step-00 + try: + - apply: + file: files/cluster-certs.yaml + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \
 + -l postgres-operator.crunchydata.com/cluster=primary-cluster \
 + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; }
|| { + retry "containers not ready" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} + name: step-01 + try: + - apply: + file: files/primary-cluster.yaml + - assert: + file: files/primary-cluster-checks.yaml + - name: step-02 + try: + - apply: + file: files/update-primary-password.yaml + - assert: + file: files/update-primary-password-checks.yaml + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} + name: step-03 + try: + - apply: + file: files/standby-cluster.yaml + - assert: + file: files/standby-cluster-checks.yaml + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + check_containers_ready() { bash -ceu ' echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@";} + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid 
should have changed" && exit 1 + + password=$(kubectl exec -n ${NAMESPACE} ${pod} -c exporter -- bash -c 'cat /opt/crunchy/password') + { contains "${password}" "password"; } || { + retry "unexpected password: ${password}" + exit 1 + } + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + name: step-04 + try: + - apply: + file: files/update-standby-password.yaml + - assert: + file: files/update-standby-password-checks.yaml diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/cluster-certs.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/cluster-certs.yaml new file mode 100644 index 0000000000..1f8dd06ccf --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/cluster-certs.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +data: + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNQakNDQWVXZ0F3SUJBZ0lSQU93NURHaGVVZnVNY25KYVdKNkllall3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nak13TkRFeE1UYzBOVE01V2hjTgpNek13TkRBNE1UZzBOVE01V2pBOU1Uc3dPUVlEVlFRREV6SndjbWx0WVhKNUxXTnNkWE4wWlhJdGNISnBiV0Z5CmVTNWtaV1poZFd4MExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc0xqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDkKQXdFSEEwSUFCT3RlNytQWFlDci9RQVJkcHlwYTFHcEpkbW5wOFN3ZG9FOTIzUXoraWt4UllTalgwUHBXcytqUQpVNXlKZ0NDdGxyZmxFZVZ4S2YzaVpiVHdadFlIaHVxamdlTXdnZUF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQXdHCkExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVVkp0R0R0Yk1kMVlmemUrRXBLdGJDMTdINXFZd2daNEcKQTFVZEVRU0JsakNCazRJeWNISnBiV0Z5ZVMxamJIVnpkR1Z5TFhCeWFXMWhjbmt1WkdWbVlYVnNkQzV6ZG1NdQpZMngxYzNSbGNpNXNiMk5oYkM2Q0kzQnlhVzFoY25rdFkyeDFjM1JsY2kxd2NtbHRZWEo1TG1SbFptRjFiSFF1CmMzWmpnaDl3Y21sdFlYSjVMV05zZFhOMFpYSXRjSEpwYldGeWVTNWtaV1poZFd4MGdoZHdjbWx0WVhKNUxXTnMKZFhOMFpYSXRjSEpwYldGeWVUQUtCZ2dxaGtqT1BRUURBd05IQURCRUFpQjA3Q3YzRHJTNXUxRFdaek1MQjdvbAppcjFFWEpQTnFaOXZWQUF5ZTdDMGJRSWdWQVlDM2F0ekl4a0syNHlQUU1TSjU1OGFaN3JEdkZGZXdOaVpmdSt0CjdETT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUoxYkNXMTByR3o2VWQ1K2R3WmZWcGNUNFlqck9XVG1iVW9XNXRxYTA2b1ZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNjE3djQ5ZGdLdjlBQkYybktsclVha2wyYWVueExCMmdUM2JkRFA2S1RGRmhLTmZRK2xhego2TkJUbkltQUlLMld0K1VSNVhFcC9lSmx0UEJtMWdlRzZnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: cluster-cert +type: Opaque +--- +apiVersion: v1 +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqekNDQVRTZ0F3SUJBZ0lRRzA0MEprWjYwZkZtanpaVG1SekhyakFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUJjeEZUQVRCZ05WQkFNTURGOWpjblZ1WTJoNWNtVndiREJaTUJNR0J5cUdTTTQ5CkFnRUdDQ3FHU000OUF3RUhBMElBQk5HVHcvSmVtaGxGK28xUlRBb0VXSndzdjJ6WjIyc1p4N2NjT2VmL1NXdjYKeXphYkpaUmkvREFyK0kwUHNyTlhmand3a0xMa3hERGZsTklvcFZMNVYwT2pXakJZTUE0R0ExVWREd0VCL3dRRQpBd0lGb0RBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkZTYlJnN1d6SGRXSDgzdmhLU3JXd3RlCngrYW1NQmNHQTFVZEVRUVFNQTZDREY5amNuVnVZMmg1Y21Wd2JEQUtCZ2dxaGtqT1BRUURBd05KQURCR0FpRUEKcWVsYmUvdTQzRFRPWFdlell1b3Nva0dUbHg1U2ljUFRkNk05Q3pwU2VoWUNJUUNOOS91Znc0SUZzdDZOM1RtYQo4MmZpSElKSUpQY0RjM2ZKUnFna01RQmF0QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 
+ tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBxeTVzNVJxWThKUmdycjJreE9zaG9hc25yTWhUUkJPYjZ0alI3T2ZqTFlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMFpQRDhsNmFHVVg2alZGTUNnUlluQ3kvYk5uYmF4bkh0eHc1NS85SmEvckxOcHNsbEdMOApNQ3Y0alEreXMxZCtQRENRc3VURU1OK1UwaWlsVXZsWFF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: replication-cert +type: Opaque diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster-checks.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster-checks.yaml new file mode 100644 index 0000000000..c2a59244a5 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster-checks.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster.yaml new file mode 100644 index 0000000000..8f51632f5b --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/primary-cluster.yaml @@ -0,0 +1,22 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + 
monitoring: + pgmonitor: + exporter: {} diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster-checks.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster-checks.yaml new file mode 100644 index 0000000000..237dec721e --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster-checks.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +status: + instances: + - name: instance1 + replicas: 1 + updatedReplicas: 1 + # The cluster should not become fully ready in this step, the ccp_monitoring password + # on the standby does not match the primary +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster.yaml new file mode 100644 index 0000000000..33e9ec2c2c --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/standby-cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + standby: + enabled: true + host: primary-cluster-primary + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password-checks.yaml 
b/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password-checks.yaml new file mode 100644 index 0000000000..1ef72b49c9 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password-checks.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: primary-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: primary-cluster +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= +--- +# TODO: Check that password is set as a file diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password.yaml new file mode 100644 index 0000000000..a66450b103 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/update-primary-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: primary-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password-checks.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password-checks.yaml new file mode 100644 index 0000000000..34d5357318 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password-checks.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: standby-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/role: monitoring + 
ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: standby-cluster +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= +--- +# TODO: Check that password is set as a file diff --git a/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password.yaml b/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password.yaml new file mode 100644 index 0000000000..57371fce93 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-standby/files/update-standby-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: standby-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/00--cluster.yaml b/testing/chainsaw/e2e-other/exporter-upgrade/00--cluster.yaml new file mode 100644 index 0000000000..0e53eab2de --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/00--cluster.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +spec: + postgresVersion: 14 + image: us.gcr.io/container-suite/crunchy-postgres:ubi8-14.0-5.0.3-0 + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + monitoring: + pgmonitor: + exporter: + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.3.1-0 diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/00-assert.yaml 
b/testing/chainsaw/e2e-other/exporter-upgrade/00-assert.yaml new file mode 100644 index 0000000000..c569c97454 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/00-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/02--update-cluster.yaml b/testing/chainsaw/e2e-other/exporter-upgrade/02--update-cluster.yaml new file mode 100644 index 0000000000..cde17d80b4 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/02--update-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +spec: + postgresVersion: 14 + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.5-1 diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/02-assert.yaml b/testing/chainsaw/e2e-other/exporter-upgrade/02-assert.yaml new file mode 100644 index 0000000000..9ad238b944 --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/02-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: exporter-primary diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/README.md b/testing/chainsaw/e2e-other/exporter-upgrade/README.md new file mode 100644 index 0000000000..fefe28a95c --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/README.md @@ -0,0 +1,31 @@ +The exporter-upgrade test makes sure 
that PGO updates an extension used for monitoring. This +avoids an error where a user might update to a new PG image with a newer extension, but with an +older extension operative. + +Note: This test relies on two `crunchy-postgres` images with known, different `pgnodemx` extensions: +the image created in 00--cluster.yaml has `pgnodemx` 1.1; the image we update the cluster to in +02--update-cluster.yaml has `pgnodemx` 1.3. + +00-01 +This starts up a cluster with a purposely outdated `pgnodemx` extension. Because we want a specific +extension, the image used here is hard-coded (and so outdated it's not publicly available). + +(This image is so outdated that it doesn't finish creating a backup with the current PGO, which is +why the 00-assert.yaml only checks that the pod is ready; and why 01--check-exporter.yaml wraps the +call in a retry loop.) + +02-03 +The cluster is updated with a newer (and hardcoded) image with a newer version of `pgnodemx`. Due +to the change made in https://github.com/CrunchyData/postgres-operator/pull/3400, this should no +longer produce multiple errors. + +Note: a few errors may be logged after the `exporter` container attempts to run the `pgnodemx` +functions but before the extension is updated. So this checks that there are no more than 2 errors, +since that was the observed maximum number of printed errors during manual tests of the check. 
+ +For instance, using these hardcoded images (with `pgnodemx` versions 1.1 and 1.3), those errors were: + +``` +Error running query on database \"localhost:5432\": ccp_nodemx_disk_activity pq: query-specified return tuple and function return type are not compatible" +Error running query on database \"localhost:5432\": ccp_nodemx_data_disk pq: query-specified return tuple and function return type are not compatible +``` diff --git a/testing/chainsaw/e2e-other/exporter-upgrade/chainsaw-test.yaml b/testing/chainsaw/e2e-other/exporter-upgrade/chainsaw-test.yaml new file mode 100755 index 0000000000..4f88c89c2e --- /dev/null +++ b/testing/chainsaw/e2e-other/exporter-upgrade/chainsaw-test.yaml @@ -0,0 +1,70 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-upgrade +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - script: + content: | + set -e + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=exporter, + postgres-operator.crunchydata.com/role=master' + ) + + # Ensure that the metrics endpoint is available from inside the exporter container + for i in {1..5}; do + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter -- curl http://localhost:9187/metrics + sleep 2 + done + + # Ensure that the monitoring user exists and is configured. 
+ kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + ASSERT result.rolconfig @> '{jit=off}', format('got config: %L', result.rolconfig); + END $$ + SQL + - name: step-02 + try: + - apply: + file: 02--update-cluster.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - script: + content: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=exporter, + postgres-operator.crunchydata.com/role=master' + ) + + # Get errors from the exporter + # See the README.md for a discussion of these errors + ERR=$(kubectl logs --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter | grep -e "Error running query on database") + ERR_COUNT=$(echo "$ERR" | wc -l) + + if [[ "$ERR_COUNT" -gt 2 ]]; then + echo "Errors in log from exporter: ${ERR}" + exit 1 + fi diff --git a/testing/chainsaw/e2e-other/gssapi/00-assert.yaml b/testing/chainsaw/e2e-other/gssapi/00-assert.yaml new file mode 100644 index 0000000000..ea828be0c4 --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/00-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: krb5 +--- +apiVersion: v1 +kind: Secret +metadata: + name: krb5-keytab diff --git a/testing/chainsaw/e2e-other/gssapi/01-assert.yaml b/testing/chainsaw/e2e-other/gssapi/01-assert.yaml new file mode 100644 index 0000000000..dbda953ead --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/01-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: gssapi +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: gssapi-primary diff --git 
a/testing/chainsaw/e2e-other/gssapi/01-cluster.yaml b/testing/chainsaw/e2e-other/gssapi/01-cluster.yaml new file mode 100644 index 0000000000..8acfe46c4d --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/01-cluster.yaml @@ -0,0 +1,41 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: gssapi +spec: + config: + files: + - secret: + name: krb5-keytab + - configMap: + name: krb5 + patroni: + dynamicConfiguration: + postgresql: + pg_hba: + - host postgres postgres 0.0.0.0/0 scram-sha-256 + - host all krb5hippo@PGO.CRUNCHYDATA.COM 0.0.0.0/0 gss + parameters: + krb_server_keyfile: /etc/postgres/krb5.keytab + users: + - name: postgres + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/gssapi/02-assert.yaml b/testing/chainsaw/e2e-other/gssapi/02-assert.yaml new file mode 100644 index 0000000000..36f85d95d4 --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/02-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-gssapi +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/gssapi/02-psql-connect.yaml b/testing/chainsaw/e2e-other/gssapi/02-psql-connect.yaml new file mode 100644 index 0000000000..30f02b3b19 --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/02-psql-connect.yaml @@ -0,0 +1,47 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-gssapi +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - bash + - -c + - -- + - |- + psql -c 'create user "krb5hippo@PGO.CRUNCHYDATA.COM";' + kinit -k -t /krb5-conf/krb5.keytab krb5hippo@PGO.CRUNCHYDATA.COM + psql -U 
krb5hippo@PGO.CRUNCHYDATA.COM -h gssapi-primary.$(NAMESPACE).svc.cluster.local -d postgres \ + -c 'select version();' + env: + - name: NAMESPACE + valueFrom: { fieldRef: { fieldPath: metadata.namespace } } + - name: PGHOST + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: port } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: password } } + - name: PGDATABASE + value: postgres + - name: KRB5_CONFIG + value: /krb5-conf/krb5.conf + volumeMounts: + - name: krb5-conf + mountPath: /krb5-conf + volumes: + - name: krb5-conf + projected: + sources: + - configMap: + name: krb5 + - secret: + name: krb5-keytab diff --git a/testing/chainsaw/e2e-other/gssapi/README.md b/testing/chainsaw/e2e-other/gssapi/README.md new file mode 100644 index 0000000000..72d8d2b997 --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/README.md @@ -0,0 +1,14 @@ +# GSSAPI Authentication + +This test verifies that it is possible to properly configure PostgreSQL for GSSAPI +authentication. This is done by configuring a PostgresCluster for GSSAPI authentication, +and then utilizing a Kerberos ticket that has been issued by a Kerberos KDC server to log into +PostgreSQL. 
+ +## Assumptions + +- A Kerberos Key Distribution Center (KDC) Pod named `krb5-kdc-0` is deployed inside of a `krb5` +namespace within the Kubernetes cluster +- The KDC server (`krb5-kdc-0`) contains a `/krb5-conf/krb5.sh` script that can be run as part +of the test to create the Kerberos principals, keytab secret and client configuration needed to +successfully run the test diff --git a/testing/chainsaw/e2e-other/gssapi/chainsaw-test.yaml b/testing/chainsaw/e2e-other/gssapi/chainsaw-test.yaml new file mode 100755 index 0000000000..b4209c220b --- /dev/null +++ b/testing/chainsaw/e2e-other/gssapi/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: gssapi +spec: + steps: + - name: step-00 + try: + - command: + args: + - exec + - -n + - krb5 + - -it + - krb5-kdc-0 + - -- + - /krb5-scripts/krb5.sh + - ${NAMESPACE} + entrypoint: kubectl + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01-cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02-psql-connect.yaml + - assert: + file: 02-assert.yaml diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml new file mode 100644 index 0000000000..fa3985231d --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -0,0 +1,11 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # postgres version that is no longer available + fromPostgresVersion: 10 + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: 
major-upgrade-empty-image diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/01-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/01-assert.yaml new file mode 100644 index 0000000000..b7d0f936fb --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/10--cluster.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/10--cluster.yaml new file mode 100644 index 0000000000..c85a9b8dae --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/10--cluster.yaml @@ -0,0 +1,23 @@ +--- +# Create the cluster we will do an actual upgrade on, but set the postgres version +# to '10' to force a missing image scenario +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + # postgres version that is no longer available + postgresVersion: 10 + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/10-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/10-assert.yaml new file mode 100644 index 0000000000..72e9ff6387 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/10-assert.yaml @@ -0,0 +1,12 @@ +--- +# The cluster is not running due to the missing image, not due 
to a proper +# shutdown status. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/11-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/11-assert.yaml new file mode 100644 index 0000000000..5bd9d447cb --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/11-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml new file mode 100644 index 0000000000..fcdf4f62e3 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml @@ -0,0 +1,17 @@ +--- +# Update the postgres version and restart the cluster. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: false + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # update postgres version + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/12-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/12-assert.yaml new file mode 100644 index 0000000000..14c33cccfe --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/12-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/13-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/13-assert.yaml new file mode 100644 index 0000000000..78e51e566a --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/13-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml new file mode 100644 index 0000000000..2fa2c949a9 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image + annotations: + postgres-operator.crunchydata.com/allow-upgrade: empty-image-upgrade diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/14-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/14-assert.yaml new file mode 100644 index 0000000000..bd828180f4 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/14-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciliation is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml new file mode 100644 index 0000000000..e5f270fb2f --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/15-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/15-assert.yaml new file mode 100644 index 0000000000..dfcbd4c819 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/15-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/17--check-version.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/17--check-version.yaml new file mode 100644 index 0000000000..5315c1d14f --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/17--check-version.yaml @@ -0,0 +1,39 @@ +--- +# Check the version reported by PostgreSQL +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-empty-image-pguser-major-upgrade-empty-image, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/17-assert.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/17-assert.yaml new file mode 100644 index 0000000000..56289c35c1 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/17-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/README.md b/testing/chainsaw/e2e-other/major-upgrade-missing-image/README.md new file mode 100644 index 0000000000..341cc854f7 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/README.md @@ -0,0 +1,36 @@ +## Major upgrade missing image tests + +This is a variation derived from our major upgrade KUTTL tests designed to +test scenarios where required container images are not defined in either the +PostgresCluster spec or via the RELATED_IMAGES environment variables. + +### Basic PGUpgrade controller and CRD instance validation + +* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-assert: check that the PGUpgrade instance exists and has the expected status + +### Verify new statuses for missing required container images + +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) +* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" +* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade +* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" + +### Update to an available Postgres version, start and upgrade PostgresCluster + +* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false +* 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" +* 13--shutdown-cluster: set spec.shutdown to 'true' +* 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" +* 14--annotate-cluster: set the required annotation +* 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status +* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' + +### Verify upgraded PostgresCluster + +* 15-assert: verify that the cluster is running +* 16-check-pgbackrest: check that the pgbackrest setup has successfully completed +* 17--check-version: check the version reported by PostgreSQL +* 17-assert: assert the Job from the previous step succeeded + + diff --git a/testing/chainsaw/e2e-other/major-upgrade-missing-image/chainsaw-test.yaml b/testing/chainsaw/e2e-other/major-upgrade-missing-image/chainsaw-test.yaml new file mode 100755 index 0000000000..d0e61a1214 --- /dev/null +++ b/testing/chainsaw/e2e-other/major-upgrade-missing-image/chainsaw-test.yaml @@ -0,0 +1,61 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: major-upgrade-missing-image +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--valid-upgrade.yaml + - assert: + file: 
01-assert.yaml + - name: step-10 + try: + - apply: + file: 10--cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - apply: + file: 11--shutdown-cluster.yaml + - assert: + file: 11-assert.yaml + - name: step-12 + try: + - apply: + file: 12--start-and-update-version.yaml + - assert: + file: 12-assert.yaml + - name: step-13 + try: + - apply: + file: 13--shutdown-cluster.yaml + - assert: + file: 13-assert.yaml + - name: step-14 + try: + - apply: + file: 14--annotate-cluster.yaml + - assert: + file: 14-assert.yaml + - name: step-15 + try: + - apply: + file: 15--start-cluster.yaml + - assert: + file: 15-assert.yaml + - name: step-16 + try: + - script: + content: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-empty-image-repo-host" -c pgbackrest -- pgbackrest check --stanza=db + - name: step-17 + try: + - apply: + file: 17--check-version.yaml + - assert: + file: 17-assert.yaml diff --git a/testing/chainsaw/e2e-other/postgis-cluster/00--cluster.yaml b/testing/chainsaw/e2e-other/postgis-cluster/00--cluster.yaml new file mode 100644 index 0000000000..8dc88788bc --- /dev/null +++ b/testing/chainsaw/e2e-other/postgis-cluster/00--cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: postgis +spec: + postgresVersion: ${KUTTL_PG_VERSION} + postGISVersion: "${KUTTL_POSTGIS_VERSION}" + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/postgis-cluster/00-assert.yaml b/testing/chainsaw/e2e-other/postgis-cluster/00-assert.yaml new file mode 100644 index 0000000000..b0bda7753f --- /dev/null +++ b/testing/chainsaw/e2e-other/postgis-cluster/00-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: 
postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: postgis +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: postgis + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgis-primary diff --git a/testing/chainsaw/e2e-other/postgis-cluster/01--psql-connect.yaml b/testing/chainsaw/e2e-other/postgis-cluster/01--psql-connect.yaml new file mode 100644 index 0000000000..814958a9f6 --- /dev/null +++ b/testing/chainsaw/e2e-other/postgis-cluster/01--psql-connect.yaml @@ -0,0 +1,132 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-postgis-connect +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: password } } + - { name: GIS_VERSION, value: "${KUTTL_POSTGIS_VERSION}" } + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } + command: + - bash + - -c + - | + # Ensure PostGIS version is set + GIS_VERSION=${KUTTL_POSTGIS_VERSION} + GIS_VERSION=${GIS_VERSION:-notset} + + # check version + RESULT=$(psql -c "DO \$\$ + DECLARE + result boolean; + BEGIN + SELECT postgis_version() LIKE '%${GIS_VERSION}%' INTO result; + ASSERT result = 't', 'PostGIS version incorrect'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check full version + RESULT=$(psql -c "DO \$\$ + DECLARE + result boolean; + BEGIN + SELECT postgis_full_version() LIKE 'POSTGIS=\"%${GIS_VERSION}%' INTO result; + ASSERT result = 't', 'PostGIS full version incorrect'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check expected schemas (tiger, tiger_data and topology) + # - https://www.postgresql.org/docs/current/catalog-pg-namespace.html + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger' INTO result; + ASSERT result = 'tiger', 'PostGIS tiger schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger_data' INTO result; + ASSERT result = 'tiger_data', 'PostGIS tiger_data schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='topology' INTO result; + ASSERT result = 'topology', 'PostGIS topology schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check point creation + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT pg_typeof(ST_MakePoint(28.385200,-81.563900)) INTO result; + 
ASSERT result = 'geometry', 'Unable to create PostGIS point'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check GeoJSON function + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT ST_AsGeoJSON('SRID=4326;POINT(-118.4079 33.9434)'::geography) INTO result; + ASSERT result = '{\"type\":\"Point\",\"coordinates\":[-118.4079,33.9434]}', FORMAT('GeoJSON check failed, got %L', result); + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi diff --git a/testing/chainsaw/e2e-other/postgis-cluster/01-assert.yaml b/testing/chainsaw/e2e-other/postgis-cluster/01-assert.yaml new file mode 100644 index 0000000000..22e9e6f9de --- /dev/null +++ b/testing/chainsaw/e2e-other/postgis-cluster/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-postgis-connect +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/postgis-cluster/chainsaw-test.yaml b/testing/chainsaw/e2e-other/postgis-cluster/chainsaw-test.yaml new file mode 100755 index 0000000000..8eb80273c2 --- /dev/null +++ b/testing/chainsaw/e2e-other/postgis-cluster/chainsaw-test.yaml @@ -0,0 +1,20 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: postgis-cluster +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--psql-connect.yaml + - assert: + file: 01-assert.yaml diff --git a/testing/chainsaw/e2e-other/resize-volume/00-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/00-assert.yaml new file mode 100644 index 0000000000..b4372b75e7 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/00-assert.yaml @@ -0,0 +1,7 @@ +# Ensure that the default StorageClass supports VolumeExpansion 
+apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" +allowVolumeExpansion: true diff --git a/testing/chainsaw/e2e-other/resize-volume/01--cluster.yaml b/testing/chainsaw/e2e-other/resize-volume/01--cluster.yaml new file mode 100644 index 0000000000..4737fb25f4 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/01--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/resize-volume/01-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/01-assert.yaml new file mode 100644 index 0000000000..ea72af469c --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/01-assert.yaml @@ -0,0 +1,59 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: resize-volume-up-primary +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 
+kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/testing/chainsaw/e2e-other/resize-volume/02--create-data.yaml b/testing/chainsaw/e2e-other/resize-volume/02--create-data.yaml new file mode 100644 index 0000000000..c41a6f80c4 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/02--create-data.yaml @@ -0,0 +1,31 @@ +--- +# Create some data that should be present after resizing. +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/chainsaw/e2e-other/resize-volume/02-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/02-assert.yaml new file mode 100644 index 0000000000..fdb42e68f5 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/resize-volume/03--resize.yaml b/testing/chainsaw/e2e-other/resize-volume/03--resize.yaml new file mode 100644 index 0000000000..dd7c96901f --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/03--resize.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi diff --git a/testing/chainsaw/e2e-other/resize-volume/03-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/03-assert.yaml new file mode 100644 index 0000000000..11aa230cd4 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/03-assert.yaml @@ -0,0 +1,37 @@ +# We know that the PVC sizes have change so now we can check that they have been +# updated to have the expected size +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 
+kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/chainsaw/e2e-other/resize-volume/06--check-data.yaml b/testing/chainsaw/e2e-other/resize-volume/06--check-data.yaml new file mode 100644 index 0000000000..682a46ef4d --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/06--check-data.yaml @@ -0,0 +1,40 @@ +--- +# Confirm that all the data still exists. +apiVersion: batch/v1 +kind: Job +metadata: + name: check-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data still exists. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + keep_data jsonb; + BEGIN + SELECT jsonb_agg(important) INTO keep_data FROM important; + ASSERT keep_data = '[{"data":"treasure"}]', format('got %L', keep_data); + END $$$$; diff --git a/testing/chainsaw/e2e-other/resize-volume/06-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/06-assert.yaml new file mode 100644 index 0000000000..cf743b8701 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/06-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: check-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e-other/resize-volume/11--cluster.yaml b/testing/chainsaw/e2e-other/resize-volume/11--cluster.yaml new file mode 100644 index 0000000000..8d2d602ca6 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/11--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi diff --git a/testing/chainsaw/e2e-other/resize-volume/11-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/11-assert.yaml new file mode 100644 index 0000000000..666b4a85c7 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/11-assert.yaml @@ -0,0 +1,59 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + 
postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: resize-volume-down-primary +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/chainsaw/e2e-other/resize-volume/13--resize.yaml b/testing/chainsaw/e2e-other/resize-volume/13--resize.yaml new file mode 100644 index 0000000000..77af2f2aa3 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/13--resize.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e-other/resize-volume/13-assert.yaml b/testing/chainsaw/e2e-other/resize-volume/13-assert.yaml new file mode 100644 index 0000000000..4210214fd6 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/13-assert.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Event +type: Warning +involvedObject: + 
apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: resize-volume-down +reason: PersistentVolumeError +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/chainsaw/e2e-other/resize-volume/chainsaw-test.yaml b/testing/chainsaw/e2e-other/resize-volume/chainsaw-test.yaml new file mode 100755 index 0000000000..9eb16578a6 --- /dev/null +++ b/testing/chainsaw/e2e-other/resize-volume/chainsaw-test.yaml @@ -0,0 +1,48 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: resize-volume +spec: + steps: + - name: step-00 + try: + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--create-data.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - apply: + file: 03--resize.yaml + - assert: + file: 03-assert.yaml + - name: step-06 + try: + - apply: + file: 06--check-data.yaml + - assert: + file: 06-assert.yaml + - name: step-11 + try: + - apply: + file: 11--cluster.yaml + - assert: + file: 11-assert.yaml + - name: step-13 + try: + - apply: + file: 13--resize.yaml + - assert: 
+ file: 13-assert.yaml diff --git a/testing/chainsaw/e2e/cluster-pause/chainsaw-test.yaml b/testing/chainsaw/e2e/cluster-pause/chainsaw-test.yaml new file mode 100755 index 0000000000..8defe16c39 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/chainsaw-test.yaml @@ -0,0 +1,44 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: cluster-pause +spec: + steps: + - catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause + - podLogs: + namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause + name: step-00 + try: + - apply: + file: files/00-create-cluster.yaml + - assert: + file: files/00-cluster-created.yaml + - catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause + - podLogs: + namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause + name: step-01 + try: + - apply: + file: files/01-pause-cluster.yaml + - assert: + file: files/01-cluster-paused.yaml + - catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause + - podLogs: + namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause + name: step-02 + try: + - apply: + file: files/02-resume-cluster.yaml + - assert: + file: files/02-cluster-resumed.yaml diff --git a/testing/chainsaw/e2e/cluster-pause/files/00-cluster-created.yaml b/testing/chainsaw/e2e/cluster-pause/files/00-cluster-created.yaml new file mode 100644 index 0000000000..5c867a7892 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/files/00-cluster-created.yaml @@ -0,0 +1,23 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: 
PostgresCluster +metadata: + name: cluster-pause +status: + conditions: + - message: pgBackRest dedicated repository host is ready + reason: RepoHostReady + status: "True" + type: PGBackRestRepoHostReady + - message: pgBackRest replica create repo is ready for backups + reason: StanzaCreated + status: "True" + type: PGBackRestReplicaRepoReady + - message: pgBackRest replica creation is now possible + reason: RepoBackupComplete + status: "True" + type: PGBackRestReplicaCreate + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/cluster-pause/files/00-create-cluster.yaml b/testing/chainsaw/e2e/cluster-pause/files/00-create-cluster.yaml new file mode 100644 index 0000000000..abf7b9f4f2 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/files/00-create-cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/cluster-pause/files/01-cluster-paused.yaml b/testing/chainsaw/e2e/cluster-pause/files/01-cluster-paused.yaml new file mode 100644 index 0000000000..ecd459d3e1 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/files/01-cluster-paused.yaml @@ -0,0 +1,34 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + conditions: + - message: pgBackRest dedicated repository host is ready + reason: RepoHostReady + status: "True" + type: PGBackRestRepoHostReady + - message: pgBackRest replica create repo is ready for backups + reason: StanzaCreated + status: "True" + type: 
PGBackRestReplicaRepoReady + - message: pgBackRest replica creation is now possible + reason: RepoBackupComplete + status: "True" + type: PGBackRestReplicaCreate + - message: No spec changes will be applied and no other statuses will be updated. + reason: Paused + status: "False" + type: Progressing + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-pause-ha +spec: + type: ClusterIP diff --git a/testing/chainsaw/e2e/cluster-pause/files/01-pause-cluster.yaml b/testing/chainsaw/e2e/cluster-pause/files/01-pause-cluster.yaml new file mode 100644 index 0000000000..6a21b00b22 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/files/01-pause-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + # We change the service, but this won't result in a change until we resume + service: + type: LoadBalancer + paused: true + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/cluster-pause/files/02-cluster-resumed.yaml b/testing/chainsaw/e2e/cluster-pause/files/02-cluster-resumed.yaml new file mode 100644 index 0000000000..1c90fe5f22 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-pause/files/02-cluster-resumed.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + conditions: + - message: pgBackRest dedicated repository host is ready + reason: RepoHostReady + status: "True" + type: PGBackRestRepoHostReady + - message: pgBackRest replica create repo is ready for backups + reason: StanzaCreated + status: "True" + type: PGBackRestReplicaRepoReady + - message: pgBackRest replica creation is now possible + reason: RepoBackupComplete + status: "True" + type: PGBackRestReplicaCreate + 
instances:
+  - name: instance1
+    readyReplicas: 1
+    replicas: 1
+    updatedReplicas: 1
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: cluster-pause-ha
+spec:
+  type: LoadBalancer
diff --git a/testing/chainsaw/e2e/cluster-pause/files/02-resume-cluster.yaml b/testing/chainsaw/e2e/cluster-pause/files/02-resume-cluster.yaml
new file mode 100644
index 0000000000..2f5665e146
--- /dev/null
+++ b/testing/chainsaw/e2e/cluster-pause/files/02-resume-cluster.yaml
@@ -0,0 +1,6 @@
+apiVersion: postgres-operator.crunchydata.com/v1beta1
+kind: PostgresCluster
+metadata:
+  name: cluster-pause
+spec:
+  paused: false
diff --git a/testing/chainsaw/e2e/cluster-pause/files/chainsaw-test.yaml b/testing/chainsaw/e2e/cluster-pause/files/chainsaw-test.yaml
new file mode 100755
index 0000000000..8f66fbbd11
--- /dev/null
+++ b/testing/chainsaw/e2e/cluster-pause/files/chainsaw-test.yaml
@@ -0,0 +1,26 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: files
+spec:
+  steps:
+  - name: step-00
+    try:
+    - apply:
+        file: 00-create-cluster.yaml
+    - assert:
+        file: 00-cluster-created.yaml
+  - name: step-01
+    try:
+    - apply:
+        file: 01-pause-cluster.yaml
+    - assert:
+        file: 01-cluster-paused.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: 02-resume-cluster.yaml
+    - assert:
+        file: 02-cluster-resumed.yaml
diff --git a/testing/chainsaw/e2e/cluster-start/chainsaw-test.yaml b/testing/chainsaw/e2e/cluster-start/chainsaw-test.yaml
new file mode 100755
index 0000000000..a1deb6ca03
--- /dev/null
+++ b/testing/chainsaw/e2e/cluster-start/chainsaw-test.yaml
@@ -0,0 +1,32 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: cluster-start
+spec:
+  steps:
+  
- catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-start + - podLogs: + namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start + name: step-00 + try: + - apply: + file: files/00-create-cluster.yaml + - assert: + file: files/00-cluster-created.yaml + - catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-start + - podLogs: + namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start + name: step-01 + try: + - apply: + file: files/01-connect-psql.yaml + - assert: + file: files/01-psql-connected.yaml diff --git a/testing/chainsaw/e2e/cluster-start/files/00-cluster-created.yaml b/testing/chainsaw/e2e/cluster-start/files/00-cluster-created.yaml new file mode 100644 index 0000000000..ecc6ab7fe8 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-start/files/00-cluster-created.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: cluster-start + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-start-primary diff --git a/testing/chainsaw/e2e/cluster-start/files/00-create-cluster.yaml b/testing/chainsaw/e2e/cluster-start/files/00-create-cluster.yaml new file mode 100644 index 0000000000..a870d940f1 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-start/files/00-create-cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + 
dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/cluster-start/files/01-connect-psql.yaml b/testing/chainsaw/e2e/cluster-start/files/01-connect-psql.yaml new file mode 100644 index 0000000000..b4cef74941 --- /dev/null +++ b/testing/chainsaw/e2e/cluster-start/files/01-connect-psql.yaml @@ -0,0 +1,29 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: password } } + # Do not wait indefinitely. 
+            - { name: PGCONNECT_TIMEOUT, value: '5' }
diff --git a/testing/chainsaw/e2e/cluster-start/files/01-psql-connected.yaml b/testing/chainsaw/e2e/cluster-start/files/01-psql-connected.yaml
new file mode 100644
index 0000000000..e4d8bbb37a
--- /dev/null
+++ b/testing/chainsaw/e2e/cluster-start/files/01-psql-connected.yaml
@@ -0,0 +1,6 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-connect
+status:
+  succeeded: 1
diff --git a/testing/chainsaw/e2e/cluster-start/files/chainsaw-test.yaml b/testing/chainsaw/e2e/cluster-start/files/chainsaw-test.yaml
new file mode 100755
index 0000000000..8fead6b270
--- /dev/null
+++ b/testing/chainsaw/e2e/cluster-start/files/chainsaw-test.yaml
@@ -0,0 +1,20 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: files
+spec:
+  steps:
+  - name: step-00
+    try:
+    - apply:
+        file: 00-create-cluster.yaml
+    - assert:
+        file: 00-cluster-created.yaml
+  - name: step-01
+    try:
+    - apply:
+        file: 01-connect-psql.yaml
+    - assert:
+        file: 01-psql-connected.yaml
diff --git a/testing/chainsaw/e2e/delete-namespace/00--namespace.yaml b/testing/chainsaw/e2e/delete-namespace/00--namespace.yaml
new file mode 100644
index 0000000000..617c1e5399
--- /dev/null
+++ b/testing/chainsaw/e2e/delete-namespace/00--namespace.yaml
@@ -0,0 +1,5 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${KUTTL_TEST_DELETE_NAMESPACE}
diff --git a/testing/chainsaw/e2e/delete-namespace/01--cluster.yaml b/testing/chainsaw/e2e/delete-namespace/01--cluster.yaml
new file mode 100644
index 0000000000..fe6392d75a
--- /dev/null
+++ b/testing/chainsaw/e2e/delete-namespace/01--cluster.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: postgres-operator.crunchydata.com/v1beta1
+kind: PostgresCluster
+metadata:
+  name: delete-namespace
+  namespace: ${KUTTL_TEST_DELETE_NAMESPACE}
+spec:
+  postgresVersion: 
${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/delete-namespace/01-assert.yaml b/testing/chainsaw/e2e/delete-namespace/01-assert.yaml new file mode 100644 index 0000000000..3d2c7ec936 --- /dev/null +++ b/testing/chainsaw/e2e/delete-namespace/01-assert.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/delete-namespace/02-errors.yaml b/testing/chainsaw/e2e/delete-namespace/02-errors.yaml new file mode 100644 index 0000000000..ee6f31178c --- /dev/null +++ b/testing/chainsaw/e2e/delete-namespace/02-errors.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + name: delete-namespace +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Pod +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Service +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace diff --git a/testing/chainsaw/e2e/delete-namespace/README.md b/testing/chainsaw/e2e/delete-namespace/README.md new file mode 100644 index 0000000000..697e2ae915 --- /dev/null +++ b/testing/chainsaw/e2e/delete-namespace/README.md @@ -0,0 +1,11 @@ +### Delete namespace test + +* Create a namespace +* Start a regular cluster in that namespace +* Delete the namespace +* Check that nothing remains. + +Note: KUTTL provides a `$NAMESPACE` var that can be used in scripts/commands, +but which cannot be used in object definition yamls (like `01--cluster.yaml`). +Therefore, we use a given, non-random namespace that is defined in the makefile +and generated with `generate-kuttl`. 
diff --git a/testing/chainsaw/e2e/delete-namespace/chainsaw-test.yaml b/testing/chainsaw/e2e/delete-namespace/chainsaw-test.yaml new file mode 100755 index 0000000000..8c6fa69d26 --- /dev/null +++ b/testing/chainsaw/e2e/delete-namespace/chainsaw-test.yaml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: delete-namespace +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--namespace.yaml + - name: step-01 + try: + - apply: + file: 01--cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - delete: + ref: + apiVersion: v1 + kind: Namespace + name: ${KUTTL_TEST_DELETE_NAMESPACE} + - error: + file: 02-errors.yaml diff --git a/testing/chainsaw/e2e/delete/00--cluster.yaml b/testing/chainsaw/e2e/delete/00--cluster.yaml new file mode 100644 index 0000000000..0dbcb08204 --- /dev/null +++ b/testing/chainsaw/e2e/delete/00--cluster.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/delete/00-assert.yaml b/testing/chainsaw/e2e/delete/00-assert.yaml new file mode 100644 index 0000000000..6130475c07 --- /dev/null +++ b/testing/chainsaw/e2e/delete/00-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: 
batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/delete/02-errors.yaml b/testing/chainsaw/e2e/delete/02-errors.yaml new file mode 100644 index 0000000000..091bc96b7b --- /dev/null +++ b/testing/chainsaw/e2e/delete/02-errors.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +# Patroni DCS objects are not owned by the PostgresCluster. +apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete diff --git a/testing/chainsaw/e2e/delete/10--cluster.yaml b/testing/chainsaw/e2e/delete/10--cluster.yaml new file mode 100644 index 0000000000..53c4fc434d --- /dev/null +++ b/testing/chainsaw/e2e/delete/10--cluster.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + switchover: + enabled: true + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff 
--git a/testing/chainsaw/e2e/delete/10-assert.yaml b/testing/chainsaw/e2e/delete/10-assert.yaml new file mode 100644 index 0000000000..1940fc680a --- /dev/null +++ b/testing/chainsaw/e2e/delete/10-assert.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/delete/12-errors.yaml b/testing/chainsaw/e2e/delete/12-errors.yaml new file mode 100644 index 0000000000..cc14b60d3d --- /dev/null +++ b/testing/chainsaw/e2e/delete/12-errors.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica diff --git a/testing/chainsaw/e2e/delete/20--cluster.yaml b/testing/chainsaw/e2e/delete/20--cluster.yaml new file mode 100644 index 0000000000..2b7d34f3f6 --- /dev/null +++ b/testing/chainsaw/e2e/delete/20--cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +spec: + postgresVersion: ${KUTTL_PG_VERSION} + image: "example.com/does-not-exist" + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/delete/20-errors.yaml b/testing/chainsaw/e2e/delete/20-errors.yaml new file mode 100644 index 0000000000..f910fa9811 --- /dev/null +++ b/testing/chainsaw/e2e/delete/20-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/delete/22-errors.yaml b/testing/chainsaw/e2e/delete/22-errors.yaml new file mode 100644 index 0000000000..4527a3659d --- /dev/null +++ b/testing/chainsaw/e2e/delete/22-errors.yaml @@ 
-0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +# Patroni DCS objects are not owned by the PostgresCluster. +apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running diff --git a/testing/chainsaw/e2e/delete/README.md b/testing/chainsaw/e2e/delete/README.md new file mode 100644 index 0000000000..3a7d4fd848 --- /dev/null +++ b/testing/chainsaw/e2e/delete/README.md @@ -0,0 +1,19 @@ +### Delete test + +#### Regular cluster delete + +* Start a regular cluster +* Delete it +* Check that nothing remains. 
+ +#### Delete cluster with replica + +* Start a regular cluster with 2 replicas +* Delete it +* Check that nothing remains + +#### Delete a cluster that never started + +* Start a cluster with a bad image +* Delete it +* Check that nothing remains diff --git a/testing/chainsaw/e2e/delete/chainsaw-test.yaml b/testing/chainsaw/e2e/delete/chainsaw-test.yaml new file mode 100755 index 0000000000..6aa62fdf0f --- /dev/null +++ b/testing/chainsaw/e2e/delete/chainsaw-test.yaml @@ -0,0 +1,59 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: delete +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete + - name: step-02 + try: + - error: + file: 02-errors.yaml + - name: step-10 + try: + - apply: + file: 10--cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete-with-replica + - name: step-12 + try: + - error: + file: 12-errors.yaml + - name: step-20 + try: + - apply: + file: 20--cluster.yaml + - error: + file: 20-errors.yaml + - name: step-21 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete-not-running + - name: step-22 + try: + - error: + file: 22-errors.yaml diff --git a/testing/chainsaw/e2e/exporter-custom-queries/README.md b/testing/chainsaw/e2e/exporter-custom-queries/README.md new file mode 100644 index 0000000000..801b6d02a8 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/README.md @@ -0,0 +1,3 @@ +# Exporter + +**Note**: This series of tests depends on PGO being deployed with 
the `AppendCustomQueries` feature gate OFF. There is a separate set of tests in `e2e-other` that tests the `AppendCustomQueries` functionality. diff --git a/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml new file mode 100755 index 0000000000..6721a79d28 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml @@ -0,0 +1,100 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-custom-queries +spec: + steps: + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-custom-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + queries_files=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- ls /conf + ) + + { + contains "${queries_files}" "queries.yml" && + !(contains "${queries_files}" "defaultQueries.yml") + } || { + echo >&2 'The /conf directory should contain the queries.yml file. 
Instead it has:' + echo "${queries_files}" + exit 1 + } + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a test." && + !(contains "${master_queries_contents}" "ccp_postgresql_version") + } || { + echo >&2 'The master queries.yml file should only contain the contents of the custom queries.yml file. Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} + name: step-00 + try: + - apply: + file: files/exporter-custom-queries-configmap.yaml + - apply: + file: files/exporter-custom-queries-cluster.yaml + - assert: + file: files/exporter-custom-queries-cluster-checks.yaml + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-custom-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a different test." 
&& + !(contains "${master_queries_contents}" "ccp_postgresql_version") + } || { + echo >&2 'The master queries.yml file should only contain the contents of the custom queries.yml file. Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } + name: step-01 + try: + - apply: + file: files/exporter-custom-queries-configmap-update.yaml + - assert: + file: files/exporter-custom-queries-configmap-update-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml new file mode 100644 index 0000000000..ed6fd22b7c --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml @@ -0,0 +1,31 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-custom-queries +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-custom-queries + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-custom-queries-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." 
diff --git a/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml new file mode 100644 index 0000000000..6ff8ed5e67 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-custom-queries +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + configuration: + - configMap: + name: custom-queries-test diff --git a/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml new file mode 100644 index 0000000000..72af1103af --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a different test." diff --git a/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml new file mode 100644 index 0000000000..72af1103af --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a different test." 
diff --git a/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml new file mode 100644 index 0000000000..9964d6bc1e --- /dev/null +++ b/testing/chainsaw/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." diff --git a/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml new file mode 100755 index 0000000000..225c352ca5 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml @@ -0,0 +1,56 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-no-tls +spec: + steps: + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-no-tls \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}" -c exporter) + { contains "${logs}" 'TLS is disabled'; } || { + echo 'tls is not disabled - it should be' + exit 1 + 
} + + scrape_metrics=$(kubectl exec "${pod}" -c exporter -n "${NAMESPACE}" -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL + name: step-00 + try: + - apply: + file: files/exporter-no-tls-cluster.yaml + - assert: + file: files/exporter-no-tls-cluster-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml b/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml new file mode 100644 index 0000000000..eab02c6888 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-no-tls +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-no-tls + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-no-tls-exporter-queries-config diff --git a/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml b/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml new file mode 100644 index 0000000000..9cc6ec4877 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml @@ -0,0 +1,18 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: 
exporter-no-tls +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/chainsaw/e2e/exporter-password-change/README.md b/testing/chainsaw/e2e/exporter-password-change/README.md new file mode 100644 index 0000000000..2a5b596309 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/README.md @@ -0,0 +1,36 @@ +# Exporter Password Change + +## 00--create-cluster: +The TestStep will: + +1) Apply the `files/inital-postgrescluster.yaml` file to create a cluster with monitoring enabled +2) Assert that conditions outlined in `files/initial-postgrescluster-checks.yaml` are met + - PostgresCluster exists with a single ready replica + - A pod with `cluster` and `crunchy-postgres-exporter` labels has the status `{phase: Running}` + - A `-monitoring` secret exists with correct labels and ownerReferences + +## 00-assert: + +This TestAssert will loop through a script until: +1) the instance pod has the `ContainersReady` condition with status `true` +2) the asserts from `00--create-cluster` are met. 
+ +## 01-assert: + +This TestAssert will loop through a script until: +1) The metrics endpoint returns `pg_exporter_last_scrape_error 0` meaning the exporter was able to access postgres metrics +2) It is able to store the pid of the running postgres_exporter process + +## 02-change-password: + +This TestStep will: +1) Apply the `files/update-monitoring-password.yaml` file to set the monitoring password to `password` +2) Assert that conditions outlined in `files/update-monitoring-password-checks.yaml` are met + - A `-monitoring` secret exists with `data.password` set to the encoded value for `password` + +## 02-assert: + +This TestAssert will loop through a script until: +1) An exec command can confirm that `/opt/crunchy/password` file contains the updated password +2) It can confirm that the pid of the postgres_exporter process has changed +3) The metrics endpoint returns `pg_exporter_last_scrape_error 0` meaning the exporter was able to access postgres metrics using the updated password diff --git a/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml new file mode 100755 index 0000000000..b58da5e4ec --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml @@ -0,0 +1,92 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-password-change +spec: + steps: + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l 
postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true + name: step-00 + try: + - apply: + file: files/initial-postgrescluster.yaml + - assert: + file: files/initial-postgrescluster-checks.yaml + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} + - podLogs: + container: exporter + selector: postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true + name: step-01 + try: null + - catch: + - script: + content: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq 
-e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@";} + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 + + password=$(kubectl exec -n ${NAMESPACE} ${pod} -c exporter -- bash -c 'cat /opt/crunchy/password') + { contains "${password}" "password"; } || { + retry "unexpected password: ${password}" + exit 1 + } + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + name: step-02 + try: + - apply: + file: files/update-monitoring-password.yaml + - assert: + file: files/update-monitoring-password-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-password-change/files/check-restarted-pod.yaml b/testing/chainsaw/e2e/exporter-password-change/files/check-restarted-pod.yaml new file mode 100644 index 0000000000..012dafa41c --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/files/check-restarted-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml b/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml new file mode 100644 index 
0000000000..19887a0e10 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml @@ -0,0 +1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: exporter-password-change diff --git a/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster.yaml b/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster.yaml new file mode 100644 index 0000000000..e3fbb7b94a --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/files/initial-postgrescluster.yaml @@ -0,0 +1,18 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-password-change +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml 
b/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml new file mode 100644 index 0000000000..dcf1703861 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: exporter-password-change +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= diff --git a/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password.yaml b/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password.yaml new file mode 100644 index 0000000000..7832c89f69 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-password-change/files/update-monitoring-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml new file mode 100755 index 0000000000..4fe7a3b54a --- /dev/null +++ b/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml @@ -0,0 +1,59 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exporter-tls +spec: + steps: + - catch: + - script: + content: | + 
retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-tls \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}" -c exporter) + { contains "${logs}" 'TLS is enabled'; } || { + echo >&2 'TLS is not enabled - it should be' + echo "${logs}" + exit 1 + } + + scrape_metrics=$(kubectl exec "${pod}" -c exporter -n "${NAMESPACE}" -- \ + curl --insecure --silent https://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL + name: step-00 + try: + - apply: + file: files/exporter-tls-certs.yaml + - apply: + file: files/exporter-tls-cluster.yaml + - assert: + file: files/exporter-tls-cluster-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-certs.yaml b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-certs.yaml new file mode 100644 index 0000000000..1a1340a7b3 --- /dev/null +++ 
b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-certs.yaml @@ -0,0 +1,12 @@ +# Generated certs using openssl +# openssl req -x509 -nodes -newkey ec -pkeyopt ec_paramgen_curve:prime256v1 \ +# -pkeyopt ec_param_enc:named_curve -sha384 -keyout ca.key -out ca.crt \ +# -days 365 -subj "/CN=*" +apiVersion: v1 +kind: Secret +metadata: + name: cluster-cert +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJiakNDQVJPZ0F3SUJBZ0lVUUU3T0pqRDM5WHUvelZlenZQYjdSQ0ZTcE1Jd0NnWUlLb1pJemowRUF3TXcKRERFS01BZ0dBMVVFQXd3QktqQWVGdzB5TWpFd01USXhPRE14TURoYUZ3MHlNekV3TVRJeE9ETXhNRGhhTUF3eApDakFJQmdOVkJBTU1BU293V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVJjaUYyckNlbmg4UFFLClZGUWJaRVcvWi9XUGgwZkk1aHhVb1ZkVVpuRTBTNGhCK1U3aGV5L3QvQVJNbDF3cXovazQ0cmlBa1g1ckFMakgKei9hTm16bnJvMU13VVRBZEJnTlZIUTRFRmdRVTQvUFc2MEdUcWFQdGpYWXdsMk56d0RGMFRmY3dId1lEVlIwagpCQmd3Rm9BVTQvUFc2MEdUcWFQdGpYWXdsMk56d0RGMFRmY3dEd1lEVlIwVEFRSC9CQVV3QXdFQi96QUtCZ2dxCmhrak9QUVFEQXdOSkFEQkdBaUVBbG9iemo3Uml4NkU0OW8yS2JjOUdtYlRSbWE1SVdGb0k4Uk1zcGZDQzVOUUMKSVFET0hzLzhLNVkxeWhoWDc3SGIxSUpsdnFaVVNjdm5NTjBXeS9JUWRuemJ4QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1preDQ4cktidnZtUVRLSC8KSTN4STZzYW45Wk55MjQrOUQ4ODd5a2svb1l1aFJBTkNBQVJjaUYyckNlbmg4UFFLVkZRYlpFVy9aL1dQaDBmSQo1aHhVb1ZkVVpuRTBTNGhCK1U3aGV5L3QvQVJNbDF3cXovazQ0cmlBa1g1ckFMakh6L2FObXpucgotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml new file mode 100644 index 0000000000..e192191fcd --- /dev/null +++ b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-tls +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 
+--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-tls + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-tls-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-tls-exporter-web-config diff --git a/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster.yaml b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster.yaml new file mode 100644 index 0000000000..d445062bf3 --- /dev/null +++ b/testing/chainsaw/e2e/exporter-tls/files/exporter-tls-cluster.yaml @@ -0,0 +1,20 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-tls +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + customTLSSecret: + name: cluster-cert diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml new file mode 100644 index 0000000000..fa3985231d --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -0,0 +1,11 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # postgres version that is no longer available + fromPostgresVersion: 10 + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: major-upgrade-empty-image diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/01-assert.yaml 
b/testing/chainsaw/e2e/major-upgrade-missing-image/01-assert.yaml new file mode 100644 index 0000000000..b7d0f936fb --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/10--cluster.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/10--cluster.yaml new file mode 100644 index 0000000000..c85a9b8dae --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/10--cluster.yaml @@ -0,0 +1,23 @@ +--- +# Create the cluster we will do an actual upgrade on, but set the postgres version +# to '10' to force a missing image scenario +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + # postgres version that is no longer available + postgresVersion: 10 + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/10-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/10-assert.yaml new file mode 100644 index 0000000000..72e9ff6387 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/10-assert.yaml @@ -0,0 +1,12 @@ +--- +# The cluster is not running due to the missing image, not due to a proper +# shutdown status. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/11-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/11-assert.yaml new file mode 100644 index 0000000000..5bd9d447cb --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/11-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml new file mode 100644 index 0000000000..fcdf4f62e3 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml @@ -0,0 +1,17 @@ +--- +# Update the postgres version and restart the cluster. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: false + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # update postgres version + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/12-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/12-assert.yaml new file mode 100644 index 0000000000..14c33cccfe --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/12-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/13-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/13-assert.yaml new file mode 100644 index 0000000000..78e51e566a --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/13-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml new file mode 100644 index 0000000000..2fa2c949a9 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image + annotations: + postgres-operator.crunchydata.com/allow-upgrade: empty-image-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/14-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/14-assert.yaml new file mode 100644 index 0000000000..bd828180f4 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/14-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciliation is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/15--start-cluster.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/15--start-cluster.yaml new file mode 100644 index 0000000000..e5f270fb2f --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/15--start-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/15-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/15-assert.yaml new file mode 100644 index 0000000000..dfcbd4c819 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/15-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/17--check-version.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/17--check-version.yaml new file mode 100644 index 0000000000..5315c1d14f --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/17--check-version.yaml @@ -0,0 +1,39 @@ +--- +# Check the version reported by PostgreSQL +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-empty-image-pguser-major-upgrade-empty-image, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/17-assert.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/17-assert.yaml new file mode 100644 index 0000000000..56289c35c1 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/17-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/README.md b/testing/chainsaw/e2e/major-upgrade-missing-image/README.md new file mode 100644 index 0000000000..341cc854f7 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/README.md @@ -0,0 +1,36 @@ +## Major upgrade missing image tests + +This is a variation derived from our major upgrade KUTTL tests designed to +test scenarios where required container images are not defined in either the +PostgresCluster spec or via the RELATED_IMAGES environment variables. + +### Basic PGUpgrade controller and CRD instance validation + +* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-assert: check that the PGUpgrade instance exists and has the expected status + +### Verify new statuses for missing required container images + +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) +* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" +* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade +* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" + +### Update to an available Postgres version, start and upgrade PostgresCluster + +* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false +* 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" +* 13--shutdown-cluster: set spec.shutdown to 'true' +* 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" +* 14--annotate-cluster: set the required annotation +* 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status +* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' + +### Verify upgraded PostgresCluster + +* 15-assert: verify that the cluster is running +* 16-check-pgbackrest: check that the pgbackrest setup has successfully completed +* 17--check-version: check the version reported by PostgreSQL +* 17-assert: assert the Job from the previous step succeeded + + diff --git a/testing/chainsaw/e2e/major-upgrade-missing-image/chainsaw-test.yaml b/testing/chainsaw/e2e/major-upgrade-missing-image/chainsaw-test.yaml new file mode 100755 index 0000000000..d0e61a1214 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade-missing-image/chainsaw-test.yaml @@ -0,0 +1,61 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: major-upgrade-missing-image +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--valid-upgrade.yaml + - assert: + file: 01-assert.yaml + 
- name: step-10 + try: + - apply: + file: 10--cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - apply: + file: 11--shutdown-cluster.yaml + - assert: + file: 11-assert.yaml + - name: step-12 + try: + - apply: + file: 12--start-and-update-version.yaml + - assert: + file: 12-assert.yaml + - name: step-13 + try: + - apply: + file: 13--shutdown-cluster.yaml + - assert: + file: 13-assert.yaml + - name: step-14 + try: + - apply: + file: 14--annotate-cluster.yaml + - assert: + file: 14-assert.yaml + - name: step-15 + try: + - apply: + file: 15--start-cluster.yaml + - assert: + file: 15-assert.yaml + - name: step-16 + try: + - script: + content: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-empty-image-repo-host" -c pgbackrest -- pgbackrest check --stanza=db + - name: step-17 + try: + - apply: + file: 17--check-version.yaml + - assert: + file: 17-assert.yaml diff --git a/testing/chainsaw/e2e/major-upgrade/01--invalid-pgupgrade.yaml b/testing/chainsaw/e2e/major-upgrade/01--invalid-pgupgrade.yaml new file mode 100644 index 0000000000..ea90f5718a --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/01--invalid-pgupgrade.yaml @@ -0,0 +1,10 @@ +--- +# This pgupgrade is invalid and should get that condition (even with no cluster) +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +spec: + fromPostgresVersion: ${KUTTL_PG_VERSION} + toPostgresVersion: ${KUTTL_PG_VERSION} + postgresClusterName: major-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade/01-assert.yaml b/testing/chainsaw/e2e/major-upgrade/01-assert.yaml new file mode 100644 index 0000000000..f4cef66aa7 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGUpgradeInvalid" diff --git 
a/testing/chainsaw/e2e/major-upgrade/02--valid-upgrade.yaml b/testing/chainsaw/e2e/major-upgrade/02--valid-upgrade.yaml new file mode 100644 index 0000000000..f76ff06a9f --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/02--valid-upgrade.yaml @@ -0,0 +1,10 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +spec: + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: major-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade/02-assert.yaml b/testing/chainsaw/e2e/major-upgrade/02-assert.yaml new file mode 100644 index 0000000000..4df0ecc4d9 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/02-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/chainsaw/e2e/major-upgrade/10--already-updated-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/10--already-updated-cluster.yaml new file mode 100644 index 0000000000..0591645221 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/10--already-updated-cluster.yaml @@ -0,0 +1,16 @@ +--- +# Create a cluster that is already at the correct version +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/major-upgrade/10-assert.yaml 
b/testing/chainsaw/e2e/major-upgrade/10-assert.yaml new file mode 100644 index 0000000000..202864ef09 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/10-assert.yaml @@ -0,0 +1,11 @@ +--- +# pgupgrade should exit since the cluster is already at the requested version +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGUpgradeResolved" diff --git a/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml new file mode 100644 index 0000000000..14eab0efbb --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Delete the existing cluster. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: major-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade/20--cluster-with-invalid-version.yaml b/testing/chainsaw/e2e/major-upgrade/20--cluster-with-invalid-version.yaml new file mode 100644 index 0000000000..8d73277292 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/20--cluster-with-invalid-version.yaml @@ -0,0 +1,18 @@ +--- +# Create a cluster where the version does not match the pgupgrade's `from` +# TODO(benjaminjb): this isn't quite working out +# apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PostgresCluster +# metadata: +# name: major-upgrade +# spec: +# shutdown: true +# postgresVersion: ${KUTTL_PG_UPGRADE_TOO_EARLY_FROM_VERSION} +# instances: +# - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } +# backups: +# pgbackrest: +# repos: +# - name: repo1 +# volume: +# volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/major-upgrade/20-assert.yaml 
b/testing/chainsaw/e2e/major-upgrade/20-assert.yaml new file mode 100644 index 0000000000..2ea1486284 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/20-assert.yaml @@ -0,0 +1,11 @@ +--- +# # pgupgrade should exit since the cluster is already at the requested version +# apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PGUpgrade +# metadata: +# name: major-upgrade-do-it +# status: +# conditions: +# - type: "Progressing" +# status: "False" +# reason: "PGUpgradeInvalidForCluster" diff --git a/testing/chainsaw/e2e/major-upgrade/21-delete-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/21-delete-cluster.yaml new file mode 100644 index 0000000000..535c6311a4 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/21-delete-cluster.yaml @@ -0,0 +1,8 @@ +--- +# # Delete the existing cluster. +# apiVersion: kuttl.dev/v1beta1 +# kind: TestStep +# delete: +# - apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PostgresCluster +# name: major-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade/30--cluster.yaml b/testing/chainsaw/e2e/major-upgrade/30--cluster.yaml new file mode 100644 index 0000000000..01e1ef6175 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/30--cluster.yaml @@ -0,0 +1,22 @@ +--- +# Create the cluster we will do an actual upgrade on +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 3 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/major-upgrade/30-assert.yaml 
b/testing/chainsaw/e2e/major-upgrade/30-assert.yaml new file mode 100644 index 0000000000..1db8ec257d --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/30-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e/major-upgrade/31--create-data.yaml b/testing/chainsaw/e2e/major-upgrade/31--create-data.yaml new file mode 100644 index 0000000000..ed8c27b06b --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/31--create-data.yaml @@ -0,0 +1,94 @@ +--- +# Check the version reported by PostgreSQL and create some data. +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + CREATE SCHEMA very; + CREATE TABLE very.important (data) AS VALUES ('treasure'); +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "major-upgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/chainsaw/e2e/major-upgrade/31-assert.yaml b/testing/chainsaw/e2e/major-upgrade/31-assert.yaml new file mode 100644 index 0000000000..dab4dc9de0 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/31-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before-replica +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/major-upgrade/32--shutdown-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/32--shutdown-cluster.yaml new file mode 100644 index 0000000000..9e4a575a3a --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/32--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + shutdown: true diff --git a/testing/chainsaw/e2e/major-upgrade/32-assert.yaml b/testing/chainsaw/e2e/major-upgrade/32-assert.yaml new file mode 100644 index 0000000000..2ad7f2869a --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/32-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/chainsaw/e2e/major-upgrade/33--annotate-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/33--annotate-cluster.yaml new file mode 100644 index 0000000000..35cd269035 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/33--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade + annotations: + postgres-operator.crunchydata.com/allow-upgrade: major-upgrade-do-it diff --git a/testing/chainsaw/e2e/major-upgrade/33-assert.yaml b/testing/chainsaw/e2e/major-upgrade/33-assert.yaml new file mode 100644 index 0000000000..aadb5e3bb1 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/33-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciling is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/chainsaw/e2e/major-upgrade/34--restart-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/34--restart-cluster.yaml new file mode 100644 index 0000000000..ee674151ca --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/34--restart-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/chainsaw/e2e/major-upgrade/34-assert.yaml b/testing/chainsaw/e2e/major-upgrade/34-assert.yaml new file mode 100644 index 0000000000..aba583f74c --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/34-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml b/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml new file mode 100644 index 0000000000..be1c3ff357 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml @@ -0,0 +1,11 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# Check that the pgbackrest setup has successfully completed +- script: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-repo-host" -c pgbackrest -- pgbackrest check --stanza=db +# Check that the replica data dir has been successfully cleaned +- script: | + # Check that the old pg folders do not exist on the replica + REPLICA=$(kubectl get pod -l=postgres-operator.crunchydata.com/role=replica -n "${NAMESPACE}" -o=jsonpath='{ .items[0].metadata.name }') + kubectl -n "${NAMESPACE}" exec "${REPLICA}" -c database -- [ ! -d "pgdata/pg${KUTTL_PG_UPGRADE_FROM_VERSION}" ] diff --git a/testing/chainsaw/e2e/major-upgrade/36--check-data-and-version.yaml b/testing/chainsaw/e2e/major-upgrade/36--check-data-and-version.yaml new file mode 100644 index 0000000000..135f34c7df --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/36--check-data-and-version.yaml @@ -0,0 +1,108 @@ +--- +# Check the version reported by PostgreSQL and confirm that data was upgraded. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "major-upgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: 
major-upgrade-pguser-major-upgrade, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; diff --git a/testing/chainsaw/e2e/major-upgrade/36-assert.yaml b/testing/chainsaw/e2e/major-upgrade/36-assert.yaml new file mode 100644 index 0000000000..a545bfd756 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/36-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after-replica +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/password-change/00--cluster.yaml b/testing/chainsaw/e2e/password-change/00--cluster.yaml new file mode 100644 index 0000000000..2777286880 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/00--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + 
requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/password-change/00-assert.yaml b/testing/chainsaw/e2e/password-change/00-assert.yaml new file mode 100644 index 0000000000..bfedc0b25e --- /dev/null +++ b/testing/chainsaw/e2e/password-change/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: password-change-primary diff --git a/testing/chainsaw/e2e/password-change/01--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/01--psql-connect-uri.yaml new file mode 100644 index 0000000000..2c9b769f89 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/01--psql-connect-uri.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/01--psql-connect.yaml b/testing/chainsaw/e2e/password-change/01--psql-connect.yaml new file mode 100644 index 0000000000..28ffa3a0e5 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/01--psql-connect.yaml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: password } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/01-assert.yaml b/testing/chainsaw/e2e/password-change/01-assert.yaml new file mode 100644 index 0000000000..f9e5dca807 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/01-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/password-change/02--secret.yaml b/testing/chainsaw/e2e/password-change/02--secret.yaml new file mode 100644 index 0000000000..03e4816e91 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/02--secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +data: + # Hardcoding the password as "datalake" + password: ZGF0YWxha2U= + verifier: "" diff --git a/testing/chainsaw/e2e/password-change/02-errors.yaml b/testing/chainsaw/e2e/password-change/02-errors.yaml new file mode 100644 index 0000000000..300ace7737 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/02-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +data: + # `02-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. 
+ verifier: "" diff --git a/testing/chainsaw/e2e/password-change/03--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/03--psql-connect-uri.yaml new file mode 100644 index 0000000000..175482704a --- /dev/null +++ b/testing/chainsaw/e2e/password-change/03--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri2 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./02-errors.yaml checks that the secret is not in the state that we set it to + # in the ./02-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/03--psql-connect.yaml b/testing/chainsaw/e2e/password-change/03--psql-connect.yaml new file mode 100644 index 0000000000..fc03215183 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/03--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect2 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + # Hardcoding the 
password here to be equal to what we changed the password to in
+      # ./02-secret.yaml
+      # The ./02-errors.yaml checks that the secret is not in the state that we set it to
+      # in the ./02-secret.yaml file, i.e., the secret has been reconciled by PGO
+      - name: PGPASSWORD
+        value: datalake
+
+      # Do not wait indefinitely.
+      - { name: PGCONNECT_TIMEOUT, value: '5' }
diff --git a/testing/chainsaw/e2e/password-change/03-assert.yaml b/testing/chainsaw/e2e/password-change/03-assert.yaml
new file mode 100644
index 0000000000..9db69d0367
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/03-assert.yaml
@@ -0,0 +1,13 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-connect2
+status:
+  succeeded: 1
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-connect-uri2
+status:
+  succeeded: 1
diff --git a/testing/chainsaw/e2e/password-change/04--secret.yaml b/testing/chainsaw/e2e/password-change/04--secret.yaml
new file mode 100644
index 0000000000..f5cd1537c9
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/04--secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: password-change-pguser-password-change
+# Updating the password with the stringData field and an md5-based verifier
+stringData:
+  password: infopond
+  verifier: "md585eb8fa4f697b2ea949d3aba788e8631"
+  uri: ""
diff --git a/testing/chainsaw/e2e/password-change/04-errors.yaml b/testing/chainsaw/e2e/password-change/04-errors.yaml
new file mode 100644
index 0000000000..f23cdded80
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/04-errors.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: password-change-pguser-password-change
+data:
+  # `04-secret.yaml` changes the password and verifier and blanks the uri field,
+  # so when PGO reconciles the secret, it should fill in the empty uri field;
+  # if it does not fill in the uri field by a certain time this step will error
+  # and KUTTL will mark the test as failed.
+ uri: "" diff --git a/testing/chainsaw/e2e/password-change/05--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/05--psql-connect-uri.yaml new file mode 100644 index 0000000000..8e96ccfde5 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/05--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri3 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./04-errors.yaml checks that the secret is not in the state that we set it to + # in the ./04-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/05--psql-connect.yaml b/testing/chainsaw/e2e/password-change/05--psql-connect.yaml new file mode 100644 index 0000000000..7209235f31 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/05--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect3 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + # Hardcoding the password 
here to be equal to what we changed the password to in + # ./04-secret.yaml + # The ./04-errors.yaml checks that the secret is not in the state that we set it to + # in the ./04-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: infopond + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/05-assert.yaml b/testing/chainsaw/e2e/password-change/05-assert.yaml new file mode 100644 index 0000000000..07c2349b06 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/05-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect3 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri3 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/password-change/06--cluster.yaml b/testing/chainsaw/e2e/password-change/06--cluster.yaml new file mode 100644 index 0000000000..4cb70defdd --- /dev/null +++ b/testing/chainsaw/e2e/password-change/06--cluster.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +spec: + # Adding a custom user to the spec + users: + - name: rhino + databases: + - rhino diff --git a/testing/chainsaw/e2e/password-change/06-assert.yaml b/testing/chainsaw/e2e/password-change/06-assert.yaml new file mode 100644 index 0000000000..bfedc0b25e --- /dev/null +++ b/testing/chainsaw/e2e/password-change/06-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: password-change-primary diff --git a/testing/chainsaw/e2e/password-change/07--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/07--psql-connect-uri.yaml new file mode 100644 index 
0000000000..2fb8057021 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/07--psql-connect-uri.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri4 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/07--psql-connect.yaml b/testing/chainsaw/e2e/password-change/07--psql-connect.yaml new file mode 100644 index 0000000000..277cce24c4 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/07--psql-connect.yaml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect4 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: password } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/07-assert.yaml b/testing/chainsaw/e2e/password-change/07-assert.yaml new file mode 100644 index 0000000000..4f6afd5d98 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/07-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect4 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri4 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/password-change/08--secret.yaml b/testing/chainsaw/e2e/password-change/08--secret.yaml new file mode 100644 index 0000000000..b104ce7ae7 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/08--secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +data: + # Hardcoding the password as "datalake" + password: ZGF0YWxha2U= + verifier: "" diff --git a/testing/chainsaw/e2e/password-change/08-errors.yaml b/testing/chainsaw/e2e/password-change/08-errors.yaml new file mode 100644 index 0000000000..a7ab60c9eb --- /dev/null +++ b/testing/chainsaw/e2e/password-change/08-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +data: + # `08-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. 
+ verifier: "" diff --git a/testing/chainsaw/e2e/password-change/09--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/09--psql-connect-uri.yaml new file mode 100644 index 0000000000..5d83af7933 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/09--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri5 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./08-errors.yaml checks that the secret is not in the state that we set it to + # in the ./08-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/09--psql-connect.yaml b/testing/chainsaw/e2e/password-change/09--psql-connect.yaml new file mode 100644 index 0000000000..912fb33561 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/09--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect5 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + # Hardcoding the password here to be equal to what we changed the 
password to in
+      # ./08-secret.yaml
+      # The ./08-errors.yaml checks that the secret is not in the state that we set it to
+      # in the ./08-secret.yaml file, i.e., the secret has been reconciled by PGO
+      - name: PGPASSWORD
+        value: datalake
+
+      # Do not wait indefinitely.
+      - { name: PGCONNECT_TIMEOUT, value: '5' }
diff --git a/testing/chainsaw/e2e/password-change/09-assert.yaml b/testing/chainsaw/e2e/password-change/09-assert.yaml
new file mode 100644
index 0000000000..399b7cb17d
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/09-assert.yaml
@@ -0,0 +1,13 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-connect5
+status:
+  succeeded: 1
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: psql-connect-uri5
+status:
+  succeeded: 1
diff --git a/testing/chainsaw/e2e/password-change/10--secret.yaml b/testing/chainsaw/e2e/password-change/10--secret.yaml
new file mode 100644
index 0000000000..7002cc622e
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/10--secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: password-change-pguser-rhino
+# Updating the password with the stringData field and a scram verifier
+stringData:
+  password: infopond
+  verifier: "SCRAM-SHA-256$4096:RI03PMRQH2oAFMH6AOQHdA==$D74VOn98ErW3J8CIiFYldUVO+kjsXj+Ju7jhmMURHQo=:c5hC/1V2TYNnoJ6VcaSJCcoGQ2eTcYJBP/pfKFv+k54="
+  uri: ""
diff --git a/testing/chainsaw/e2e/password-change/10-errors.yaml b/testing/chainsaw/e2e/password-change/10-errors.yaml
new file mode 100644
index 0000000000..16d7b1642a
--- /dev/null
+++ b/testing/chainsaw/e2e/password-change/10-errors.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: password-change-pguser-rhino
+data:
+  # `10-secret.yaml` changes the password and verifier and blanks the uri field,
+  # so when PGO reconciles the secret, it should fill in the empty uri field;
+  # if it does not fill in the uri field by a certain time this step will error
+  # and KUTTL will mark the test as failed.
+ uri: "" diff --git a/testing/chainsaw/e2e/password-change/11--psql-connect-uri.yaml b/testing/chainsaw/e2e/password-change/11--psql-connect-uri.yaml new file mode 100644 index 0000000000..f7f6d8287a --- /dev/null +++ b/testing/chainsaw/e2e/password-change/11--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri6 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./10-errors.yaml checks that the secret is not in the state that we set it to + # in the ./10-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/11--psql-connect.yaml b/testing/chainsaw/e2e/password-change/11--psql-connect.yaml new file mode 100644 index 0000000000..420de82024 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/11--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect6 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + # Hardcoding the password here to be equal to what we changed the password 
to in + # ./10-secret.yaml + # The ./10-errors.yaml checks that the secret is not in the state that we set it to + # in the ./10-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: infopond + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/password-change/11-assert.yaml b/testing/chainsaw/e2e/password-change/11-assert.yaml new file mode 100644 index 0000000000..589c2cbf21 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/11-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect6 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri6 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/password-change/README.md b/testing/chainsaw/e2e/password-change/README.md new file mode 100644 index 0000000000..e898bd5ac2 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/README.md @@ -0,0 +1,27 @@ +### Password Change Test with Kuttl + +This Kuttl routine runs through the following steps: + +#### Create cluster and test connection + +- 00: Creates the cluster and verifies that it exists and is ready for connection +- 01: Connects to the cluster with the PGO-generated password (both with env vars and with the URI) + +#### Default user connection tests + +- 02: Change the password (using Kuttl's update object method on the secret's `data` field) and verify that the password changes by asserting that the `verifier` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 03: Connects to the cluster with the user-defined password (both with env vars and with the URI) +- 04: Change the password and verifier (using Kuttl's update object method on the secret's `stringData` field) and verify that the password changes by asserting that the `uri` field is not blank (using KUTTL's `errors` method, which makes sure that a state is 
_not_ met by a certain time) +- 05: Connects to the cluster with the second user-defined password (both with env vars and with the URI) + +#### Create custom user and test connection + +- 06: Updates the postgrescluster spec with a custom user and password +- 07: Connects to the cluster with the PGO-generated password (both with env vars and with the URI) for the custom user + +#### Custom user connection tests + +- 08: Change the custom user's password (using Kuttl's update object method on the secret's `data` field) and verify that the password changes by asserting that the `verifier` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 09: Connects to the cluster with the user-defined password (both with env vars and with the URI) for the custom user +- 10: Change the custom user's password and verifier (using Kuttl's update object method on the secret's `stringData` field) and verify that the password changes by asserting that the `uri` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 11: Connects to the cluster with the second user-defined password (both with env vars and with the URI) for the custom user diff --git a/testing/chainsaw/e2e/password-change/chainsaw-test.yaml b/testing/chainsaw/e2e/password-change/chainsaw-test.yaml new file mode 100755 index 0000000000..18b6670c23 --- /dev/null +++ b/testing/chainsaw/e2e/password-change/chainsaw-test.yaml @@ -0,0 +1,92 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: password-change +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--psql-connect-uri.yaml + - apply: + file: 01--psql-connect.yaml + 
- assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--secret.yaml + - error: + file: 02-errors.yaml + - name: step-03 + try: + - apply: + file: 03--psql-connect-uri.yaml + - apply: + file: 03--psql-connect.yaml + - assert: + file: 03-assert.yaml + - name: step-04 + try: + - apply: + file: 04--secret.yaml + - error: + file: 04-errors.yaml + - name: step-05 + try: + - apply: + file: 05--psql-connect-uri.yaml + - apply: + file: 05--psql-connect.yaml + - assert: + file: 05-assert.yaml + - name: step-06 + try: + - apply: + file: 06--cluster.yaml + - assert: + file: 06-assert.yaml + - name: step-07 + try: + - apply: + file: 07--psql-connect-uri.yaml + - apply: + file: 07--psql-connect.yaml + - assert: + file: 07-assert.yaml + - name: step-08 + try: + - apply: + file: 08--secret.yaml + - error: + file: 08-errors.yaml + - name: step-09 + try: + - apply: + file: 09--psql-connect-uri.yaml + - apply: + file: 09--psql-connect.yaml + - assert: + file: 09-assert.yaml + - name: step-10 + try: + - apply: + file: 10--secret.yaml + - error: + file: 10-errors.yaml + - name: step-11 + try: + - apply: + file: 11--psql-connect-uri.yaml + - apply: + file: 11--psql-connect.yaml + - assert: + file: 11-assert.yaml diff --git a/testing/chainsaw/e2e/pgadmin/01--cluster.yaml b/testing/chainsaw/e2e/pgadmin/01--cluster.yaml new file mode 100644 index 0000000000..2cc932c463 --- /dev/null +++ b/testing/chainsaw/e2e/pgadmin/01--cluster.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + configMap: config +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +type: Opaque +stringData: + password: myPassword +--- +# Create a cluster with a configured pgAdmin UI. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: interfaced + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + userInterface: + pgAdmin: + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + config: + files: + - secret: + name: test-secret + - configMap: + name: test-cm + settings: + SHOW_GRAVATAR_IMAGE: False + LOGIN_BANNER: | + Custom KUTTL Login Banner diff --git a/testing/chainsaw/e2e/pgadmin/01-assert.yaml b/testing/chainsaw/e2e/pgadmin/01-assert.yaml new file mode 100644 index 0000000000..e4192a1217 --- /dev/null +++ b/testing/chainsaw/e2e/pgadmin/01-assert.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: interfaced +status: + instances: + - name: instance1 + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: interfaced-pgadmin +status: + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm diff --git a/testing/chainsaw/e2e/pgadmin/chainsaw-test.yaml b/testing/chainsaw/e2e/pgadmin/chainsaw-test.yaml new file mode 100755 index 0000000000..75be66a6bd --- /dev/null +++ b/testing/chainsaw/e2e/pgadmin/chainsaw-test.yaml @@ -0,0 +1,67 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: 
pgadmin +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - script: + content: | + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- df --block-size=1K /etc/pgadmin | + awk '{ print } END { exit ($3 != "4") }' + - script: + content: | + SETTINGS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin.json + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${SETTINGS}" '"LOGIN_BANNER": "Custom KUTTL Login Banner\n"' && + contains "${SETTINGS}" '"SHOW_GRAVATAR_IMAGE": false' + } || { + echo >&2 'Wrong settings!' + echo "${SETTINGS}" + exit 1 + } + - script: + content: | + CONTENTS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/configMap + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${CONTENTS}" 'config' + } || { + echo >&2 'Wrong settings!' + echo "${CONTENTS}" + exit 1 + } + - script: + content: | + CONTENTS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/password + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${CONTENTS}" 'myPassword' + } || { + echo >&2 'Wrong settings!' 
+ echo "${CONTENTS}" + exit 1 + } diff --git a/testing/chainsaw/e2e/pgbackrest-init/00--cluster.yaml b/testing/chainsaw/e2e/pgbackrest-init/00--cluster.yaml new file mode 100644 index 0000000000..03391359a1 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/00--cluster.yaml @@ -0,0 +1,38 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + manual: + repoName: repo2 + options: + - --type=full + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + # Adding a second PVC repo for testing, rather than test with S3/GCS/Azure + - name: repo2 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/pgbackrest-init/00-assert.yaml b/testing/chainsaw/e2e/pgbackrest-init/00-assert.yaml new file mode 100644 index 0000000000..5181c95993 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/00-assert.yaml @@ -0,0 +1,68 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: +# Assert that the status has the two repos, with only the first having the `replicaCreateBackupComplete` field + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true + - bound: true + name: repo2 + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 +--- +# 
Assert the existence of two PVCs +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 + postgres-operator.crunchydata.com/pgbackrest-volume: "" + name: init-pgbackrest-repo1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +status: + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-repo: repo2 + postgres-operator.crunchydata.com/pgbackrest-volume: "" + name: init-pgbackrest-repo2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +status: + phase: Bound diff --git a/testing/chainsaw/e2e/pgbackrest-init/02-assert.yaml b/testing/chainsaw/e2e/pgbackrest-init/02-assert.yaml new file mode 100644 index 0000000000..589a04e738 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/02-assert.yaml @@ -0,0 +1,10 @@ +# Manual backup job should have pushed to repo2 +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: manual + postgres-operator.crunchydata.com/pgbackrest-repo: repo2 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-init/04--cluster.yaml b/testing/chainsaw/e2e/pgbackrest-init/04--cluster.yaml new file mode 100644 index 0000000000..e732f1fd9a --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/04--cluster.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +spec: + postgresVersion: ${KUTTL_PG_VERSION} + 
instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + manual: + repoName: repo2 + options: + - --type=full + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + # Adding a second PVC repo for testing, rather than test with S3/GCS/Azure + - name: repo2 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/pgbackrest-init/04-assert.yaml b/testing/chainsaw/e2e/pgbackrest-init/04-assert.yaml new file mode 100644 index 0000000000..04a38ac9f4 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/04-assert.yaml @@ -0,0 +1,34 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: +# Assert that the status has the two repos, with only the first having the `replicaCreateBackupComplete` field + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true + - bound: true + name: repo2 + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-init/README.md b/testing/chainsaw/e2e/pgbackrest-init/README.md new file mode 100644 index 0000000000..d319a31b09 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/README.md @@ -0,0 +1,6 @@ +### pgBackRest Init test + +* 00: Create a cluster with two PVC repos and set up for manual backups to go to the second; 
verify that the PVCs exist and that the backup job completed successfully +* 01: Run pgbackrest-initialization.sh, which checks that the status matches the expected status of `mixed` (because the second repo in the repo list has not yet been pushed to) and that there is only one full backup +* 02: Use `kubectl` to annotate the cluster to initiate a manual backup; verify that the job completed successfully +* 03: Rerun pgbackrest-initialization.sh, now expecting the status to be `ok` since both repos have been pushed to and there to be two full backups diff --git a/testing/chainsaw/e2e/pgbackrest-init/chainsaw-test.yaml b/testing/chainsaw/e2e/pgbackrest-init/chainsaw-test.yaml new file mode 100755 index 0000000000..477015abec --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-init/chainsaw-test.yaml @@ -0,0 +1,68 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: pgbackrest-init +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - script: + content: CLUSTER=init-pgbackrest ../../scripts/pgbackrest-initialization.sh + "mixed" 1 + - name: step-02 + try: + - command: + args: + - annotate + - -n + - $NAMESPACE + - postgrescluster + - init-pgbackrest + - postgres-operator.crunchydata.com/pgbackrest-backup=manual + entrypoint: kubectl + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - script: + content: CLUSTER=init-pgbackrest ../../scripts/pgbackrest-initialization.sh + "ok" 2 + - name: step-04 + try: + - apply: + file: 04--cluster.yaml + - assert: + file: 04-assert.yaml + - name: step-05 + try: + - script: + content: | + # Assumes the cluster only has a single replica + NEW_REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + 
postgres-operator.crunchydata.com/cluster=init-pgbackrest, + postgres-operator.crunchydata.com/role=replica' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" "${NEW_REPLICA}" -- \ + ls /pgdata/pg${KUTTL_PG_VERSION}/ + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + !(contains "${LIST}" 'recovery.signal') + } || { + echo >&2 'Signal file(s) found' + echo "${LIST}" + exit 1 + } diff --git a/testing/chainsaw/e2e/pgbackrest-restore/01--create-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/01--create-cluster.yaml new file mode 100644 index 0000000000..c414806892 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/01--create-cluster.yaml @@ -0,0 +1,26 @@ +--- +# Create a cluster with a single pgBackRest repository and some parameters that +# require attention during PostgreSQL recovery. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + max_connections: 200 + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 2 + backups: + pgbackrest: + manual: + repoName: repo1 + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/pgbackrest-restore/01-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/01-assert.yaml new file mode 100644 index 0000000000..25b5bbee76 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/01-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the replica backup to complete. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original +status: + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/chainsaw/e2e/pgbackrest-restore/02--create-data.yaml b/testing/chainsaw/e2e/pgbackrest-restore/02--create-data.yaml new file mode 100644 index 0000000000..6801edbf61 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/02--create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that will be restored. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE SCHEMA "original"; + CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/chainsaw/e2e/pgbackrest-restore/02-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/02-assert.yaml new file mode 100644 index 0000000000..5115ba97c9 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/03-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/03-assert.yaml new file mode 100644 index 0000000000..a2c5b3bb22 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/03-assert.yaml @@ -0,0 +1,13 @@ +--- +# Wait for the backup job to complete. 
+apiVersion: batch/v1 +kind: Job +metadata: + annotations: + postgres-operator.crunchydata.com/pgbackrest-backup: one + labels: + postgres-operator.crunchydata.com/cluster: original + postgres-operator.crunchydata.com/pgbackrest-backup: manual + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/04--clone-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/04--clone-cluster.yaml new file mode 100644 index 0000000000..4bc1ce56a9 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/04--clone-cluster.yaml @@ -0,0 +1,22 @@ +--- +# Clone the cluster using a pgBackRest restore. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-one + labels: { postgres-operator-test: kuttl } +spec: + dataSource: + postgresCluster: + clusterName: original + repoName: repo1 + + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/pgbackrest-restore/04-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/04-assert.yaml new file mode 100644 index 0000000000..8aa51fc440 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/04-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the clone cluster to come online. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-one +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/05--check-data.yaml b/testing/chainsaw/e2e/pgbackrest-restore/05--check-data.yaml new file mode 100644 index 0000000000..1ee6fe9c32 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/05--check-data.yaml @@ -0,0 +1,49 @@ +--- +# Confirm that all the data was restored. +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-one-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect to the cluster using the restored database and original credentials. + - name: PGHOST + valueFrom: { secretKeyRef: { name: clone-one-pguser-clone-one, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: clone-one-pguser-clone-one, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data was restored. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[{"data":"treasure"}]', format('got %L', restored); + END $$$$; diff --git a/testing/chainsaw/e2e/pgbackrest-restore/05-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/05-assert.yaml new file mode 100644 index 0000000000..1b6fad318b --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/05-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-one-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/07--update-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/07--update-cluster.yaml new file mode 100644 index 0000000000..f83a02c7c6 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/07--update-cluster.yaml @@ -0,0 +1,25 @@ +--- +# Update the cluster with PostgreSQL parameters that require attention during recovery. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + max_connections: 1000 + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 2 + backups: + pgbackrest: + manual: + repoName: repo1 + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/pgbackrest-restore/09--add-data.yaml b/testing/chainsaw/e2e/pgbackrest-restore/09--add-data.yaml new file mode 100644 index 0000000000..41c2255239 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/09--add-data.yaml @@ -0,0 +1,31 @@ +--- +# Add more data to the WAL archive. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: original-more-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + INSERT INTO important (data) VALUES ('water'), ('socks'); diff --git a/testing/chainsaw/e2e/pgbackrest-restore/09-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/09-assert.yaml new file mode 100644 index 0000000000..a60cd9ab8f --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/09-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-more-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/11--clone-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/11--clone-cluster.yaml new file mode 100644 index 0000000000..fcbdde4ea7 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/11--clone-cluster.yaml @@ -0,0 +1,22 @@ +--- +# Clone the cluster using a pgBackRest restore. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-two + labels: { postgres-operator-test: kuttl } +spec: + dataSource: + postgresCluster: + clusterName: original + repoName: repo1 + + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/pgbackrest-restore/11-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/11-assert.yaml new file mode 100644 index 0000000000..0ad9669a62 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/11-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the clone cluster to come online. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-two +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/12--check-data.yaml b/testing/chainsaw/e2e/pgbackrest-restore/12--check-data.yaml new file mode 100644 index 0000000000..2cd2e4932b --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/12--check-data.yaml @@ -0,0 +1,51 @@ +--- +# Confirm that all the data was restored. +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-two-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect to the cluster using the restored database and original credentials. 
+ - name: PGHOST + valueFrom: { secretKeyRef: { name: clone-two-pguser-clone-two, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: clone-two-pguser-clone-two, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data was restored. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; diff --git a/testing/chainsaw/e2e/pgbackrest-restore/12-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/12-assert.yaml new file mode 100644 index 0000000000..198d196836 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/12-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-two-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/15-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/15-assert.yaml new file mode 100644 index 0000000000..c408b75a60 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/15-assert.yaml @@ -0,0 +1,16 @@ +--- +# Wait for the restore to complete and the cluster to come online. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original +status: + instances: + - name: '00' + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 + pgbackrest: + restore: + id: one + finished: true diff --git a/testing/chainsaw/e2e/pgbackrest-restore/16--check-data.yaml b/testing/chainsaw/e2e/pgbackrest-restore/16--check-data.yaml new file mode 100644 index 0000000000..b0ae252831 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/16--check-data.yaml @@ -0,0 +1,100 @@ +--- +# Confirm that data was restored to the point-in-time. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-primary-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; + +--- +# Confirm that replicas are also restored and streaming from the primary. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-replica-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGPORT + valueFrom: { secretKeyRef: { name: original-pguser-original, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # The user secret does not contain the replica service. + - name: NAMESPACE + valueFrom: { fieldRef: { fieldPath: metadata.namespace } } + - name: PGHOST + value: "original-replicas.$(NAMESPACE).svc" + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + ASSERT pg_is_in_recovery(), 'expected replica'; + -- only users with "pg_read_all_settings" role may examine "primary_conninfo" + -- ASSERT current_setting('primary_conninfo') <> '', 'expected streaming'; + + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; diff --git a/testing/chainsaw/e2e/pgbackrest-restore/16-assert.yaml b/testing/chainsaw/e2e/pgbackrest-restore/16-assert.yaml new file mode 100644 index 0000000000..0baadef25b --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/16-assert.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-primary-data +status: + succeeded: 1 + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-replica-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml b/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml new file mode 100755 index 0000000000..473a8afe47 --- /dev/null +++ b/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml @@ -0,0 +1,260 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: pgbackrest-restore +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--create-cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--create-data.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - script: + content: | + kubectl annotate --namespace="${NAMESPACE}" postgrescluster/original \ + 'postgres-operator.crunchydata.com/pgbackrest-backup=one' + - 
assert: + file: 03-assert.yaml + - name: step-04 + try: + - apply: + file: 04--clone-cluster.yaml + - assert: + file: 04-assert.yaml + - name: step-05 + try: + - apply: + file: 05--check-data.yaml + - assert: + file: 05-assert.yaml + - name: step-06 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: clone-one + - name: step-07 + try: + - script: + content: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + START=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT pg_postmaster_start_time()' + ) + kubectl annotate --namespace "${NAMESPACE}" postgrescluster/original \ + "testing/start-before=${START}" + - apply: + file: 07--update-cluster.yaml + - name: step-08 + try: + - script: + content: | + BEFORE=$( + kubectl get --namespace "${NAMESPACE}" postgrescluster/original \ + --output 'go-template={{ index .metadata.annotations "testing/start-before" }}' + ) + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + + # Wait for PostgreSQL to restart. + while true; do + START=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT pg_postmaster_start_time()' + ) + if [ "${START}" ] && [ "${START}" != "${BEFORE}" ]; then break; else sleep 1; fi + done + echo "${START} != ${BEFORE}" + + # Reset counters in the "pg_stat_archiver" view. 
+ kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --command "SELECT pg_stat_reset_shared('archiver')" + - name: step-09 + try: + - apply: + file: 09--add-data.yaml + - assert: + file: 09-assert.yaml + - name: step-10 + try: + - script: + content: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + + # Wait for the data to be sent to the WAL archive. A prior step reset the + # "pg_stat_archiver" counters, so anything more than zero should suffice. + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -c 'SELECT pg_switch_wal()' + while [ 0 = "$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + )" ]; do sleep 1; done + - name: step-11 + try: + - apply: + file: 11--clone-cluster.yaml + - assert: + file: 11-assert.yaml + - name: step-12 + try: + - apply: + file: 12--check-data.yaml + - assert: + file: 12-assert.yaml + - name: step-13 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: clone-two + - name: step-14 + try: + - script: + content: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + OBJECTIVE=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT clock_timestamp()' + ) + + # Store the recovery objective for later steps. + kubectl annotate --namespace "${NAMESPACE}" postgrescluster/original \ + "testing/objective=${OBJECTIVE}" + + # A reason to restore. Wait for the change to be sent to the WAL archive. 
+ kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --command 'DROP TABLE original.important' \ + --command "SELECT pg_stat_reset_shared('archiver')" \ + --command 'SELECT pg_switch_wal()' + + while [ 0 = "$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + )" ]; do sleep 1; done + - script: + content: | + REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=replica' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${REPLICA}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --file=- <<'SQL' + DO $$ + BEGIN + ASSERT to_regclass('important') IS NULL, 'expected no table'; + PERFORM * FROM information_schema.tables WHERE table_name = 'important'; + ASSERT NOT FOUND, 'expected no table'; + END $$ + SQL + - name: step-15 + try: + - script: + content: | + TARGET_JSON=$( + kubectl get --namespace "${NAMESPACE}" postgrescluster/original \ + --output 'go-template={{ index .metadata.annotations "testing/objective" | printf "--target=%q" | printf "%q" }}' + ) + + # Configure the cluster for an in-place point-in-time restore (PITR). + kubectl patch --namespace "${NAMESPACE}" postgrescluster/original \ + --type 'merge' --patch ' + {"spec":{"backups":{"pgbackrest":{"restore":{ + "enabled": true, + "repoName": "repo1", + "options": ["--type=time", '"${TARGET_JSON}"'] + }}}}}' + + # Annotate the cluster to trigger the restore. 
+ kubectl annotate --namespace="${NAMESPACE}" postgrescluster/original \ + 'postgres-operator.crunchydata.com/pgbackrest-restore=one' + + # TODO(benjaminjb): remove this when PG10 is no longer being supported + # For PG10, we need to run a patronictl reinit for the replica when that is running + # Get the replica name--the replica will exist during the PITR process so we don't need to wait + if [[ ${KUTTL_PG_VERSION} == 10 ]]; then + # Find replica + REPLICA=$(kubectl get pods --namespace "${NAMESPACE}" \ + --selector=' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/data=postgres, + postgres-operator.crunchydata.com/role!=master' \ + --output=jsonpath={.items..metadata.name}) + + # Wait for replica to be deleted + kubectl wait pod/"${REPLICA}" --namespace "${NAMESPACE}" --for=delete --timeout=-1s + + # Wait for the restarted replica to be started + NOT_RUNNING="" + while [[ "${NOT_RUNNING}" == "" ]]; do + kubectl get pods --namespace "${NAMESPACE}" "${REPLICA}" || (sleep 1 && continue) + + NOT_RUNNING=$(kubectl get pods --namespace "${NAMESPACE}" "${REPLICA}" \ + --output jsonpath="{.status.containerStatuses[?(@.name=='database')].state.running.startedAt}") + sleep 1 + done + + kubectl exec --namespace "${NAMESPACE}" "${REPLICA}" -- patronictl reinit original-ha "${REPLICA}" --force + fi + - assert: + file: 15-assert.yaml + - name: step-16 + try: + - apply: + file: 16--check-data.yaml + - assert: + file: 16-assert.yaml + - name: step-17 + try: + - script: + content: | + REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=replica' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${REPLICA}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --file=- <<'SQL' + DO $$ + BEGIN + PERFORM * FROM pg_stat_wal_receiver WHERE status = 'streaming'; + ASSERT FOUND, 'expected streaming replication'; + 
END $$ + SQL diff --git a/testing/chainsaw/e2e/pgbouncer/00--cluster.yaml b/testing/chainsaw/e2e/pgbouncer/00--cluster.yaml new file mode 100644 index 0000000000..c83bfea9d3 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/00--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: proxied + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + proxy: + pgBouncer: + replicas: 1 + config: + # Set the pgBouncer verbosity level to debug to print connection logs + # --https://www.pgbouncer.org/config.html#log-settings + global: + verbose: '1' diff --git a/testing/chainsaw/e2e/pgbouncer/00-assert.yaml b/testing/chainsaw/e2e/pgbouncer/00-assert.yaml new file mode 100644 index 0000000000..afe492faa0 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: proxied +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: proxied-pgbouncer diff --git a/testing/chainsaw/e2e/pgbouncer/01--psql-connect.yaml b/testing/chainsaw/e2e/pgbouncer/01--psql-connect.yaml new file mode 100644 index 0000000000..0f7099d4e8 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/01--psql-connect.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + 
image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGSSLMODE + value: verify-full + - name: PGSSLROOTCERT + value: "/tmp/certs/ca.crt" + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: password } } + volumeMounts: + - name: certs + mountPath: "/tmp/certs" + volumes: + - name: certs + secret: + secretName: proxied-cluster-cert diff --git a/testing/chainsaw/e2e/pgbouncer/01-assert.yaml b/testing/chainsaw/e2e/pgbouncer/01-assert.yaml new file mode 100644 index 0000000000..e4d8bbb37a --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbouncer/10--read-certificate.yaml b/testing/chainsaw/e2e/pgbouncer/10--read-certificate.yaml new file mode 100644 index 0000000000..87739116ae --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/10--read-certificate.yaml @@ -0,0 +1,28 @@ +--- +# Print the certificate presented by PgBouncer. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-before + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: openssl + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + command: + - bash + - -ceu + - | + openssl s_client --connect '$(PGHOST):$(PGPORT)' --starttls postgres < /dev/null 2> /dev/null | + openssl x509 --noout --text diff --git a/testing/chainsaw/e2e/pgbouncer/10-assert.yaml b/testing/chainsaw/e2e/pgbouncer/10-assert.yaml new file mode 100644 index 0000000000..87d1a262fb --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/10-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-before +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbouncer/11--open-connection.yaml b/testing/chainsaw/e2e/pgbouncer/11--open-connection.yaml new file mode 100644 index 0000000000..f43c586e7f --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/11--open-connection.yaml @@ -0,0 +1,43 @@ +--- +# Connect through PgBouncer and wait long enough for TLS certificates to rotate. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-open-connection + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + volumes: + # TODO(cbandy): Provide a CA bundle that clients can use for verification. + - { name: tls, secret: { secretName: proxied-cluster-cert } } + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect through PgBouncer. 
+ - name: PGURI + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-uri } } + + # Verify the certificate presented by PgBouncer. + - { name: PGSSLMODE, value: verify-full } + - { name: PGSSLROOTCERT, value: /mnt/ca.crt } + + volumeMounts: + - { name: tls, mountPath: /mnt } + + command: + - psql + - $(PGURI) + - -qAt + - --set=ON_ERROR_STOP=1 + + # Print connection details. + - --command=SELECT pid, backend_start FROM pg_stat_activity WHERE pid = pg_backend_pid(); + + # Wait here so later test steps can see this open connection. + - --command=SELECT pg_sleep_for('5 minutes'); diff --git a/testing/chainsaw/e2e/pgbouncer/11-assert.yaml b/testing/chainsaw/e2e/pgbouncer/11-assert.yaml new file mode 100644 index 0000000000..4c1f3a752d --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/11-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the job to start. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-open-connection +status: + active: 1 + +--- +# Wait for the pod to start. +apiVersion: v1 +kind: Pod +metadata: + labels: + job-name: psql-open-connection +status: + phase: Running diff --git a/testing/chainsaw/e2e/pgbouncer/13--read-certificate.yaml b/testing/chainsaw/e2e/pgbouncer/13--read-certificate.yaml new file mode 100644 index 0000000000..5134c75ab0 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/13--read-certificate.yaml @@ -0,0 +1,28 @@ +--- +# Print the certificate presented by PgBouncer. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: openssl + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + command: + - bash + - -ceu + - | + openssl s_client --connect '$(PGHOST):$(PGPORT)' --starttls postgres < /dev/null 2> /dev/null | + openssl x509 --noout --text diff --git a/testing/chainsaw/e2e/pgbouncer/13-assert.yaml b/testing/chainsaw/e2e/pgbouncer/13-assert.yaml new file mode 100644 index 0000000000..ca9eae62a0 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/13-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-after +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbouncer/16--reconnect.yaml b/testing/chainsaw/e2e/pgbouncer/16--reconnect.yaml new file mode 100644 index 0000000000..e070430169 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/16--reconnect.yaml @@ -0,0 +1,46 @@ +--- +# Verify the new PgBouncer certificate and transport encryption. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-tls-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + volumes: + # TODO(cbandy): Provide a CA bundle that clients can use for verification. + - { name: tls, secret: { secretName: proxied-cluster-cert } } + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect through PgBouncer. + - name: PGURI + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-uri } } + + # Verify the certificate presented by PgBouncer. 
+ - { name: PGSSLMODE, value: verify-full } + - { name: PGSSLROOTCERT, value: /mnt/ca.crt } + + volumeMounts: + - { name: tls, mountPath: /mnt } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - -qb + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + PERFORM * FROM pg_stat_ssl WHERE ssl AND pid = pg_backend_pid(); + ASSERT FOUND, 'expected TLS end-to-end'; + END $$$$; diff --git a/testing/chainsaw/e2e/pgbouncer/16-assert.yaml b/testing/chainsaw/e2e/pgbouncer/16-assert.yaml new file mode 100644 index 0000000000..b6fbbf95f2 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/16-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-tls-after +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/pgbouncer/chainsaw-test.yaml b/testing/chainsaw/e2e/pgbouncer/chainsaw-test.yaml new file mode 100755 index 0000000000..6b20fc0195 --- /dev/null +++ b/testing/chainsaw/e2e/pgbouncer/chainsaw-test.yaml @@ -0,0 +1,107 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: pgbouncer +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--psql-connect.yaml + - assert: + file: 01-assert.yaml + - name: step-10 + try: + - apply: + file: 10--read-certificate.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - apply: + file: 11--open-connection.yaml + - assert: + file: 11-assert.yaml + - name: step-12 + try: + - script: + content: "BEFORE=$(date -u +%FT%TZ)\n\n# Wipe out the stored PgBouncer certificate.\nkubectl + patch --namespace \"${NAMESPACE}\" secret/proxied-pgbouncer \\\n --patch + 
'{\"data\":{\"pgbouncer-frontend.crt\":\"\"}}'\n\n# Wait for the certificate + to be regenerated then loaded.\n# Changing this from \"wait until timeout\" + to \"try X times\"\n# so that we can get the logs before exiting 1 in case + we cannot find the reload.\nfor _ in $(seq 120); do\n kubectl logs --namespace + \"${NAMESPACE}\" deployment.apps/proxied-pgbouncer \\\n --container pgbouncer-config + --since-time \"${BEFORE}\" | grep 'Loaded' && \\\n found=true && break\n + \ sleep 1\ndone\n\n# This test has been flaky in the past, potentially around + rotating/reloading the cert.\n# To help debug, we set the pgBouncer verbosity + to 1 (debug) and print the logs\nkubectl logs --namespace \"${NAMESPACE}\" + deployment.apps/proxied-pgbouncer \\\n --all-containers --prefix --timestamps\n\n# + If we haven't found the `Loaded` log statement, exit with an error\nif [ + -z \"$found\" ]; then \n echo \"pgbouncer-config has failed to reload in + time\"\n exit 1; \nfi\n" + - name: step-13 + try: + - apply: + file: 13--read-certificate.yaml + - assert: + file: 13-assert.yaml + - name: step-14 + try: + - script: + content: | + bash -c '! diff -u \ + <(kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-before) \ + <(kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-after) \ + ' || { + echo 'Certificate did not change!' 
+ kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-after + exit 1 + } + - name: step-15 + try: + - script: + content: | + CONNECTION=$( + kubectl logs --namespace "${NAMESPACE}" job.batch/psql-open-connection + ) + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=proxied, + postgres-operator.crunchydata.com/role=master' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --set ON_ERROR_STOP=1 --set CONNECTION="${CONNECTION}" \ + --file=- <<'SQL' + SELECT + set_config('testing.pid', (string_to_array(:'CONNECTION', '|'))[1], false) AS "testing.pid", + set_config('testing.start', (string_to_array(:'CONNECTION', '|'))[2], false) AS "testing.start"; + + DO $$ + BEGIN + PERFORM * FROM pg_stat_ssl + WHERE ssl AND pid = current_setting('testing.pid')::integer; + ASSERT FOUND, 'expected TLS end-to-end'; + + PERFORM * FROM pg_stat_activity + WHERE pid = current_setting('testing.pid')::integer + AND backend_start = current_setting('testing.start')::timestamptz; + ASSERT FOUND, 'expected to stay connected'; + END $$; + SQL + - name: step-16 + try: + - apply: + file: 16--reconnect.yaml + - assert: + file: 16-assert.yaml diff --git a/testing/chainsaw/e2e/replica-read/00--cluster.yaml b/testing/chainsaw/e2e/replica-read/00--cluster.yaml new file mode 100644 index 0000000000..a79666f4e1 --- /dev/null +++ b/testing/chainsaw/e2e/replica-read/00--cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: replica-read +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + replicas: 2 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git 
a/testing/chainsaw/e2e/replica-read/00-assert.yaml b/testing/chainsaw/e2e/replica-read/00-assert.yaml new file mode 100644 index 0000000000..17c2942eb0 --- /dev/null +++ b/testing/chainsaw/e2e/replica-read/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: replica-read +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: replica-read-replicas diff --git a/testing/chainsaw/e2e/replica-read/01--psql-replica-read.yaml b/testing/chainsaw/e2e/replica-read/01--psql-replica-read.yaml new file mode 100644 index 0000000000..3d000aee85 --- /dev/null +++ b/testing/chainsaw/e2e/replica-read/01--psql-replica-read.yaml @@ -0,0 +1,44 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-replica-read +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + # https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html#PLPGSQL-STATEMENTS-ASSERT + # If run on a non-replica, this assertion fails, resulting in the pod erroring + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + - psql + - -qc + - | + DO $$$$ + BEGIN + ASSERT pg_is_in_recovery(); + END $$$$; + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "replica-read-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/chainsaw/e2e/replica-read/01-assert.yaml b/testing/chainsaw/e2e/replica-read/01-assert.yaml new file mode 100644 index 0000000000..97ea0972c3 --- /dev/null +++ b/testing/chainsaw/e2e/replica-read/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-replica-read +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/replica-read/chainsaw-test.yaml b/testing/chainsaw/e2e/replica-read/chainsaw-test.yaml new file mode 100755 index 0000000000..7c788cd235 --- /dev/null +++ b/testing/chainsaw/e2e/replica-read/chainsaw-test.yaml @@ -0,0 +1,20 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: replica-read +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--psql-replica-read.yaml + - assert: + file: 01-assert.yaml 
diff --git a/testing/chainsaw/e2e/root-cert-ownership/00--cluster.yaml b/testing/chainsaw/e2e/root-cert-ownership/00--cluster.yaml new file mode 100644 index 0000000000..461ae7ccba --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/00--cluster.yaml @@ -0,0 +1,35 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/root-cert-ownership/00-assert.yaml b/testing/chainsaw/e2e/root-cert-ownership/00-assert.yaml new file mode 100644 index 0000000000..406465b691 --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/00-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
pgo-root-cacert diff --git a/testing/chainsaw/e2e/root-cert-ownership/02-assert.yaml b/testing/chainsaw/e2e/root-cert-ownership/02-assert.yaml new file mode 100644 index 0000000000..839f6a9b29 --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/02-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgo-root-cacert diff --git a/testing/chainsaw/e2e/root-cert-ownership/02-errors.yaml b/testing/chainsaw/e2e/root-cert-ownership/02-errors.yaml new file mode 100644 index 0000000000..d8f159d59c --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/02-errors.yaml @@ -0,0 +1,4 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 diff --git a/testing/chainsaw/e2e/root-cert-ownership/04-errors.yaml b/testing/chainsaw/e2e/root-cert-ownership/04-errors.yaml new file mode 100644 index 0000000000..b117c4561b --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/04-errors.yaml @@ -0,0 +1,9 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 diff --git a/testing/chainsaw/e2e/root-cert-ownership/README.md b/testing/chainsaw/e2e/root-cert-ownership/README.md new file mode 100644 index 0000000000..fe29596938 --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/README.md @@ -0,0 +1,23 @@ +### Root Certificate Ownership Test + +This Kuttl routine runs through the following steps: + +#### Create two clusters and verify the root certificate secret ownership + +- 00: Creates the two clusters and verifies they and the root cert secret exist +- 01: Check that the secret shows both clusters as owners + +#### Delete the first cluster and verify the root certificate secret ownership + +- 02: Delete the first cluster, 
assert that the second cluster and the root cert +secret are still present and that the first cluster is not present +- 03: Check that the secret shows the second cluster as an owner but does not show +the first cluster as an owner + +#### Delete the second cluster and verify the root certificate secret ownership + +- 04: Delete the second cluster, assert that both clusters are not present +- 05: Check the number of clusters in the namespace. If there are any remaining +clusters, ensure that the secret shows neither the first nor second cluster as an +owner. If there are no clusters remaining in the namespace, ensure the root cert +secret has been deleted. diff --git a/testing/chainsaw/e2e/root-cert-ownership/chainsaw-test.yaml b/testing/chainsaw/e2e/root-cert-ownership/chainsaw-test.yaml new file mode 100755 index 0000000000..0dd57bea15 --- /dev/null +++ b/testing/chainsaw/e2e/root-cert-ownership/chainsaw-test.yaml @@ -0,0 +1,93 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: root-cert-ownership +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do + sleep 1 # this sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If owner1 and owner2 are both listed, exit successfully + if contains "${CURRENT_OWNERS}" "owner1" && contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done + - name: step-02 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: owner1 + - assert: + file: 02-assert.yaml + - 
error: + file: 02-errors.yaml + - name: step-03 + try: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do + sleep 1 # this sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If owner1 is removed and owner2 is still listed, exit successfully + if !(contains "${CURRENT_OWNERS}" "owner1") && contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done + - name: step-04 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: owner2 + - error: + file: 04-errors.yaml + - name: step-05 + try: + - script: + content: | + NUM_CLUSTERS=$(kubectl --namespace="${NAMESPACE}" get postgrescluster --output name | wc -l) + echo "Found ${NUM_CLUSTERS} clusters" + if [ "$NUM_CLUSTERS" != 0 ]; then + # Continue checking until Kuttl times out + # If at least one owner is never removed the test fails + while true; do + sleep 5 # This sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If neither owner is listed, exit successfully + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + if ! contains "${CURRENT_OWNERS}" "owner1" && ! 
contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done + else + # Continue checking until Kuttl times out + # If the secret is never removed, the test fails + while true; do + sleep 5 # this sleep allows time for garbage collector to delete the secret + ROOT_SECRET=$(kubectl --namespace="${NAMESPACE}" get --ignore-not-found \ + secret pgo-root-cacert --output name | wc -l) + if [ "$ROOT_SECRET" = 0 ]; then + exit 0 + fi + done + fi diff --git a/testing/chainsaw/e2e/scaledown/00--create-cluster.yaml b/testing/chainsaw/e2e/scaledown/00--create-cluster.yaml new file mode 100644 index 0000000000..50377c2fb6 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/00--create-cluster.yaml @@ -0,0 +1,32 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/00-assert.yaml b/testing/chainsaw/e2e/scaledown/00-assert.yaml new file mode 100644 index 0000000000..b5fa5a9051 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/00-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/scaledown/01--update-cluster.yaml b/testing/chainsaw/e2e/scaledown/01--update-cluster.yaml new file mode 100644 index 0000000000..d6409a8fd1 --- /dev/null +++ 
b/testing/chainsaw/e2e/scaledown/01--update-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/01-assert.yaml b/testing/chainsaw/e2e/scaledown/01-assert.yaml new file mode 100644 index 0000000000..45bb0b6d04 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/01-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/scaledown/10--create-cluster.yaml b/testing/chainsaw/e2e/scaledown/10--create-cluster.yaml new file mode 100644 index 0000000000..3847e588c0 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/10--create-cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/10-assert.yaml b/testing/chainsaw/e2e/scaledown/10-assert.yaml new file mode 100644 index 0000000000..cf8bcb461a --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/10-assert.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Pod 
+metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: replica +status: + phase: Running diff --git a/testing/chainsaw/e2e/scaledown/12--update-cluster.yaml b/testing/chainsaw/e2e/scaledown/12--update-cluster.yaml new file mode 100644 index 0000000000..3b4f62094a --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/12--update-cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/12-assert.yaml b/testing/chainsaw/e2e/scaledown/12-assert.yaml new file mode 100644 index 0000000000..079435b67d --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/12-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master + testing/role-before: master +status: + phase: Running diff --git a/testing/chainsaw/e2e/scaledown/20--create-cluster.yaml b/testing/chainsaw/e2e/scaledown/20--create-cluster.yaml new file mode 100644 index 0000000000..796f88db3c --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/20--create-cluster.yaml @@ -0,0 
+1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/20-assert.yaml b/testing/chainsaw/e2e/scaledown/20-assert.yaml new file mode 100644 index 0000000000..f65cef60b8 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/20-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/scaledown/21--update-cluster.yaml b/testing/chainsaw/e2e/scaledown/21--update-cluster.yaml new file mode 100644 index 0000000000..02d8936d0b --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/21--update-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/scaledown/21-assert.yaml b/testing/chainsaw/e2e/scaledown/21-assert.yaml new file mode 100644 index 0000000000..f137a616b8 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/21-assert.yaml @@ -0,0 
+1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/chainsaw/e2e/scaledown/chainsaw-test.yaml b/testing/chainsaw/e2e/scaledown/chainsaw-test.yaml new file mode 100755 index 0000000000..1b97fb0362 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/chainsaw-test.yaml @@ -0,0 +1,70 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: scaledown +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--create-cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--update-cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: scaledown + - name: step-10 + try: + - apply: + file: 10--create-cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=master' \ + 'testing/role-before=master' + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + 'testing/role-before=replica' + - name: step-12 + try: + - apply: + file: 12--update-cluster.yaml + - assert: + file: 12-assert.yaml + - name: step-13 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: scaledown1 + - name: step-20 + try: + - apply: + file: 20--create-cluster.yaml + - assert: + file: 20-assert.yaml + - name: step-21 + try: + - apply: + 
file: 21--update-cluster.yaml + - assert: + file: 21-assert.yaml diff --git a/testing/chainsaw/e2e/scaledown/readme.MD b/testing/chainsaw/e2e/scaledown/readme.MD new file mode 100644 index 0000000000..44fd880ed1 --- /dev/null +++ b/testing/chainsaw/e2e/scaledown/readme.MD @@ -0,0 +1,31 @@ +## Scaledown tests + +This is a KUTTL version of a previous `TestScaleDown` test that was prone to flaky behavior; +The KUTTL test captures the three test-cases enumerated in that test, and for ease of reading, +all three tests exist in this folder, which necessitates a clean-up step after tests one and two. +This test makes extensive use of `status.instances` to make sure that the expected instances +have the expected number of pods. + +### From two sets to one set + +* 00--create-cluster: create the cluster with two instance sets, one replica each +* 00-assert: check that the cluster exists with the expected status +* 01--update-cluster: update the cluster to remove one instance set +* 01-assert: check that the cluster exists with the expected status +* 02--delete-cluster + +### From one set with multiple replicas to one set with one replica + +* 10--create-cluster: create the cluster with one instance set with two replicas +* 10-assert: check that the cluster exists with the expected status +* 11-annotate: set the roles as labels on the pods +* 12--update-cluster: update the cluster to remove one replica +* 12-assert: check that the cluster exists with the expected status; and that the `master` pod that exists was the `master` before the scaledown +* 13--delete-cluster: delete the cluster + +### From two sets with variable replicas to two sets with one replica each + +* 20--create-cluster: create the cluster with two instance sets, with two and one replica +* 20-assert: check that the cluster exists with the expected status +* 21--update-cluster: update the cluster to reduce the two-replica instance to one-replica +* 21-assert: check that the cluster exists with the expected 
status diff --git a/testing/chainsaw/e2e/security-context/00--cluster.yaml b/testing/chainsaw/e2e/security-context/00--cluster.yaml new file mode 100644 index 0000000000..5155eb4fc6 --- /dev/null +++ b/testing/chainsaw/e2e/security-context/00--cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: security-context + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + proxy: + pgBouncer: + replicas: 1 + userInterface: + pgAdmin: + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/chainsaw/e2e/security-context/00-assert.yaml b/testing/chainsaw/e2e/security-context/00-assert.yaml new file mode 100644 index 0000000000..a6a5f48b6a --- /dev/null +++ b/testing/chainsaw/e2e/security-context/00-assert.yaml @@ -0,0 +1,186 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: security-context +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +# initial pgBackRest backup +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 
+spec: + containers: + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# instance +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: security-context-ha + postgres-operator.crunchydata.com/role: master +spec: + containers: + - name: database + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: replication-cert-copy + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: exporter + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: postgres-startup + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgAdmin +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + statefulset.kubernetes.io/pod-name: security-context-pgadmin-0 + name: security-context-pgadmin-0 +spec: + containers: + - name: pgadmin + securityContext: + 
allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: pgadmin-startup + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgBouncer +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/role: pgbouncer +spec: + containers: + - name: pgbouncer + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbouncer-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgBackRest repo +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-dedicated: "" + statefulset.kubernetes.io/pod-name: security-context-repo-host-0 + name: security-context-repo-host-0 +spec: + containers: + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: pgbackrest-log-dir + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true diff --git 
a/testing/chainsaw/e2e/security-context/chainsaw-test.yaml b/testing/chainsaw/e2e/security-context/chainsaw-test.yaml new file mode 100755 index 0000000000..f508de7905 --- /dev/null +++ b/testing/chainsaw/e2e/security-context/chainsaw-test.yaml @@ -0,0 +1,71 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: security-context +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - script: + content: | + # Check that every container has the correct capabilities. + + # Capture every container name alongside its list of dropped capabilities. + CONTAINERS_DROP_CAPS=$( + kubectl --namespace "${NAMESPACE}" get pods --output "jsonpath={\ + range .items[*].spec.containers[*]\ + }{ @.name }{'\t\t'}{ @.securityContext.capabilities.drop }{'\n'}{\ + end\ + }" + ) || exit + + WRONG=$( ! echo "${CONTAINERS_DROP_CAPS}" | grep -Fv '"ALL"' ) || { + echo 'Not all containers have dropped "ALL" capabilities!' + echo "${WRONG}" + exit 1 + } + - script: + content: | + # Check that every Pod is assigned to the "restricted" SecurityContextConstraint + # in OpenShift. + + SCC=$( + kubectl api-resources --cached | + grep -F 'security.openshift.io/v1' | + grep -F 'SecurityContextConstraint' + ) + + # Skip this check when the API has no notion of SecurityContextConstraint. + [ -z "${SCC}" ] && exit + + PODS_SCC=$( + kubectl --namespace "${NAMESPACE}" get pods --no-headers \ + --output "custom-columns=\ + NAME:.metadata.name,\ + SCC:.metadata.annotations['openshift\.io/scc']\ + " + ) || exit + + WRONG=$( ! 
echo "${PODS_SCC}" | grep -Ev -e '\ policies.yaml + kyverno apply --cluster --namespace "${NAMESPACE}" policies.yaml diff --git a/testing/chainsaw/e2e/standalone-pgadmin/README.md b/testing/chainsaw/e2e/standalone-pgadmin/README.md new file mode 100644 index 0000000000..187c6f37af --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/README.md @@ -0,0 +1,49 @@ +** pgAdmin ** + +Note: due to the (random) namespace being part of the host, we cannot check the configmap using the usual assert/file pattern. + +*Phase one* + +* 00: + * create a pgadmin with no server groups; + * check the correct existence of the secret, configmap, and pod. +* 01: dump the servers from pgAdmin and check that the list is empty. + +*Phase two* + +* 02: + * create a postgrescluster with a label; + * update the pgadmin with a selector; + * check the correct existence of the postgrescluster. +* 03: + * check that the configmap is updated in the pgadmin pod; + * dump the servers from pgAdmin and check that the list has the expected server. + +*Phase three* + +* 04: + * create a postgrescluster with the same label; + * check the correct existence of the postgrescluster. +* 05: + * check that the configmap is updated in the pgadmin pod; + * dump the servers from pgAdmin and check that the list has the expected 2 servers. + +*Phase four* + +* 06: + * create a postgrescluster with a different label; + * update the pgadmin with a second serverGroup; + * check the correct existence of the postgrescluster. +* 07: + * check that the configmap is updated in the pgadmin pod; + * dump the servers from pgAdmin and check that the list has the expected 3 servers. + +*Phase five* + +* 08: + * delete a postgrescluster; + * update the pgadmin with a second serverGroup; + * check the correct existence of the postgrescluster. 
+* 09: + * check that the configmap is updated in the pgadmin pod; + * dump the servers from pgAdmin and check that the list has the expected 2 servers diff --git a/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml b/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml new file mode 100755 index 0000000000..6be8c15106 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml @@ -0,0 +1,445 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: standalone-pgadmin +spec: + steps: + - name: step-00 + try: + - apply: + file: files/00-pgadmin.yaml + - assert: + file: files/00-pgadmin-check.yaml + - catch: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected="\"Servers\": {}" + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + exit 1 + } + name: step-01 + try: null + - name: step-02 + try: + - apply: + file: files/02-cluster.yaml + - apply: + file: files/02-pgadmin.yaml + - assert: + file: files/02-cluster-check.yaml + - catch: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": 
\"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } + name: step-03 + try: null + - name: step-04 + try: + - apply: + file: 
files/04-cluster.yaml + - assert: + file: files/04-cluster-check.yaml + - catch: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin2", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin2" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file 
mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin2", + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin2", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } + name: step-05 + try: null + - name: step-06 + try: + - apply: + file: files/06-cluster.yaml + - apply: + file: files/06-pgadmin.yaml + - assert: + file: files/06-cluster-check.yaml + - catch: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n 
\"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n },\n \"3\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin2", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin2" + }, + "3": { + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin3", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin3" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" 
-- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin2", + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin2", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "3": { + "Name": "pgadmin3", + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin3", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } + name: step-07 + try: null + - name: step-08 + try: + - error: + file: files/04-cluster-check.yaml + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: pgadmin2 + - catch: + - script: + content: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n 
\"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin3", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin3" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + 
"Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin3", + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin3", + "Shared": true, + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } + name: step-09 + try: null diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml new file mode 100644 index 0000000000..a9fe716e2e --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +data: + pgadmin-settings.json: | + { + "DEFAULT_SERVER": "0.0.0.0", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" + } + pgadmin-shared-clusters.json: | + { + "Servers": {} + } +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque diff --git 
a/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin.yaml new file mode 100644 index 0000000000..692c0cd06d --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/00-pgadmin.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster-check.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster-check.yaml new file mode 100644 index 0000000000..16fa079176 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster-check.yaml @@ -0,0 +1,6 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin1 + labels: + hello: world diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster.yaml new file mode 100644 index 0000000000..c1280caa01 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/02-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin1 + labels: + hello: world +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/02-pgadmin.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/02-pgadmin.yaml new file mode 100644 index 0000000000..953150b7fa --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/02-pgadmin.yaml @@ -0,0 +1,17 @@ +apiVersion: 
postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + adminUsername: admin@pgo.com + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: groupOne + postgresClusterSelector: + matchLabels: + hello: world diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster-check.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster-check.yaml new file mode 100644 index 0000000000..b3de0cfc54 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster-check.yaml @@ -0,0 +1,6 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin2 + labels: + hello: world diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster.yaml new file mode 100644 index 0000000000..63a44812e1 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/04-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin2 + labels: + hello: world +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster-check.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster-check.yaml new file mode 100644 index 0000000000..31de80c896 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster-check.yaml @@ -0,0 +1,6 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin3 + labels: + hello: world2 diff --git 
a/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster.yaml new file mode 100644 index 0000000000..40f60cf229 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/06-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgadmin3 + labels: + hello: world2 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/06-pgadmin.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/06-pgadmin.yaml new file mode 100644 index 0000000000..5951c16270 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/06-pgadmin.yaml @@ -0,0 +1,20 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: groupOne + postgresClusterSelector: + matchLabels: + hello: world + - name: groupTwo + postgresClusterSelector: + matchLabels: + hello: world2 diff --git a/testing/chainsaw/e2e/standalone-pgadmin/files/chainsaw-test.yaml b/testing/chainsaw/e2e/standalone-pgadmin/files/chainsaw-test.yaml new file mode 100755 index 0000000000..2146fa9d21 --- /dev/null +++ b/testing/chainsaw/e2e/standalone-pgadmin/files/chainsaw-test.yaml @@ -0,0 +1,36 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: files +spec: + steps: + - name: step-00 + try: + - apply: + file: 
00-pgadmin-check.yaml + - apply: + file: 00-pgadmin.yaml + - name: step-02 + try: + - apply: + file: 02-cluster-check.yaml + - apply: + file: 02-cluster.yaml + - apply: + file: 02-pgadmin.yaml + - name: step-04 + try: + - apply: + file: 04-cluster-check.yaml + - apply: + file: 04-cluster.yaml + - name: step-06 + try: + - apply: + file: 06-cluster-check.yaml + - apply: + file: 06-cluster.yaml + - apply: + file: 06-pgadmin.yaml diff --git a/testing/chainsaw/e2e/streaming-standby/00--secrets.yaml b/testing/chainsaw/e2e/streaming-standby/00--secrets.yaml new file mode 100644 index 0000000000..1f8dd06ccf --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/00--secrets.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNQakNDQWVXZ0F3SUJBZ0lSQU93NURHaGVVZnVNY25KYVdKNkllall3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nak13TkRFeE1UYzBOVE01V2hjTgpNek13TkRBNE1UZzBOVE01V2pBOU1Uc3dPUVlEVlFRREV6SndjbWx0WVhKNUxXTnNkWE4wWlhJdGNISnBiV0Z5CmVTNWtaV1poZFd4MExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc0xqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDkKQXdFSEEwSUFCT3RlNytQWFlDci9RQVJkcHlwYTFHcEpkbW5wOFN3ZG9FOTIzUXoraWt4UllTalgwUHBXcytqUQpVNXlKZ0NDdGxyZmxFZVZ4S2YzaVpiVHdadFlIaHVxamdlTXdnZUF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQXdHCkExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVVkp0R0R0Yk1kMVlmemUrRXBLdGJDMTdINXFZd2daNEcKQTFVZEVRU0JsakNCazRJeWNISnBiV0Z5ZVMxamJIVnpkR1Z5TFhCeWFXMWhjbmt1WkdWbVlYVnNkQzV6ZG1NdQpZMngxYzNSbGNpNXNiMk5oYkM2Q0kzQnlhVzFoY25rdFkyeDFjM1JsY2kxd2NtbHRZWEo1TG1SbFptRjFiSFF1CmMzWmpnaDl3Y21sdFlYSjVMV05zZFhOMFpYSXRjSEpwYldGeWVTNWtaV1poZFd4MGdoZHdjbWx0WVhKNUxXTnMKZFhOMFpYSXRjSEpwYldGeWVUQUtCZ2dxaGtqT1BRUURBd05IQURCRUFpQjA3Q3YzRHJTNXUxRFdaek1MQjdvbAppcjFFWEpQTnFaOXZWQUF5ZTdDMGJRSWdWQVlDM2F0ekl4a0syNHlQUU1TSjU1OGFaN3JEdkZGZXdOaVpmdSt0CjdETT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUoxYkNXMTByR3o2VWQ1K2R3WmZWcGNUNFlqck9XVG1iVW9XNXRxYTA2b1ZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNjE3djQ5ZGdLdjlBQkYybktsclVha2wyYWVueExCMmdUM2JkRFA2S1RGRmhLTmZRK2xhego2TkJUbkltQUlLMld0K1VSNVhFcC9lSmx0UEJtMWdlRzZnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: cluster-cert +type: Opaque +--- +apiVersion: v1 +data: + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqekNDQVRTZ0F3SUJBZ0lRRzA0MEprWjYwZkZtanpaVG1SekhyakFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUJjeEZUQVRCZ05WQkFNTURGOWpjblZ1WTJoNWNtVndiREJaTUJNR0J5cUdTTTQ5CkFnRUdDQ3FHU000OUF3RUhBMElBQk5HVHcvSmVtaGxGK28xUlRBb0VXSndzdjJ6WjIyc1p4N2NjT2VmL1NXdjYKeXphYkpaUmkvREFyK0kwUHNyTlhmand3a0xMa3hERGZsTklvcFZMNVYwT2pXakJZTUE0R0ExVWREd0VCL3dRRQpBd0lGb0RBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkZTYlJnN1d6SGRXSDgzdmhLU3JXd3RlCngrYW1NQmNHQTFVZEVRUVFNQTZDREY5amNuVnVZMmg1Y21Wd2JEQUtCZ2dxaGtqT1BRUURBd05KQURCR0FpRUEKcWVsYmUvdTQzRFRPWFdlell1b3Nva0dUbHg1U2ljUFRkNk05Q3pwU2VoWUNJUUNOOS91Znc0SUZzdDZOM1RtYQo4MmZpSElKSUpQY0RjM2ZKUnFna01RQmF0QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBxeTVzNVJxWThKUmdycjJreE9zaG9hc25yTWhUUkJPYjZ0alI3T2ZqTFlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMFpQRDhsNmFHVVg2alZGTUNnUlluQ3kvYk5uYmF4bkh0eHc1NS85SmEvckxOcHNsbEdMOApNQ3Y0alEreXMxZCtQRENRc3VURU1OK1UwaWlsVXZsWFF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: replication-cert +type: Opaque diff --git 
a/testing/chainsaw/e2e/streaming-standby/01--primary-cluster.yaml b/testing/chainsaw/e2e/streaming-standby/01--primary-cluster.yaml new file mode 100644 index 0000000000..cd0e05ac15 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/01--primary-cluster.yaml @@ -0,0 +1,19 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/streaming-standby/01-assert.yaml b/testing/chainsaw/e2e/streaming-standby/01-assert.yaml new file mode 100644 index 0000000000..55c820a116 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/01-assert.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: primary-cluster-primary diff --git a/testing/chainsaw/e2e/streaming-standby/02--create-data.yaml b/testing/chainsaw/e2e/streaming-standby/02--create-data.yaml new file mode 100644 index 0000000000..472e50aa1d --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/02--create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that will be replicated. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: primary-cluster-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: primary-cluster-pguser-primary-cluster, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE SCHEMA "primary-cluster"; + CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/chainsaw/e2e/streaming-standby/02-assert.yaml b/testing/chainsaw/e2e/streaming-standby/02-assert.yaml new file mode 100644 index 0000000000..7693fce649 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: primary-cluster-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/streaming-standby/03--standby-cluster.yaml b/testing/chainsaw/e2e/streaming-standby/03--standby-cluster.yaml new file mode 100644 index 0000000000..a3c542addb --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/03--standby-cluster.yaml @@ -0,0 +1,22 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + standby: + enabled: true + host: primary-cluster-primary + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/streaming-standby/03-assert.yaml 
b/testing/chainsaw/e2e/streaming-standby/03-assert.yaml new file mode 100644 index 0000000000..9c3a95c1d3 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/03-assert.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: standby-cluster-primary diff --git a/testing/chainsaw/e2e/streaming-standby/04--check-data.yaml b/testing/chainsaw/e2e/streaming-standby/04--check-data.yaml new file mode 100644 index 0000000000..16350fd577 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/04--check-data.yaml @@ -0,0 +1,49 @@ +--- +# Confirm that all the data was replicated. +apiVersion: batch/v1 +kind: Job +metadata: + name: check-standby-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect to the cluster using the standby-cluster database and primary-cluster credentials. + - name: PGHOST + valueFrom: { secretKeyRef: { name: standby-cluster-pguser-standby-cluster, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: standby-cluster-pguser-standby-cluster, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: primary-cluster-pguser-primary-cluster, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: primary-cluster-pguser-primary-cluster, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: primary-cluster-pguser-primary-cluster, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data was replicated. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + replicated jsonb; + BEGIN + SELECT jsonb_agg(important) INTO replicated FROM important; + ASSERT replicated = '[{"data":"treasure"}]', format('got %L', replicated); + END $$$$; diff --git a/testing/chainsaw/e2e/streaming-standby/04-assert.yaml b/testing/chainsaw/e2e/streaming-standby/04-assert.yaml new file mode 100644 index 0000000000..6e789b85e3 --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/04-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: check-standby-data +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/streaming-standby/README.md b/testing/chainsaw/e2e/streaming-standby/README.md new file mode 100644 index 0000000000..5aeee1b9aa --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/README.md @@ -0,0 +1,9 @@ +# Streaming Standby Tests + +The streaming standby test will deploy two clusters, one primary and one standby. +Both clusters are created in the same namespace to allow for easy connections +over the network. + +This test scenario can be run without any specific Kubernetes environment +requirements. More standby tests can be added that will require access to +cloud storage. 
diff --git a/testing/chainsaw/e2e/streaming-standby/chainsaw-test.yaml b/testing/chainsaw/e2e/streaming-standby/chainsaw-test.yaml new file mode 100755 index 0000000000..0d408da76d --- /dev/null +++ b/testing/chainsaw/e2e/streaming-standby/chainsaw-test.yaml @@ -0,0 +1,36 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: streaming-standby +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--secrets.yaml + - name: step-01 + try: + - apply: + file: 01--primary-cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--create-data.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - apply: + file: 03--standby-cluster.yaml + - assert: + file: 03-assert.yaml + - name: step-04 + try: + - apply: + file: 04--check-data.yaml + - assert: + file: 04-assert.yaml diff --git a/testing/chainsaw/e2e/switchover/01--cluster.yaml b/testing/chainsaw/e2e/switchover/01--cluster.yaml new file mode 100644 index 0000000000..4b0d598ff1 --- /dev/null +++ b/testing/chainsaw/e2e/switchover/01--cluster.yaml @@ -0,0 +1,20 @@ +--- +# Create a cluster with multiple instances and manual switchover enabled. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: switchover +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + switchover: + enabled: true + instances: + - replicas: 2 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/chainsaw/e2e/switchover/01-assert.yaml b/testing/chainsaw/e2e/switchover/01-assert.yaml new file mode 100644 index 0000000000..b6b35e8126 --- /dev/null +++ b/testing/chainsaw/e2e/switchover/01-assert.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: switchover +status: + instances: + - name: "00" + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: switchover + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: switchover + postgres-operator.crunchydata.com/role: replica diff --git a/testing/chainsaw/e2e/switchover/03-assert.yaml b/testing/chainsaw/e2e/switchover/03-assert.yaml new file mode 100644 index 0000000000..cad813362f --- /dev/null +++ b/testing/chainsaw/e2e/switchover/03-assert.yaml @@ -0,0 +1,36 @@ +--- +# After switchover, a former replica should now be the primary. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: switchover + postgres-operator.crunchydata.com/data: postgres + + postgres-operator.crunchydata.com/role: master + testing/role-before: replica + +--- +# The former primary should now be a replica. 
+apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: switchover + postgres-operator.crunchydata.com/data: postgres + + postgres-operator.crunchydata.com/role: replica + testing/role-before: master + +--- +# All instances should be healthy. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: switchover +status: + instances: + - name: "00" + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 diff --git a/testing/chainsaw/e2e/switchover/chainsaw-test.yaml b/testing/chainsaw/e2e/switchover/chainsaw-test.yaml new file mode 100755 index 0000000000..5e8767519f --- /dev/null +++ b/testing/chainsaw/e2e/switchover/chainsaw-test.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: switchover +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--cluster.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=master' \ + 'testing/role-before=master' + - script: + content: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + 'testing/role-before=replica' + - script: + content: | + kubectl annotate --namespace="${NAMESPACE}" postgrescluster/switchover \ + "postgres-operator.crunchydata.com/trigger-switchover=$(date)" + - name: step-03 + try: + - assert: + file: 03-assert.yaml diff --git a/testing/chainsaw/e2e/tablespace-enabled/00--cluster.yaml b/testing/chainsaw/e2e/tablespace-enabled/00--cluster.yaml new file mode 100644 index 0000000000..edeebeb8bb --- /dev/null +++ b/testing/chainsaw/e2e/tablespace-enabled/00--cluster.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: tablespace-script +data: + script.sql: | + CREATE TABLESPACE trial OWNER "tablespace-enabled" LOCATION '/tablespaces/library/data'; + CREATE TABLESPACE castle OWNER "tablespace-enabled" LOCATION '/tablespaces/user/data'; +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: tablespace-enabled +spec: + databaseInitSQL: + name: tablespace-script + key: script.sql + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + tablespaceVolumes: + - name: user + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: library + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/chainsaw/e2e/tablespace-enabled/00-assert.yaml b/testing/chainsaw/e2e/tablespace-enabled/00-assert.yaml new file mode 100644 index 0000000000..9351766c4f --- /dev/null +++ b/testing/chainsaw/e2e/tablespace-enabled/00-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: tablespace-enabled +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: tablespace-enabled + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: tablespace-enabled-primary diff --git a/testing/chainsaw/e2e/tablespace-enabled/01--psql-connect.yaml b/testing/chainsaw/e2e/tablespace-enabled/01--psql-connect.yaml new file mode 100644 index 0000000000..7c72fe5093 --- /dev/null +++ 
b/testing/chainsaw/e2e/tablespace-enabled/01--psql-connect.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: tablespace-enabled-pguser-tablespace-enabled, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + tbsp_count integer; + BEGIN + SELECT COUNT(*) INTO tbsp_count FROM pg_tablespace WHERE spcname = 'trial'; + ASSERT tbsp_count = 1, 'tablespace not found'; + SELECT COUNT(*) INTO tbsp_count FROM pg_tablespace WHERE spcname = 'castle'; + ASSERT tbsp_count = 1, 'tablespace not found'; + END $$$$; + - --command + - | + CREATE SCHEMA "tablespace-enabled"; + CREATE TABLE important (data) TABLESPACE trial AS VALUES ('treasure'); + CREATE TABLE also_important (data) TABLESPACE castle AS VALUES ('treasure'); + CREATE TABLE moving_important (data) AS VALUES ('treasure'); + ALTER TABLE moving_important SET TABLESPACE trial; diff --git a/testing/chainsaw/e2e/tablespace-enabled/01-assert.yaml b/testing/chainsaw/e2e/tablespace-enabled/01-assert.yaml new file mode 100644 index 0000000000..e4d8bbb37a --- /dev/null +++ b/testing/chainsaw/e2e/tablespace-enabled/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/tablespace-enabled/README.md b/testing/chainsaw/e2e/tablespace-enabled/README.md new file mode 100644 index 0000000000..d4df788b61 --- /dev/null +++ b/testing/chainsaw/e2e/tablespace-enabled/README.md @@ -0,0 +1,6 @@ +# Tablespace Enabled + +**Note**: This series of 
tests depends on PGO being deployed with the `TablespaceVolume` feature gate enabled. + +00: Start a cluster with tablespace volumes and a configmap `databaseInitSQL` to create tablespaces with the non-superuser as owner +01: Connect to the db; check that the tablespaces exist; create tables in the tablespaces; and create a table outside the tablespaces and move it into a tablespace diff --git a/testing/chainsaw/e2e/tablespace-enabled/chainsaw-test.yaml b/testing/chainsaw/e2e/tablespace-enabled/chainsaw-test.yaml new file mode 100755 index 0000000000..45e1d56609 --- /dev/null +++ b/testing/chainsaw/e2e/tablespace-enabled/chainsaw-test.yaml @@ -0,0 +1,20 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: tablespace-enabled +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--cluster.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--psql-connect.yaml + - assert: + file: 01-assert.yaml diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/00--create-resources.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/00--create-resources.yaml new file mode 100644 index 0000000000..4ec3e7c22b --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/00--create-resources.yaml @@ -0,0 +1,28 @@ +--- +# Create the cluster we will do an actual upgrade on +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + walVolumeClaimSpec: { accessModes: ["ReadWriteOnce"], resources: { requests: { storage: 1Gi } } } + replicas: 3 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], 
resources: { requests: { storage: 1Gi } } } +--- +# This upgrade is valid, but the cluster is not shutdown yet, so the upgrade should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: wal-pvc-pgupgrade-do-it +spec: + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: wal-pvc-pgupgrade diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/00-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/00-assert.yaml new file mode 100644 index 0000000000..b3267d072b --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/00-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: wal-pvc-pgupgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/01--create-data.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/01--create-data.yaml new file mode 100644 index 0000000000..ea93bad021 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/01--create-data.yaml @@ -0,0 +1,94 @@ +--- +# Check the version reported by PostgreSQL and create some data. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-before + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + CREATE SCHEMA very; + CREATE TABLE very.important (data) AS VALUES ('treasure'); +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-before-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "wal-pvc-pgupgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: user } } + - name: PGPASSWORD + valueFrom: { 
secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/01-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/01-assert.yaml new file mode 100644 index 0000000000..cbcadea8cd --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/01-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-before +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-before-replica +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml new file mode 100644 index 0000000000..6d44b8b23b --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +spec: + shutdown: true diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/02-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/02-assert.yaml new file mode 100644 index 0000000000..a6b1faf669 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/02-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: wal-pvc-pgupgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml new file mode 100644 index 0000000000..fd9739c9e1 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade + annotations: + postgres-operator.crunchydata.com/allow-upgrade: wal-pvc-pgupgrade-do-it diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/03-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/03-assert.yaml new file mode 100644 index 0000000000..0e5d8e7c20 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/03-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciling is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: wal-pvc-pgupgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml new file mode 100644 index 0000000000..95b122eed3 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/04-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/04-assert.yaml new file mode 100644 index 0000000000..089d448cbd --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/04-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: wal-pvc-pgupgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/06--check-data-and-version.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/06--check-data-and-version.yaml new file mode 100644 index 0000000000..16afd8d945 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/06--check-data-and-version.yaml @@ -0,0 +1,108 @@ +--- +# Check the version reported by PostgreSQL and confirm that data was upgraded. +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "wal-pvc-pgupgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: wal-pvc-pgupgrade-pguser-wal-pvc-pgupgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/06-assert.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/06-assert.yaml new file mode 100644 index 0000000000..f7575212e0 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/06-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after-replica +status: + succeeded: 1 diff --git a/testing/chainsaw/e2e/wal-pvc-pgupgrade/chainsaw-test.yaml b/testing/chainsaw/e2e/wal-pvc-pgupgrade/chainsaw-test.yaml new file mode 100755 index 0000000000..a19e985501 --- /dev/null +++ b/testing/chainsaw/e2e/wal-pvc-pgupgrade/chainsaw-test.yaml @@ -0,0 +1,54 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: wal-pvc-pgupgrade +spec: + steps: + - name: step-00 + try: + - apply: + file: 00--create-resources.yaml + - assert: + file: 00-assert.yaml + - name: step-01 + try: + - apply: + file: 01--create-data.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--shutdown-cluster.yaml + - assert: + file: 02-assert.yaml + - name: step-03 + try: + - apply: + file: 03--annotate-cluster.yaml + - assert: + file: 03-assert.yaml + 
- name: step-04 + try: + - apply: + file: 04--restart-cluster.yaml + - assert: + file: 04-assert.yaml + - name: step-05 + try: + - script: + content: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/wal-pvc-pgupgrade-repo-host" -c pgbackrest -- pgbackrest check --stanza=db + - script: + content: | + # Check that the old pg folders do not exist on the replica + REPLICA=$(kubectl get pod -l=postgres-operator.crunchydata.com/role=replica -n "${NAMESPACE}" -o=jsonpath='{ .items[0].metadata.name }') + kubectl -n "${NAMESPACE}" exec "${REPLICA}" -c database -- [ ! -d "pgdata/pg${KUTTL_PG_UPGRADE_FROM_VERSION}" ] + - name: step-06 + try: + - apply: + file: 06--check-data-and-version.yaml + - assert: + file: 06-assert.yaml diff --git a/testing/chainsaw/scripts/pgbackrest-initialization.sh b/testing/chainsaw/scripts/pgbackrest-initialization.sh new file mode 100755 index 0000000000..ba6cd4a7e5 --- /dev/null +++ b/testing/chainsaw/scripts/pgbackrest-initialization.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +EXPECTED_STATUS=$1 +EXPECTED_NUM_BACKUPS=$2 + +CLUSTER=${CLUSTER:-default} + +INFO=$(kubectl -n "${NAMESPACE}" exec "statefulset.apps/${CLUSTER}-repo-host" -c pgbackrest -- pgbackrest info) + +# Grab the `status` line from `pgbackrest info`, remove whitespace with `xargs`, +# and trim the string to only include the status in order to +# validate the status matches the expected status. +STATUS=$(grep "status" <<< "$INFO" | xargs | cut -d' ' -f 2) +if [[ "$STATUS" != "$EXPECTED_STATUS" ]]; then + echo "Expected ${EXPECTED_STATUS} but got ${STATUS}" + exit 1 +fi + +# Count the lines with `full backup` to validate that the expected number of backups are found. 
+NUM_BACKUPS=$(grep -c "full backup:" <<< "$INFO") +if [[ "$NUM_BACKUPS" != "$EXPECTED_NUM_BACKUPS" ]]; then + echo "Expected ${EXPECTED_NUM_BACKUPS} but got ${NUM_BACKUPS}" + exit 1 +fi From ed78245a294e1eda328b5f95dfce7d7f4c791b5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?= Date: Wed, 21 Feb 2024 21:23:15 +0100 Subject: [PATCH 2/5] bump version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Charles-Edouard Brétéché --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c6c693d2b5..c86bb466c9 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -238,7 +238,7 @@ jobs: postgres-operator - name: Install chainsaw - uses: kyverno/action-install-chainsaw@07b6c986572f2abaf6647c85d37cbecfddc4a6ab # v0.1.3 + uses: kyverno/action-install-chainsaw@v0.1.6 # - run: make generate-kuttl # env: From 3c64ba22ad715d296a3248e7799c575bc6d05cf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?= Date: Wed, 21 Feb 2024 21:29:44 +0100 Subject: [PATCH 3/5] regen MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Charles-Edouard Brétéché --- .../chainsaw-test.yaml | 24 +++--- .../e2e/exporter-no-tls/chainsaw-test.yaml | 13 ++- .../chainsaw-test.yaml | 39 +++++---- .../e2e/exporter-tls/chainsaw-test.yaml | 17 ++-- .../e2e/major-upgrade/11-delete-cluster.yaml | 8 -- .../35-check-pgbackrest-and-replica.yaml | 11 --- .../e2e/major-upgrade/chainsaw-test.yaml | 79 +++++++++++++++++++ .../e2e/standalone-pgadmin/chainsaw-test.yaml | 25 +++--- 8 files changed, 133 insertions(+), 83 deletions(-) delete mode 100644 testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml delete mode 100644 testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml create mode 100755 
testing/chainsaw/e2e/major-upgrade/chainsaw-test.yaml diff --git a/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml index 6721a79d28..25a39e95eb 100755 --- a/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/exporter-custom-queries/chainsaw-test.yaml @@ -6,7 +6,14 @@ metadata: name: exporter-custom-queries spec: steps: - - catch: + - name: step-00 + try: + - apply: + file: files/exporter-custom-queries-configmap.yaml + - apply: + file: files/exporter-custom-queries-cluster.yaml + - assert: + file: files/exporter-custom-queries-cluster-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -55,15 +62,12 @@ spec: pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} - name: step-00 + - name: step-01 try: - apply: - file: files/exporter-custom-queries-configmap.yaml - - apply: - file: files/exporter-custom-queries-cluster.yaml + file: files/exporter-custom-queries-configmap-update.yaml - assert: - file: files/exporter-custom-queries-cluster-checks.yaml - - catch: + file: files/exporter-custom-queries-configmap-update-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -92,9 +96,3 @@ spec: echo "${master_queries_contents}" exit 1 } - name: step-01 - try: - - apply: - file: files/exporter-custom-queries-configmap-update.yaml - - assert: - file: files/exporter-custom-queries-configmap-update-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml index 225c352ca5..dee4f406ae 100755 --- a/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/exporter-no-tls/chainsaw-test.yaml @@ -6,7 +6,12 @@ metadata: name: exporter-no-tls spec: steps: - - 
catch: + - name: step-00 + try: + - apply: + file: files/exporter-no-tls-cluster.yaml + - assert: + file: files/exporter-no-tls-cluster-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -48,9 +53,3 @@ spec: ASSERT FOUND, 'user not found'; END $$ SQL - name: step-00 - try: - - apply: - file: files/exporter-no-tls-cluster.yaml - - assert: - file: files/exporter-no-tls-cluster-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml index b58da5e4ec..1695c610ed 100755 --- a/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/exporter-password-change/chainsaw-test.yaml @@ -7,6 +7,14 @@ metadata: spec: steps: - catch: + - script: + content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true + name: step-00 + try: + - apply: + file: files/initial-postgrescluster.yaml + - assert: + file: files/initial-postgrescluster-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -23,15 +31,12 @@ spec: retry "containers not ready" exit 1 } - - script: - content: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true - name: step-00 - try: - - apply: - file: files/initial-postgrescluster.yaml - - assert: - file: files/initial-postgrescluster-checks.yaml - catch: + - podLogs: + container: exporter + selector: postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true + name: step-01 + try: - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -51,12 +56,12 @@ spec: pid=$(kubectl 
exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} - - podLogs: - container: exporter - selector: postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true - name: step-01 - try: null - - catch: + - name: step-02 + try: + - apply: + file: files/update-monitoring-password.yaml + - assert: + file: files/update-monitoring-password-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -84,9 +89,3 @@ spec: retry "${scrape_metrics}" exit 1 } - name: step-02 - try: - - apply: - file: files/update-monitoring-password.yaml - - assert: - file: files/update-monitoring-password-checks.yaml diff --git a/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml b/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml index 4fe7a3b54a..8e7ed8ce19 100755 --- a/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/exporter-tls/chainsaw-test.yaml @@ -6,7 +6,14 @@ metadata: name: exporter-tls spec: steps: - - catch: + - name: step-00 + try: + - apply: + file: files/exporter-tls-certs.yaml + - apply: + file: files/exporter-tls-cluster.yaml + - assert: + file: files/exporter-tls-cluster-checks.yaml - script: content: | retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } @@ -49,11 +56,3 @@ spec: ASSERT FOUND, 'user not found'; END $$ SQL - name: step-00 - try: - - apply: - file: files/exporter-tls-certs.yaml - - apply: - file: files/exporter-tls-cluster.yaml - - assert: - file: files/exporter-tls-cluster-checks.yaml diff --git a/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml b/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml deleted file mode 100644 index 14eab0efbb..0000000000 --- a/testing/chainsaw/e2e/major-upgrade/11-delete-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Delete the existing cluster. 
-apiVersion: kuttl.dev/v1beta1 -kind: TestStep -delete: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - kind: PostgresCluster - name: major-upgrade diff --git a/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml b/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml deleted file mode 100644 index be1c3ff357..0000000000 --- a/testing/chainsaw/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: -# Check that the pgbackrest setup has successfully completed -- script: | - kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-repo-host" -c pgbackrest -- pgbackrest check --stanza=db -# Check that the replica data dir has been successfully cleaned -- script: | - # Check that the old pg folders do not exist on the replica - REPLICA=$(kubectl get pod -l=postgres-operator.crunchydata.com/role=replica -n "${NAMESPACE}" -o=jsonpath='{ .items[0].metadata.name }') - kubectl -n "${NAMESPACE}" exec "${REPLICA}" -c database -- [ ! 
-d "pgdata/pg${KUTTL_PG_UPGRADE_FROM_VERSION}" ] diff --git a/testing/chainsaw/e2e/major-upgrade/chainsaw-test.yaml b/testing/chainsaw/e2e/major-upgrade/chainsaw-test.yaml new file mode 100755 index 0000000000..640cedc639 --- /dev/null +++ b/testing/chainsaw/e2e/major-upgrade/chainsaw-test.yaml @@ -0,0 +1,79 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: major-upgrade +spec: + steps: + - name: step-01 + try: + - apply: + file: 01--invalid-pgupgrade.yaml + - assert: + file: 01-assert.yaml + - name: step-02 + try: + - apply: + file: 02--valid-upgrade.yaml + - assert: + file: 02-assert.yaml + - name: step-10 + try: + - apply: + file: 10--already-updated-cluster.yaml + - assert: + file: 10-assert.yaml + - name: step-11 + try: + - delete: + ref: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: major-upgrade + - name: step-30 + try: + - apply: + file: 30--cluster.yaml + - assert: + file: 30-assert.yaml + - name: step-31 + try: + - apply: + file: 31--create-data.yaml + - assert: + file: 31-assert.yaml + - name: step-32 + try: + - apply: + file: 32--shutdown-cluster.yaml + - assert: + file: 32-assert.yaml + - name: step-33 + try: + - apply: + file: 33--annotate-cluster.yaml + - assert: + file: 33-assert.yaml + - name: step-34 + try: + - apply: + file: 34--restart-cluster.yaml + - assert: + file: 34-assert.yaml + - name: step-35 + try: + - script: + content: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-repo-host" -c pgbackrest -- pgbackrest check --stanza=db + - script: + content: | + # Check that the old pg folders do not exist on the replica + REPLICA=$(kubectl get pod -l=postgres-operator.crunchydata.com/role=replica -n "${NAMESPACE}" -o=jsonpath='{ .items[0].metadata.name }') + kubectl -n "${NAMESPACE}" exec "${REPLICA}" -c 
database -- [ ! -d "pgdata/pg${KUTTL_PG_UPGRADE_FROM_VERSION}" ] + - name: step-36 + try: + - apply: + file: 36--check-data-and-version.yaml + - assert: + file: 36-assert.yaml diff --git a/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml b/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml index 6be8c15106..f8297fe728 100755 --- a/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/standalone-pgadmin/chainsaw-test.yaml @@ -12,7 +12,8 @@ spec: file: files/00-pgadmin.yaml - assert: file: files/00-pgadmin-check.yaml - - catch: + - name: step-01 + try: - script: content: | contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } @@ -28,8 +29,6 @@ spec: echo "Wrong servers dumped: got ${clusters_actual}" exit 1 } - name: step-01 - try: null - name: step-02 try: - apply: @@ -38,7 +37,8 @@ spec: file: files/02-pgadmin.yaml - assert: file: files/02-cluster-check.yaml - - catch: + - name: step-03 + try: - script: content: | contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } @@ -107,15 +107,14 @@ spec: diff_comp "${clusters_actual}" "${clusters_expected}" exit 1 } - name: step-03 - try: null - name: step-04 try: - apply: file: files/04-cluster.yaml - assert: file: files/04-cluster-check.yaml - - catch: + - name: step-05 + try: - script: content: | contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } @@ -209,8 +208,6 @@ spec: diff_comp "${clusters_actual}" "${clusters_expected}" exit 1 } - name: step-05 - try: null - name: step-06 try: - apply: @@ -219,7 +216,8 @@ spec: file: files/06-pgadmin.yaml - assert: file: files/06-cluster-check.yaml - - catch: + - name: step-07 + try: - script: content: | contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } @@ -336,8 +334,6 @@ spec: diff_comp "${clusters_actual}" "${clusters_expected}" exit 1 } - name: step-07 - try: null - name: step-08 try: - error: @@ -347,7 +343,8 @@ spec: apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: pgadmin2 - - 
catch: + - name: step-09 + try: - script: content: | contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } @@ -441,5 +438,3 @@ spec: diff_comp "${clusters_actual}" "${clusters_expected}" exit 1 } - name: step-09 - try: null From f1c2e0c5a799583d582cf8cb44fda12c45202da8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?= Date: Wed, 21 Feb 2024 22:08:14 +0100 Subject: [PATCH 4/5] generate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Charles-Edouard Brétéché --- .github/workflows/test.yaml | 14 +++++++------- .gitignore | 1 + Makefile | 31 ++++++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c86bb466c9..b26d0fc3fa 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -240,13 +240,13 @@ jobs: - name: Install chainsaw uses: kyverno/action-install-chainsaw@v0.1.6 - # - run: make generate-kuttl - # env: - # KUTTL_PG_UPGRADE_FROM_VERSION: '15' - # KUTTL_PG_UPGRADE_TO_VERSION: '16' - # KUTTL_PG_VERSION: '15' - # KUTTL_POSTGIS_VERSION: '3.4' - # KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' + - run: make generate-chainsaw + env: + CHAINSAW_PG_UPGRADE_FROM_VERSION: '15' + CHAINSAW_PG_UPGRADE_TO_VERSION: '16' + CHAINSAW_PG_VERSION: '15' + CHAINSAW_POSTGIS_VERSION: '3.4' + CHAINSAW_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' - run: | make check-chainsaw && exit diff --git a/.gitignore b/.gitignore index 2fa6186778..08cde90815 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .DS_Store /vendor/ +/testing/chainsaw/e2e-generated*/ /testing/kuttl/e2e-generated*/ diff --git a/Makefile b/Makefile index af2e88d6fe..8982b61334 100644 --- a/Makefile +++ b/Makefile @@ -219,7 +219,7 @@ check-envtest-existing: createnamespaces .PHONY: check-chainsaw check-chainsaw: 
## Run chainsaw end-to-end tests check-chainsaw: - $(CHAINSAW) test --test-dir ./testing/chainsaw + $(CHAINSAW) test --test-dir ./testing/chainsaw/e2e-generated # Expects operator to be running .PHONY: check-kuttl @@ -257,6 +257,35 @@ generate-kuttl: ## Generate kuttl tests shift; \ done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e-other/*/*.yaml testing/kuttl/e2e/*/*/*.yaml testing/kuttl/e2e-other/*/*/*.yaml +.PHONY: generate-chainsaw +generate-chainsaw: export CHAINSAW_PG_UPGRADE_FROM_VERSION ?= 15 +generate-chainsaw: export CHAINSAW_PG_UPGRADE_TO_VERSION ?= 16 +generate-chainsaw: export CHAINSAW_PG_VERSION ?= 16 +generate-chainsaw: export CHAINSAW_POSTGIS_VERSION ?= 3.4 +generate-chainsaw: export CHAINSAW_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0 +generate-chainsaw: export CHAINSAW_TEST_DELETE_NAMESPACE ?= chainsaw-test-delete-namespace +generate-chainsaw: ## Generate chainsaw tests + [ ! -d testing/chainsaw/e2e-generated ] || rm -r testing/chainsaw/e2e-generated + [ ! 
-d testing/chainsaw/e2e-generated-other ] || rm -r testing/chainsaw/e2e-generated-other + bash -ceu ' \ + case $(CHAINSAW_PG_VERSION) in \ + 16 ) export CHAINSAW_BITNAMI_IMAGE_TAG=16.0.0-debian-11-r3 ;; \ + 15 ) export CHAINSAW_BITNAMI_IMAGE_TAG=15.0.0-debian-11-r4 ;; \ + 14 ) export CHAINSAW_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ + 13 ) export CHAINSAW_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ + 12 ) export CHAINSAW_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ + 11 ) export CHAINSAW_BITNAMI_IMAGE_TAG=11.17.0-debian-11-r39 ;; \ + esac; \ + render() { envsubst '"'"' \ + $$CHAINSAW_PG_UPGRADE_FROM_VERSION $$CHAINSAW_PG_UPGRADE_TO_VERSION \ + $$CHAINSAW_PG_VERSION $$CHAINSAW_POSTGIS_VERSION $$CHAINSAW_PSQL_IMAGE \ + $$CHAINSAW_BITNAMI_IMAGE_TAG $$CHAINSAW_TEST_DELETE_NAMESPACE'"'"'; }; \ + while [ $$# -gt 0 ]; do \ + source="$${1}" target="$${1/e2e/e2e-generated}"; \ + mkdir -p "$${target%/*}"; render < "$${source}" > "$${target}"; \ + shift; \ + done' - testing/chainsaw/e2e/*/*.yaml testing/chainsaw/e2e-other/*/*.yaml testing/chainsaw/e2e/*/*/*.yaml testing/chainsaw/e2e-other/*/*/*.yaml + ##@ Generate .PHONY: check-generate From 66bacd8967f879dbea9a1ac0ac5e26518f92bb1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?= Date: Wed, 21 Feb 2024 22:15:42 +0100 Subject: [PATCH 5/5] revert env vars changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Charles-Edouard Brétéché --- .github/workflows/test.yaml | 10 +++++----- Makefile | 32 ++++++++++++++++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b26d0fc3fa..3b838371df 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -242,11 +242,11 @@ jobs: - run: make generate-chainsaw env: - CHAINSAW_PG_UPGRADE_FROM_VERSION: '15' - CHAINSAW_PG_UPGRADE_TO_VERSION: '16' - CHAINSAW_PG_VERSION: '15' -
CHAINSAW_POSTGIS_VERSION: '3.4' - CHAINSAW_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' + KUTTL_PG_UPGRADE_FROM_VERSION: '15' + KUTTL_PG_UPGRADE_TO_VERSION: '16' + KUTTL_PG_VERSION: '15' + KUTTL_POSTGIS_VERSION: '3.4' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0' - run: | make check-chainsaw && exit diff --git a/Makefile b/Makefile index 8982b61334..81cbe2b74b 100644 --- a/Makefile +++ b/Makefile @@ -258,28 +258,28 @@ generate-kuttl: ## Generate kuttl tests done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e-other/*/*.yaml testing/kuttl/e2e/*/*/*.yaml testing/kuttl/e2e-other/*/*/*.yaml .PHONY: generate-chainsaw -generate-chainsaw: export CHAINSAW_PG_UPGRADE_FROM_VERSION ?= 15 -generate-chainsaw: export CHAINSAW_PG_UPGRADE_TO_VERSION ?= 16 -generate-chainsaw: export CHAINSAW_PG_VERSION ?= 16 -generate-chainsaw: export CHAINSAW_POSTGIS_VERSION ?= 3.4 -generate-chainsaw: export CHAINSAW_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0 -generate-chainsaw: export CHAINSAW_TEST_DELETE_NAMESPACE ?= chainsaw-test-delete-namespace +generate-chainsaw: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 +generate-chainsaw: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 +generate-chainsaw: export KUTTL_PG_VERSION ?= 16 +generate-chainsaw: export KUTTL_POSTGIS_VERSION ?= 3.4 +generate-chainsaw: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.1-0 +generate-chainsaw: export KUTTL_TEST_DELETE_NAMESPACE ?= chainsaw-test-delete-namespace generate-chainsaw: ## Generate chainsaw tests [ ! -d testing/chainsaw/e2e-generated ] || rm -r testing/chainsaw/e2e-generated [ ! 
-d testing/chainsaw/e2e-generated-other ] || rm -r testing/chainsaw/e2e-generated-other bash -ceu ' \ - case $(CHAINSAW_PG_VERSION) in \ - 16 ) export CHAINSAW_BITNAMI_IMAGE_TAG=16.0.0-debian-11-r3 ;; \ - 15 ) export CHAINSAW_BITNAMI_IMAGE_TAG=15.0.0-debian-11-r4 ;; \ - 14 ) export CHAINSAW_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ - 13 ) export CHAINSAW_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ - 12 ) export CHAINSAW_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ - 11 ) export CHAINSAW_BITNAMI_IMAGE_TAG=11.17.0-debian-11-r39 ;; \ + case $(KUTTL_PG_VERSION) in \ + 16 ) export KUTTL_BITNAMI_IMAGE_TAG=16.0.0-debian-11-r3 ;; \ + 15 ) export KUTTL_BITNAMI_IMAGE_TAG=15.0.0-debian-11-r4 ;; \ + 14 ) export KUTTL_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ + 13 ) export KUTTL_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ + 12 ) export KUTTL_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ + 11 ) export KUTTL_BITNAMI_IMAGE_TAG=11.17.0-debian-11-r39 ;; \ esac; \ render() { envsubst '"'"' \ - $$CHAINSAW_PG_UPGRADE_FROM_VERSION $$CHAINSAW_PG_UPGRADE_TO_VERSION \ - $$CHAINSAW_PG_VERSION $$CHAINSAW_POSTGIS_VERSION $$CHAINSAW_PSQL_IMAGE \ - $$CHAINSAW_BITNAMI_IMAGE_TAG $$CHAINSAW_TEST_DELETE_NAMESPACE'"'"'; }; \ + $$KUTTL_PG_UPGRADE_FROM_VERSION $$KUTTL_PG_UPGRADE_TO_VERSION \ + $$KUTTL_PG_VERSION $$KUTTL_POSTGIS_VERSION $$KUTTL_PSQL_IMAGE \ + $$KUTTL_BITNAMI_IMAGE_TAG $$KUTTL_TEST_DELETE_NAMESPACE'"'"'; }; \ while [ $$# -gt 0 ]; do \ source="$${1}" target="$${1/e2e/e2e-generated}"; \ mkdir -p "$${target%/*}"; render < "$${source}" > "$${target}"; \