diff --git a/.github/workflows/ami-release-nix-single.yml b/.github/workflows/ami-release-nix-single.yml index 500bf26d5..6209c5692 100644 --- a/.github/workflows/ami-release-nix-single.yml +++ b/.github/workflows/ami-release-nix-single.yml @@ -40,10 +40,18 @@ jobs: run: | echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT - - uses: DeterminateSystems/nix-installer-action@main + - name: Install nix + uses: cachix/install-nix-action@v27 + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - name: Set PostgreSQL version environment variable - run: echo "POSTGRES_MAJOR_VERSION=${{ github.event.inputs.postgres_version }}" >> $GITHUB_ENV + run: | + echo "POSTGRES_MAJOR_VERSION=${{ github.event.inputs.postgres_version }}" >> $GITHUB_ENV + echo "EXECUTION_ID=${{ github.run_id }}-${{ matrix.postgres_version }}" >> $GITHUB_ENV - name: Generate common-nix.vars.pkr.hcl run: | @@ -57,18 +65,18 @@ jobs: env: POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} run: | - packer init amazon-arm64-nix.pkr.hcl GIT_SHA=${{ steps.get_sha.outputs.sha }} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- init amazon-arm64-nix.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var 
"ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl - name: Build AMI stage 2 env: POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} run: | - packer init stage2-nix-psql.pkr.hcl GIT_SHA=${{ steps.get_sha.outputs.sha }} + nix run github:supabase/postgres/${GIT_SHA}#packer -- init stage2-nix-psql.pkr.hcl POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} - packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl - name: Grab release version id: process_release_version @@ -147,10 +155,10 @@ jobs: - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" 
--output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml index 0c97d8374..27a1c0eb1 100644 --- a/.github/workflows/ami-release-nix.yml +++ b/.github/workflows/ami-release-nix.yml @@ -17,14 +17,20 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-4vcpu-ubuntu-2404 outputs: postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} steps: - name: Checkout Repo uses: supabase/postgres/.github/actions/shared-checkout@HEAD - - uses: DeterminateSystems/nix-installer-action@main + - name: Install nix + uses: cachix/install-nix-action@v27 + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - name: Set PostgreSQL versions id: set-versions @@ -38,7 +44,7 @@ jobs: matrix: postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} include: - - runner: large-linux-arm + - runner: blacksmith-2vcpu-ubuntu-2404-arm runs-on: ${{ matrix.runner }} timeout-minutes: 150 @@ -52,7 +58,14 @@ jobs: aws-region: "us-east-1" output-credentials: true role-duration-seconds: 7200 - - uses: DeterminateSystems/nix-installer-action@main + + - name: Install nix + uses: cachix/install-nix-action@v27 + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% 
cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - name: Run checks if triggered manually if: ${{ github.event_name == 'workflow_dispatch' }} @@ -64,7 +77,9 @@ jobs: fi - name: Set PostgreSQL version environment variable - run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + run: | + echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + echo "EXECUTION_ID=${{ github.run_id }}-${{ matrix.postgres_version }}" >> $GITHUB_ENV - name: Generate common-nix.vars.pkr.hcl run: | @@ -78,19 +93,19 @@ jobs: env: POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} run: | - packer init amazon-arm64-nix.pkr.hcl GIT_SHA=${{github.sha}} + nix run github:supabase/postgres/${GIT_SHA}#packer -- init amazon-arm64-nix.pkr.hcl # why is postgresql_major defined here instead of where the _three_ other postgresql_* variables are defined? - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl - name: Build AMI stage 2 env: POSTGRES_MAJOR_VERSION: ${{ env.POSTGRES_MAJOR_VERSION }} run: | - packer init stage2-nix-psql.pkr.hcl GIT_SHA=${{github.sha}} + nix run github:supabase/postgres/${GIT_SHA}#packer -- init stage2-nix-psql.pkr.hcl POSTGRES_MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} - packer build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" 
-var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git_sha=${GIT_SHA}" -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl - name: Grab release version id: process_release_version @@ -169,9 +184,9 @@ jobs: - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/check-shellscripts.yml b/.github/workflows/check-shellscripts.yml index aabf17413..9d1389e9e 100644 --- a/.github/workflows/check-shellscripts.yml +++ b/.github/workflows/check-shellscripts.yml @@ -7,6 +7,10 @@ on: pull_request: workflow_dispatch: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + permissions: contents: read diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 
f10f5f58e..12abc8e91 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,10 @@ on: permissions: contents: read +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: check-release-version: timeout-minutes: 5 diff --git a/.github/workflows/dockerhub-release-matrix.yml b/.github/workflows/dockerhub-release-matrix.yml index 263b0112a..5862df8a5 100644 --- a/.github/workflows/dockerhub-release-matrix.yml +++ b/.github/workflows/dockerhub-release-matrix.yml @@ -16,7 +16,7 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-4vcpu-ubuntu-2404 outputs: matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} steps: diff --git a/.github/workflows/manual-docker-release.yml b/.github/workflows/manual-docker-release.yml index 3f86643ff..6cc5a396c 100644 --- a/.github/workflows/manual-docker-release.yml +++ b/.github/workflows/manual-docker-release.yml @@ -13,7 +13,7 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-8vcpu-ubuntu-2404 outputs: matrix_config: ${{ steps.set-matrix.outputs.matrix_config }} steps: @@ -46,7 +46,7 @@ jobs: needs: prepare strategy: matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }} - runs-on: large-linux-x86 + runs-on: blacksmith-8vcpu-ubuntu-2404 outputs: build_args: ${{ steps.args.outputs.result }} steps: @@ -72,7 +72,7 @@ jobs: matrix: postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} arch: [amd64, arm64] - runs-on: ${{ matrix.arch == 'amd64' && 'large-linux-x86' || 'large-linux-arm' }} + runs-on: ${{ matrix.arch == 'amd64' && 'blacksmith-8vcpu-ubuntu-2404' || 'large-linux-arm' }} timeout-minutes: 180 steps: - name: Checkout Repo @@ -141,7 +141,7 @@ jobs: strategy: matrix: include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} - runs-on: large-linux-x86 + runs-on: blacksmith-8vcpu-ubuntu-2404 steps: - name: Checkout Repo uses: 
supabase/postgres/.github/actions/shared-checkout@HEAD @@ -185,7 +185,7 @@ jobs: ${{ steps.get_version.outputs.pg_version }}_arm64 combine_results: needs: [prepare, merge_manifest] - runs-on: large-linux-x86 + runs-on: blacksmith-8vcpu-ubuntu-2404 steps: - name: Checkout Repo uses: supabase/postgres/.github/actions/shared-checkout@HEAD diff --git a/.github/workflows/mirror-postgrest.yml b/.github/workflows/mirror-postgrest.yml index 0195ab695..d649445b2 100644 --- a/.github/workflows/mirror-postgrest.yml +++ b/.github/workflows/mirror-postgrest.yml @@ -6,7 +6,7 @@ on: - develop paths: - ".github/workflows/mirror-postgrest.yml" - - "common.vars*" + - "ansible/vars.yml" permissions: contents: read diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml index 077c525dd..695dc2abf 100644 --- a/.github/workflows/nix-build.yml +++ b/.github/workflows/nix-build.yml @@ -14,15 +14,19 @@ permissions: contents: write packages: write +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + jobs: build-run-image: strategy: fail-fast: false matrix: include: - - runner: large-linux-x86 + - runner: blacksmith-32vcpu-ubuntu-2404 arch: amd64 - - runner: large-linux-arm + - runner: blacksmith-32vcpu-ubuntu-2404-arm arch: arm64 - runner: macos-latest-xlarge arch: arm64 @@ -82,6 +86,7 @@ jobs: - name: Aggressive disk cleanup for DuckDB build if: matrix.runner == 'macos-latest-xlarge' run: | + nix --version echo "=== BEFORE CLEANUP ===" df -h # Remove major space consumers @@ -117,6 +122,8 @@ jobs: needs: build-run-image if: ${{ success() }} uses: ./.github/workflows/testinfra-ami-build.yml + secrets: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} run-tests: needs: build-run-image diff --git a/.github/workflows/publish-migrations-prod.yml b/.github/workflows/publish-migrations-prod.yml index ffb633683..81c686db7 100644 --- 
a/.github/workflows/publish-migrations-prod.yml +++ b/.github/workflows/publish-migrations-prod.yml @@ -5,7 +5,7 @@ on: jobs: build: - runs-on: large-linux-arm + runs-on: blacksmith-2vcpu-ubuntu-2404-arm timeout-minutes: 15 permissions: id-token: write diff --git a/.github/workflows/publish-migrations-staging.yml b/.github/workflows/publish-migrations-staging.yml index 7acb46d22..2ed25f84a 100644 --- a/.github/workflows/publish-migrations-staging.yml +++ b/.github/workflows/publish-migrations-staging.yml @@ -8,7 +8,7 @@ on: jobs: build: - runs-on: large-linux-arm + runs-on: blacksmith-2vcpu-ubuntu-2404-arm timeout-minutes: 15 permissions: id-token: write diff --git a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml index c8b3abe46..f4e71260a 100644 --- a/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml +++ b/.github/workflows/publish-nix-pgupgrade-bin-flake-version.yml @@ -12,7 +12,7 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-2vcpu-ubuntu-2404 outputs: postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} steps: diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml index c58e90d83..d828e819a 100644 --- a/.github/workflows/publish-nix-pgupgrade-scripts.yml +++ b/.github/workflows/publish-nix-pgupgrade-scripts.yml @@ -19,7 +19,7 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-2vcpu-ubuntu-2404 outputs: postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} steps: diff --git a/.github/workflows/qemu-image-build.yml b/.github/workflows/qemu-image-build.yml index ffd12683c..1be4caa15 100644 --- a/.github/workflows/qemu-image-build.yml +++ b/.github/workflows/qemu-image-build.yml @@ -26,12 +26,10 @@ jobs: - name: Checkout Repo uses: supabase/postgres/.github/actions/shared-checkout@HEAD - - uses: 
DeterminateSystems/nix-installer-action@main - - name: Set PostgreSQL versions - only builds pg17 atm id: set-versions run: | - VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[1]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]') + VERSIONS=$(yq '.postgres_major[1]' ansible/vars.yml | jq -R -s -c 'split("\n")[:-1]') echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT build: @@ -55,7 +53,7 @@ jobs: - name: Run checks if triggered manually if: ${{ github.event_name == 'workflow_dispatch' }} run: | - SUFFIX=$(sudo nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') + SUFFIX=$(yq ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/') if [[ -z $SUFFIX ]] ; then echo "Version must include non-numeric characters if built manually." exit 1 @@ -67,7 +65,9 @@ jobs: sudo chmod 666 /dev/kvm - name: Set PostgreSQL version environment variable - run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + run: | + echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + echo "EXECUTION_ID=${{ github.run_id }}-${{ matrix.postgres_version }}" >> $GITHUB_ENV - name: Generate common-nix.vars.pkr.hcl run: | @@ -144,12 +144,22 @@ jobs: docker tag "postgres:$IMAGE_TAG" "$REGISTRY/$REPOSITORY:$IMAGE_TAG" docker push "$REGISTRY/$REPOSITORY:$IMAGE_TAG" + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres QEMU artifact failed' + SLACK_FOOTER: '' + - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r 
aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6be368784..766e50b72 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,7 +9,7 @@ permissions: jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-4vcpu-ubuntu-2404 outputs: postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} steps: diff --git a/.github/workflows/testinfra-ami-build.yml b/.github/workflows/testinfra-ami-build.yml index 5636a34c3..0cb0cd483 100644 --- a/.github/workflows/testinfra-ami-build.yml +++ b/.github/workflows/testinfra-ami-build.yml @@ -3,21 +3,31 @@ name: Testinfra Integration Tests Nix on: workflow_dispatch: workflow_call: + secrets: + DEV_AWS_ROLE: + description: 'AWS role for dev environment' + required: true permissions: - contents: read + contents: write id-token: write jobs: prepare: - runs-on: large-linux-x86 + runs-on: blacksmith-2vcpu-ubuntu-2404 outputs: postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }} steps: - name: Checkout Repo uses: supabase/postgres/.github/actions/shared-checkout@HEAD - - uses: DeterminateSystems/nix-installer-action@main + - name: Install nix + uses: cachix/install-nix-action@v27 + with: + install_url: 
https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - name: Set PostgreSQL versions id: set-versions @@ -32,22 +42,44 @@ jobs: matrix: postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }} include: - - runner: arm-runner + - runner: blacksmith-2vcpu-ubuntu-2404-arm arch: arm64 ubuntu_release: noble ubuntu_version: 24.04 mcpu: neoverse-n1 runs-on: ${{ matrix.runner }} timeout-minutes: 150 - permissions: - contents: write - packages: write - id-token: write steps: - name: Checkout Repo uses: supabase/postgres/.github/actions/shared-checkout@HEAD + - name: Debug AWS role secret + run: | + echo "Checking DEV_AWS_ROLE secret availability..." + if [ -z "${{ secrets.DEV_AWS_ROLE }}" ]; then + echo "❌ DEV_AWS_ROLE is empty or not available" + else + echo "✅ DEV_AWS_ROLE is available" + fi + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + output-credentials: true + role-duration-seconds: 7200 + + - name: Install nix + uses: cachix/install-nix-action@v27 + with: + install_url: https://releases.nixos.org/nix/nix-2.29.1/install + extra_nix_config: | + substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com + trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + + - id: args uses: mikefarah/yq@master with: @@ -64,11 +96,13 @@ jobs: run: echo "random_string=$(openssl rand -hex 8)" >> $GITHUB_OUTPUT - name: Set PostgreSQL 
version environment variable - run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + run: | + echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + echo "EXECUTION_ID=${{ github.run_id }}-${{ matrix.postgres_version }}" >> $GITHUB_ENV - name: Generate common-nix.vars.pkr.hcl run: | - PG_VERSION=$(sudo nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(nix run nixpkgs#yq -- '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) PG_VERSION=$(echo "$PG_VERSION" | tr -d '"') # Remove any surrounding quotes echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl # Ensure there's a newline at the end of the file @@ -76,15 +110,15 @@ jobs: - name: Build AMI stage 1 run: | - packer init amazon-arm64-nix.pkr.hcl GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- init amazon-arm64-nix.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl - name: Build AMI stage 2 run: | - packer init 
stage2-nix-psql.pkr.hcl GIT_SHA=${{github.sha}} - packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- init stage2-nix-psql.pkr.hcl + nix run github:supabase/postgres/${GIT_SHA}#packer -- build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${EXECUTION_ID}" -var "postgres_major_version=${POSTGRES_MAJOR_VERSION}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=${{ steps.random.outputs.random_string }}" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl - name: Run tests timeout-minutes: 10 @@ -98,12 +132,12 @@ jobs: - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances 
--region ap-southeast-1 --instance-ids || true + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${EXECUTION_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids || true - name: Cleanup AMIs if: always() diff --git a/.gitignore b/.gitignore index 5372bfdeb..1e8b1d680 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,7 @@ result* .history .envrc .direnv - +.nixos-test-history #IDE .idea/ @@ -30,3 +30,4 @@ common-nix.vars.pkr.hcl # pre-commit config is managed in nix .pre-commit-config.yaml +nixos.qcow2 \ No newline at end of file diff --git a/Dockerfile-15 b/Dockerfile-15 index 68e9f4e59..28f5122c0 100644 --- a/Dockerfile-15 +++ b/Dockerfile-15 @@ -30,7 +30,7 @@ ARG pg_repack_release=1.4.8 ARG vault_release=0.2.8 ARG groonga_release=12.0.8 ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.0 +ARG wrappers_release=0.5.4 ARG hypopg_release=1.3.1 ARG pgvector_release=0.4.0 ARG pg_tle_release=1.3.2 @@ -90,10 +90,6 @@ RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/s RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ RUN chown -R postgres:postgres /usr/share/postgresql/ -# Create symbolic links for contrib directory -RUN mkdir -p /usr/lib/postgresql/share/postgresql/contrib \ - && find /nix/var/nix/profiles/default/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ - && chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/contrib/ RUN chown -R postgres:postgres /usr/lib/postgresql @@ -190,10 +186,10 @@ COPY migrations/db /docker-entrypoint-initdb.d/ COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql COPY 
ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql -# # Add upstream entrypoint script +# # Add upstream entrypoint script pinned for now to last tested version COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu ADD --chmod=0755 \ - https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/15/bullseye/docker-entrypoint.sh \ /usr/local/bin/docker-entrypoint.sh RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql diff --git a/Dockerfile-17 b/Dockerfile-17 index 5ad8ed397..3ad03b37c 100644 --- a/Dockerfile-17 +++ b/Dockerfile-17 @@ -31,7 +31,7 @@ ARG pg_repack_release=1.4.8 ARG vault_release=0.2.8 ARG groonga_release=12.0.8 ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.0 +ARG wrappers_release=0.5.4 ARG hypopg_release=1.3.1 ARG pgvector_release=0.4.0 ARG pg_tle_release=1.3.2 @@ -91,7 +91,7 @@ RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/s RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ RUN chown -R postgres:postgres /usr/share/postgresql/ -# Create symbolic links for contrib directory + RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 RUN chown -R postgres:postgres /usr/lib/postgresql @@ -198,10 +198,10 @@ COPY migrations/db /docker-entrypoint-initdb.d/ COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql -# # Add upstream entrypoint script +# # Add upstream entrypoint script pinned for now to last tested version COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu ADD --chmod=0755 \ - 
https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/17/bullseye/docker-entrypoint.sh \ /usr/local/bin/docker-entrypoint.sh RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql diff --git a/Dockerfile-orioledb-17 b/Dockerfile-orioledb-17 index 41254e4f7..29f2fa527 100644 --- a/Dockerfile-orioledb-17 +++ b/Dockerfile-orioledb-17 @@ -31,7 +31,7 @@ ARG pg_repack_release=1.4.8 ARG vault_release=0.2.8 ARG groonga_release=12.0.8 ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.0 +ARG wrappers_release=0.5.4 ARG hypopg_release=1.3.1 ARG pgvector_release=0.4.0 ARG pg_tle_release=1.3.2 @@ -91,7 +91,7 @@ RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/s RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ RUN chown -R postgres:postgres /usr/share/postgresql/ -# Create symbolic links for contrib directory + RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 RUN chown -R postgres:postgres /usr/lib/postgresql @@ -203,10 +203,10 @@ COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00- RUN echo "CREATE EXTENSION orioledb;" > /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql && \ chown postgres:postgres /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql -# # Add upstream entrypoint script +# # Add upstream entrypoint script pinned for now to last tested version COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu ADD --chmod=0755 \ - https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/17/bullseye/docker-entrypoint.sh \ 
/usr/local/bin/docker-entrypoint.sh RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql diff --git a/README.md b/README.md index a176ccec5..28685b3b9 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,190 @@ -# Postgres + goodies +# Getting Started with Supabase Postgres + +This guide covers getting up and running with Supabase Postgres. After reading this guide, you will understand: + +* What Supabase Postgres provides and why you might want to use it +* How the project is organized and what each directory contains +* How to build and run Postgres with extensions locally +* The basics of working with the extension ecosystem + +--- + +## What is Supabase Postgres? + +Supabase Postgres is a batteries-included PostgreSQL distribution that provides unmodified PostgreSQL with a curated set of the most useful extensions pre-installed. Think of it as PostgreSQL with superpowers - you get the reliability and power of standard PostgreSQL, plus immediate access to extensions for tasks like: + +* Full-text search and indexing +* Geospatial data processing +* Time-series data management +* JSON validation and GraphQL support +* Cryptography and security +* Message queuing +* And much more + +The goal is simple: make it fast and easy to get started with a production-ready PostgreSQL setup without having to hunt down, compile, and configure dozens of extensions yourself. + +## Philosophy + +Supabase Postgres follows these core principles: + +1. **Unmodified PostgreSQL** - We don't fork or modify PostgreSQL itself. You get standard PostgreSQL with extensions. +2. **Curated Extensions** - We include well-maintained, production-tested extensions that solve real problems. +3. **Multi-version Support** - Currently supporting PostgreSQL 15, 17, and OrioleDB-17. +4. **Ready for Production** - Configured with sensible defaults for replication, security, and performance. +5. **Open Source** - Everything is open source and can be self-hosted. 
+ +## Directory Structure + +Here's a comprehensive overview of the project's directory structure: + +| File/Directory | Purpose | +| -------------- | ------- | +| **nix/** | Core build system directory containing all Nix expressions for building PostgreSQL and extensions | +| nix/postgresql/ | PostgreSQL version configurations, patches, and base package definitions | +| nix/ext/ | Individual extension package definitions and build configurations | +| nix/ext/wrappers/ | Wrapper scripts and utilities for extensions | +| nix/ext/tests/ | Extension-specific integration test suites implemented using nixos-test| +| nix/overlays/ | Nix overlays for customizing and overriding package definitions | +| nix/tools/ | Build tools, utilities, and helper scripts | +| nix/docker/ | Docker image build definitions using Nix | +| nix/tests/ | postgres specific test suites for validating builds, including pg_regress tests | +| nix/tests/smoke/ | Quick smoke tests for basic functionality | +| nix/tests/migrations/ | Migration and upgrade test scenarios | +| nix/tests/expected/ | Expected `pg_regress` test outputs for validation | +| nix/tests/sql/ | SQL test scripts that are run in `pg_regress` tests | +| nix/docs/ | Build system documentation | +| **ansible/** | Infrastructure as Code for server configuration and deployment of production hosted AWS AMI image | +| ansible/playbook.yml | Main Ansible playbook for PostgreSQL/PostgREST/pgbouncer/Auth server setup | +| ansible/tasks/ | Modular Ansible tasks for specific configuration steps | +| ansible/files/ | Static files, scripts, and templates used by Ansible | +| ansible/vars.yml | AMI version tracking, legacy package version tracking | +| **migrations/** | Database migration management and upgrade tools | +| migrations/db/ | Database schema migrations | +| migrations/db/migrations/ | Individual migration files | +| migrations/db/init-scripts/ | Database initialization scripts | +| migrations/tests/ | Migration testing 
infrastructure | +| migrations/tests/database/ | Database-specific migration tests | +| migrations/tests/storage/ | Storage-related migration tests | +| migrations/tests/extensions/ | Extension migration tests | +| **docker/** | Container definitions and Docker-related files | +| docker/nix/ | Nix-based Docker build configurations | +| Dockerfile-15 | Docker image definition for PostgreSQL 15 | +| Dockerfile-17 | Docker image definition for PostgreSQL 17 | +| **tests/** | Integration and system tests | +| testinfra/ | Infrastructure tests using pytest framework | +| tests/ | General integration test suites | +| **scripts/** | Utility scripts for development and deployment | +| **docs/** | Additional documentation, images, and resources | +| **ebssurrogate/** | AWS EBS surrogate building for AMI creation | +| **http/** | HTTP-related configurations and files | +| **rfcs/** | Request for Comments - design documents and proposals | +| **db/** | Database-related utilities and configurations | +| **.github/** | GitHub-specific configurations (Actions, templates, etc.) | +| **Root Config Files** | | +| .gitignore | Git ignore patterns | +| .envrc.recommended | Recommended environment variables for development | +| ansible.cfg | Ansible configuration | +| amazon-arm64-nix.pkr.hcl | Packer configuration for AWS ARM64 builds | +| common-nix.vars.pkr.hcl | Common Packer variables | +| development-arm.vars.pkr.hcl | ARM development environment variables | +| CONTRIBUTING.md | Contribution guidelines | +| README.md | Main project documentation | + +## Key Concepts + +### Extensions + +Extensions are the superpower of PostgreSQL. They add functionality without modifying the core database. 
Supabase Postgres includes dozens of pre-built extensions covering: + +* **Data Types & Validation** - pg_jsonschema, pg_hashids +* **Search & Indexing** - pgroonga, rum, hypopg +* **Geospatial** - PostGIS, pgrouting +* **Time-series** - TimescaleDB +* **Security** - pgsodium, vault, pgaudit +* **Development** - pgtap, plpgsql_check +* **And many more...** + +### Multi-version Support + +The project supports multiple PostgreSQL versions simultaneously: + +* **PostgreSQL 15** - Stable, battle-tested version +* **PostgreSQL 17** - Latest features and improvements +* **OrioleDB-17** - Experimental storage engine for PostgreSQL 17 + +Each version has its own set of compatible extensions defined in the Nix build system. + +### Build System (Nix) + +The project uses Nix as its build system, which provides: + +* **Reproducible Builds** - Same input always produces the same output +* **Declarative Configuration** - Define what you want, not how to build it +* **Dependency Management** - Automatic handling of complex dependency trees +* **Cross-platform Support** - Build for Linux, macOS, and more + +## Common Tasks + +### Building Locally + +To build PostgreSQL with extensions locally: + +```bash +# Build PostgreSQL 15 with extensions +nix build .#psql_15/bin + +# Build PostgreSQL 17 +nix build .#psql_17/bin + +# Build a specific extension +nix build .#psql_17/exts/pg_graphql +``` + +### Running Tests + +```bash +# Run all tests +nix flake check -L + +# Run specific test suite (for macos apple silicon for example) +nix build .#checks.aarch64-darwin.psql_17 -L +``` + +### Creating Docker Images + +```bash +# Build Docker image for PostgreSQL 15 +docker build -f Dockerfile-15 -t supabase-postgres:15 . + +# Build Docker image for PostgreSQL 17 +docker build -f Dockerfile-17 -t supabase-postgres:17 . 
+``` + +## Next Steps + +Now that you understand the basics of Supabase Postgres: + +* Check the [Installation Guide](https://github.com/supabase/postgres/wiki) for deployment options +* Explore the [Extension Documentation](#) to learn about available extensions +* Review [Contributing Guidelines](CONTRIBUTING.md) if you want to contribute +* Join the [Supabase Community](https://github.com/supabase/postgres/discussions) for questions and discussions + +## Getting Help + +* **GitHub Issues** - For bugs and feature requests +* **Discussions** - For questions and general discussion +* **Wiki** - For detailed documentation +* **Discord** - For real-time chat with the community + +--- + +This is the same PostgreSQL build that powers [Supabase](https://supabase.io), battle-tested in production by over one million projects. -Unmodified Postgres with some useful plugins. Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. ## Primary Features -- ✅ Postgres [postgresql-15.8](https://www.postgresql.org/docs/15/index.html) -- ✅ Postgres [postgresql-17.4](https://www.postgresql.org/docs/17/index.html) -- ✅ Postgres [orioledb-postgresql-17_6](https://github.com/orioledb/orioledb) +- ✅ Postgres [postgresql-15.14](https://www.postgresql.org/docs/15/index.html) +- ✅ Postgres [postgresql-17.6](https://www.postgresql.org/docs/17/index.html) +- ✅ Postgres [orioledb-postgresql-17_11](https://github.com/orioledb/orioledb) - ✅ Ubuntu 24.04 (Noble Numbat). - ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. 
- ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. @@ -14,14 +193,15 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to ### PostgreSQL 15 Extensions | Extension | Version | Description | | ------------- | :-------------: | ------------- | -| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | -| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [http]() | [1.6]() | | +| [hypopg]() | [1.4.1]() | | +| [index_advisor]() | [0.2.0]() | | | [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | -| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_cron]() | [1.6.4]() | Run Cron jobs through PostgreSQL (multi-version compatible) | | [pg_graphql](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | | [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | 
[cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | | [pg_jsonschema](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | -| [pg_net](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_net]() | [0.8.0]() | | | [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | | [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | | [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | @@ -31,31 +211,31 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| | [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | | [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | -| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | -| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgsodium]() | [3.1.8]() | | | [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | -| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | | [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | | [plv8](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | [3.1.10](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | V8 Engine Javascript Procedural Language add-on for 
PostgreSQL | | [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | -| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | -| [supabase-wrappers](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [rum]() | [1.3]() | | | [supautils](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | -| [timescaledb-apache](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | [2.16.1](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | Scales PostgreSQL for time-series data via automatic partitioning across time and space | +| [timescaledb]() | [2.9.1]() | | | [vault](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [vector]() | [0.8.0]() | | | [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | +| [wrappers]() | [0.5.4]() | | ### PostgreSQL 17 Extensions | Extension | Version | Description | | ------------- | :-------------: 
| ------------- | -| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | -| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [http]() | [1.6]() | | +| [hypopg]() | [1.4.1]() | | +| [index_advisor]() | [0.2.0]() | | | [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | -| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_cron]() | [1.6.4]() | Run Cron jobs through PostgreSQL (multi-version compatible) | | [pg_graphql](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | | [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | | [pg_jsonschema](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation 
for PostgreSQL | -| [pg_net](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_net]() | [0.19.5]() | | | [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | | [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | | [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | @@ -65,30 +245,30 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| | [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | | [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | -| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | -| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgsodium]() | [3.1.8]() | | | [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | -| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | | [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | | [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL 
| -| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | -| [supabase-wrappers](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [rum]() | [1.3]() | | | [supautils](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | | [vault](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [vector]() | [0.8.0]() | | | [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | +| [wrappers]() | [0.5.4]() | | ### PostgreSQL orioledb-17 Extensions | Extension | Version | Description | | ------------- | :-------------: | ------------- | -| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | -| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | -| 
[orioledb](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | [orioledb](https://github.com/orioledb/orioledb/archive/beta10.tar.gz) | orioledb | +| [http]() | [1.6]() | | +| [hypopg]() | [1.4.1]() | | +| [index_advisor]() | [0.2.0]() | | +| [orioledb](https://github.com/orioledb/orioledb/archive/beta12.tar.gz) | [orioledb](https://github.com/orioledb/orioledb/archive/beta12.tar.gz) | orioledb | | [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | -| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_cron]() | [1.6.4]() | Run Cron jobs through PostgreSQL (multi-version compatible) | | [pg_graphql](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | [1.5.11](https://github.com/supabase/pg_graphql/archive/v1.5.11.tar.gz) | GraphQL support for PostreSQL | | [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | | [pg_jsonschema](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | -| 
[pg_net](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_net]() | [0.19.5]() | | | [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | | [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | | [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | @@ -98,17 +278,16 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. 
| | [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | | [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | -| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | -| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgsodium]() | [3.1.8]() | | | [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | -| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | | [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | | [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL 
| -| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | -| [supabase-wrappers](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | [0.5.0](https://github.com/supabase/wrappers/archive/v0.5.0.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [rum]() | [1.3]() | | | [supautils](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | [2.9.4](https://github.com/supabase/supautils/archive/refs/tags/v2.9.4.tar.gz) | PostgreSQL extension for enhanced security | | [vault](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | [0.3.1](https://github.com/supabase/vault/archive/refs/tags/v0.3.1.tar.gz) | Store encrypted secrets in PostgreSQL | +| [vector]() | [0.8.0]() | | | [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | +| [wrappers]() | [0.5.4]() | | ## Additional Goodies *This is only available for our AWS EC2* @@ -118,6 +297,7 @@ Unmodified Postgres with some useful plugins. Our goal with this repo is not to | [PostgREST](https://postgrest.org/en/stable/) | [v13.0.4](https://github.com/PostgREST/postgrest/releases/tag/v13.0.4) | Instantly transform your database into an RESTful API. | | [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. 
| --> + ## Install See all installation instructions in the [repo wiki](https://github.com/supabase/postgres/wiki). diff --git a/ansible/files/admin_api_scripts/grow_fs.sh b/ansible/files/admin_api_scripts/grow_fs.sh index c8c14890f..01e4736ed 100644 --- a/ansible/files/admin_api_scripts/grow_fs.sh +++ b/ansible/files/admin_api_scripts/grow_fs.sh @@ -4,37 +4,61 @@ set -euo pipefail VOLUME_TYPE=${1:-data} +# lsb release +UBUNTU_VERSION=$(lsb_release -rs) + if pgrep resizefs; then echo "resize2fs is already running" exit 1 fi -# Parses the output of lsblk to get the root partition number -# Example output: -# NAME MOUNTPOINT -# nvme0n1 -# ├─nvme0n1p1 /boot -# └─nvme0n1p3 / -# nvme1n1 /data -# -# Resulting in: -# └─nvme0n1p3 / -> nvme0n1p3 -> 3 -ROOT_PARTITION_NUMBER=$(lsblk -no NAME,MOUNTPOINT | grep ' /$' | awk '{print $1;}' | sed 's/.*nvme[0-9]n[0-9]p//g') +# install amazon disk utilities if not present on 24.04 +if [ "${UBUNTU_VERSION}" = "24.04" ] && ! /usr/bin/dpkg-query -W amazon-ec2-utils >/dev/null 2>&1; then + apt-get update + apt-get install -y amazon-ec2-utils || true +fi + +# We currently mount 3 possible disks +# - /dev/xvda (root disk) +# - /dev/xvdh (data disk) +# - /dev/xvdp (upgrade data disk), not used here +# Initialize variables at 20.04 levels +XVDA_DEVICE="/dev/nvme0n1" +XVDH_DEVICE="/dev/nvme1n1" +# Map AWS devices to NVMe for ubuntu 24.04 and later +if [ "${UBUNTU_VERSION}" = "24.04" ] && /usr/bin/dpkg-query -W amazon-ec2-utils >/dev/null 2>&1; then + for nvme_dev in $(lsblk -dprno name,type | grep disk | awk '{print $1}'); do + if [ -b "$nvme_dev" ]; then + mapping=$(ebsnvme-id -b "$nvme_dev" 2>/dev/null) + case "$mapping" in + "xvda"|"/dev/xvda") XVDA_DEVICE="$nvme_dev" ;; + "xvdh"|"/dev/xvdh") XVDH_DEVICE="$nvme_dev" ;; + esac + fi + done +fi + +echo "Using devices - Root: $XVDA_DEVICE, Data: $XVDH_DEVICE" + +# Get root partition using findmnt +ROOT_DEVICE_FULL=$(findmnt -no SOURCE /) +ROOT_DEVICE=$(lsblk -no PKNAME 
"$ROOT_DEVICE_FULL") +ROOT_PARTITION_NUMBER=$(echo "$ROOT_DEVICE_FULL" | sed "s|.*${ROOT_DEVICE}p||") if ! [[ "$ROOT_PARTITION_NUMBER" =~ ^[0-9]+$ ]]; then echo "Error: ROOT_PARTITION_NUMBER is not a valid number: $ROOT_PARTITION_NUMBER" exit 1 fi -if [ -b /dev/nvme1n1 ] ; then +if [ -b "${XVDH_DEVICE}" ] ; then if [[ "${VOLUME_TYPE}" == "data" ]]; then - resize2fs /dev/nvme1n1 + resize2fs "${XVDH_DEVICE}" elif [[ "${VOLUME_TYPE}" == "root" ]] ; then PLACEHOLDER_FL=/home/ubuntu/50M_PLACEHOLDER rm -f "${PLACEHOLDER_FL}" || true - growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" - resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" + growpart "${XVDA_DEVICE}" "${ROOT_PARTITION_NUMBER}" + resize2fs "${XVDA_DEVICE}p${ROOT_PARTITION_NUMBER}" if [[ ! -f "${PLACEHOLDER_FL}" ]] ; then fallocate -l50M "${PLACEHOLDER_FL}" fi @@ -43,7 +67,7 @@ if [ -b /dev/nvme1n1 ] ; then exit 1 fi else - growpart /dev/nvme0n1 "${ROOT_PARTITION_NUMBER}" - resize2fs "/dev/nvme0n1p${ROOT_PARTITION_NUMBER}" + growpart "${XVDA_DEVICE}" "${ROOT_PARTITION_NUMBER}" + resize2fs "${XVDA_DEVICE}p${ROOT_PARTITION_NUMBER}" fi echo "Done resizing disk" diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh index e5c4ddbf7..71016a294 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -348,10 +348,35 @@ function initiate_upgrade { locale-gen if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then - # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere - # excluding nvme0 since it is the root disk + # DATABASE_UPGRADE_DATA_MIGRATION_DEVICE_NAME = '/dev/xvdp' can be derived from the worker mount echo "5. 
Determining block device to mount" - BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }') + # lsb release + UBUNTU_VERSION=$(lsb_release -rs) + # install amazon disk utilities if not present on 24.04 + if [ "${UBUNTU_VERSION}" = "24.04" ] && ! /usr/bin/dpkg-query -W amazon-ec2-utils >/dev/null 2>&1; then + apt-get update + apt-get install -y amazon-ec2-utils || true + fi + if command -v ebsnvme-id >/dev/null 2>&1 && /usr/bin/dpkg-query -W amazon-ec2-utils >/dev/null 2>&1; then + for nvme_dev in $(lsblk -dprno name,size,mountpoint,type | grep disk | awk '{print $1}'); do + if [ -b "$nvme_dev" ]; then + mapping=$(ebsnvme-id -b "$nvme_dev" 2>/dev/null) + if [[ "$mapping" == "xvdp" || $mapping == "/dev/xvdp" ]]; then + BLOCK_DEVICE="$nvme_dev" + break + fi + fi + done + fi + + # Fallback to lsblk if ebsnvme-id is not available or no mapping found, pre ubuntu 20.04 + if [ -z "${BLOCK_DEVICE:-}" ]; then + echo "No block device found using ebsnvme-id, falling back to lsblk" + # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere + # excluding nvme0 since it is the root disk + BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; exit }') # exit ensures we grab the first only + fi + echo "Block device found: $BLOCK_DEVICE" mkdir -p "$MOUNT_POINT" diff --git a/ansible/files/envoy_config/lds.supabase.yaml b/ansible/files/envoy_config/lds.supabase.yaml index 6fdcb68c7..4a9a01668 100644 --- a/ansible/files/envoy_config/lds.supabase.yaml +++ b/ansible/files/envoy_config/lds.supabase.yaml @@ -259,6 +259,12 @@ resources: cluster: gotrue prefix_rewrite: / timeout: 35s + retry_policy: + retry_on: "connect-failure,refused-stream,gateway-error" + num_retries: 3 + retry_back_off: + base_interval: 1s + max_interval: 3s - match: prefix: /rest/v1/ query_parameters: diff --git a/ansible/files/envoy_config/lds.yaml 
b/ansible/files/envoy_config/lds.yaml index 97481c889..b5c2d6fd7 100644 --- a/ansible/files/envoy_config/lds.yaml +++ b/ansible/files/envoy_config/lds.yaml @@ -285,6 +285,12 @@ resources: cluster: gotrue prefix_rewrite: / timeout: 35s + retry_policy: + retry_on: "connect-failure,refused-stream,gateway-error" + num_retries: 3 + retry_back_off: + base_interval: 1s + max_interval: 3s - match: prefix: /rest/v1/ query_parameters: diff --git a/ansible/files/gotrue.service.j2 b/ansible/files/gotrue.service.j2 index 2478e99e6..144448cc6 100644 --- a/ansible/files/gotrue.service.j2 +++ b/ansible/files/gotrue.service.j2 @@ -1,14 +1,56 @@ [Unit] Description=Gotrue +# Avoid starting gotrue while cloud-init is running. It makes a lot of changes +# and I would like to rule out side effects of it running concurrently along +# side services. +After=cloud-init.service +Wants=cloud-init.target + +# Given the fact that auth uses SO_REUSEADDR, I want to rule out capabilities +# being modified between restarts early in boot. This plugs up the scenario that +# EADDRINUSE errors originate from a previous gotrue process starting without +# the SO_REUSEADDR flag (due to lacking capability at that point in boot proc) +# so when the next gotrue starts it can't re-use a slow releasing socket. +After=apparmor.service + +# We want sysctl's to be applied +After=systemd-sysctl.service + +# UFW Is modified by cloud init, but started non-blocking, so configuration +# could be in-flight while gotrue is starting. I want to ensure future rules +# that are relied on for security posture are applied before gotrue runs. +After=ufw.service + +# We need networking & resolution, auth uses the Go DNS resolver (not libc) +# so it's possible `localhost` resolution could be unstable early in startup. 
We +# care about this because SO_REUSEADDR eligibility checks the tuple +# (proto, family, addr, port) meaning the AF_INET (ipv4, ipv6) could affect the +# binding resulting in a second way for EADDRINUSE errors to surface. +# +# Note: We should consider removing localhost usage given `localhost` resolution +# can often be racey early in boot, can be difficult to debug and offers no real +# advantage in our infra. At the very least avoiding DNS resolved binding would +# be a good idea. +Wants=network-online.target systemd-resolved.service +After=network-online.target systemd-resolved.service + +# Auth server can't start unless postgres is online, lets remove a lot of auth +# server noise during slow starts by requiring it. +Wants=postgresql.service +After=postgresql.service + +# Lower start limit ival and burst to prevent the noisy flapping +StartLimitIntervalSec=10 +StartLimitBurst=5 + [Service] -Type=simple +Type=exec WorkingDirectory=/opt/gotrue -{% if qemu_mode is defined and qemu_mode %} -ExecStart=/opt/gotrue/gotrue -{% else %} + +# Both v2 & v3 need a config-dir for reloading support. ExecStart=/opt/gotrue/gotrue --config-dir /etc/auth.d -{% endif %} +ExecReload=/bin/kill -10 $MAINPID User=gotrue Restart=always @@ -17,11 +59,36 @@ RestartSec=3 MemoryAccounting=true MemoryMax=50% +# These are the historical location of env files. The /etc/auth.d dir will +# override them when present. EnvironmentFile=-/etc/gotrue.generated.env EnvironmentFile=/etc/gotrue.env EnvironmentFile=-/etc/gotrue.overrides.env +# Both v2 & v3 support reloading via signals, on linux this is SIGUSR1. +Environment=GOTRUE_RELOADING_SIGNAL_ENABLED=true +Environment=GOTRUE_RELOADING_SIGNAL_NUMBER=10 + +# Both v2 & v3 disable the poller. While gotrue sets it to off by default we +# defensively set it to false here. +Environment=GOTRUE_RELOADING_POLLER_ENABLED=false + +# Determines how much idle time must pass before triggering a reload. 
This +# ensures only 1 reload operation occurs during a burst of config updates. +Environment=GOTRUE_RELOADING_GRACE_PERIOD_INTERVAL=2s + +{% if qemu_mode is defined and qemu_mode %} +# v3 does not use filesystem notifications for config reloads. +Environment=GOTRUE_RELOADING_NOTIFY_ENABLED=false +{% else %} +# v2 currently relies on notify support, so we will enable it until both v2 / v3 +# have migrated to strictly use signals across all projects. The default is true +# in gotrue but we will set it defensively here. +Environment=GOTRUE_RELOADING_NOTIFY_ENABLED=true +{% endif %} + Slice=services.slice [Install] WantedBy=multi-user.target + diff --git a/ansible/files/postgres_prestart.sh.j2 b/ansible/files/postgres_prestart.sh.j2 index 3ffe54c85..cc8ff53c6 100644 --- a/ansible/files/postgres_prestart.sh.j2 +++ b/ansible/files/postgres_prestart.sh.j2 @@ -1,5 +1,11 @@ #!/bin/bash +set -x # Print commands + +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" +} + check_orioledb_enabled() { local pg_conf="/etc/postgresql/postgresql.conf" if [ ! -f "$pg_conf" ]; then @@ -26,7 +32,87 @@ update_orioledb_buffers() { fi } +check_extensions_file() { + local extensions_file="/etc/adminapi/pg-extensions.json" + if [ ! -f "$extensions_file" ]; then + log "extensions: No extensions file found, skipping extensions versions check" + return 0 #if file not found, skip + fi + if [ ! -r "$extensions_file" ]; then + log "extensions: Cannot read extensions file" + return 1 #a true error, we should be able to read file + fi + return 0 +} + +switch_extension_version() { + local extension_name="$1" + local version="$2" + + # Use BIN_PATH environment variable or default to /var/lib/postgresql/.nix-profile + : ${BIN_PATH:="/var/lib/postgresql/.nix-profile"} + + local switch_script="$BIN_PATH/bin/switch_${extension_name}_version" + + if [ ! 
-x "$switch_script" ]; then + log "$extension_name: No version switch script available at $switch_script, skipping" + return 0 + fi + + log "$extension_name: Switching to version $version" + # Run directly as root since we're already running as root + "$switch_script" "$version" + local exit_code=$? + if [ $exit_code -eq 0 ]; then + log "$extension_name: Version switch completed successfully" + else + log "$extension_name: Version switch failed with exit code $exit_code" + fi + return $exit_code +} + +handle_extension_versions() { + if ! check_extensions_file; then + return + fi + + local extensions_file="/etc/adminapi/pg-extensions.json" + + # Get all extension names from the JSON file + local extensions + extensions=$(jq -r 'keys[]' "$extensions_file" 2>/dev/null) + + if [ -z "$extensions" ]; then + log "extensions: No extensions found in configuration" + return + fi + + # Iterate through each extension + while IFS= read -r extension_name; do + # Get the version for this extension + local version + version=$(jq -r --arg ext "$extension_name" '.[$ext] // empty' "$extensions_file") + + if [ -z "$version" ]; then + log "$extension_name: No version specified, skipping" + continue + fi + + log "$extension_name: Found version $version in extensions file" + + # Don't fail if version switch fails - just log and continue + switch_extension_version "$extension_name" "$version" || log "$extension_name: Version switch failed but continuing" + + done <<< "$extensions" +} + main() { + log "Starting prestart script" + + # 1. Handle all extension versions from config file + handle_extension_versions + + # 2. orioledb handling local has_orioledb=$(check_orioledb_enabled) if [ "$has_orioledb" -lt 1 ]; then return 0 @@ -35,6 +121,8 @@ main() { if [ ! 
-z "$shared_buffers_value" ]; then update_orioledb_buffers "$shared_buffers_value" fi + + log "Prestart script completed" } # Initial locale setup @@ -46,4 +134,4 @@ if [ $(locale -a | grep -c en_US.utf8) -eq 0 ]; then locale-gen fi -main +main \ No newline at end of file diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 index f8c4aa0a2..a3456f699 100644 --- a/ansible/files/postgresql_config/supautils.conf.j2 +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -11,5 +11,5 @@ supautils.extension_custom_scripts_path = '/etc/postgresql-custom/extension-cust supautils.privileged_extensions_superuser = 'supabase_admin' supautils.privileged_role = 'postgres' supautils.privileged_role_allowed_configs = 'auto_explain.*, log_lock_waits, log_min_duration_statement, log_min_messages, log_replication_commands, log_statement, log_temp_files, pg_net.batch_size, pg_net.ttl, pg_stat_statements.*, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing, wal_compression' -supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, supabase_realtime_admin, supabase_replication_admin, dashboard_user, pgbouncer, authenticator' -supautils.reserved_roles = 'supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, supabase_realtime_admin, supabase_replication_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' +supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, 
supabase_realtime_admin, supabase_replication_admin, supabase_etl_admin, dashboard_user, pgbouncer, authenticator' +supautils.reserved_roles = 'supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, supabase_realtime_admin, supabase_replication_admin, supabase_etl_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' diff --git a/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service b/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service.j2 similarity index 62% rename from ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service rename to ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service.j2 index f368008b4..344fe5b53 100644 --- a/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service +++ b/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.service.j2 @@ -6,16 +6,21 @@ Requires=local-fs.target [Service] Type=oneshot +{% if qemu_mode is defined and qemu_mode %} +ExecStart=/opt/supabase-admin-agent/supabase-admin-agent --config /opt/supabase-admin-agent/config.yaml salt --apply --store-result --salt-archive configmanv3-main.tar.gz +User=root +Group=root +{% else %} ExecStart=/opt/supabase-admin-agent/supabase-admin-agent --config /opt/supabase-admin-agent/config.yaml salt --apply --store-result User=supabase-admin-agent Group=supabase-admin-agent +{% endif %} StandardOutput=journal StandardError=journal StateDirectory=supabase-admin-agent CacheDirectory=supabase-admin-agent - -# Security hardening -PrivateTmp=true +# set tempdir on permanent disk +Environment="TMPDIR=/var/tmp" [Install] WantedBy=multi-user.target diff --git a/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.timer.j2 b/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.timer.j2 index 1c59cdc7c..390e6da57 100644 --- 
a/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.timer.j2 +++ b/ansible/files/supabase_admin_agent_config/supabase-admin-agent_salt.timer.j2 @@ -3,11 +3,19 @@ Description=Run Supabase supabase-admin-agent salt on a schedule Requires=supabase-admin-agent_salt.service [Timer] +{# We're using a significantly lower frequency for triggering this agent on qemu images for the moment. Once we've performed additional validations re: the aggregate impact of running it more frequently, the frequency can be increased. #} +{% if qemu_mode is defined and qemu_mode %} +OnCalendar=*-*-* 0,6,12,18:00:00 +RandomizedDelaySec={{ supabase_admin_agent_splay }} +AccuracySec=1h +OnBootSec=5m +Persistent=true +{% else %} OnCalendar=*:0/10 -# Random delay up to {{ supabase_admin_agent_splay }} seconds splay RandomizedDelaySec={{ supabase_admin_agent_splay }} AccuracySec=1s Persistent=true +{% endif %} [Install] WantedBy=timers.target diff --git a/ansible/qemu-vars.yaml b/ansible/qemu-vars.yaml new file mode 100644 index 000000000..ad06a2a01 --- /dev/null +++ b/ansible/qemu-vars.yaml @@ -0,0 +1 @@ +supabase_admin_agent_splay_secs: 2h diff --git a/ansible/tasks/clean-build-dependencies.yml b/ansible/tasks/clean-build-dependencies.yml index 43ec05179..567398f5f 100644 --- a/ansible/tasks/clean-build-dependencies.yml +++ b/ansible/tasks/clean-build-dependencies.yml @@ -1,5 +1,6 @@ - name: Remove build dependencies - apt: + ansible.builtin.apt: + autoremove: true pkg: - bison - build-essential @@ -17,5 +18,4 @@ - ninja-build - patch - python2 - state: absent - autoremove: yes + state: 'absent' diff --git a/ansible/tasks/finalize-ami.yml b/ansible/tasks/finalize-ami.yml index 7f0de3ac8..1cc729fc0 100644 --- a/ansible/tasks/finalize-ami.yml +++ b/ansible/tasks/finalize-ami.yml @@ -1,81 +1,104 @@ - name: PG logging conf - template: - src: files/postgresql_config/postgresql-csvlog.conf - dest: /etc/postgresql/logging.conf - group: postgres + ansible.builtin.template: + dest: 
'/etc/postgresql/logging.conf' + group: 'postgres' + src: 'files/postgresql_config/postgresql-csvlog.conf' - name: UFW - Allow SSH connections - ufw: - rule: allow - name: OpenSSH + community.general.ufw: + name: 'OpenSSH' + rule: 'allow' -- name: UFW - Allow connections to postgreSQL (5432) - ufw: - rule: allow - port: "5432" +- name: UFW - Allow SSH/PostgreSQL connections + community.general.ufw: + port: '5432' + rule: 'allow' -- name: UFW - Allow connections to postgreSQL (6543) - ufw: - rule: allow - port: "6543" +- name: UFW - Allow PgBouncer connections + community.general.ufw: + port: '6543' + rule: 'allow' tags: - install-pgbouncer -- name: UFW - Allow connections to http (80) - ufw: - rule: allow - port: http - tags: - - install-supabase-internal - -- name: UFW - Allow connections to https (443) - ufw: - rule: allow - port: https +- name: UFW - Allow HTTP/HTTPS connections + community.general.ufw: + port: "{{ port_item }}" + rule: 'allow' + loop: + - 'http' + - 'https' + loop_control: + loop_var: 'port_item' tags: - - install-supabase-internal + - install-supabase-internal - name: UFW - Deny all other incoming traffic by default - ufw: - state: enabled - policy: deny - direction: incoming + community.general.ufw: + direction: 'incoming' + policy: 'deny' + state: 'enabled' - name: Move logrotate files to /etc/logrotate.d/ - copy: - src: "files/logrotate_config/{{ item.file }}" - dest: "/etc/logrotate.d/{{ item.file }}" - mode: "0700" - owner: root + ansible.builtin.copy: + dest: "/etc/logrotate.d/{{ logrotate_item['file'] }}" + mode: '0700' + owner: 'root' + src: "files/logrotate_config/{{ logrotate_item['file'] }}" loop: - - { file: "logrotate-postgres-csv.conf" } - - { file: "logrotate-postgres.conf" } - - { file: "logrotate-walg.conf" } - - { file: "logrotate-postgres-auth.conf" } + - { file: 'logrotate-postgres.conf' } + - { file: 'logrotate-postgres-auth.conf' } + - { file: 'logrotate-postgres-csv.conf' } + - { file: 'logrotate-walg.conf' } + 
loop_control: + loop_var: 'logrotate_item' -- name: Ensure default Postgres logrotate config is removed - file: - path: /etc/logrotate.d/postgresql-common - state: absent +- name: Ensure default PostgreSQL logrotate config is removed + ansible.builtin.file: + path: '/etc/logrotate.d/postgresql-common' + state: 'absent' - name: Disable cron access copy: - src: files/cron.deny - dest: /etc/cron.deny + dest: '/etc/cron.deny' + src: 'files/cron.deny' + +- name: Create logrotate.timer.d overrides dir + become: true + ansible.builtin.file: + group: 'root' + mode: '0755' + owner: 'root' + path: '/etc/systemd/system/logrotate.timer.d' + state: 'directory' + +- name: Configure logrotate.timer.d overrides + become: true + community.general.ini_file: + group: 'root' + mode: '0644' + no_extra_spaces: true + option: 'OnCalendar' + owner: 'root' + path: '/etc/systemd/system/logrotate.timer.d/override.conf' + section: 'Timer' + state: 'present' + value: '*:0/5' -- name: Configure logrotation to run every hour - shell: - cmd: | - cp /usr/lib/systemd/system/logrotate.timer /etc/systemd/system/logrotate.timer - sed -i -e 's;daily;*:0/5;' /etc/systemd/system/logrotate.timer - systemctl reenable logrotate.timer - become: yes +- name: Reload systemd and start logrotate timer + become: true + ansible.builtin.systemd_service: + daemon_reload: true + enabled: true + name: 'logrotate.timer' + state: 'restarted' - name: import pgsodium_getkey script - template: - src: files/pgsodium_getkey_readonly.sh.j2 + ansible.builtin.template: dest: "{{ pg_bindir }}/pgsodium_getkey.sh" - owner: postgres - group: postgres - mode: 0700 - when: debpkg_mode or stage2_nix + group: 'postgres' + mode: '0700' + owner: 'postgres' + src: 'files/pgsodium_getkey_readonly.sh.j2' + when: + - (debpkg_mode or stage2_nix) diff --git a/ansible/tasks/fix-ipv6-ndisc.yml b/ansible/tasks/fix-ipv6-ndisc.yml index 8953fd880..1ea01bfb4 100644 --- a/ansible/tasks/fix-ipv6-ndisc.yml +++ b/ansible/tasks/fix-ipv6-ndisc.yml @@ 
-1,33 +1,30 @@ --- -- name: fix Network - systemd timer file - copy: - dest: /etc/systemd/system/systemd-networkd-check-and-fix.timer - src: "files/systemd-networkd/systemd-networkd-check-and-fix.timer" - owner: root - group: root - mode: 0644 - -- name: fix Network - systemd service file - copy: - dest: /etc/systemd/system/systemd-networkd-check-and-fix.service - src: "files/systemd-networkd/systemd-networkd-check-and-fix.service" - owner: root - group: root - mode: 0644 +- name: fix Network - systemd timer and service file + ansible.builtin.copy: + dest: "/etc/systemd/system/systemd-networkd-check-and-fix.{{ network_item }}" + group: 'root' + mode: '0644' + owner: 'root' + src: "files/systemd-networkd/systemd-networkd-check-and-fix.{{ network_item }}" + loop: + - service + - timer + loop_control: + loop_var: 'network_item' - name: fix Network - detect script - copy: - dest: /usr/local/bin/systemd-networkd-check-and-fix.sh - src: "files/systemd-networkd/systemd-networkd-check-and-fix.sh" - owner: root - group: root - mode: 0700 + ansible.builtin.copy: + dest: '/usr/local/bin/systemd-networkd-check-and-fix.sh' + src: 'files/systemd-networkd/systemd-networkd-check-and-fix.sh' + owner: 'root' + group: 'root' + mode: '0700' - name: fix Network - reload systemd - systemd: + ansible.builtin.systemd_service: daemon_reload: false - name: fix Network - ensure systemd timer is installed but disabled - systemd: - name: systemd-networkd-check-and-fix.timer + ansible.builtin.systemd_service: + name: 'systemd-networkd-check-and-fix.timer' enabled: false diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml index 5844b3f54..1770433e4 100644 --- a/ansible/tasks/internal/admin-api.yml +++ b/ansible/tasks/internal/admin-api.yml @@ -21,7 +21,7 @@ copy: src: files/adminapi.sudoers.conf dest: /etc/sudoers.d/adminapi - mode: "0644" + mode: "0440" - name: perms for adminapi shell: | diff --git a/ansible/tasks/internal/supabase-admin-agent.yml 
b/ansible/tasks/internal/supabase-admin-agent.yml index ef5c948b5..0dfc4427a 100644 --- a/ansible/tasks/internal/supabase-admin-agent.yml +++ b/ansible/tasks/internal/supabase-admin-agent.yml @@ -29,7 +29,7 @@ copy: src: files/supabase_admin_agent_config/supabase-admin-agent.sudoers.conf dest: /etc/sudoers.d/supabase-admin-agent - mode: "0644" + mode: "0440" - name: Setting arch (x86) set_fact: @@ -41,6 +41,13 @@ arch: "arm64" when: platform == "arm64" +- name: install gpg explicitly for qemu artifacts + become: yes + apt: + pkg: + - gpg + when: qemu_mode is defined + - name: Download supabase-admin-agent archive get_url: url: "https://supabase-public-artifacts-bucket.s3.amazonaws.com/supabase-admin-agent/v{{ supabase_admin_agent_release }}/supabase-admin-agent-{{ supabase_admin_agent_release }}-linux-{{ arch }}.tar.gz" @@ -71,8 +78,8 @@ dest: /etc/systemd/system/supabase-admin-agent_salt.timer - name: supabase-admin-agent - create salt service file - copy: - src: files/supabase_admin_agent_config/supabase-admin-agent_salt.service + template: + src: files/supabase_admin_agent_config/supabase-admin-agent_salt.service.j2 dest: /etc/systemd/system/supabase-admin-agent_salt.service - name: supabase-admin-agent - reload systemd diff --git a/ansible/tasks/setup-docker.yml b/ansible/tasks/setup-docker.yml index 7b37f70cc..6a3a12cc4 100644 --- a/ansible/tasks/setup-docker.yml +++ b/ansible/tasks/setup-docker.yml @@ -1,80 +1,87 @@ -- name: Copy extension packages - copy: - src: files/extensions/ - dest: /tmp/extensions/ - when: debpkg_mode +- name: debpkg_mode actions + when: + - debpkg_mode + block: + - name: Copy extension packages + ansible.builtin.copy: + dest: '/tmp/extensions/' + src: 'files/extensions/' -# Builtin apt module does not support wildcard for deb paths -- name: Install extensions - shell: | - set -e - apt-get update - apt-get install -y --no-install-recommends /tmp/extensions/*.deb - when: debpkg_mode + - name: Install extensions + 
ansible.builtin.apt: + deb: "{{ deb_item }}" + force_apt_get: true + install_recommends: false + state: 'present' + update_cache: true + loop_control: + loop_var: 'deb_item' + with_fileglob: + - '/tmp/extensions/*.deb' -- name: pgsodium - determine postgres bin directory - shell: pg_config --bindir - register: pg_bindir_output - when: debpkg_mode - -- set_fact: - pg_bindir: "{{ pg_bindir_output.stdout }}" - when: debpkg_mode + - name: pgsodium - determine PostgreSQL bin directory + ansible.builtin.command: + cmd: pg_config --bindir + changed_when: false + register: 'pg_bindir_output' + + - name: store the PostgreSQL bin dir as a fact + ansible.builtin.set_fact: + pg_bindir: "{{ pg_bindir_output['stdout'] }}" -- name: pgsodium - set pgsodium.getkey_script - become: yes - lineinfile: - path: /etc/postgresql/postgresql.conf - state: present - # script is expected to be placed by finalization tasks for different target platforms - line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' - when: debpkg_mode + - name: pgsodium - set pgsodium.getkey_script + ansible.builtin.lineinfile: + path: '/etc/postgresql/postgresql.conf' + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + state: 'present' + become: true -# supautils -- name: supautils - add supautils to session_preload_libraries - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: "#session_preload_libraries = ''" - replace: session_preload_libraries = 'supautils' - when: debpkg_mode or stage2_nix +- name: debpkg_mode or stage2_nix actions + when: + - (debpkg_mode or stage2_nix) + block: + # supautils + - name: supautils - add supautils to session_preload_libraries + ansible.builtin.replace: + path: '/etc/postgresql/postgresql.conf' + regexp: "#session_preload_libraries = ''" + replace: "session_preload_libraries = 'supautils'" + become: true -- name: supautils - write custom 
supautils.conf - template: - src: "files/postgresql_config/supautils.conf.j2" - dest: /etc/postgresql-custom/supautils.conf - mode: 0664 - owner: postgres - group: postgres - when: debpkg_mode or stage2_nix + - name: supautils - write custom supautils.conf + ansible.builtin.template: + dest: '/etc/postgresql-custom/supautils.conf' + mode: '0664' + group: 'postgres' + owner: 'postgres' + src: 'files/postgresql_config/supautils.conf.j2' -- name: supautils - copy extension custom scripts - copy: - src: files/postgresql_extension_custom_scripts/ - dest: /etc/postgresql-custom/extension-custom-scripts - become: yes - when: debpkg_mode or stage2_nix + - name: supautils - copy extension custom scripts + ansible.builtin.copy: + dest: '/etc/postgresql-custom/extension-custom-scripts' + src: 'files/postgresql_extension_custom_scripts/' + become: true -- name: supautils - chown extension custom scripts - file: - mode: 0775 - owner: postgres - group: postgres - path: /etc/postgresql-custom/extension-custom-scripts - recurse: yes - become: yes - when: debpkg_mode or stage2_nix + - name: supautils - chown extension custom scripts + ansible.builtin.file: + group: 'postgres' + mode: '0775' + owner: 'postgres' + path: '/etc/postgresql-custom/extension-custom-scripts' + recurse: true + become: true -- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf - become: yes - replace: - path: /etc/postgresql/postgresql.conf - regexp: "#include = '/etc/postgresql-custom/supautils.conf'" - replace: "include = '/etc/postgresql-custom/supautils.conf'" - when: debpkg_mode or stage2_nix + - name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + ansible.builtin.replace: + path: '/etc/postgresql/postgresql.conf' + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + become: true - name: Cleanup - extension packages - file: - path: /tmp/extensions - state: absent - 
when: debpkg_mode + ansible.builtin.file: + path: '/tmp/extensions' + state: 'absent' + when: + - debpkg_mode diff --git a/ansible/tasks/setup-envoy.yml b/ansible/tasks/setup-envoy.yml index 9843b5546..1552393e2 100644 --- a/ansible/tasks/setup-envoy.yml +++ b/ansible/tasks/setup-envoy.yml @@ -1,60 +1,61 @@ - name: Envoy - system user ansible.builtin.user: - name: envoy + name: 'envoy' + state: 'present' - name: Envoy - download binary ansible.builtin.get_url: checksum: "{{ envoy_release_checksum }}" - dest: /opt/envoy - group: envoy - mode: u+x - owner: envoy + dest: '/opt/envoy' + group: 'envoy' + mode: '0700' + owner: 'envoy' # yamllint disable-line rule:line-length url: "https://github.com/envoyproxy/envoy/releases/download/v{{ envoy_release }}/envoy-{{ envoy_release }}-linux-aarch_64" - name: Envoy - download hot restarter script ansible.builtin.get_url: checksum: "{{ envoy_hot_restarter_release_checksum }}" - dest: /opt/envoy-hot-restarter.py - group: envoy - mode: u+x - owner: envoy + dest: '/opt/envoy-hot-restarter.py' + group: 'envoy' + mode: '0700' + owner: 'envoy' # yamllint disable-line rule:line-length - url: https://raw.githubusercontent.com/envoyproxy/envoy/v{{ envoy_release }}/restarter/hot-restarter.py + url: "https://raw.githubusercontent.com/envoyproxy/envoy/v{{ envoy_release }}/restarter/hot-restarter.py" - name: Envoy - bump up ulimit community.general.pam_limits: - domain: envoy - limit_item: nofile - limit_type: soft - value: 4096 + domain: 'envoy' + limit_item: 'nofile' + limit_type: 'soft' + value: '4096' - name: Envoy - create script to start envoy ansible.builtin.copy: - dest: /opt/start-envoy.sh - group: envoy - mode: u+x - owner: envoy - src: files/start-envoy.sh + dest: '/opt/start-envoy.sh' + group: 'envoy' + mode: '0700' + owner: 'envoy' + src: 'files/start-envoy.sh' - name: Envoy - create configuration files ansible.builtin.copy: - dest: /etc/envoy/ - directory_mode: 
u=rwx,g=rwx,o=rx - group: envoy - mode: u=rw,g=rw,o=r - owner: envoy - src: files/envoy_config/ + dest: '/etc/envoy/' + directory_mode: '0775' + group: 'envoy' + mode: '0664' + owner: 'envoy' + src: 'files/envoy_config/' - name: Envoy - create service file ansible.builtin.copy: - dest: /etc/systemd/system/envoy.service - mode: u=rw,g=r,o=r - src: files/envoy.service + dest: '/etc/systemd/system/envoy.service' + mode: '0644' + src: 'files/envoy.service' - name: Envoy - disable service - ansible.builtin.systemd: + ansible.builtin.systemd_service: daemon_reload: true enabled: false - name: envoy - state: stopped + name: 'envoy' + state: 'stopped' diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml index 7d9088d46..89c336029 100644 --- a/ansible/tasks/setup-fail2ban.yml +++ b/ansible/tasks/setup-fail2ban.yml @@ -1,73 +1,70 @@ # set default bantime to 1 hour -- name: extend bantime - become: yes - replace: - path: /etc/fail2ban/jail.conf - regexp: bantime = 10m - replace: bantime = 3600 - when: debpkg_mode or nixpkg_mode +- name: do debpkg_mode or nixpkg_mode tasks + when: + - (debpkg_mode or nixpkg_mode) + block: + - name: extend the default bantime to an hour + become: true + ansible.builtin.replace: + path: '/etc/fail2ban/jail.conf' + regexp: 'bantime = 10m' + replace: 'bantime = 3600' -- name: Configure journald - copy: - src: files/fail2ban_config/jail-ssh.conf - dest: /etc/fail2ban/jail.d/sshd.local - when: debpkg_mode or nixpkg_mode + - name: configure journald + ansible.builtin.copy: + dest: '/etc/fail2ban/jail.d/sshd.local' + src: 'files/fail2ban_config/jail-ssh.conf' -- name: configure fail2ban to use nftables - copy: - src: files/fail2ban_config/jail.local - dest: /etc/fail2ban/jail.local - when: debpkg_mode or nixpkg_mode + - name: configure fail2ban to use nftables + ansible.builtin.copy: + dest: '/etc/fail2ban/jail.local' + src: 'files/fail2ban_config/jail.local' -# postgresql -- name: import jail.d/postgresql.conf - template: 
- src: files/fail2ban_config/jail-postgresql.conf.j2 - dest: /etc/fail2ban/jail.d/postgresql.conf - become: yes - when: debpkg_mode or nixpkg_mode + # postgresql + - name: import jail.d/postgresql.conf + ansible.builtin.template: + dest: '/etc/fail2ban/jail.d/postgresql.conf' + src: 'files/fail2ban_config/jail-postgresql.conf.j2' + become: true -- name: import filter.d/postgresql.conf - template: - src: files/fail2ban_config/filter-postgresql.conf.j2 - dest: /etc/fail2ban/filter.d/postgresql.conf - become: yes - when: debpkg_mode or nixpkg_mode + - name: import filter.d/postgresql.conf + ansible.builtin.template: + dest: '/etc/fail2ban/filter.d/postgresql.conf' + src: 'files/fail2ban_config/filter-postgresql.conf.j2' + become: true -- name: create overrides dir - file: - state: directory - owner: root - group: root - path: /etc/systemd/system/fail2ban.service.d - mode: '0700' - when: debpkg_mode or nixpkg_mode + - name: create overrides dir + ansible.builtin.file: + group: 'root' + mode: '0700' + owner: 'root' + path: '/etc/systemd/system/fail2ban.service.d' + state: 'directory' -- name: Custom systemd overrides - copy: - src: files/fail2ban_config/fail2ban.service.conf - dest: /etc/systemd/system/fail2ban.service.d/overrides.conf - when: debpkg_mode or nixpkg_mode + - name: custom systemd overrides + ansible.builtin.copy: + dest: '/etc/systemd/system/fail2ban.service.d/overrides.conf' + src: 'files/fail2ban_config/fail2ban.service.conf' -- name: add in supabase specific ignore filters - lineinfile: - path: /etc/fail2ban/filter.d/postgresql.conf - state: present - line: "{{ item.line }}" - loop: - - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_admin".*$' } - - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_auth_admin".*$' } - - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_storage_admin".*$' } - - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user 
""authenticator".*$' } - - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$' } - become: yes - tags: - - install-supabase-internal - when: debpkg_mode or nixpkg_mode + - name: add in supabase specific ignore filters + ansible.builtin.lineinfile: + line: "{{ ignore_item['line'] }}" + path: /etc/fail2ban/filter.d/postgresql.conf + state: present + become: true + loop: + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_auth_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_storage_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$' } + loop_control: + loop_var: 'ignore_item' + tags: + - install-supabase-internal -- name: fail2ban - disable service - systemd: - name: fail2ban - enabled: no - daemon_reload: yes - when: debpkg_mode or nixpkg_mode + - name: fail2ban - disable service + ansible.builtin.systemd_service: + daemon_reload: true + enabled: false + name: 'fail2ban' diff --git a/ansible/tasks/setup-gotrue.yml b/ansible/tasks/setup-gotrue.yml index 786f9a767..70bbbf85f 100644 --- a/ansible/tasks/setup-gotrue.yml +++ b/ansible/tasks/setup-gotrue.yml @@ -1,48 +1,47 @@ - name: UFW - Allow connections to GoTrue metrics exporter - ufw: - rule: allow - port: "9122" + community.general.ufw: + port: '9122' + rule: 'allow' # use this user for the Gotrue build and for running the service - name: Gotrue - system user - user: name=gotrue + ansible.builtin.user: + name: 'gotrue' + state: 'present' -- name: Setting arch (x86) - set_fact: - arch: "x86" - when: platform == "amd64" - -- name: Setting arch (arm) - set_fact: - arch: "arm64" - when: platform == "arm64" +- name: Setting arch as a fact + ansible.builtin.set_fact: + arch: >- + 
{%- if platform == 'amd64' -%} + x86 + {%- elif platform == 'arm64' -%} + arm64 + {%- endif -%} - name: gotrue - download commit archive - get_url: - url: "https://github.com/supabase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-{{ arch }}.tar.gz" - dest: /tmp/gotrue.tar.gz + ansible.builtin.get_url: checksum: "{{ gotrue_release_checksum }}" + dest: '/tmp/gotrue.tar.gz' + url: "https://github.com/supabase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-{{ arch }}.tar.gz" -- name: gotrue - create /opt/gotrue - file: - path: /opt/gotrue - state: directory - owner: gotrue - mode: 0775 - -- name: gotrue - create /etc/auth.d - file: - path: /etc/auth.d - state: directory - owner: gotrue - mode: 0775 +- name: gotrue - create /opt/gotrue and /etc/auth.d + ansible.builtin.file: + mode: '0775' + owner: 'gotrue' + path: "{{ gotrue_dir_item }}" + state: 'directory' + loop: + - '/etc/auth.d' + - '/opt/gotrue' + loop_control: + loop_var: 'gotrue_dir_item' - name: gotrue - unpack archive in /opt/gotrue - unarchive: - remote_src: yes - src: /tmp/gotrue.tar.gz - dest: /opt/gotrue - owner: gotrue + ansible.builtin.unarchive: + dest: '/opt/gotrue' + owner: 'gotrue' + remote_src: true + src: '/tmp/gotrue.tar.gz' # libpq is a C library that enables user programs to communicate with # the PostgreSQL database server. 
@@ -52,15 +51,15 @@ # - libpq-dev - name: gotrue - create service file - template: - src: files/gotrue.service.j2 - dest: /etc/systemd/system/gotrue.service + ansible.builtin.template: + dest: '/etc/systemd/system/gotrue.service' + src: 'files/gotrue.service.j2' - name: gotrue - create optimizations file - template: - src: files/gotrue-optimizations.service.j2 - dest: /etc/systemd/system/gotrue-optimizations.service + ansible.builtin.template: + dest: '/etc/systemd/system/gotrue-optimizations.service' + src: 'files/gotrue-optimizations.service.j2' - name: gotrue - reload systemd - systemd: - daemon_reload: yes + ansible.builtin.systemd_service: + daemon_reload: true diff --git a/ansible/tasks/setup-supabase-internal.yml b/ansible/tasks/setup-supabase-internal.yml index d5583b597..d63f3abad 100644 --- a/ansible/tasks/setup-supabase-internal.yml +++ b/ansible/tasks/setup-supabase-internal.yml @@ -29,6 +29,12 @@ shell: "/tmp/aws/install --update" become: true +- name: install utilities to manage Amazon EC2 instance storage + become: true + apt: + pkg: + - amazon-ec2-utils + - name: AWS CLI - configure ipv6 support for s3 shell: | aws configure set default.s3.use_dualstack_endpoint true diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index c1285bf6c..1f8abec62 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -53,6 +53,13 @@ update_cache: yes when: debpkg_mode or nixpkg_mode +- name: Install other useful tools + apt: + pkg: + - less + update_cache: yes + when: qemu_mode is defined + - name: Configure sysstat copy: src: files/sysstat.sysstat @@ -146,6 +153,16 @@ group: root when: debpkg_mode or nixpkg_mode +- name: configure systemd's pager + copy: + content: | + export SYSTEMD_LESS=FRXMK + dest: /etc/profile.d/10-systemd-pager.sh + mode: 0644 + owner: root + group: root + when: debpkg_mode or nixpkg_mode + - name: set hosts file copy: content: | diff --git a/ansible/tasks/stage2-setup-postgres.yml 
b/ansible/tasks/stage2-setup-postgres.yml index d3209fc04..74da50a57 100644 --- a/ansible/tasks/stage2-setup-postgres.yml +++ b/ansible/tasks/stage2-setup-postgres.yml @@ -217,19 +217,8 @@ recurse: yes when: stage2_nix -- name: Check psql_version and run postgis linking if not oriole-xx - block: - - name: Check if psql_version is psql_orioledb-17 - set_fact: - is_psql_oriole: "{{ psql_version == 'psql_orioledb-17' }}" - - - name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir - shell: > - sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ - sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ - && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" - become: yes - when: stage2_nix and not is_psql_oriole +# PostGIS contrib linking removed - PostGIS doesn't install to contrib directory +# It installs extensions to /share/postgresql/extension/ which is already linked above - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets shell: >- diff --git a/ansible/vars.yml b/ansible/vars.yml index fcbe8eb91..2fc0ff36f 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -1,3 +1,4 @@ +--- supabase_internal: true ebssurrogate_mode: true async_mode: true @@ -5,33 +6,33 @@ async_mode: true postgres_major: - "15" - "17" - - "orioledb-17" + - orioledb-17 # Full version strings for each major version postgres_release: - postgresorioledb-17: "17.5.1.011-orioledb" - postgres17: "17.4.1.068" - postgres15: "15.8.1.125" + postgresorioledb-17: 17.5.1.034-orioledb + postgres17: 17.6.1.013 + postgres15: 15.14.1.013 # Non Postgres Extensions -pgbouncer_release: "1.19.0" +pgbouncer_release: 1.19.0 pgbouncer_release_checksum: 
sha256:af0b05e97d0e1fd9ad45fe00ea6d2a934c63075f67f7e2ccef2ca59e3d8ce682 # The checksum can be found under "Assets", in the GitHub release page for each version. # The binaries used are: ubuntu-aarch64 and linux-static. # https://github.com/PostgREST/postgrest/releases -postgrest_release: "13.0.4" -postgrest_arm_release_checksum: sha256:2b400200fb15eb5849267e4375fbbc516dd727afadd8786815b48074ed8c03e1 -postgrest_x86_release_checksum: sha256:a0052c8d4726f52349e0298f98da51140ef4941855548590ee88331afa617811 +postgrest_release: 13.0.5 +postgrest_arm_release_checksum: sha256:7b4eafdaf76bc43b57f603109d460a838f89f949adccd02f452ca339f9a0a0d4 +postgrest_x86_release_checksum: sha256:05be2bd48abee6c1691fc7c5d005023466c6989e41a4fc7d1302b8212adb88b5 -gotrue_release: 2.177.0 -gotrue_release_checksum: sha1:664a26237618c4bfb1e33e4f03a540c3cef3e3c8 +gotrue_release: 2.179.0 +gotrue_release_checksum: sha1:e985fce00b2720b747e6a04420910015c4967121 -aws_cli_release: "2.23.11" +aws_cli_release: 2.23.11 salt_minion_version: 3007 -golang_version: "1.22.11" +golang_version: 1.22.11 golang_version_checksum: arm64: sha256:0fc88d966d33896384fbde56e9a8d80a305dc17a9f48f1832e061724b1719991 amd64: sha256:9ebfcab26801fa4cf0627c6439db7a4da4d3c6766142a3dd83508240e4f21031 @@ -52,10 +53,10 @@ postgres_exporter_release_checksum: arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 -adminapi_release: 0.84.1 -adminmgr_release: 0.25.1 +adminapi_release: "0.92.1" +adminmgr_release: "0.32.1" supabase_admin_agent_release: 1.4.38 -supabase_admin_agent_splay: 30 +supabase_admin_agent_splay: 30s -vector_x86_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_amd64.deb" -vector_arm_deb: "https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_arm64.deb" +vector_x86_deb: 
https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_amd64.deb +vector_arm_deb: https://packages.timber.io/vector/0.48.X/vector_0.48.0-1_arm64.deb diff --git a/ebssurrogate/scripts/qemu-bootstrap-nix.sh b/ebssurrogate/scripts/qemu-bootstrap-nix.sh index b52b85789..0b21959c3 100755 --- a/ebssurrogate/scripts/qemu-bootstrap-nix.sh +++ b/ebssurrogate/scripts/qemu-bootstrap-nix.sh @@ -22,7 +22,7 @@ function waitfor_boot_finished { } function install_packages { - apt-get update && sudo apt-get install software-properties-common e2fsprogs nfs-common -y + apt-get update && sudo apt-get install software-properties-common e2fsprogs nfs-common locales iptables arptables ebtables ufw logrotate -y add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y ansible-galaxy collection install community.general } @@ -39,7 +39,8 @@ EOF --extra-vars "postgresql_version=postgresql_${POSTGRES_MAJOR_VERSION}" \ --extra-vars "postgresql_major_version=${POSTGRES_MAJOR_VERSION}" \ --extra-vars "postgresql_major=${POSTGRES_MAJOR_VERSION}" \ - --extra-vars "psql_version=psql_${POSTGRES_MAJOR_VERSION}" + --extra-vars "psql_version=psql_${POSTGRES_MAJOR_VERSION}" \ + --extra-vars @./ansible/qemu-vars.yaml } function setup_postgesql_env { @@ -101,7 +102,8 @@ EOF --extra-vars "postgresql_version=postgresql_${POSTGRES_MAJOR_VERSION}" \ --extra-vars "postgresql_major_version=${POSTGRES_MAJOR_VERSION}" \ --extra-vars "postgresql_major=${POSTGRES_MAJOR_VERSION}" \ - --extra-vars "psql_version=psql_${POSTGRES_MAJOR_VERSION}" + --extra-vars "psql_version=psql_${POSTGRES_MAJOR_VERSION}" \ + --extra-vars @./ansible/qemu-vars.yaml } function clean_legacy_things { diff --git a/flake.lock b/flake.lock index 8b6d868bf..9d2865e1d 100644 --- a/flake.lock +++ b/flake.lock @@ -208,6 +208,22 @@ "type": "github" } }, + "nixpkgs-go124": { + "locked": { + "lastModified": 1754085309, + "narHash": 
"sha256-3RTSdhnqTcxS5wjKNEBpbt0hiSKfBZiQPlWHn90N1qQ=", + "owner": "Nixos", + "repo": "nixpkgs", + "rev": "d2ac4dfa61fba987a84a0a81555da57ae0b9a2b0", + "type": "github" + }, + "original": { + "owner": "Nixos", + "repo": "nixpkgs", + "rev": "d2ac4dfa61fba987a84a0a81555da57ae0b9a2b0", + "type": "github" + } + }, "nixpkgs-lib": { "locked": { "lastModified": 1750555020, @@ -295,6 +311,7 @@ "nix-fast-build": "nix-fast-build", "nix2container": "nix2container", "nixpkgs": "nixpkgs_4", + "nixpkgs-go124": "nixpkgs-go124", "rust-overlay": "rust-overlay", "treefmt-nix": "treefmt-nix_2" } diff --git a/flake.nix b/flake.nix index 3aa651b09..db14dac9a 100644 --- a/flake.nix +++ b/flake.nix @@ -13,6 +13,7 @@ treefmt-nix.inputs.nixpkgs.follows = "nixpkgs"; git-hooks.url = "github:cachix/git-hooks.nix"; git-hooks.inputs.nixpkgs.follows = "nixpkgs"; + nixpkgs-go124.url = "github:Nixos/nixpkgs/d2ac4dfa61fba987a84a0a81555da57ae0b9a2b0"; }; outputs = diff --git a/migrations/db/init-scripts/00000000000000-initial-schema.sql b/migrations/db/init-scripts/00000000000000-initial-schema.sql index 272c989c0..2406a7485 100644 --- a/migrations/db/init-scripts/00000000000000-initial-schema.sql +++ b/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -10,6 +10,11 @@ alter user supabase_admin with superuser createdb createrole replication bypass -- Supabase replication user create user supabase_replication_admin with login replication; +-- Supabase etl user +create user supabase_etl_admin with login replication; +grant pg_read_all_data to supabase_etl_admin; +grant create on database postgres to supabase_etl_admin; + -- Supabase read-only user create role supabase_read_only_user with login bypassrls; grant pg_read_all_data to supabase_read_only_user; diff --git a/migrations/schema-15.sql b/migrations/schema-15.sql index 00e62c8eb..1f1d98496 100644 --- a/migrations/schema-15.sql +++ b/migrations/schema-15.sql @@ -2,8 +2,10 @@ -- PostgreSQL database dump -- --- Dumped from database 
version 15.8 --- Dumped by pg_dump version 15.8 +\restrict SupabaseTestDumpKey123 + +-- Dumped from database version 15.14 +-- Dumped by pg_dump version 15.14 SET statement_timeout = 0; SET lock_timeout = 0; @@ -997,3 +999,5 @@ CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop -- PostgreSQL database dump complete -- +\unrestrict SupabaseTestDumpKey123 + diff --git a/migrations/schema-17.sql b/migrations/schema-17.sql index f120b1b27..e0b353e9d 100644 --- a/migrations/schema-17.sql +++ b/migrations/schema-17.sql @@ -2,8 +2,10 @@ -- PostgreSQL database dump -- --- Dumped from database version 17.4 --- Dumped by pg_dump version 17.4 +\restrict SupabaseTestDumpKey123 + +-- Dumped from database version 17.6 +-- Dumped by pg_dump version 17.6 SET statement_timeout = 0; SET lock_timeout = 0; @@ -998,3 +1000,5 @@ CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop -- PostgreSQL database dump complete -- +\unrestrict SupabaseTestDumpKey123 + diff --git a/nix/checks.nix b/nix/checks.nix index 13be10f23..74b50a84f 100644 --- a/nix/checks.nix +++ b/nix/checks.nix @@ -27,7 +27,7 @@ pgpkg: let pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; - pg_regress = self'.packages.pg_regress; + inherit (self'.packages) pg_regress; getkey-script = pkgs.stdenv.mkDerivation { name = "pgsodium-getkey"; buildCommand = '' @@ -163,11 +163,43 @@ which getkey-script supabase-groonga + python3 + netcat ]; } '' set -e + # Start HTTP mock server for http extension tests + # Use a build-specific directory for coordination + BUILD_TMP=$(mktemp -d) + HTTP_MOCK_PORT_FILE="$BUILD_TMP/http-mock-port" + + echo "Starting HTTP mock server (will find free port)..." + HTTP_MOCK_PORT_FILE="$HTTP_MOCK_PORT_FILE" ${pkgs.python3}/bin/python3 ${./tests/http-mock-server.py} & + HTTP_MOCK_PID=$! 
+ + # Clean up on exit + trap "kill $HTTP_MOCK_PID 2>/dev/null || true; rm -rf '$BUILD_TMP'" EXIT + + # Wait for server to start and write port file + for i in {1..10}; do + if [ -f "$HTTP_MOCK_PORT_FILE" ]; then + HTTP_MOCK_PORT=$(cat "$HTTP_MOCK_PORT_FILE") + echo "HTTP mock server started on port $HTTP_MOCK_PORT" + break + fi + sleep 1 + done + + if [ ! -f "$HTTP_MOCK_PORT_FILE" ]; then + echo "Failed to start HTTP mock server" + exit 1 + fi + + # Export the port for use in SQL tests + export HTTP_MOCK_PORT + #First we need to create a generic pg cluster for pgtap tests and run those export GRN_PLUGINS_DIR=${pkgs.supabase-groonga}/lib/groonga/plugins PGTAP_CLUSTER=$(mktemp -d) @@ -228,6 +260,13 @@ pg_ctl -D "$PGTAP_CLUSTER" stop exit 1 fi + + # Create a table to store test configuration + psql -p ${pgPort} -h ${self.supabase.defaults.host} --username=supabase_admin -d testing -c " + CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); + INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; + " SORTED_DIR=$(mktemp -d) for t in $(printf "%s\n" ${builtins.concatStringsSep " " sortedTestList}); do psql -p ${pgPort} -h ${self.supabase.defaults.host} --username=supabase_admin -d testing -f "${./tests/sql}/$t.sql" || true @@ -261,6 +300,13 @@ exit 1 fi + # Create a table to store test configuration for pg_regress tests + psql -p ${pgPort} -h ${self.supabase.defaults.host} --no-password --username=supabase_admin -d postgres -c " + CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); + INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; + " + mkdir -p $out/regression_output if ! 
pg_regress \ --use-existing \ @@ -294,6 +340,7 @@ wal-g-2 wal-g-3 dbmate-tool + packer pg_regress ; } @@ -307,12 +354,14 @@ postgresql_17_src ; } - // pkgs.lib.optionalAttrs (system == "x86_64-linux") { - wrappers = import ./ext/tests/wrappers.nix { + // pkgs.lib.optionalAttrs (system == "x86_64-linux") ( + { + devShell = self'.devShells.default; + } + // (import ./ext/tests { inherit self; inherit pkgs; - }; - devShell = self'.devShells.default; - }; + }) + ); }; } diff --git a/nix/config.nix b/nix/config.nix index 267ec18a8..4076f0cea 100644 --- a/nix/config.nix +++ b/nix/config.nix @@ -42,12 +42,12 @@ in supportedPostgresVersions = { postgres = { "15" = { - version = "15.8"; - hash = "sha256-RANRX5pp7rPv68mPMLjGlhIr/fiV6Ss7I/W452nty2o="; + version = "15.14"; + hash = "sha256-Bt110wXNOHDuYrOTLmYcYkVD6vmuK6N83sCk+O3QUdI="; }; "17" = { - version = "17.4"; - hash = "sha256-xGBbc/6hGWNAZpn5Sblm5dFzp+4Myu+JON7AyoqZX+c="; + version = "17.6"; + hash = "sha256-4GMKNgCuonURcVVjJZ7CERzV9DU6SwQOC+gn+UzXqLA="; }; }; orioledb = { diff --git a/nix/devShells.nix b/nix/devShells.nix index 7ff4d0720..03768a770 100644 --- a/nix/devShells.nix +++ b/nix/devShells.nix @@ -45,9 +45,7 @@ shellcheck ansible ansible-lint - (packer.overrideAttrs (_oldAttrs: { - version = "1.7.8"; - })) + self'.packages.packer self'.packages.start-server self'.packages.start-client diff --git a/nix/docs/updating-dependencies.md b/nix/docs/updating-dependencies.md new file mode 100644 index 000000000..73c5cf79a --- /dev/null +++ b/nix/docs/updating-dependencies.md @@ -0,0 +1,68 @@ +# Updating Dependencies + +This document explains how to update various dependencies used in the nix configuration. + +## Updating Packer + +Packer is used for creating machine images and is defined in `nix/packages/packer.nix`. + +### Steps to update Packer version: + +1. Create a branch off of `develop` +2. Navigate to `nix/packages/packer.nix` +3. 
Update the version field: + ```nix + version = "1.15.0"; # Update to desired version + ``` +4. Update the git revision to match the new version: + ```nix + rev = "v${version}"; + ``` +5. Temporarily clear the hash to get the new SHA256: + ```nix + hash = ""; # Clear this temporarily + ``` +6. Save the file and run: + ```bash + nix build .#packer + ``` +7. Nix will fail and output the correct SHA256 hash. Copy this hash and update the file: + ```nix + hash = "sha256-NEWHASHHEREFROMBUILDOUTPUT"; + ``` +8. Update the vendorHash if needed. If the build fails due to vendor hash mismatch, temporarily set: + ```nix + vendorHash = ""; # Clear this temporarily + ``` +9. Run `nix build .#packer` again to get the correct vendorHash, then update: + ```nix + vendorHash = "sha256-NEWVENDORHASHHEREFROMBUILDOUTPUT"; + ``` +10. Verify the build works: + ```bash + nix build .#packer + ``` +11. Test the packer binary: + ```bash + ./result/bin/packer version + ``` +12. Run the full test suite to ensure nothing is broken: + ```bash + nix flake check -L + ``` +13. Commit your changes and create a PR for review +14. Update any CI/CD workflows or documentation that reference the old Packer version + +### Notes: +- Always check the [Packer changelog](https://github.com/hashicorp/packer/releases) for breaking changes +- Packer uses Go, so ensure compatibility with the Go version specified in the flake inputs +- The current Go version is specified in `flake.nix` under `nixpkgs-go124` input +- If updating to a major version, test all packer templates (`.pkr.hcl` files) in the repository + +## Updating Other Dependencies + +Similar patterns can be followed for other dependencies defined in the nix packages. Always: +1. Check for breaking changes in changelogs +2. Update version numbers and hashes +3. Run local tests +4. 
Verify functionality before creating PR \ No newline at end of file diff --git a/nix/ext/hypopg.nix b/nix/ext/hypopg.nix index 1e38b11c6..28e847d46 100644 --- a/nix/ext/hypopg.nix +++ b/nix/ext/hypopg.nix @@ -1,35 +1,95 @@ { lib, stdenv, + buildEnv, fetchFromGitHub, postgresql, }: -stdenv.mkDerivation rec { +let pname = "hypopg"; - version = "1.4.1"; + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; - buildInputs = [ postgresql ]; + buildInputs = [ postgresql ]; - src = fetchFromGitHub { - owner = "HypoPG"; - repo = pname; - rev = "refs/tags/${version}"; - hash = "sha256-88uKPSnITRZ2VkelI56jZ9GWazG/Rn39QlyHKJKSKMM="; - }; + src = fetchFromGitHub { + owner = "HypoPG"; + repo = pname; + rev = "refs/tags/${version}"; + inherit hash; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + mv ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + create_sql_files() { + echo "Creating SQL files for previous versions..." 
+ if [[ "${version}" == "${latestVersion}" ]]; then + cp *.sql $out/share/postgresql/extension + fi + } + + create_control_files() { + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + + if [[ "${version}" == "${latestVersion}" ]]; then + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + fi + } - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + create_sql_files + create_control_files + ''; - cp *${postgresql.dlSuffix} $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension + meta = with lib; { + description = "Hypothetical Indexes for PostgreSQL"; + homepage = "https://github.com/HypoPG/${pname}"; + license = licenses.postgresql; + inherit (postgresql.meta) platforms; + }; + }; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); +in +buildEnv { + name = pname; + paths = packages; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; + postBuild = '' + # checks + (set -x + test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + 1) + }" + ) ''; - meta = with lib; { - description = "Hypothetical Indexes for PostgreSQL"; - homepage = "https://github.com/HypoPG/${pname}"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/index_advisor.nix b/nix/ext/index_advisor.nix index 19777f39e..9e6a54aea 100644 --- a/nix/ext/index_advisor.nix +++ b/nix/ext/index_advisor.nix @@ -1,34 +1,86 @@ { + pkgs, lib, stdenv, fetchFromGitHub, postgresql, + callPackage, }: -stdenv.mkDerivation rec { +let pname = "index_advisor"; - version = "0.2.0"; + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; - buildInputs = [ postgresql ]; + buildInputs = [ postgresql ]; - src = fetchFromGitHub { - owner = "olirice"; - repo = pname; - rev = "v${version}"; - hash = "sha256-G0eQk2bY5CNPMeokN/nb05g03CuiplRf902YXFVQFbs="; - }; + src = fetchFromGitHub { + owner = "olirice"; + repo = pname; + rev = "v${version}"; + inherit hash; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + create_sql_files() { + echo "Creating SQL files for previous versions..." 
+ if [[ "${version}" == "${latestVersion}" ]]; then + cp *.sql $out/share/postgresql/extension + fi + } + + create_control_files() { + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + + if [[ "${version}" == "${latestVersion}" ]]; then + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + fi + } - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + create_sql_files + create_control_files + ''; - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; + meta = with lib; { + description = "Recommend indexes to improve query performance in PostgreSQL"; + homepage = "https://github.com/olirice/index_advisor"; + license = licenses.postgresql; + inherit (postgresql.meta) platforms; + }; + }; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); +in +pkgs.buildEnv { + name = pname; + # Add dependency on hypopg for the extension to work + paths = packages ++ [ (callPackage ./hypopg.nix { inherit postgresql; }) ]; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; - meta = with lib; { - description = "Recommend indexes to improve query performance in PostgreSQL"; - homepage = "https://github.com/olirice/index_advisor"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_cron.nix b/nix/ext/pg_cron.nix deleted file mode 100644 index bcaaf180c..000000000 --- a/nix/ext/pg_cron.nix +++ /dev/null @@ -1,36 +0,0 @@ -{ - lib, - stdenv, - fetchFromGitHub, - postgresql, -}: - -stdenv.mkDerivation rec { - pname = "pg_cron"; - version = "1.6.4"; - - buildInputs = [ postgresql ]; - - src = fetchFromGitHub { - owner = "citusdata"; - repo = pname; - rev = "v${version}"; - hash = "sha256-t1DpFkPiSfdoGG2NgNT7g1lkvSooZoRoUrix6cBID40="; - }; - - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} - - cp *${postgresql.dlSuffix} $out/lib - cp *.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; - - meta = with lib; { - description = "Run Cron jobs through PostgreSQL"; - homepage = "https://github.com/citusdata/pg_cron"; - changelog = "https://github.com/citusdata/pg_cron/raw/v${version}/CHANGELOG.md"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; - }; -} diff --git a/nix/ext/pg_cron/default.nix b/nix/ext/pg_cron/default.nix new file mode 100644 index 000000000..75215d56a --- /dev/null +++ b/nix/ext/pg_cron/default.nix @@ -0,0 +1,122 @@ +{ + lib, + stdenv, + fetchFromGitHub, + postgresql, + buildEnv, + makeWrapper, + switch-ext-version, +}: +let + pname = "pg_cron"; + allVersions = (builtins.fromJSON (builtins.readFile ../versions.json)).${pname}; + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + build = + version: versionData: + stdenv.mkDerivation rec { + inherit pname version; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "citusdata"; + repo = pname; + rev = versionData.rev or "v${version}"; + hash = 
versionData.hash; + }; + + patches = map (p: ./. + "/${p}") (versionData.patches or [ ]); + + buildPhase = '' + make PG_CONFIG=${postgresql}/bin/pg_config + ''; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + # Install versioned library + install -Dm755 ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + + if [[ "${version}" == "${latestVersion}" ]]; then + cp ${pname}.sql $out/share/postgresql/extension/${pname}--1.0.0.sql + # Install upgrade scripts + find . -name 'pg_cron--*--*.sql' -exec install -Dm644 {} $out/share/postgresql/extension/ \; + mv $out/share/postgresql/extension/pg_cron--1.0--1.1.sql $out/share/postgresql/extension/pg_cron--1.0.0--1.1.0.sql + mv $out/share/postgresql/extension/pg_cron--1.1--1.2.sql $out/share/postgresql/extension/pg_cron--1.1.0--1.2.0.sql + mv $out/share/postgresql/extension/pg_cron--1.2--1.3.sql $out/share/postgresql/extension/pg_cron--1.2.0--1.3.1.sql + mv $out/share/postgresql/extension/pg_cron--1.3--1.4.sql $out/share/postgresql/extension/pg_cron--1.3.1--1.4.2.sql + mv $out/share/postgresql/extension/pg_cron--1.4--1.4-1.sql $out/share/postgresql/extension/pg_cron--1.4.0--1.4.1.sql + mv $out/share/postgresql/extension/pg_cron--1.4-1--1.5.sql $out/share/postgresql/extension/pg_cron--1.4.2--1.5.2.sql + mv $out/share/postgresql/extension/pg_cron--1.5--1.6.sql $out/share/postgresql/extension/pg_cron--1.5.2--1.6.4.sql + fi + + # Create versioned control file with modified module path + sed -e "/^default_version =/d" \ + -e "/^schema =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + ''; + + meta = with lib; { + description = "Run Cron jobs through PostgreSQL"; + homepage = "https://github.com/citusdata/pg_cron"; + changelog = "https://github.com/citusdata/pg_cron/raw/v${version}/CHANGELOG.md"; + platforms = 
postgresql.meta.platforms; + license = licenses.postgresql; + }; + }; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value) supportedVersions); +in +buildEnv { + name = pname; + paths = packages; + nativeBuildInputs = [ makeWrapper ]; + + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; + + postBuild = '' + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + + # checks + (set -x + test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + 1) + }" + ) + + makeWrapper ${lib.getExe switch-ext-version} $out/bin/switch_pg_cron_version \ + --prefix EXT_WRAPPER : "$out" --prefix EXT_NAME : "${pname}" + ''; + + meta = with lib; { + description = "Run Cron jobs through PostgreSQL (multi-version compatible)"; + homepage = "https://github.com/citusdata/pg_cron"; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; + + passthru = { + inherit versions numberOfVersions switch-ext-version; + pname = "${pname}-all"; + hasBackgroundWorker = true; + defaultSettings = { + shared_preload_libraries = [ "pg_cron" ]; + "cron.database_name" = "postgres"; + }; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); + }; +} diff --git a/nix/ext/pg_cron/pg_cron-1.3.1-pg15.patch b/nix/ext/pg_cron/pg_cron-1.3.1-pg15.patch new file mode 100644 index 000000000..6e496aabe --- /dev/null +++ b/nix/ext/pg_cron/pg_cron-1.3.1-pg15.patch @@ -0,0 +1,31 @@ +diff --git a/src/pg_cron.c b/src/pg_cron.c +index e0ca973..4d51b2c 100644 +--- a/src/pg_cron.c ++++ b/src/pg_cron.c +@@ -14,6 +14,8 @@ + #include + + #include "postgres.h" ++#include "commands/async.h" ++#include "miscadmin.h" + #include "fmgr.h" + + /* these are always necessary for a bgworker */ +@@ -1908,7 +1910,7 @@ CronBackgroundWorker(Datum main_arg) + /* Post-execution cleanup. */ + disable_timeout(STATEMENT_TIMEOUT, false); + CommitTransactionCommand(); +- ProcessCompletedNotifies(); ++ /* ProcessCompletedNotifies removed */ + pgstat_report_activity(STATE_IDLE, command); + pgstat_report_stat(true); + +@@ -2025,7 +2027,7 @@ ExecuteSqlString(const char *sql) + */ + oldcontext = MemoryContextSwitchTo(parsecontext); + #if PG_VERSION_NUM >= 100000 +- querytree_list = pg_analyze_and_rewrite(parsetree, sql, NULL, 0,NULL); ++ querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, sql, NULL, 0, NULL); + #else + querytree_list = pg_analyze_and_rewrite(parsetree, sql, NULL, 0); + #endif \ No newline at end of file diff --git a/nix/ext/pg_net.nix b/nix/ext/pg_net.nix index 02d673297..62dad4386 100644 --- a/nix/ext/pg_net.nix +++ b/nix/ext/pg_net.nix @@ -1,41 +1,128 @@ { + pkgs, lib, stdenv, fetchFromGitHub, curl, postgresql, + libuv, + makeWrapper, + switch-ext-version, }: -stdenv.mkDerivation rec { +let pname = "pg_net"; - version = "0.14.0"; - - buildInputs = [ - curl - postgresql - ]; - - src = fetchFromGitHub { - owner = "supabase"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-c1pxhTyrE5j6dY+M5eKAboQNofIORS+Dccz+7HKEKQI="; - }; + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; + + buildInputs = [ + curl + postgresql + ] ++ lib.optional (version == "0.6") 
libuv; + + src = fetchFromGitHub { + owner = "supabase"; + repo = pname; + rev = "refs/tags/v${version}"; + inherit hash; + }; + + buildPhase = '' + make PG_CONFIG=${postgresql}/bin/pg_config + ''; + + postPatch = + lib.optionalString (version == "0.6") '' + # handle collision with pg_net 0.10.0 + rm sql/pg_net--0.2--0.3.sql + rm sql/pg_net--0.4--0.5.sql + rm sql/pg_net--0.5.1--0.6.sql + '' + + lib.optionalString (version == "0.7.1") '' + # handle collision with pg_net 0.10.0 + rm sql/pg_net--0.5.1--0.6.sql + ''; + + env.NIX_CFLAGS_COMPILE = lib.optionalString (lib.versionOlder version "0.19.1") "-Wno-error"; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + # Install versioned library + install -Dm755 ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + if [ -f sql/${pname}.sql ]; then + cp sql/${pname}.sql $out/share/postgresql/extension/${pname}--${version}.sql + else + cp sql/${pname}--${version}.sql $out/share/postgresql/extension/${pname}--${version}.sql + fi + + # Install upgrade scripts + find . 
-name '${pname}--*--*.sql' -exec install -Dm644 {} $out/share/postgresql/extension/ \; + + # Create versioned control file with modified module path + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + ''; + + meta = with lib; { + description = "Async networking for Postgres"; + homepage = "https://github.com/supabase/pg_net"; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; + }; + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).pg_net; + # Filter out versions that don't work on current platform + platformFilteredVersions = lib.filterAttrs ( + name: _: + # Exclude 0.11.0 on macOS due to epoll.h dependency + !(stdenv.isDarwin && name == "0.11.0") + ) allVersions; + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) platformFilteredVersions; + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); +in +pkgs.buildEnv { + name = pname; + paths = packages; + nativeBuildInputs = [ makeWrapper ]; + postBuild = '' + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} - env.NIX_CFLAGS_COMPILE = "-Wno-error"; - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + # checks + (set -x + test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + 1) + }" + ) - cp *${postgresql.dlSuffix} $out/lib - cp sql/*.sql 
$out/share/postgresql/extension - cp *.control $out/share/postgresql/extension + makeWrapper ${lib.getExe switch-ext-version} $out/bin/switch_pg_net_version \ + --prefix EXT_WRAPPER : "$out" --prefix EXT_NAME : "${pname}" ''; - meta = with lib; { - description = "Async networking for Postgres"; - homepage = "https://github.com/supabase/pg_net"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + hasBackgroundWorker = true; + defaultSettings = { + shared_preload_libraries = [ "pg_net" ]; + }; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgsodium.nix b/nix/ext/pgsodium.nix index d843d434f..5c8b07df6 100644 --- a/nix/ext/pgsodium.nix +++ b/nix/ext/pgsodium.nix @@ -1,39 +1,109 @@ { + pkgs, lib, stdenv, fetchFromGitHub, - libsodium, postgresql, + libsodium, }: - -stdenv.mkDerivation rec { +let pname = "pgsodium"; - version = "3.1.8"; - buildInputs = [ - libsodium - postgresql - ]; + # Load version configuration from external file + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; - src = fetchFromGitHub { - owner = "michelp"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-j5F1PPdwfQRbV8XJ8Mloi8FvZF0MTl4eyIJcBYQy1E4="; - }; + # Filter versions compatible with current PostgreSQL version + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + + # Derived version information + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); + + # Build function for individual pgsodium versions + build = + version: hash: + stdenv.mkDerivation 
rec { + inherit pname version; + + buildInputs = [ + libsodium + postgresql + ]; + + src = fetchFromGitHub { + owner = "michelp"; + repo = pname; + rev = "refs/tags/v${version}"; + inherit hash; + }; + + installPhase = '' + runHook preInstall + + mkdir -p $out/{lib,share/postgresql/extension} + + # Install shared library with version suffix + mv ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + # Create version-specific control file + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + + # For the latest version, create default control file and symlink + if [[ "${version}" == "${latestVersion}" ]]; then + # sql/pgsodium--3.1.5--3.1.6.sql isn't a proper upgrade sql file + cp sql/pgsodium--3.1.4--3.1.5.sql sql/pgsodium--3.1.5--3.1.6.sql + cp sql/*.sql $out/share/postgresql/extension + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + fi + + runHook postInstall + ''; + + meta = with lib; { + description = "Modern cryptography for PostgreSQL"; + homepage = "https://github.com/michelp/${pname}"; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; + }; +in +pkgs.buildEnv { + name = pname; + paths = packages; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + postBuild = '' + # Verify all expected library files are present + expectedFiles=${toString (numberOfVersions + 1)} + actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) - cp *${postgresql.dlSuffix} $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control 
$out/share/postgresql/extension + if [[ "$actualFiles" != "$expectedFiles" ]]; then + echo "Error: Expected $expectedFiles library files, found $actualFiles" + echo "Files found:" + ls -la $out/lib/${pname}*${postgresql.dlSuffix} || true + exit 1 + fi ''; - meta = with lib; { - description = "Modern cryptography for PostgreSQL"; - homepage = "https://github.com/michelp/${pname}"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgsql-http.nix b/nix/ext/pgsql-http.nix index 9f4bae789..ba669e22d 100644 --- a/nix/ext/pgsql-http.nix +++ b/nix/ext/pgsql-http.nix @@ -1,39 +1,111 @@ { + pkgs, lib, stdenv, fetchFromGitHub, - curl, postgresql, + curl, }: +let + pname = "http"; -stdenv.mkDerivation rec { - pname = "pgsql-http"; - version = "1.6.1"; + # Load version configuration from external file + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; - buildInputs = [ - curl - postgresql - ]; + # Filter versions compatible with current PostgreSQL version + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; - src = fetchFromGitHub { - owner = "pramsey"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-C8eqi0q1dnshUAZjIsZFwa5FTYc7vmATF3vv2CReWPM="; - }; + # Derived version information + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); + + # Build function for individual versions + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; + # Use major.minor version for 
filenames (e.g., 1.5 instead of 1.5.0) + fileVersion = lib.versions.majorMinor version; + + buildInputs = [ + curl + postgresql + ]; + + src = fetchFromGitHub { + owner = "pramsey"; + repo = "pgsql-http"; + rev = "refs/tags/v${version}"; + inherit hash; + }; + + installPhase = '' + runHook preInstall - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + mkdir -p $out/{lib,share/postgresql/extension} + + # Install versioned library + install -Dm755 ${pname}${postgresql.dlSuffix} $out/lib/${pname}--${fileVersion}${postgresql.dlSuffix} + + cp ${pname}--${fileVersion}.sql $out/share/postgresql/extension/${pname}--${fileVersion}.sql + + # Create versioned control file with modified module path + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${fileVersion}.control + + # For the latest version, create default control file and symlink and copy SQL upgrade scripts + if [[ "${version}" == "${latestVersion}" ]]; then + { + echo "default_version = '${fileVersion}'" + cat $out/share/postgresql/extension/${pname}--${fileVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}--${fileVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + cp *.sql $out/share/postgresql/extension + fi + + runHook postInstall + ''; + + meta = with lib; { + description = "HTTP client for Postgres"; + homepage = "https://github.com/pramsey/${pname}"; + inherit (postgresql.meta) platforms; + license = licenses.postgresql; + }; + }; +in +pkgs.buildEnv { + name = pname; + paths = packages; + + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; + postBuild = '' + # Verify all expected library files are present + expectedFiles=${toString (numberOfVersions + 1)} + actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) - cp *${postgresql.dlSuffix} $out/lib - cp *.sql 
$out/share/postgresql/extension - cp *.control $out/share/postgresql/extension + if [[ "$actualFiles" != "$expectedFiles" ]]; then + echo "Error: Expected $expectedFiles library files, found $actualFiles" + echo "Files found:" + ls -la $out/lib/${pname}*${postgresql.dlSuffix} || true + exit 1 + fi ''; - meta = with lib; { - description = "HTTP client for Postgres"; - homepage = "https://github.com/pramsey/${pname}"; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgvector.nix b/nix/ext/pgvector.nix index 60a8efea3..c49a02e71 100644 --- a/nix/ext/pgvector.nix +++ b/nix/ext/pgvector.nix @@ -1,36 +1,92 @@ { + pkgs, lib, stdenv, fetchFromGitHub, postgresql, }: +let + pname = "vector"; -stdenv.mkDerivation rec { - pname = "pgvector"; - version = "0.8.0"; + # Load version configuration from external file + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; - buildInputs = [ postgresql ]; + # Filter versions compatible with current PostgreSQL version + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; - src = fetchFromGitHub { - owner = "pgvector"; - repo = pname; - rev = "refs/tags/v${version}"; - hash = "sha256-JsZV+I4eRMypXTjGmjCtMBXDVpqTIPHQa28ogXncE/Q="; - }; + # Derived version information + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); + + # Build function for individual versions + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; + + buildInputs = [ postgresql 
]; + + src = fetchFromGitHub { + owner = "pgvector"; + repo = "pgvector"; + rev = "refs/tags/v${version}"; + inherit hash; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + # Install shared library with version suffix + mv ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + # Create version-specific control file + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + + # Copy SQL file to install the specific version + cp sql/${pname}.sql $out/share/postgresql/extension/${pname}--${version}.sql + + # For the latest version, copy sql upgrade script, default control file and symlink + if [[ "${version}" == "${latestVersion}" ]]; then + cp sql/*.sql $out/share/postgresql/extension + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + fi - installPhase = '' - mkdir -p $out/{lib,share/postgresql/extension} + runHook postInstall + ''; - cp *${postgresql.dlSuffix} $out/lib - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension - ''; + meta = with lib; { + description = "Open-source vector similarity search for Postgres"; + homepage = "https://github.com/${src.owner}/${src.repo}"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; + }; +in +pkgs.buildEnv { + name = pname; + paths = packages; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; - meta = with lib; { - description = "Open-source vector similarity search for Postgres"; - homepage = "https://github.com/${src.owner}/${src.repo}"; - 
maintainers = with maintainers; [ olirice ]; - platforms = postgresql.meta.platforms; - license = licenses.postgresql; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix index 27a449210..6a152ef43 100644 --- a/nix/ext/postgis.nix +++ b/nix/ext/postgis.nix @@ -15,88 +15,198 @@ pcre2, nixosTests, callPackage, + buildEnv, }: let sfcgal = callPackage ./sfcgal/sfcgal.nix { }; gdal = callPackage ./gdal.nix { inherit postgresql; }; -in -stdenv.mkDerivation rec { pname = "postgis"; - version = "3.3.7"; - outputs = [ - "out" - "doc" - ]; + # Load version configuration from external file + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; - src = fetchurl { - url = "https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; - sha256 = "sha256-UHJKDd5JrcJT5Z4CTYsY/va+ToU0GUPG1eHhuXTkP84="; - }; + # Filter versions compatible with current PostgreSQL version + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + + # Derived version information + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) supportedVersions + ); - buildInputs = [ - libxml2 - postgresql - geos - proj - gdal - json_c - protobufc - pcre2.dev - sfcgal - ] ++ lib.optional stdenv.isDarwin libiconv; - nativeBuildInputs = [ - perl - pkg-config + # List of C extensions to be included in the build + cExtensions = [ + "address_standardizer" + "postgis" + "postgis_raster" + "postgis_sfcgal" + "postgis_topology" ]; - dontDisableStatic = true; - env.NIX_LDFLAGS = "-L${lib.getLib json_c}/lib"; + 
sqlExtensions = [ + "address_standardizer_data_us" + "postgis_tiger_geocoder" + ]; - preConfigure = '' - sed -i 's@/usr/bin/file@${file}/bin/file@' configure - configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install --with-sfcgal" + # Build function for individual versions + build = + version: hash: + stdenv.mkDerivation rec { + inherit pname version; - makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" - ''; + outputs = [ + "out" + "doc" + ]; - postConfigure = '' - sed -i "s|@mkdir -p \$(DESTDIR)\$(PGSQL_BINDIR)||g ; - s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g - " \ - "raster/loader/Makefile"; - sed -i "s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g - " \ - "raster/scripts/python/Makefile"; - mkdir -p $out/bin - ln -s ${postgresql}/bin/postgres $out/bin/postgres - ''; + src = fetchurl { + url = "https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; + inherit hash; + }; - postInstall = '' - rm $out/bin/postgres - for prog in $out/bin/*; do # */ - ln -s $prog $prog-${version} - done - # Add function definition and usage to tiger geocoder files - for file in $out/share/postgresql/extension/postgis_tiger_geocoder*--${version}.sql; do - sed -i "/SELECT postgis_extension_AddToSearchPath('tiger');/a SELECT postgis_extension_AddToSearchPath('extensions');" "$file" - done - # Original topology patching - for file in $out/share/postgresql/extension/postgis_topology*--${version}.sql; do - sed -i "/SELECT topology.AddToSearchPath('topology');/i SELECT topology.AddToSearchPath('extensions');" "$file" - done - mkdir -p $doc/share/doc/postgis - mv doc/* $doc/share/doc/postgis/ - ''; + buildInputs = [ + libxml2 + postgresql + geos + proj + gdal + json_c + protobufc + pcre2.dev + 
sfcgal + ] ++ lib.optional stdenv.isDarwin libiconv; + nativeBuildInputs = [ + perl + pkg-config + ]; + dontDisableStatic = true; + + env.NIX_LDFLAGS = "-L${lib.getLib json_c}/lib"; + + preConfigure = '' + sed -i 's@/usr/bin/file@${file}/bin/file@' configure + configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --with-sfcgal" + + makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" + ''; + + postConfigure = '' + sed -i "s|@mkdir -p \$(DESTDIR)\$(PGSQL_BINDIR)||g ; + s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/loader/Makefile"; + sed -i "s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/scripts/python/Makefile"; + mkdir -p $out/bin + ln -s ${postgresql}/bin/postgres $out/bin/postgres + ''; + + postInstall = '' + MIN_MAJ_VERSION=${lib.concatStringsSep "." 
(lib.take 2 (builtins.splitVersion version))} + rm $out/bin/postgres + + # Rename C extension libraries with full version suffix + for ext in ${lib.concatStringsSep " " cExtensions}; do + if [ -f "$out/lib/$ext-3${postgresql.dlSuffix}" ]; then + mv $out/lib/$ext-3${postgresql.dlSuffix} $out/lib/$ext-${version}${postgresql.dlSuffix} + fi + done - passthru.tests.postgis = nixosTests.postgis; + # Create version-specific control files (without default_version, pointing to unversioned library) + for ext in ${lib.concatStringsSep " " (cExtensions ++ sqlExtensions)}; do + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/$ext-3'|" \ + $out/share/postgresql/extension/$ext.control > $out/share/postgresql/extension/$ext--${version}.control + rm $out/share/postgresql/extension/$ext.control + done + + # Add function definition and usage to tiger geocoder files + for file in $out/share/postgresql/extension/postgis_tiger_geocoder*--${version}.sql; do + sed -i "/SELECT postgis_extension_AddToSearchPath('tiger');/a SELECT postgis_extension_AddToSearchPath('extensions');" "$file" + done + # Original topology patching + for file in $out/share/postgresql/extension/postgis_topology*--${version}.sql; do + sed -i "/SELECT topology.AddToSearchPath('topology');/i SELECT topology.AddToSearchPath('extensions');" "$file" + done + + # For the latest version, create default control file and library symlinks + if [[ "${version}" == "${latestVersion}" ]]; then + # Copy all SQL upgrade scripts only for latest version + cp $out/share/postgresql/extension/*.sql $out/share/postgresql/extension/ 2>/dev/null || true + + for ext in ${lib.concatStringsSep " " (cExtensions ++ sqlExtensions)}; do + { + echo "default_version = '${version}'" + cat $out/share/postgresql/extension/$ext--${version}.control + } > $out/share/postgresql/extension/$ext.control + done + + # Create symlinks for C extension libraries (latest version becomes the default) + for ext in 
${lib.concatStringsSep " " cExtensions}; do + ln -sfn $ext-${version}${postgresql.dlSuffix} $out/lib/$ext-3${postgresql.dlSuffix} + done + + for prog in $out/bin/*; do # */ + ln -s $prog $prog-${version} + done + else + # remove migration scripts for non-latest version + find $out/share/postgresql/extension -regex '.*--.*--.*\.sql' -delete + + for prog in $out/bin/*; do # */ + mv $prog $prog-${version} + done + fi + + mkdir -p $doc/share/doc/postgis + mv doc/* $doc/share/doc/postgis/ + ''; + + passthru.tests.postgis = nixosTests.postgis; + + meta = with lib; { + description = "Geographic Objects for PostgreSQL"; + homepage = "https://postgis.net/"; + changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; + license = licenses.gpl2; + inherit (postgresql.meta) platforms; + }; + }; +in +buildEnv { + name = pname; + paths = packages; + + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; + postBuild = '' + # Verify all expected library files are present + # We expect: (numberOfVersions * cExtensions) versioned libraries + cExtensions symlinks + expectedFiles=${ + toString ((numberOfVersions * builtins.length cExtensions) + builtins.length cExtensions) + } + actualFiles=$(ls -A $out/lib/*${postgresql.dlSuffix} | wc -l) + + if [[ "$actualFiles" != "$expectedFiles" ]]; then + echo "Error: Expected $expectedFiles library files, found $actualFiles" + echo "Files found:" + ls -la $out/lib/*${postgresql.dlSuffix} || true + exit 1 + fi + ''; - meta = with lib; { - description = "Geographic Objects for PostgreSQL"; - homepage = "https://postgis.net/"; - changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; - license = licenses.gpl2; - inherit (postgresql.meta) platforms; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings 
[ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/rum.nix b/nix/ext/rum.nix index 9dc5d9525..f0839241f 100644 --- a/nix/ext/rum.nix +++ b/nix/ext/rum.nix @@ -3,33 +3,100 @@ stdenv, fetchFromGitHub, postgresql, + buildEnv, }: - -stdenv.mkDerivation rec { +let pname = "rum"; - version = "1.3.14"; - src = fetchFromGitHub { - owner = "postgrespro"; - repo = "rum"; - rev = version; - hash = "sha256-VsfpxQqRBu9bIAP+TfMRXd+B3hSjuhU2NsutocNiCt8="; - }; + # Load version configuration from external file + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).${pname}; + + # Filter versions compatible with current PostgreSQL version + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + + # Derived version information + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash value.revision) supportedVersions + ); + + # Build function for individual versions + build = + version: hash: revision: + stdenv.mkDerivation { + inherit pname version; + + src = fetchFromGitHub { + owner = "postgrespro"; + repo = "rum"; + rev = revision; + inherit hash; + }; + + buildInputs = [ postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + # Install shared library with version suffix + mv ${pname}${postgresql.dlSuffix} $out/lib/${pname}-${version}${postgresql.dlSuffix} + + # Create version-specific control file + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/${pname}-${version}'|" \ + ${pname}.control > $out/share/postgresql/extension/${pname}--${version}.control + + # For the latest version, create default control file and symlink and copy SQL upgrade scripts + if [[ "${version}" == "${latestVersion}" 
]]; then + { + echo "default_version = '${version}'" + cat $out/share/postgresql/extension/${pname}--${version}.control + } > $out/share/postgresql/extension/${pname}.control + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + cp *.sql $out/share/postgresql/extension + fi + ''; + + meta = with lib; { + description = "Full text search index method for PostgreSQL"; + homepage = "https://github.com/postgrespro/rum"; + license = licenses.postgresql; + inherit (postgresql.meta) platforms; + }; + }; +in +buildEnv { + name = pname; + paths = packages; - buildInputs = [ postgresql ]; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; - makeFlags = [ "USE_PGXS=1" ]; + postBuild = '' + # Verify all expected library files are present + expectedFiles=${toString (numberOfVersions + 1)} + actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) - installPhase = '' - install -D -t $out/lib *${postgresql.dlSuffix} - install -D -t $out/share/postgresql/extension *.control - install -D -t $out/share/postgresql/extension *.sql + if [[ "$actualFiles" != "$expectedFiles" ]]; then + echo "Error: Expected $expectedFiles library files, found $actualFiles" + echo "Files found:" + ls -la $out/lib/*${postgresql.dlSuffix} || true + exit 1 + fi ''; - meta = with lib; { - description = "Full text search index method for PostgreSQL"; - homepage = "https://github.com/postgrespro/rum"; - license = licenses.postgresql; - platforms = postgresql.meta.platforms; + passthru = { + inherit versions numberOfVersions; + pname = "${pname}-all"; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/tests/default.nix b/nix/ext/tests/default.nix new file mode 100644 index 000000000..afea82fc8 --- /dev/null +++ b/nix/ext/tests/default.nix @@ -0,0 +1,192 @@ +{ self, pkgs }: +let + testsDir = ./.; + testFiles = builtins.attrNames (builtins.readDir testsDir); + nixFiles = builtins.filter ( + name: builtins.match ".*\\.nix$" name != null && name != "default.nix" + ) testFiles; + extTest = + extension_name: + let + pname = extension_name; + inherit (pkgs) lib; + installedExtension = + postgresMajorVersion: self.packages.${pkgs.system}."psql_${postgresMajorVersion}/exts/${pname}-all"; + versions = postgresqlMajorVersion: (installedExtension postgresqlMajorVersion).versions; + postgresqlWithExtension = + postgresql: + let + majorVersion = lib.versions.major postgresql.version; + pkg = pkgs.buildEnv { + name = "postgresql-${majorVersion}-${pname}"; + paths = [ + postgresql + postgresql.lib + (installedExtension majorVersion) + ]; + passthru = { + inherit (postgresql) version psqlSchema; + lib = pkg; + withPackages = _: pkg; + }; + nativeBuildInputs = [ pkgs.makeWrapper ]; + pathsToLink = [ + "/" + "/bin" + "/lib" + ]; + postBuild = '' + wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_ctl --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_upgrade --set NIX_PGLIBDIR $out/lib + ''; + }; + in + pkg; + psql_15 = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; + psql_17 = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_17; + in + self.inputs.nixpkgs.lib.nixos.runTest { + name = pname; + hostPkgs = pkgs; + nodes.server = + { config, ... 
}: + { + virtualisation = { + forwardPorts = [ + { + from = "host"; + host.port = 13022; + guest.port = 22; + } + ]; + }; + services.openssh = { + enable = true; + }; + + services.postgresql = { + enable = true; + package = psql_15; + enableTCPIP = true; + initialScript = pkgs.writeText "init-postgres-with-password" '' + CREATE USER test WITH PASSWORD 'secret'; + ''; + authentication = '' + host test postgres samenet scram-sha-256 + ''; + settings = (installedExtension "15").defaultSettings or { }; + }; + + networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ]; + + specialisation.postgresql17.configuration = { + services.postgresql = { + package = lib.mkForce psql_17; + }; + + systemd.services.postgresql-migrate = { + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + User = "postgres"; + Group = "postgres"; + StateDirectory = "postgresql"; + WorkingDirectory = "${builtins.dirOf config.services.postgresql.dataDir}"; + }; + script = + let + oldPostgresql = psql_15; + newPostgresql = psql_17; + oldDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${oldPostgresql.psqlSchema}"; + newDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${newPostgresql.psqlSchema}"; + in + '' + if [[ ! -d ${newDataDir} ]]; then + install -d -m 0700 -o postgres -g postgres "${newDataDir}" + ${newPostgresql}/bin/initdb -D "${newDataDir}" + ${newPostgresql}/bin/pg_upgrade --old-datadir "${oldDataDir}" --new-datadir "${newDataDir}" \ + --old-bindir "${oldPostgresql}/bin" --new-bindir "${newPostgresql}/bin" + else + echo "${newDataDir} already exists" + fi + ''; + }; + + systemd.services.postgresql = { + after = [ "postgresql-migrate.service" ]; + requires = [ "postgresql-migrate.service" ]; + }; + }; + }; + testScript = + { nodes, ... 
}: + let + pg17-configuration = "${nodes.server.system.build.toplevel}/specialisation/postgresql17"; + in + '' + versions = { + "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "15"))}], + "17": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "17"))}], + } + extension_name = "${pname}" + support_upgrade = True + pg17_configuration = "${pg17-configuration}" + ext_has_background_worker = ${ + if (installedExtension "15") ? hasBackgroundWorker then "True" else "False" + } + + ${builtins.readFile ./lib.py} + + start_all() + + server.wait_for_unit("multi-user.target") + server.wait_for_unit("postgresql.service") + + test = PostgresExtensionTest(server, extension_name, versions, support_upgrade) + + with subtest("Check upgrade path with postgresql 15"): + test.check_upgrade_path("15") + + last_version = None + with subtest("Check the install of the last version of the extension"): + last_version = test.check_install_last_version("15") + + if ext_has_background_worker: + with subtest("Test switch_${pname}_version"): + test.check_switch_extension_with_background_worker(Path("${psql_15}/lib/${pname}.so"), "15") + + with subtest("switch to postgresql 17"): + server.succeed( + f"{pg17_configuration}/bin/switch-to-configuration test >&2" + ) + + with subtest("Check last version of the extension after upgrade"): + test.assert_version_matches(last_version) + + with subtest("Check upgrade path with postgresql 17"): + test.check_upgrade_path("17") + ''; + }; +in +builtins.listToAttrs ( + map (file: { + name = "ext-" + builtins.replaceStrings [ ".nix" ] [ "" ] file; + value = import (testsDir + "/${file}") { inherit self pkgs; }; + }) nixFiles +) +// builtins.listToAttrs ( + map + (extName: { + name = "ext-${extName}"; + value = extTest extName; + }) + [ + "hypopg" + "index_advisor" + "pg_cron" + "pg_net" + "vector" + "wrappers" + ] +) diff --git a/nix/ext/tests/wrappers.nix b/nix/ext/tests/http.nix similarity index 94% rename from 
nix/ext/tests/wrappers.nix rename to nix/ext/tests/http.nix index 54cc63944..09075c374 100644 --- a/nix/ext/tests/wrappers.nix +++ b/nix/ext/tests/http.nix @@ -1,6 +1,6 @@ { self, pkgs }: let - pname = "wrappers"; + pname = "http"; inherit (pkgs) lib; installedExtension = postgresMajorVersion: self.packages.${pkgs.system}."psql_${postgresMajorVersion}/exts/${pname}-all"; @@ -54,9 +54,6 @@ self.inputs.nixpkgs.lib.nixos.runTest { services.openssh = { enable = true; }; - users.users.root.openssh.authorizedKeys.keys = [ - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIo+ulCUfJjnCVgfM4946Ih5Nm8DeZZiayYeABHGPEl7 jfroche" - ]; services.postgresql = { enable = true; @@ -106,11 +103,13 @@ self.inputs.nixpkgs.lib.nixos.runTest { { nodes, ... }: let pg17-configuration = "${nodes.server.system.build.toplevel}/specialisation/postgresql17"; + # Convert versions to major.minor format (e.g., "1.5.0" -> "1.5") + toMajorMinor = map (v: lib.versions.majorMinor v); in '' versions = { - "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "15"))}], - "17": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "17"))}], + "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (toMajorMinor (versions "15")))}], + "17": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (toMajorMinor (versions "17")))}], } def run_sql(query): @@ -120,7 +119,7 @@ self.inputs.nixpkgs.lib.nixos.runTest { with subtest("Check ${pname} upgrade path"): firstVersion = versions[pg_version][0] server.succeed("sudo -u postgres psql -c 'DROP EXTENSION IF EXISTS ${pname};'") - run_sql(f"""CREATE EXTENSION ${pname} WITH VERSION '{firstVersion}';""") + run_sql(f"""CREATE EXTENSION ${pname} WITH VERSION '{firstVersion}' CASCADE;""") installed_version = run_sql(r"""SELECT extversion FROM pg_extension WHERE extname = '${pname}';""") assert installed_version == firstVersion, f"Expected ${pname} version {firstVersion}, but found {installed_version}" for version in versions[pg_version][1:]: @@ -137,7 +136,7 @@ 
self.inputs.nixpkgs.lib.nixos.runTest { with subtest("Check ${pname} latest extension version"): server.succeed("sudo -u postgres psql -c 'DROP EXTENSION ${pname};'") - server.succeed("sudo -u postgres psql -c 'CREATE EXTENSION ${pname};'") + server.succeed("sudo -u postgres psql -c 'CREATE EXTENSION ${pname} CASCADE;'") installed_extensions=run_sql(r"""SELECT extname, extversion FROM pg_extension;""") latestVersion = versions["15"][-1] assert f"${pname},{latestVersion}" in installed_extensions @@ -147,7 +146,7 @@ self.inputs.nixpkgs.lib.nixos.runTest { "${pg17-configuration}/bin/switch-to-configuration test >&2" ) - with subtest("Check ${pname} latest extension version"): + with subtest("Check ${pname} latest extension version after upgrade"): installed_extensions=run_sql(r"""SELECT extname, extversion FROM pg_extension;""") latestVersion = versions["17"][-1] assert f"${pname},{latestVersion}" in installed_extensions diff --git a/nix/ext/tests/lib.py b/nix/ext/tests/lib.py new file mode 100644 index 000000000..35c5b0e04 --- /dev/null +++ b/nix/ext/tests/lib.py @@ -0,0 +1,169 @@ +"""PostgreSQL extension testing framework for multi-version compatibility. + +This module provides a test framework for PostgreSQL extensions that need to be +tested across multiple PostgreSQL versions and extension versions. It handles +installation, upgrades, and version verification of PostgreSQL extensions. +""" + +from typing import Sequence, Mapping +from pathlib import Path +from test_driver.machine import Machine + +Versions = Mapping[str, Sequence[str]] + + +class PostgresExtensionTest(object): + def __init__( + self, + vm: Machine, + extension_name: str, + versions: Versions, + support_upgrade: bool = True, + ): + """Initialize the PostgreSQL extension test framework. 
+ + Args: + vm: Test machine instance for executing commands + extension_name: Name of the PostgreSQL extension to test + versions: Mapping of PostgreSQL versions to available extension versions + support_upgrade: Whether the extension supports in-place upgrades + """ + self.vm = vm + self.extension_name = extension_name + self.versions = versions + self.support_upgrade = support_upgrade + + def run_sql(self, query: str) -> str: + return self.vm.succeed( + f"""sudo -u postgres psql -t -A -F\",\" -c \"{query}\" """ + ).strip() + + def drop_extension(self): + self.run_sql(f"DROP EXTENSION IF EXISTS {self.extension_name};") + + def install_extension(self, version: str): + self.run_sql( + f"""CREATE EXTENSION {self.extension_name} WITH VERSION '{version}' CASCADE;""" + ) + # Verify version was installed correctly + self.assert_version_matches(version) + + def update_extension(self, version: str): + self.run_sql( + f"""ALTER EXTENSION {self.extension_name} UPDATE TO '{version}';""" + ) + # Verify version was installed correctly + self.assert_version_matches(version) + + def get_installed_version(self) -> str: + """Get the currently installed version of the extension. + + Returns: + Version string of the currently installed extension, + or empty string if extension is not installed + """ + return self.run_sql( + f"""SELECT extversion FROM pg_extension WHERE extname = '{self.extension_name}';""" + ) + + def assert_version_matches(self, expected_version: str): + """Check if the installed version matches the expected version. 
+ + Args: + expected_version: Expected version string to verify against + + Raises: + AssertionError: If the installed version does not match the expected version + """ + installed_version = self.get_installed_version() + assert ( + installed_version == expected_version + ), f"Expected version {expected_version}, but found {installed_version}" + + def check_upgrade_path(self, pg_version: str): + """Test the complete upgrade path for a PostgreSQL version. + + This method tests all available extension versions for a given PostgreSQL + version, either through in-place upgrades or reinstallation depending on + the support_upgrade setting. + + Args: + pg_version: PostgreSQL version to test (e.g., "14", "15") + + Raises: + ValueError: If no versions are available for the specified PostgreSQL version + AssertionError: If version installation or upgrade fails + """ + available_versions = self.versions.get(pg_version, []) + if not available_versions: + raise ValueError( + f"No versions available for PostgreSQL version {pg_version}" + ) + + # Install and verify first version + firstVersion = available_versions[0] + self.drop_extension() + self.install_extension(firstVersion) + + # Test remaining versions + for version in available_versions[1:]: + if self.support_upgrade: + self.update_extension(version) + else: + self.drop_extension() + self.install_extension(version) + + def check_install_last_version(self, pg_version: str) -> str: + """Test if the install of the last version of the extension works for a given PostgreSQL version. 
+ + Args: + pg_version: PostgreSQL version to check (e.g., "14", "15") + """ + available_versions = self.versions.get(pg_version, []) + if not available_versions: + raise ValueError( + f"No versions available for PostgreSQL version {pg_version}" + ) + last_version = available_versions[-1] + self.drop_extension() + self.install_extension(last_version) + return last_version + + def check_switch_extension_with_background_worker( + self, extension_lib_path: Path, pg_version: str + ): + """Test manual switching between two versions of an extension with a background worker. + + Args: + extension_lib_path: Path to the directory containing the extension shared library of the extension + pg_version: PostgreSQL version to check (e.g., "14", "15") + """ + # Check that we are using the last version first + ext_version = self.vm.succeed(f"readlink -f {extension_lib_path}").strip() + available_versions = self.versions.get(pg_version, []) + if not available_versions: + raise ValueError( + f"No versions available for PostgreSQL version {pg_version}" + ) + last_version = available_versions[-1] + assert ext_version.endswith( + f"{last_version}.so" + ), f"Expected {self.extension_name} version {last_version}, but found {ext_version}" + + # Switch to the first version + first_version = available_versions[0] + self.vm.succeed(f"switch_{self.extension_name}_version {first_version}") + + # Check that we are using the first version now + ext_version = self.vm.succeed(f"readlink -f {extension_lib_path}").strip() + assert ext_version.endswith( + f"{first_version}.so" + ), f"Expected {self.extension_name} version {first_version}, but found {ext_version}" + + # Switch to the first version + self.vm.succeed(f"switch_{self.extension_name}_version {last_version}") + # Check that we are using the last version now + ext_version = self.vm.succeed(f"readlink -f {extension_lib_path}").strip() + assert ext_version.endswith( + f"{last_version}.so" + ), f"Expected {self.extension_name} version 
{last_version}, but found {ext_version}" diff --git a/nix/ext/tests/pgsodium.nix b/nix/ext/tests/pgsodium.nix new file mode 100644 index 000000000..9ad1aec4b --- /dev/null +++ b/nix/ext/tests/pgsodium.nix @@ -0,0 +1,158 @@ +{ self, pkgs }: +let + pname = "pgsodium"; + inherit (pkgs) lib; + installedExtension = + postgresMajorVersion: self.packages.${pkgs.system}."psql_${postgresMajorVersion}/exts/${pname}-all"; + versions = postgresqlMajorVersion: (installedExtension postgresqlMajorVersion).versions; + postgresqlWithExtension = + postgresql: + let + majorVersion = lib.versions.major postgresql.version; + pkg = pkgs.buildEnv { + name = "postgresql-${majorVersion}-${pname}"; + paths = [ + postgresql + postgresql.lib + (installedExtension majorVersion) + self.packages.${pkgs.system}."psql_${majorVersion}/exts/hypopg-all" + ]; + passthru = { + inherit (postgresql) version psqlSchema; + lib = pkg; + withPackages = _: pkg; + }; + nativeBuildInputs = [ pkgs.makeWrapper ]; + pathsToLink = [ + "/" + "/bin" + "/lib" + ]; + postBuild = '' + wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_ctl --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_upgrade --set NIX_PGLIBDIR $out/lib + ''; + }; + in + pkg; + pgsodiumGetKey = lib.getExe ( + pkgs.writeShellScriptBin "pgsodium-getkey" '' + echo 0000000000000000000000000000000000000000000000000000000000000000 + '' + ); +in +self.inputs.nixpkgs.lib.nixos.runTest { + name = pname; + hostPkgs = pkgs; + nodes.server = + { config, ... 
}: + { + virtualisation = { + forwardPorts = [ + { + from = "host"; + host.port = 13022; + guest.port = 22; + } + ]; + }; + + services.postgresql = { + enable = true; + package = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; + settings = { + "shared_preload_libraries" = pname; + "pgsodium.getkey_script" = pgsodiumGetKey; + }; + }; + + specialisation.postgresql17.configuration = { + services.postgresql = { + package = lib.mkForce (postgresqlWithExtension self.packages.${pkgs.system}.postgresql_17); + }; + + systemd.services.postgresql-migrate = { + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + User = "postgres"; + Group = "postgres"; + StateDirectory = "postgresql"; + WorkingDirectory = "${builtins.dirOf config.services.postgresql.dataDir}"; + }; + script = + let + oldPostgresql = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; + newPostgresql = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_17; + oldDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${oldPostgresql.psqlSchema}"; + newDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${newPostgresql.psqlSchema}"; + in + '' + if [[ ! -d ${newDataDir} ]]; then + install -d -m 0700 -o postgres -g postgres "${newDataDir}" + ${newPostgresql}/bin/initdb -D "${newDataDir}" + echo "shared_preload_libraries = '${pname}'" >> "${newDataDir}/postgresql.conf" + echo "pgsodium.getkey_script = '${pgsodiumGetKey}'" >> "${newDataDir}/postgresql.conf"; + ${newPostgresql}/bin/pg_upgrade --old-datadir "${oldDataDir}" --new-datadir "${newDataDir}" \ + --old-bindir "${oldPostgresql}/bin" --new-bindir "${newPostgresql}/bin" + else + echo "${newDataDir} already exists" + fi + ''; + }; + + systemd.services.postgresql = { + after = [ "postgresql-migrate.service" ]; + requires = [ "postgresql-migrate.service" ]; + }; + }; + }; + testScript = + { nodes, ... 
}: + let + pg17-configuration = "${nodes.server.system.build.toplevel}/specialisation/postgresql17"; + in + '' + versions = { + "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "15"))}], + "17": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "17"))}], + } + + def run_sql(query): + return server.succeed(f"""sudo -u postgres psql -t -A -F\",\" -c \"{query}\" """).strip() + + def check_upgrade_path(pg_version): + with subtest("Check ${pname} upgrade path"): + firstVersion = versions[pg_version][0] + server.succeed("sudo -u postgres psql -c 'DROP EXTENSION IF EXISTS ${pname};'") + run_sql(f"""CREATE EXTENSION ${pname} WITH VERSION '{firstVersion}' CASCADE;""") + installed_version = run_sql(r"""SELECT extversion FROM pg_extension WHERE extname = '${pname}';""") + assert installed_version == firstVersion, f"Expected ${pname} version {firstVersion}, but found {installed_version}" + for version in versions[pg_version][1:]: + run_sql(f"""ALTER EXTENSION ${pname} UPDATE TO '{version}';""") + installed_version = run_sql(r"""SELECT extversion FROM pg_extension WHERE extname = '${pname}';""") + assert installed_version == version, f"Expected ${pname} version {version}, but found {installed_version}" + + start_all() + + server.wait_for_unit("multi-user.target") + server.wait_for_unit("postgresql.service") + + check_upgrade_path("15") + + with subtest("Check ${pname} latest extension version"): + server.succeed("sudo -u postgres psql -c 'DROP EXTENSION ${pname};'") + server.succeed("sudo -u postgres psql -c 'CREATE EXTENSION ${pname} CASCADE;'") + installed_extensions=run_sql(r"""SELECT extname, extversion FROM pg_extension;""") + latestVersion = versions["15"][-1] + assert f"${pname},{latestVersion}" in installed_extensions + + with subtest("switch to postgresql 17"): + server.succeed( + "${pg17-configuration}/bin/switch-to-configuration test >&2" + ) + + check_upgrade_path("17") + ''; +} diff --git a/nix/ext/tests/postgis.nix 
b/nix/ext/tests/postgis.nix new file mode 100644 index 000000000..ab6a4b3f8 --- /dev/null +++ b/nix/ext/tests/postgis.nix @@ -0,0 +1,156 @@ +{ self, pkgs }: +let + pname = "postgis"; + inherit (pkgs) lib; + installedExtension = + postgresMajorVersion: self.packages.${pkgs.system}."psql_${postgresMajorVersion}/exts/${pname}-all"; + versions = postgresqlMajorVersion: (installedExtension postgresqlMajorVersion).versions; + postgresqlWithExtension = + postgresql: + let + majorVersion = lib.versions.major postgresql.version; + pkg = pkgs.buildEnv { + name = "postgresql-${majorVersion}-${pname}"; + paths = [ + postgresql + postgresql.lib + (installedExtension majorVersion) + ]; + passthru = { + inherit (postgresql) version psqlSchema; + lib = pkg; + withPackages = _: pkg; + }; + nativeBuildInputs = [ pkgs.makeWrapper ]; + pathsToLink = [ + "/" + "/bin" + "/lib" + ]; + postBuild = '' + wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_ctl --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_upgrade --set NIX_PGLIBDIR $out/lib + ''; + }; + in + pkg; +in +self.inputs.nixpkgs.lib.nixos.runTest { + name = pname; + hostPkgs = pkgs; + nodes.server = + { config, ... 
}: + { + virtualisation = { + forwardPorts = [ + { + from = "host"; + host.port = 13022; + guest.port = 22; + } + ]; + }; + services.openssh = { + enable = true; + }; + + services.postgresql = { + enable = true; + package = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; + }; + + specialisation.postgresql17.configuration = { + services.postgresql = { + package = lib.mkForce (postgresqlWithExtension self.packages.${pkgs.system}.postgresql_17); + }; + + systemd.services.postgresql-migrate = { + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + User = "postgres"; + Group = "postgres"; + StateDirectory = "postgresql"; + WorkingDirectory = "${builtins.dirOf config.services.postgresql.dataDir}"; + }; + script = + let + oldPostgresql = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; + newPostgresql = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_17; + oldDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${oldPostgresql.psqlSchema}"; + newDataDir = "${builtins.dirOf config.services.postgresql.dataDir}/${newPostgresql.psqlSchema}"; + in + '' + if [[ ! -d ${newDataDir} ]]; then + install -d -m 0700 -o postgres -g postgres "${newDataDir}" + ${newPostgresql}/bin/initdb -D "${newDataDir}" + ${newPostgresql}/bin/pg_upgrade --old-datadir "${oldDataDir}" --new-datadir "${newDataDir}" \ + --old-bindir "${oldPostgresql}/bin" --new-bindir "${newPostgresql}/bin" + else + echo "${newDataDir} already exists" + fi + ''; + }; + + systemd.services.postgresql = { + after = [ "postgresql-migrate.service" ]; + requires = [ "postgresql-migrate.service" ]; + }; + }; + }; + testScript = + { nodes, ... 
}: + let + pg17-configuration = "${nodes.server.system.build.toplevel}/specialisation/postgresql17"; + in + '' + versions = { + "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "15"))}], + "17": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') (versions "17"))}], + } + + def run_sql(query): + return server.succeed(f"""sudo -u postgres psql -t -A -F\",\" -c \"{query}\" """).strip() + + def check_upgrade_path(pg_version): + with subtest("Check ${pname} upgrade path"): + firstVersion = versions[pg_version][0] + server.succeed("sudo -u postgres psql -c 'DROP EXTENSION IF EXISTS ${pname};'") + run_sql(f"""CREATE EXTENSION ${pname} WITH VERSION '{firstVersion}' CASCADE;""") + installed_version = run_sql(r"""SELECT extversion FROM pg_extension WHERE extname = '${pname}';""") + assert installed_version == firstVersion, f"Expected ${pname} version {firstVersion}, but found {installed_version}" + for version in versions[pg_version][1:]: + run_sql(f"""ALTER EXTENSION ${pname} UPDATE TO '{version}';""") + installed_version = run_sql(r"""SELECT extversion FROM pg_extension WHERE extname = '${pname}';""") + assert installed_version == version, f"Expected ${pname} version {version}, but found {installed_version}" + + start_all() + + server.wait_for_unit("multi-user.target") + server.wait_for_unit("postgresql.service") + + check_upgrade_path("15") + + with subtest("Check ${pname} latest extension version"): + server.succeed("sudo -u postgres psql -c 'DROP EXTENSION ${pname};'") + server.succeed("sudo -u postgres psql -c 'CREATE EXTENSION ${pname} CASCADE;'") + installed_extensions=run_sql(r"""SELECT extname, extversion FROM pg_extension where extname = '${pname}';""") + latestVersion = versions["15"][-1] + majMinVersion = ".".join(latestVersion.split('.')[:1]) + assert f"${pname},{majMinVersion}" in installed_extensions, f"Expected ${pname} version {latestVersion}, but found {installed_extensions}" + + with subtest("switch to postgresql 17"): + server.succeed( + 
"${pg17-configuration}/bin/switch-to-configuration test >&2" + ) + + with subtest("Check ${pname} latest extension version after upgrade"): + installed_extensions=run_sql(r"""SELECT extname, extversion FROM pg_extension;""") + latestVersion = versions["17"][-1] + majMinVersion = ".".join(latestVersion.split('.')[:1]) + assert f"${pname},{majMinVersion}" in installed_extensions + + check_upgrade_path("17") + ''; +} diff --git a/nix/ext/tests/timescaledb.nix b/nix/ext/tests/timescaledb.nix new file mode 100644 index 000000000..597fe9e0d --- /dev/null +++ b/nix/ext/tests/timescaledb.nix @@ -0,0 +1,78 @@ +{ self, pkgs }: +let + pname = "timescaledb"; + inherit (pkgs) lib; + installedExtension = + postgresMajorVersion: self.packages.${pkgs.system}."psql_${postgresMajorVersion}/exts/${pname}-all"; + versions = (installedExtension "15").versions; + postgresqlWithExtension = + postgresql: + let + majorVersion = lib.versions.major postgresql.version; + pkg = pkgs.buildEnv { + name = "postgresql-${majorVersion}-${pname}"; + paths = [ + postgresql + postgresql.lib + (installedExtension majorVersion) + ]; + passthru = { + inherit (postgresql) version psqlSchema; + lib = pkg; + withPackages = _: pkg; + }; + nativeBuildInputs = [ pkgs.makeWrapper ]; + pathsToLink = [ + "/" + "/bin" + "/lib" + ]; + postBuild = '' + wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_ctl --set NIX_PGLIBDIR $out/lib + wrapProgram $out/bin/pg_upgrade --set NIX_PGLIBDIR $out/lib + ''; + }; + in + pkg; + psql_15 = postgresqlWithExtension self.packages.${pkgs.system}.postgresql_15; +in +self.inputs.nixpkgs.lib.nixos.runTest { + name = "timescaledb"; + hostPkgs = pkgs; + nodes.server = + { ... }: + { + services.postgresql = { + enable = true; + package = (postgresqlWithExtension psql_15); + settings = { + shared_preload_libraries = "timescaledb"; + }; + }; + }; + testScript = + { ... 
}: + '' + ${builtins.readFile ./lib.py} + + start_all() + + server.wait_for_unit("multi-user.target") + server.wait_for_unit("postgresql.service") + + versions = { + "15": [${lib.concatStringsSep ", " (map (s: ''"${s}"'') versions)}], + } + extension_name = "${pname}" + support_upgrade = True + + test = PostgresExtensionTest(server, extension_name, versions, support_upgrade) + + with subtest("Check upgrade path with postgresql 15"): + test.check_upgrade_path("15") + + with subtest("Test switch_${pname}_version"): + test.check_switch_extension_with_background_worker(Path("${psql_15}/lib/${pname}.so"), "15") + ''; +} diff --git a/nix/ext/timescaledb-2.9.1.nix b/nix/ext/timescaledb-2.9.1.nix deleted file mode 100644 index 0df743671..000000000 --- a/nix/ext/timescaledb-2.9.1.nix +++ /dev/null @@ -1,65 +0,0 @@ -{ - lib, - stdenv, - fetchFromGitHub, - cmake, - postgresql, - openssl, - libkrb5, -}: - -stdenv.mkDerivation rec { - pname = "timescaledb-apache"; - version = "2.9.1"; - - nativeBuildInputs = [ cmake ]; - buildInputs = [ - postgresql - openssl - libkrb5 - ]; - - src = fetchFromGitHub { - owner = "timescale"; - repo = "timescaledb"; - rev = version; - hash = "sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8="; - }; - - cmakeFlags = [ - "-DSEND_TELEMETRY_DEFAULT=OFF" - "-DREGRESS_CHECKS=OFF" - "-DTAP_CHECKS=OFF" - "-DAPACHE_ONLY=1" - ] ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; - - # Fix the install phase which tries to install into the pgsql extension dir, - # and cannot be manually overridden. This is rather fragile but works OK. 
- postPatch = '' - for x in CMakeLists.txt sql/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" - done - - for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" - done - ''; - - # timescaledb-2.9.1.so already exists in the lib directory - # we have no need for the timescaledb.so or control file - postInstall = '' - rm $out/lib/timescaledb.so - rm $out/share/postgresql/extension/timescaledb.control - ''; - - meta = with lib; { - description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; - homepage = "https://www.timescale.com/"; - changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; - platforms = postgresql.meta.platforms; - license = licenses.asl20; - broken = versionOlder postgresql.version "13"; - }; -} diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix index 6f5681546..a58e8e2b4 100644 --- a/nix/ext/timescaledb.nix +++ b/nix/ext/timescaledb.nix @@ -6,53 +6,147 @@ postgresql, openssl, libkrb5, + buildEnv, + makeWrapper, + switch-ext-version, + coreutils, + writeShellApplication, }: -stdenv.mkDerivation rec { - pname = "timescaledb-apache"; - version = "2.16.1"; +let + pname = "timescaledb"; + build = + version: hash: _revision: + stdenv.mkDerivation rec { + inherit pname version; - nativeBuildInputs = [ cmake ]; - buildInputs = [ - postgresql - openssl - libkrb5 - ]; + nativeBuildInputs = [ cmake ]; + buildInputs = [ + postgresql + openssl + libkrb5 + ]; + + src = fetchFromGitHub { + owner = "timescale"; + repo = "timescaledb"; + rev = version; + inherit hash; + }; + + cmakeFlags = [ + "-DSEND_TELEMETRY_DEFAULT=OFF" + "-DREGRESS_CHECKS=OFF" + "-DTAP_CHECKS=OFF" + "-DAPACHE_ONLY=1" + ] ++ lib.optionals 
stdenv.isDarwin [ "-DLINTER=OFF" ]; + + postPatch = '' + for x in CMakeLists.txt sql/CMakeLists.txt; do + if [ -f "$x" ]; then + substituteInPlace "$x" \ + --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" + fi + done + + for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do + if [ -f "$x" ]; then + substituteInPlace "$x" \ + --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" + fi + done + ''; + + installPhase = '' + # Run cmake install first + cmake --install . --prefix=$out + + # TimescaleDB creates two libraries: + # 1. timescaledb.so (loader) -> rename to timescaledb--loader.so + # 2. timescaledb-.so (actual extension) -> keep as is + + # Rename the loader library to be version-specific + if [ -f $out/lib/timescaledb${postgresql.dlSuffix} ]; then + mv $out/lib/timescaledb${postgresql.dlSuffix} $out/lib/timescaledb-loader-${version}${postgresql.dlSuffix} + fi - src = fetchFromGitHub { - owner = "timescale"; - repo = "timescaledb"; - rev = version; - hash = "sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w="; + # The versioned library (timescaledb-VERSION.so) is already correctly named + + # Create versioned control file with default_version removed and module_pathname pointing to symlink + if [ -f $out/share/postgresql/extension/timescaledb.control ]; then + sed -e "/^default_version =/d" \ + -e "s|^module_pathname = .*|module_pathname = '\$libdir/timescaledb'|" \ + $out/share/postgresql/extension/timescaledb.control > $out/share/postgresql/extension/timescaledb--${version}.control + rm $out/share/postgresql/extension/timescaledb.control + fi + ''; + + meta = with lib; { + description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; + homepage = "https://www.timescale.com/"; + changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; + license = 
licenses.asl20; + inherit (postgresql.meta) platforms; + }; + }; + + allVersions = (builtins.fromJSON (builtins.readFile ./versions.json)).timescaledb; + supportedVersions = lib.filterAttrs ( + _: value: builtins.elem (lib.versions.major postgresql.version) value.postgresql + ) allVersions; + versions = lib.naturalSort (lib.attrNames supportedVersions); + latestVersion = lib.last versions; + numberOfVersions = builtins.length versions; + packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash (value.revision or name)) supportedVersions + ); + switch-timescaledb-loader = writeShellApplication { + name = "switch_timescaledb_loader"; + runtimeInputs = [ coreutils ]; + text = '' + EXT_LOADER_TO_USE="$EXT_WRAPPER_LIB/$EXT_NAME-loader-$VERSION${postgresql.dlSuffix}" + if [ -f "$EXT_LOADER_TO_USE" ]; then + ln -sfnv "$EXT_LOADER_TO_USE" "$EXT_WRAPPER_LIB/$EXT_NAME${postgresql.dlSuffix}" + fi + ''; }; +in +buildEnv { + name = pname; + paths = packages; + nativeBuildInputs = [ makeWrapper ]; + postBuild = '' + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + + # Create symlink from the latest versioned loader to timescaledb.so + ln -sfn ${pname}-loader-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} - cmakeFlags = [ - "-DSEND_TELEMETRY_DEFAULT=OFF" - "-DREGRESS_CHECKS=OFF" - "-DTAP_CHECKS=OFF" - "-DAPACHE_ONLY=1" - ] ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; - - # Fix the install phase which tries to install into the pgsql extension dir, - # and cannot be manually overridden. This is rather fragile but works OK. 
- postPatch = '' - for x in CMakeLists.txt sql/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" - done - - for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do - substituteInPlace "$x" \ - --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" - done + # The versioned extension libraries (timescaledb-VERSION.so) are already in place + + # checks - we should have loader files and versioned extension files + (set -x + test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" -gt 0 + ) + makeWrapper ${lib.getExe switch-ext-version} $out/bin/switch_timescaledb_version \ + --prefix EXT_WRAPPER : "$out" --prefix EXT_NAME : "${pname}" --prefix EXTRA_STEPS : ${lib.getExe switch-timescaledb-loader} ''; - meta = with lib; { - description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; - homepage = "https://www.timescale.com/"; - changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; - platforms = postgresql.meta.platforms; - license = licenses.asl20; - broken = versionOlder postgresql.version "13"; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; + + passthru = { + inherit versions numberOfVersions switch-ext-version; + pname = "${pname}-all"; + hasBackgroundWorker = true; + defaultSettings = { + shared_preload_libraries = [ "timescaledb" ]; + }; + version = + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/versions.json b/nix/ext/versions.json index e5c4e90b1..fe34cb814 100644 --- a/nix/ext/versions.json +++ b/nix/ext/versions.json @@ -1,12 +1,284 @@ { - "wrappers": { - "0.5.3": { + "http": { + "1.5.0": { + "postgresql": [ + "15" + ], + "hash": "sha256-+N/CXm4arRgvhglanfvO0FNOBUWV5RL8mn/9FpNvcjY=" + }, + "1.6.1": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-C8eqi0q1dnshUAZjIsZFwa5FTYc7vmATF3vv2CReWPM=" + } + }, + "hypopg": { + "1.3.1": { + "postgresql": [ + "15" + ], + "hash": "sha256-AIBXy+LxyHUo+1hd8gQTwaBdFiTEzKaCVc4cx5tZgME=" + }, + "1.4.1": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-88uKPSnITRZ2VkelI56jZ9GWazG/Rn39QlyHKJKSKMM=" + } + }, + "index_advisor": { + "0.2.0": { + "postgresql": [ + "15", + "17", + "orioledb-17" + ], + "hash": "sha256-G0eQk2bY5CNPMeokN/nb05g03CuiplRf902YXFVQFbs=" + } + }, + "pg_cron": { + "1.3.1": { + "postgresql": [ + "15" + ], + "rev": "v1.3.1", + "hash": "sha256-rXotNOtQNmA55ErNxGoNSKZ0pP1uxEVlDGITFHuqGG4=", + "patches": [ + "pg_cron-1.3.1-pg15.patch" + ] + }, + "1.4.2": { + "postgresql": [ + "15" + ], + "rev": "v1.4.2", + "hash": "sha256-P0Fd10Q1p+KrExb35G6otHpc6pD61WnMll45H2jkevM=" + }, + "1.5.2": { + "postgresql": [ + "15" + ], + "rev": "v1.5.2", + "hash": "sha256-+quVWbKJy6wXpL/zwTk5FF7sYwHA7I97WhWmPO/HSZ4=" + }, + "1.6.4": { + "postgresql": [ + "15", + "17", + "orioledb-17" + ], + "rev": "v1.6.4", + "hash": "sha256-t1DpFkPiSfdoGG2NgNT7g1lkvSooZoRoUrix6cBID40=" + } + }, + "pg_net": { + "0.1": { + "postgresql": [ + "15" + ], + "hash": "sha256-geXGMb9MxU2vVB3ZBkGEwNqHixPbqjywyUumk7kbdbQ=" + }, + "0.2": { + "postgresql": [ + "15" + ], + "hash": "sha256-ArJmZTh7rc6OgvA6RIndMqcDRJl91QPt6pgEeCuHA6M=" + }, + "0.6": { + "postgresql": [ + "15" + ], + "hash": "sha256-SpQbF/ZeAVa8zf0+N6uluHrjpmGy0NLd2/hvyiOyNsY=" + }, + "0.7": { + "postgresql": [ + "15" + ], + "hash": "sha256-FRaTZPCJQPYAFmsJg22hYJJ0+gH1tMdDQoCQgiqEnaA=" + }, + "0.7.1": { + "postgresql": [ + "15" + 
], + "hash": "sha256-VScRKzY/skQu9SWGx9iZvifH7pv7SRXcvLfybB+XX4Q=" + }, + "0.8.0": { + "postgresql": [ + "15" + ], + "hash": "sha256-ZPsRPWV1G3lMM2mT+H139Wvgoy8QnmeUbzEnGeDJmZA=" + }, + "0.10.0": { + "postgresql": [ + "15" + ], + "hash": "sha256-R9Mzw5gvV7b2R59LTOzuOc0AI99+3ncFNzijI4mySUg=" + }, + "0.11.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-XN441jXK1q+I/LZRNwvzbSsebXHgZ8iYsslZvcPFlAs=" + }, + "0.13.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-FRaTZPCJQPYAFmsJg22hYJJ0+gH1tMdDQoCQgiqEnaA=" + }, + "0.14.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-c1pxhTyrE5j6dY+M5eKAboQNofIORS+Dccz+7HKEKQI=" + }, + "0.19.5": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-Cpi2iASi1QJoED0Qs1dANqg/BNZTsz5S+pw8iYyW03Y=" + } + }, + "pgsodium": { + "3.0.4": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-GbUUkSbQe05x7JssSyCdMrP6Uk9ix0JmO+JB1MsFMSg=" + }, + "3.1.5": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-Rv7y0gPEDHeZ+KLD+M/pUQI8Ye5GdaV144Xq05z29Sk=" + }, + "3.1.6": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-D07r/yF50JNihvG/0X7R+1bXnlK1z0dt+/Xbic6W1Hs=" + }, + "3.1.7": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-0QOh21kKtoM1L38pvkVumr4dyMdINaaMLI6z1RE8540=" + }, + "3.1.8": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-j5F1PPdwfQRbV8XJ8Mloi8FvZF0MTl4eyIJcBYQy1E4=" + } + }, + "postgis": { + "3.3.2": { + "postgresql": [ + "15" + ], + "hash": "sha256-miohnaAFoXMKOdGVmhx87GGbHvsAm2W+gP/CW60pkGg=" + }, + "3.3.7": { + "postgresql": [ + "17" + ], + "hash": "sha256-UHJKDd5JrcJT5Z4CTYsY/va+ToU0GUPG1eHhuXTkP84=" + } + }, + "rum": { + "1.3": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-VsfpxQqRBu9bIAP+TfMRXd+B3hSjuhU2NsutocNiCt8=", + "revision": "1.3.14" + } + }, + "timescaledb": { + "2.9.1": { + "postgresql": [ + "15" + ], + "hash": "sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8=" + }, + "2.16.1": { + "postgresql": [ + "15" + ], + "hash": 
"sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w=" + } + }, + "vector": { + "0.4.0": { + "postgresql": [ + "15" + ], + "hash": "sha256-bOckX7zvHhgJDDhoAm+VZVIeVIf2hG/3oWZWuTtnZPo=" + }, + "0.5.1": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-ZNzq+dATZn9LUgeOczsaadr5hwdbt9y/+sAOPIdr77U=" + }, + "0.6.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-hXm+k0BZ9xZP1Tnek14jPoKCPQkA5ovscu9IX2mW7Kc=" + }, + "0.6.2": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-r+TpFJg6WrMn0L2B7RpmSRvw3XxpHzMRtpFWDCzLvgs=" + }, + "0.7.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-vFn7sNphOYyig6Jl1HILMaC2t9strFQBQ8ywL8Ibx1M=" + }, + "0.7.4": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-qwPaguQUdDHV8q6GDneLq5MuhVroPizpbqt7f08gKJI=" + }, + "0.8.0": { + "postgresql": [ + "15", + "17" + ], + "hash": "sha256-JsZV+I4eRMypXTjGmjCtMBXDVpqTIPHQa28ogXncE/Q=" + } + }, + "wrappers": { + "0.5.4": { "postgresql": [ "15", "17", "orioledb-17" ], - "hash": "sha256-iaJriPEa0iVLpmnuUk9R3HS545Jhz7aH1clYvHEuEvs=", + "hash": "sha256-W1RokXH4Vfj2FIuEzGEP5SzzWsv2Pbzfa816nXKnSoc=", "pgrx": "0.14.3", "rust": "1.87.0" } diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix index 9b36bec9c..606eca7e9 100644 --- a/nix/ext/wrappers/default.nix +++ b/nix/ext/wrappers/default.nix @@ -78,6 +78,11 @@ let "clickhouse-rs-1.1.0-alpha.1" = "sha256-nKiGzdsAgJej8NgyVOqHaD1sZLrNF1RPfEhu2pRwZ6o="; "iceberg-catalog-s3tables-0.5.1" = "sha256-1JkB2JExukABlbW1lZPolNQCYb9URi8xNYY3APmiGq0="; } + else if builtins.compareVersions "0.5.4" version == 0 then + { + "clickhouse-rs-1.1.0-alpha.1" = "sha256-nKiGzdsAgJej8NgyVOqHaD1sZLrNF1RPfEhu2pRwZ6o="; + "iceberg-catalog-s3tables-0.5.1" = "sha256-1JkB2JExukABlbW1lZPolNQCYb9URi8xNYY3APmiGq0="; + } else { "clickhouse-rs-1.1.0-alpha.1" = "sha256-nKiGzdsAgJej8NgyVOqHaD1sZLrNF1RPfEhu2pRwZ6o="; @@ -157,6 +162,9 @@ let } ); previouslyPackagedVersions = [ + "0.5.3" + "0.5.2" + "0.5.1" "0.5.0" "0.4.6" "0.4.5" diff 
--git a/nix/fmt.nix b/nix/fmt.nix index 760cb5930..562c3b3c5 100644 --- a/nix/fmt.nix +++ b/nix/fmt.nix @@ -4,9 +4,13 @@ perSystem = { pkgs, ... }: { - treefmt.programs.deadnix.enable = true; - - treefmt.programs.nixfmt.enable = true; - treefmt.programs.nixfmt.package = pkgs.nixfmt-rfc-style; + treefmt.programs = { + deadnix.enable = true; + nixfmt = { + enable = true; + package = pkgs.nixfmt-rfc-style; + }; + ruff-format.enable = true; + }; }; } diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index f6eda4243..a3fd52034 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -11,6 +11,7 @@ postgresql_17 postgresql_orioledb-17 supabase-groonga + switch-ext-version ; xmrig = throw "The xmrig package has been explicitly disabled in this flake."; diff --git a/nix/packages/default.nix b/nix/packages/default.nix index cd6d6a1d0..f297c8359 100644 --- a/nix/packages/default.nix +++ b/nix/packages/default.nix @@ -1,4 +1,4 @@ -{ self, ... }: +{ self, inputs, ... }: { imports = [ ./postgres.nix ]; perSystem = @@ -35,8 +35,11 @@ dbmate-tool = pkgs.callPackage ./dbmate-tool.nix { inherit (self.supabase) defaults; }; docs = pkgs.callPackage ./docs.nix { }; supabase-groonga = pkgs.callPackage ./groonga { }; + http-mock-server = pkgs.callPackage ./http-mock-server.nix { }; local-infra-bootstrap = pkgs.callPackage ./local-infra-bootstrap.nix { }; migrate-tool = pkgs.callPackage ./migrate-tool.nix { psql_15 = self'.packages."psql_15/bin"; }; + overlayfs-on-package = pkgs.callPackage ./overlayfs-on-package.nix { }; + packer = pkgs.callPackage ./packer.nix { inherit inputs; }; pg-restore = pkgs.callPackage ./pg-restore.nix { psql_15 = self'.packages."psql_15/bin"; }; pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; pg_regress = makePgRegress activeVersion; @@ -56,6 +59,9 @@ inherit pkgs; name = "start-postgres-server"; }; + switch-ext-version = pkgs.callPackage ./switch-ext-version.nix { + inherit (self'.packages) overlayfs-on-package; + }; 
sync-exts-versions = pkgs.callPackage ./sync-exts-versions.nix { inherit (inputs') nix-editor; }; trigger-nix-build = pkgs.callPackage ./trigger-nix-build.nix { }; update-readme = pkgs.callPackage ./update-readme.nix { }; diff --git a/nix/packages/http-mock-server.nix b/nix/packages/http-mock-server.nix new file mode 100644 index 000000000..67a4af520 --- /dev/null +++ b/nix/packages/http-mock-server.nix @@ -0,0 +1,35 @@ +{ + pkgs, + lib, + stdenv, +}: + +stdenv.mkDerivation { + pname = "http-mock-server"; + version = "1.0.0"; + + src = ../tests/http-mock-server.py; + + nativeBuildInputs = with pkgs; [ + python3 + makeWrapper + ]; + + dontUnpack = true; + + installPhase = '' + mkdir -p $out/bin + cp $src $out/bin/http-mock-server.py + chmod +x $out/bin/http-mock-server.py + + # Create a wrapper script + makeWrapper ${pkgs.python3}/bin/python3 $out/bin/http-mock-server \ + --add-flags "$out/bin/http-mock-server.py" + ''; + + meta = with lib; { + description = "Simple HTTP mock server for testing"; + license = licenses.mit; + platforms = platforms.all; + }; +} diff --git a/nix/packages/overlayfs-on-package.nix b/nix/packages/overlayfs-on-package.nix new file mode 100644 index 000000000..2883c7a4e --- /dev/null +++ b/nix/packages/overlayfs-on-package.nix @@ -0,0 +1,49 @@ +{ writeShellApplication, coreutils }: +writeShellApplication { + name = "overlayfs-on-package"; + runtimeInputs = [ coreutils ]; + text = '' + # This script enable overlayfs on a specific nix store path + set -euo pipefail + + if [ $# -ne 1 ]; then + echo "Usage: $0 " + exit 1 + fi + + PACKAGE_PATH="$1" + PACKAGE_NAME=$(basename "$1"|cut -c 34-) + + # Nixos compatibility: use systemd mount unit + #shellcheck disable=SC1091 + source /etc/os-release || true + if [[ "$ID" == "nixos" ]]; then + # This script is used in NixOS test only for the moment + SYSTEMD_DIR="/run/systemd/system" + else + SYSTEMD_DIR="/etc/systemd/system" + fi + + # Create required directories for overlay + echo "$PACKAGE_NAME" + 
mkdir -p "/var/lib/overlay/$PACKAGE_NAME/"{upper,work} + + PACKAGE_MOUNT_PATH=$(systemd-escape -p --suffix=mount "$PACKAGE_PATH") + + cat > "$SYSTEMD_DIR/$PACKAGE_MOUNT_PATH" <" + echo "Example: $0 0.10.0" + echo "" + echo "Optional environment variables:" + echo " NIX_PROFILE - Path to nix profile (default: /var/lib/postgresql/.nix-profile)" + echo " LIB_DIR - Override library directory" + echo " EXTENSION_DIR - Override extension directory" + exit 1 + fi + + VERSION="$1" + echo "$VERSION" + + # Enable overlay on the wrapper package to be able to switch version + ${lib.getExe overlayfs-on-package} "$EXT_WRAPPER" + + # Check if version exists + EXT_WRAPPER_LIB="$EXT_WRAPPER/lib" + EXT_LIB_TO_USE="$EXT_WRAPPER_LIB/$EXT_NAME-$VERSION${postgresql.dlSuffix}" + if [ ! -f "$EXT_LIB_TO_USE" ]; then + echo "Error: Version $VERSION not found in $EXT_WRAPPER_LIB" + echo "Available versions:" + #shellcheck disable=SC2012 + ls "$EXT_WRAPPER_LIB/$EXT_NAME"-*${postgresql.dlSuffix} 2>/dev/null | sed "s/.*$EXT_NAME-/ /" | sed 's/${postgresql.dlSuffix}$//' || echo " No versions found" + exit 1 + fi + + # Update library symlink + ln -sfnv "$EXT_LIB_TO_USE" "$EXT_WRAPPER_LIB/$EXT_NAME${postgresql.dlSuffix}" + + # Handle extension specific steps + if [ -x "''${EXTRA_STEPS:-}" ]; then + #shellcheck disable=SC1090 + source "''${EXTRA_STEPS}" + fi + + # Update control file + EXT_WRAPPER_SHARE="$EXT_WRAPPER/share/postgresql/extension" + echo "default_version = '$VERSION'" > "$EXT_WRAPPER_SHARE/$EXT_NAME.control" + cat "$EXT_WRAPPER_SHARE/$EXT_NAME--$VERSION.control" >> "$EXT_WRAPPER_SHARE/$EXT_NAME.control" + + echo "Successfully switched $EXT_NAME to version $VERSION" + ''; +} diff --git a/nix/tests/expected/http.out b/nix/tests/expected/http.out new file mode 100644 index 000000000..d83488006 --- /dev/null +++ b/nix/tests/expected/http.out @@ -0,0 +1,105 @@ +-- Test for http extension +-- Basic HTTP functionality tests +-- Test basic HTTP GET request +SELECT status FROM 
http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); + status +-------- + 200 +(1 row) + +-- Test HTTP GET with headers +SELECT status, content_type +FROM http(( + 'GET', + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', + ARRAY[http_header('User-Agent', 'pg_http_test')], + NULL, + NULL +)::http_request); + status | content_type +--------+--------------------------------- + 200 | application/json; charset=utf-8 +(1 row) + +-- Test HTTP POST request with JSON body +SELECT status FROM http_post( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', + '{"test": "data"}', + 'application/json' +); + status +-------- + 200 +(1 row) + +-- Test HTTP PUT request +SELECT status FROM http_put( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', + '{"update": "data"}', + 'application/json' +); + status +-------- + 200 +(1 row) + +-- Test HTTP DELETE request +SELECT status FROM http_delete('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); + status +-------- + 200 +(1 row) + +-- Test HTTP PATCH request +SELECT status FROM http_patch( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', + '{"patch": "data"}', + 'application/json' +); + status +-------- + 200 +(1 row) + +-- Test HTTP HEAD request +SELECT status FROM http_head('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); + status +-------- + 200 +(1 row) + +-- Test response headers parsing +WITH response AS ( + SELECT * FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || 
'/response-headers?Content-Type=text/plain') +) +SELECT + status, + content_type, + headers IS NOT NULL as has_headers +FROM response; + status | content_type | has_headers +--------+--------------+------------- + 200 | text/plain | t +(1 row) + +-- Test timeout handling (using a delay endpoint) +-- This should complete successfully with reasonable timeout +SELECT status FROM http(( + 'GET', + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', + ARRAY[]::http_header[], + 'application/json', + 2000 -- 2 second timeout +)::http_request); + status +-------- + 200 +(1 row) + +-- Test URL encoding +SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); + status +-------- + 200 +(1 row) + diff --git a/nix/tests/expected/roles.out b/nix/tests/expected/roles.out index 69446110c..9c5a47a71 100644 --- a/nix/tests/expected/roles.out +++ b/nix/tests/expected/roles.out @@ -46,11 +46,12 @@ order by rolname; service_role | f | f | f | t | f | f | -1 | t | supabase_admin | t | t | t | t | t | t | -1 | t | supabase_auth_admin | t | t | f | f | f | f | -1 | f | + supabase_etl_admin | f | t | f | t | f | t | -1 | f | supabase_functions_admin | t | t | f | f | f | f | -1 | f | supabase_read_only_user | f | t | f | t | f | f | -1 | t | supabase_replication_admin | f | t | f | t | f | t | -1 | f | supabase_storage_admin | t | t | f | f | f | f | -1 | f | -(29 rows) +(30 rows) select rolname, @@ -85,11 +86,12 @@ order by rolname; service_role | supabase_admin | {"search_path=\"$user\", public, auth, extensions",log_statement=none} supabase_auth_admin | {search_path=auth,idle_in_transaction_session_timeout=60000,log_statement=none} + supabase_etl_admin | supabase_functions_admin | supabase_read_only_user | {default_transaction_read_only=on} supabase_replication_admin | 
supabase_storage_admin | {search_path=storage,log_statement=none} -(29 rows) +(30 rows) -- Check all privileges of the roles on the schemas select schema_name, privilege_type, grantee, default_for diff --git a/nix/tests/expected/z_15_ext_interface.out b/nix/tests/expected/z_15_ext_interface.out index c652a95eb..d3d9f7c4f 100644 --- a/nix/tests/expected/z_15_ext_interface.out +++ b/nix/tests/expected/z_15_ext_interface.out @@ -1161,9 +1161,11 @@ order by pg_net | net | _urlencode_string | string character varying | text pg_net | net | check_worker_is_up | | void pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result - pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer, body jsonb | bigint pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | wait_until_running | | void + pg_net | net | wake | | void pg_net | net | worker_restart | | boolean pg_prewarm | public | autoprewarm_dump_now | | bigint pg_prewarm | public | autoprewarm_start_worker | | void @@ -5227,7 +5229,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(5057 rows) +(5059 rows) /* diff --git a/nix/tests/expected/z_15_roles.out b/nix/tests/expected/z_15_roles.out index 42c2314e8..796b298bb 100644 --- a/nix/tests/expected/z_15_roles.out +++ b/nix/tests/expected/z_15_roles.out @@ -29,7 +29,8 @@ order by postgres | pg_signal_backend | f postgres | pgtle_admin | f postgres | service_role | f + supabase_etl_admin | pg_read_all_data | f supabase_read_only_user | pg_read_all_data | f 
supabase_storage_admin | authenticator | f -(18 rows) +(19 rows) diff --git a/nix/tests/expected/z_17_ext_interface.out b/nix/tests/expected/z_17_ext_interface.out index d31894bc3..46792e48f 100644 --- a/nix/tests/expected/z_17_ext_interface.out +++ b/nix/tests/expected/z_17_ext_interface.out @@ -1146,9 +1146,11 @@ order by pg_net | net | _urlencode_string | string character varying | text pg_net | net | check_worker_is_up | | void pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result - pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer, body jsonb | bigint pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | wait_until_running | | void + pg_net | net | wake | | void pg_net | net | worker_restart | | boolean pg_prewarm | public | autoprewarm_dump_now | | bigint pg_prewarm | public | autoprewarm_start_worker | | void @@ -4871,7 +4873,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(4714 rows) +(4716 rows) /* diff --git a/nix/tests/expected/z_17_roles.out b/nix/tests/expected/z_17_roles.out index 40ce6007d..ecda2ffde 100644 --- a/nix/tests/expected/z_17_roles.out +++ b/nix/tests/expected/z_17_roles.out @@ -66,9 +66,10 @@ order by postgres | pg_signal_backend | t postgres | pgtle_admin | f postgres | service_role | t + supabase_etl_admin | pg_read_all_data | f supabase_read_only_user | pg_read_all_data | f supabase_storage_admin | authenticator | f -(20 rows) +(21 rows) -- Check version-specific privileges of the roles on the schemas select 
schema_name, privilege_type, grantee, default_for @@ -157,7 +158,8 @@ order by postgres | pg_signal_backend | t postgres | pgtle_admin | f postgres | service_role | t + supabase_etl_admin | pg_read_all_data | f supabase_read_only_user | pg_read_all_data | f supabase_storage_admin | authenticator | f -(19 rows) +(20 rows) diff --git a/nix/tests/expected/z_17_rum.out b/nix/tests/expected/z_17_rum.out new file mode 100644 index 000000000..1296befa7 --- /dev/null +++ b/nix/tests/expected/z_17_rum.out @@ -0,0 +1,41 @@ +/* +This extension is excluded from oriole-17 because it uses an unsupported index type +*/ +create schema v; +create table v.test_rum( + t text, + a tsvector +); +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); +select + t, + round(a <=> to_tsquery('english', 'beautiful | place')) as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + t | rank +---------------------------------+------ + it looks like a beautiful place | 8 + the situation is most beautiful | 16 + it is a beautiful | 16 +(3 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.test_rum diff --git a/nix/tests/expected/z_orioledb-17_ext_interface.out b/nix/tests/expected/z_orioledb-17_ext_interface.out index d31894bc3..46792e48f 100644 --- a/nix/tests/expected/z_orioledb-17_ext_interface.out +++ b/nix/tests/expected/z_orioledb-17_ext_interface.out @@ -1146,9 +1146,11 @@ order by pg_net | net | _urlencode_string | string character varying | text pg_net | net | check_worker_is_up | | void pg_net | net | http_collect_response | request_id bigint, async boolean | 
net.http_response_result - pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer, body jsonb | bigint pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint + pg_net | net | wait_until_running | | void + pg_net | net | wake | | void pg_net | net | worker_restart | | boolean pg_prewarm | public | autoprewarm_dump_now | | bigint pg_prewarm | public | autoprewarm_start_worker | | void @@ -4871,7 +4873,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(4714 rows) +(4716 rows) /* diff --git a/nix/tests/http-mock-server.py b/nix/tests/http-mock-server.py new file mode 100644 index 000000000..fedeb40ad --- /dev/null +++ b/nix/tests/http-mock-server.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 +""" +Simple HTTP mock server for testing pg_http extension offline. +Mimics basic endpoints similar to httpbingo/postman-echo services. 
+""" + +from http.server import HTTPServer, BaseHTTPRequestHandler +import json +import urllib.parse +import time + + +class MockHTTPHandler(BaseHTTPRequestHandler): + def _send_json_response(self, status_code=200, data=None): + """Send a JSON response""" + self.send_response(status_code) + self.send_header("Content-Type", "application/json; charset=utf-8") + self.end_headers() + response_data = data or {} + self.wfile.write(json.dumps(response_data).encode("utf-8")) + + def _send_text_response( + self, status_code=200, content="", content_type="text/plain" + ): + """Send a text response""" + self.send_response(status_code) + self.send_header("Content-Type", content_type) + self.end_headers() + self.wfile.write(content.encode("utf-8")) + + def _get_request_info(self): + """Get request information""" + parsed_path = urllib.parse.urlparse(self.path) + query_params = urllib.parse.parse_qs(parsed_path.query) + + # Read body if present + content_length = int(self.headers.get("Content-Length", 0)) + body = ( + self.rfile.read(content_length).decode("utf-8") + if content_length > 0 + else "" + ) + + return { + "method": self.command, + "url": self.path, + "path": parsed_path.path, + "query": query_params, + "headers": dict(self.headers), + "body": body, + } + + def do_GET(self): + """Handle GET requests""" + request_info = self._get_request_info() + path = request_info["path"] + + if path == "/get": + response = { + "args": request_info["query"], + "headers": request_info["headers"], + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + } + self._send_json_response(200, response) + + elif path == "/headers": + response = {"headers": request_info["headers"]} + self._send_json_response(200, response) + + elif path == "/response-headers": + # Check if Content-Type is specified in query params + query_params = request_info["query"] + if "Content-Type" in query_params: + content_type = query_params["Content-Type"][0] + self._send_text_response( + 200, 
"Response with custom content type", content_type + ) + else: + self._send_json_response(200, {"message": "response-headers endpoint"}) + + elif path.startswith("/delay/"): + # Extract delay time from path + try: + delay = int(path.split("/delay/")[1]) + time.sleep(min(delay, 5)) # Cap at 5 seconds + self._send_json_response(200, {"delay": delay}) + except (ValueError, IndexError): + self._send_json_response(400, {"error": "Invalid delay value"}) + + elif path == "/anything" or path.startswith("/anything"): + response = { + "method": "GET", + "args": request_info["query"], + "headers": request_info["headers"], + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + "data": "", + "json": None, + } + self._send_json_response(200, response) + + else: + self._send_json_response(404, {"error": "Not found"}) + + def do_POST(self): + """Handle POST requests""" + request_info = self._get_request_info() + path = request_info["path"] + + if path == "/post": + response = { + "args": request_info["query"], + "data": request_info["body"], + "headers": request_info["headers"], + "json": None, + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + } + + # Try to parse JSON if content-type is json + if "application/json" in request_info["headers"].get("Content-Type", ""): + try: + response["json"] = json.loads(request_info["body"]) + except json.JSONDecodeError: + pass + + self._send_json_response(200, response) + else: + self._send_json_response(404, {"error": "Not found"}) + + def do_PUT(self): + """Handle PUT requests""" + request_info = self._get_request_info() + path = request_info["path"] + + if path == "/put": + response = { + "args": request_info["query"], + "data": request_info["body"], + "headers": request_info["headers"], + "json": None, + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + } + + # Try to parse JSON if content-type is json + if "application/json" in 
request_info["headers"].get("Content-Type", ""): + try: + response["json"] = json.loads(request_info["body"]) + except json.JSONDecodeError: + pass + + self._send_json_response(200, response) + else: + self._send_json_response(404, {"error": "Not found"}) + + def do_DELETE(self): + """Handle DELETE requests""" + request_info = self._get_request_info() + path = request_info["path"] + + if path == "/delete": + response = { + "args": request_info["query"], + "headers": request_info["headers"], + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + } + self._send_json_response(200, response) + else: + self._send_json_response(404, {"error": "Not found"}) + + def do_PATCH(self): + """Handle PATCH requests""" + request_info = self._get_request_info() + path = request_info["path"] + + if path == "/patch": + response = { + "args": request_info["query"], + "data": request_info["body"], + "headers": request_info["headers"], + "json": None, + "url": f"http://{self.headers.get('Host', 'localhost:8080')}{self.path}", + } + + # Try to parse JSON if content-type is json + if "application/json" in request_info["headers"].get("Content-Type", ""): + try: + response["json"] = json.loads(request_info["body"]) + except json.JSONDecodeError: + pass + + self._send_json_response(200, response) + else: + self._send_json_response(404, {"error": "Not found"}) + + def do_HEAD(self): + """Handle HEAD requests""" + path = urllib.parse.urlparse(self.path).path + + if path == "/get": + self.send_response(200) + self.send_header("Content-Type", "application/json; charset=utf-8") + self.end_headers() + else: + self.send_response(404) + self.end_headers() + + def log_message(self, format, *args): + """Suppress default logging""" + pass + + +def find_free_port(start_port=8880, end_port=8899): + """Find a free port within the given range""" + import socket + + for port in range(start_port, end_port + 1): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + 
s.bind(("0.0.0.0", port)) + return port + except OSError: + continue + + raise RuntimeError(f"No free port found in range {start_port}-{end_port}") + + +def run_server(port=None): + """Run the mock HTTP server""" + if port is None: + port = find_free_port() + + try: + server = HTTPServer(("0.0.0.0", port), MockHTTPHandler) + print(f"Mock HTTP server running on port {port}") + + # Write port to a file that can be read by the test environment + import os + + port_file = os.environ.get("HTTP_MOCK_PORT_FILE", "/tmp/http-mock-port") + try: + with open(port_file, "w") as f: + f.write(str(port)) + except: + pass # Ignore if we can't write the port file + + server.serve_forever() + except OSError as e: + if port is not None: + # If specific port was requested but failed, try to find a free one + print(f"Port {port} not available, finding free port...") + run_server(None) + else: + raise e + + +if __name__ == "__main__": + import sys + + port = int(sys.argv[1]) if len(sys.argv) > 1 else None + run_server(port) diff --git a/nix/tests/sql/http.sql b/nix/tests/sql/http.sql new file mode 100644 index 000000000..df80feb52 --- /dev/null +++ b/nix/tests/sql/http.sql @@ -0,0 +1,65 @@ +-- Test for http extension +-- Basic HTTP functionality tests + +-- Test basic HTTP GET request +SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); + +-- Test HTTP GET with headers +SELECT status, content_type +FROM http(( + 'GET', + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', + ARRAY[http_header('User-Agent', 'pg_http_test')], + NULL, + NULL +)::http_request); + +-- Test HTTP POST request with JSON body +SELECT status FROM http_post( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', + '{"test": "data"}', + 'application/json' +); + +-- Test HTTP PUT request 
+SELECT status FROM http_put( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', + '{"update": "data"}', + 'application/json' +); + +-- Test HTTP DELETE request +SELECT status FROM http_delete('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); + +-- Test HTTP PATCH request +SELECT status FROM http_patch( + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', + '{"patch": "data"}', + 'application/json' +); + +-- Test HTTP HEAD request +SELECT status FROM http_head('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); + +-- Test response headers parsing +WITH response AS ( + SELECT * FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/response-headers?Content-Type=text/plain') +) +SELECT + status, + content_type, + headers IS NOT NULL as has_headers +FROM response; + +-- Test timeout handling (using a delay endpoint) +-- This should complete successfully with reasonable timeout +SELECT status FROM http(( + 'GET', + 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', + ARRAY[]::http_header[], + 'application/json', + 2000 -- 2 second timeout +)::http_request); + +-- Test URL encoding +SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); diff --git a/nix/tests/sql/z_17_rum.sql b/nix/tests/sql/z_17_rum.sql new file mode 100644 index 000000000..6ae945975 --- /dev/null +++ b/nix/tests/sql/z_17_rum.sql @@ -0,0 +1,40 @@ +/* +This extension is excluded from oriole-17 because it uses an unsupported index type +*/ +create schema v; + 
+create table v.test_rum( + t text, + a tsvector +); + +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); + +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); + +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); + +select + t, + round(a <=> to_tsquery('english', 'beautiful | place')) as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + + +drop schema v cascade; diff --git a/nix/tools/dbmate-tool.sh.in b/nix/tools/dbmate-tool.sh.in index 8fa266fce..934724c03 100644 --- a/nix/tools/dbmate-tool.sh.in +++ b/nix/tools/dbmate-tool.sh.in @@ -177,7 +177,21 @@ perform_dump() { while [ $attempt -le $max_attempts ]; do echo "Attempting pg_dump (attempt $attempt/$max_attempts)" - if "${PSQLBIN}/pg_dump" -h localhost -p "$PORTNO" -U "$PGSQL_SUPERUSER" -d postgres --schema-only --no-owner --no-privileges > "./db/schema.sql"; then + # Build the dump command + local dump_cmd="${PSQLBIN}/pg_dump -h localhost -p $PORTNO -U $PGSQL_SUPERUSER -d postgres --schema-only --no-owner --no-privileges" + + # Only use --restrict-key for standard PostgreSQL 15 and 17 versions + # OrioleDB doesn't support this flag yet + if [ "$PSQL_VERSION" = "15" ] || [ "$PSQL_VERSION" = "17" ]; then + # Use a fixed restrict key for reproducible test dumps + # This is safe in testing contexts but should not be used in production + dump_cmd="$dump_cmd --restrict-key=SupabaseTestDumpKey123" + echo "Using --restrict-key for reproducible dumps (PostgreSQL $PSQL_VERSION)" + else + echo "Skipping --restrict-key (version: $PSQL_VERSION)" + fi + + if $dump_cmd > "./db/schema.sql"; then return 0 fi @@ -257,7 +271,6 @@ EOSQL echo "CURRENT_SYSTEM: $CURRENT_SYSTEM" if [ -f "./db/schema.sql" ]; then - 
trim_schema cp "./db/schema.sql" "./migrations/schema-$PSQL_VERSION.sql" echo "Schema file moved to ./migrations/schema-$PSQL_VERSION.sql" echo "PSQLBIN is $PSQLBIN" diff --git a/nix/tools/postgresql_schema.sql b/nix/tools/postgresql_schema.sql index 4547ab239..7c2ae21a9 100644 --- a/nix/tools/postgresql_schema.sql +++ b/nix/tools/postgresql_schema.sql @@ -7,5 +7,6 @@ ALTER USER pgbouncer WITH PASSWORD 'postgres'; ALTER USER supabase_auth_admin WITH PASSWORD 'postgres'; ALTER USER supabase_storage_admin WITH PASSWORD 'postgres'; ALTER USER supabase_replication_admin WITH PASSWORD 'postgres'; +ALTER USER supabase_etl_admin WITH PASSWORD 'postgres'; ALTER ROLE supabase_read_only_user WITH PASSWORD 'postgres'; ALTER ROLE supabase_admin SET search_path TO "$user",public,auth,extensions; diff --git a/nix/tools/update_readme.nu b/nix/tools/update_readme.nu index b19c266a3..0b233ebce 100755 --- a/nix/tools/update_readme.nu +++ b/nix/tools/update_readme.nu @@ -57,6 +57,55 @@ def get_src_url [pkg_attr] { } } +def get_latest_version_for_pg [ext_name, pg_info] { + # Load versions.json to get the latest version for multi-version extensions + let versions_file = ([$env.PWD "nix/ext/versions.json"] | path join) + + if not ($versions_file | path exists) { + return null + } + + let versions_data = (open $versions_file) + + # Extract the base extension name (remove -all suffix) + let base_name = if ($ext_name | str ends-with "-all") { + $ext_name | str replace "-all" "" + } else { + $ext_name + } + + # Check if this extension exists in versions.json + if not ($versions_data | columns | any {|col| $col == $base_name}) { + return null + } + + let ext_versions = ($versions_data | get $base_name) + let pg_major = $pg_info.version + + # For orioledb, use "17" as the PostgreSQL version for lookups + let pg_type = if $pg_info.is_orioledb { + "17" # Use regular PG 17 for orioledb multi-version lookups + } else { + $pg_major + } + + # Find versions that support this PostgreSQL version + let 
supported_versions = ($ext_versions + | transpose version info + | where {|row| + $row.info.postgresql | any {|pg| $pg == $pg_type} + } + | get version + ) + + if ($supported_versions | is-empty) { + return null + } + + # Return the latest version (last in the sorted list) + $supported_versions | sort | last +} + def get_extension_info [flake_json, pg_info] { let major_version = ($pg_info.version | split row "." | first) let version_prefix = if $pg_info.is_orioledb { @@ -76,19 +125,45 @@ def get_extension_info [flake_json, pg_info] { let all_exts = ($ext_names | each {|ext_name| let ext_info = ($sys_packages | get $ext_name) - let name = ($ext_name | str replace $version_prefix "") - let version = if $name == "orioledb" { + let raw_name = ($ext_name | str replace $version_prefix "") + + # Remove -all suffix from the display name + let display_name = if ($raw_name | str ends-with "-all") { + $raw_name | str replace "-all" "" + } else { + $raw_name + } + + # Check if this is a multi-version extension + let version = if ($raw_name | str ends-with "-all") { + let latest_ver = (get_latest_version_for_pg $raw_name $pg_info) + if $latest_ver != null { + $latest_ver + } else if $raw_name == "orioledb" { + $ext_info.name # Use name directly for orioledb + } else if ($ext_info.name | str contains "-") { + $ext_info.name | split row "-" | last + } else { + $ext_info.name + } + } else if $raw_name == "orioledb" { $ext_info.name # Use name directly for orioledb } else if ($ext_info.name | str contains "-") { $ext_info.name | split row "-" | last } else { $ext_info.name } + let src_url = (get_src_url $ext_name) + let description = if ($ext_info | columns | any {|col| $col == "description"}) { + $ext_info.description + } else { + "" # Default to empty string if description field doesn't exist + } { - name: $name, + name: $display_name, # Use the cleaned name without -all suffix version: $version, - description: $ext_info.description, + description: $description, url: $src_url } 
}) diff --git a/qemu-arm64-nix.pkr.hcl b/qemu-arm64-nix.pkr.hcl index 497871774..17cca3a6e 100644 --- a/qemu-arm64-nix.pkr.hcl +++ b/qemu-arm64-nix.pkr.hcl @@ -74,11 +74,8 @@ source "qemu" "cloudimg" { format = "qcow2" headless = true http_directory = "http" - # TODO (darora): switch to minimal images - # iso_checksum = "file:https://cloud-images.ubuntu.com/minimal/releases/noble/release/SHA256SUMS" - # iso_url = "https://cloud-images.ubuntu.com/minimal/releases/noble/release/ubuntu-24.04-minimal-cloudimg-arm64.img" - iso_checksum = "file:https://cloud-images.ubuntu.com/noble/current/SHA256SUMS" - iso_url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-arm64.img" + iso_checksum = "file:https://cloud-images.ubuntu.com/minimal/releases/noble/release/SHA256SUMS" + iso_url = "https://cloud-images.ubuntu.com/minimal/releases/noble/release/ubuntu-24.04-minimal-cloudimg-arm64.img" memory = 40000 qemu_img_args { convert = ["-o", "compression_type=zstd"] diff --git a/qemu_artifact.md b/qemu_artifact.md index 847a1c8c2..bc71d7009 100644 --- a/qemu_artifact.md +++ b/qemu_artifact.md @@ -9,22 +9,28 @@ Given the size of the image, the first VM using it on a node might take a while The current AMI process involves a few steps: 1. nix package is build and published using GHA (`.github/workflows/nix-build.yml`) - - this builds Postgres along with the PG extensions we use. + +- this builds Postgres along with the PG extensions we use. + 2. "stage1" build (`amazon-arm64-nix.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) - - uses an upstream Ubuntu image to initialize the AMI - - installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...) 
+ +- uses an upstream Ubuntu image to initialize the AMI +- installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...) + 3. "stage2" build (`stage2-nix-psql.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`) - - uses the image published from (2) - - installs and configures the software that is build and published using nix in (1) - - cleans up build dependencies etc + +- uses the image published from (2) +- installs and configures the software that is built and published using nix in (1) +- cleans up build dependencies etc The QEMU artifact process collapses (2) and (3): a. nix package is build and published using GHA (`.github/workflows/nix-build.yml`) b. packer build (`qemu-arm64-nix.pkr.hcl`) - - uses an upstream Ubuntu live image as the base - - performs the work that was performed as part of the "stage1" and "stage2" builds - - this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh` + +- uses an upstream Ubuntu live image as the base +- performs the work that was performed as part of the "stage1" and "stage2" builds +- this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh` While the AMI build uses the EBS Surrogate Packer builder to create a minimal boot environment that it then adds things to, the QEMU build merely adds things to the Ubuntu Cloud Image. As such, it's likely possible to make something more minimal with a bit more work, but this was deemed unnecessary for now. Collapsing Stage1 and Stage2 was done in the interest of iteration speed, as executing them together is much faster than saving an artifact off stage1, booting another VM off it, and then executing stage2. @@ -36,6 +42,14 @@ Following `make init alpine-image`, the generated VM image should be bundled as For faster iteration, it's more convenient to build the image on an ubuntu bare-metal node that's part of the EKS cluster you're using. 
Build the image in the `k8s.io` namespace in order for it to be available for immediate use on that node. +list of packages installed on the EKS to build images: + +``` + apt-get install -y git emacs ripgrep vim-tiny byobu build-essential unzip + curl -L "https://releases.hashicorp.com/packer/1.14.1/packer_1.14.1_linux_$(dpkg --print-architecture).zip" -o packer.zip && unzip packer.zip && rm -f packer.zip && sudo mv packer /usr/local/bin/ + apt-get install -y qemu-system qemu-system-arm qemu-utils qemu-efi-aarch64 libvirt-clients libvirt-daemon libqcow-utils software-properties-common git make libnbd-bin nbdkit fuse2fs cloud-image-utils +``` + ### Dependencies note Installing `docker.io` on an EKS node might interfere with the k8s setup of the node. You can instead install `nerdctl` and `buildkit`: diff --git a/testinfra/test_ami_nix.py b/testinfra/test_ami_nix.py index d7b3ecca7..42442de18 100644 --- a/testinfra/test_ami_nix.py +++ b/testinfra/test_ami_nix.py @@ -9,12 +9,11 @@ from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger from ec2instanceconnectcli.EC2InstanceConnectKey import EC2InstanceConnectKey from time import sleep -import subprocess import paramiko -# if GITHUB_RUN_ID is not set, use a default value that includes the user and hostname +# if EXECUTION_ID is not set, use a default value that includes the user and hostname RUN_ID = os.environ.get( - "GITHUB_RUN_ID", + "EXECUTION_ID", "unknown-ci-run-" + os.environ.get("USER", "unknown-user") + "@" @@ -32,6 +31,7 @@ ALTER USER supabase_auth_admin WITH PASSWORD 'postgres'; ALTER USER supabase_storage_admin WITH PASSWORD 'postgres'; ALTER USER supabase_replication_admin WITH PASSWORD 'postgres'; +ALTER USER supabase_etl_admin WITH PASSWORD 'postgres'; ALTER ROLE supabase_read_only_user WITH PASSWORD 'postgres'; ALTER ROLE supabase_admin SET search_path TO "$user",public,auth,extensions; """ @@ -178,24 +178,27 @@ def get_ssh_connection(instance_ip, 
ssh_identity_file, max_retries=10): # Create SSH client ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - + # Connect with our working parameters ssh.connect( hostname=instance_ip, - username='ubuntu', + username="ubuntu", key_filename=ssh_identity_file, timeout=10, - banner_timeout=10 + banner_timeout=10, ) - + # Test the connection stdin, stdout, stderr = ssh.exec_command('echo "SSH test"') - if stdout.channel.recv_exit_status() == 0 and "SSH test" in stdout.read().decode(): + if ( + stdout.channel.recv_exit_status() == 0 + and "SSH test" in stdout.read().decode() + ): logger.info("SSH connection established successfully") return ssh else: raise Exception("SSH test command failed") - + except Exception as e: if attempt == max_retries - 1: raise @@ -205,14 +208,14 @@ def get_ssh_connection(instance_ip, ssh_identity_file, max_retries=10): sleep(5) -def run_ssh_command(ssh, command): +def run_ssh_command(ssh, command, timeout=None): """Run a command over the established SSH connection.""" - stdin, stdout, stderr = ssh.exec_command(command) + stdin, stdout, stderr = ssh.exec_command(command, timeout=timeout) exit_code = stdout.channel.recv_exit_status() return { - 'succeeded': exit_code == 0, - 'stdout': stdout.read().decode(), - 'stderr': stderr.read().decode() + "succeeded": exit_code == 0, + "stdout": stdout.read().decode(), + "stderr": stderr.read().decode(), } @@ -232,6 +235,10 @@ def host(): def gzip_then_base64_encode(s: str) -> str: return base64.b64encode(gzip.compress(s.encode())).decode() + # Create temporary SSH key pair + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + instance = list( ec2.create_instances( BlockDeviceMappings=[ @@ -278,6 +285,10 @@ def gzip_then_base64_encode(s: str) -> str: - 'bash init.sh "staging"' - 'touch /var/lib/init-complete' - 'rm -rf /tmp/*' +users: + - name: ubuntu + ssh_authorized_keys: + - {temp_key.get_pub_key()} """, 
TagSpecifications=[ { @@ -296,16 +307,6 @@ def gzip_then_base64_encode(s: str) -> str: # Increase wait time before starting health checks sleep(30) # Wait for 30 seconds to allow services to start - ec2logger = EC2InstanceConnectLogger(debug=False) - temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) - ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") - response = ec2ic.send_ssh_public_key( - InstanceId=instance.id, - InstanceOSUser="ubuntu", - SSHPublicKey=temp_key.get_pub_key(), - ) - assert response["Success"] - # Wait for instance to have public IP while not instance.public_ip_address: logger.warning("waiting for ip to be available") @@ -321,10 +322,10 @@ def gzip_then_base64_encode(s: str) -> str: # Check PostgreSQL data directory logger.info("Checking PostgreSQL data directory...") result = run_ssh_command(ssh, "ls -la /var/lib/postgresql") - if result['succeeded']: - logger.info("PostgreSQL data directory contents:\n" + result['stdout']) + if result["succeeded"]: + logger.info("PostgreSQL data directory contents:\n" + result["stdout"]) else: - logger.warning("Failed to list PostgreSQL data directory: " + result['stderr']) + logger.warning("Failed to list PostgreSQL data directory: " + result["stderr"]) # Wait for init.sh to complete logger.info("Waiting for init.sh to complete...") @@ -332,15 +333,17 @@ def gzip_then_base64_encode(s: str) -> str: attempt = 0 while attempt < max_attempts: try: - result = run_ssh_command(ssh, "test -f /var/lib/init-complete") - if result['succeeded']: + result = run_ssh_command(ssh, "test -f /var/lib/init-complete", timeout=5) + if result["succeeded"]: logger.info("init.sh has completed") break except Exception as e: logger.warning(f"Error checking init.sh status: {str(e)}") - + attempt += 1 - logger.warning(f"Waiting for init.sh to complete (attempt {attempt}/{max_attempts})") + logger.warning( + f"Waiting for init.sh to complete (attempt {attempt}/{max_attempts})" + ) sleep(5) if attempt >= 
max_attempts: @@ -351,9 +354,18 @@ def gzip_then_base64_encode(s: str) -> str: def is_healthy(ssh) -> bool: health_checks = [ ("postgres", "sudo -u postgres /usr/bin/pg_isready -U postgres"), - ("adminapi", f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {supabase_admin_key}'"), - ("postgrest", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready"), - ("gotrue", "curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health"), + ( + "adminapi", + f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {supabase_admin_key}'", + ), + ( + "postgrest", + "curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready", + ), + ( + "gotrue", + "curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health", + ), ("kong", "sudo kong health"), ("fail2ban", "sudo fail2ban-client status"), ] @@ -361,13 +373,19 @@ def is_healthy(ssh) -> bool: for service, command in health_checks: try: result = run_ssh_command(ssh, command) - if not result['succeeded']: - logger.warning(f"{service} not ready") + if not result["succeeded"]: + info_text = "" + info_command = f"sudo journalctl -b -u {service} -n 20 --no-pager" + info_result = run_ssh_command(ssh, info_command) + if info_result["succeeded"]: + info_text = "\n" + info_result["stdout"].strip() + + logger.warning(f"{service} not ready{info_text}") return False + except Exception: logger.warning(f"Connection failed during {service} check") return False - return True while True: @@ -376,10 +394,7 @@ def is_healthy(ssh) -> bool: sleep(1) # Return both the SSH connection and instance IP for use in tests - yield { - 'ssh': ssh, - 'ip': instance.public_ip_address - } + yield {"ssh": ssh, "ip": instance.public_ip_address} # at the end of the test suite, destroy the instance 
instance.terminate() @@ -387,8 +402,10 @@ def is_healthy(ssh) -> bool: def test_postgrest_is_running(host): """Check if postgrest service is running using our SSH connection.""" - result = run_ssh_command(host['ssh'], "systemctl is-active postgrest") - assert result['succeeded'] and result['stdout'].strip() == 'active', "PostgREST service is not running" + result = run_ssh_command(host["ssh"], "systemctl is-active postgrest") + assert ( + result["succeeded"] and result["stdout"].strip() == "active" + ), "PostgREST service is not running" def test_postgrest_responds_to_requests(host): @@ -514,229 +531,280 @@ def test_postgrest_ending_empty_key_query_parameter_is_removed(host): def test_postgresql_version(host): """Print the PostgreSQL version being tested and ensure it's >= 14.""" - result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c 'SELECT version();'") - if result['succeeded']: + result = run_ssh_command( + host["ssh"], "sudo -u postgres psql -c 'SELECT version();'" + ) + if result["succeeded"]: print(f"\nPostgreSQL Version:\n{result['stdout']}") # Extract version number from the output - version_line = result['stdout'].strip().split('\n')[2] # Skip header and get the actual version + version_line = ( + result["stdout"].strip().split("\n")[2] + ) # Skip header and get the actual version # Extract major version number (e.g., "15.8" -> 15) import re - version_match = re.search(r'PostgreSQL (\d+)\.', version_line) + + version_match = re.search(r"PostgreSQL (\d+)\.", version_line) if version_match: major_version = int(version_match.group(1)) print(f"PostgreSQL major version: {major_version}") - assert major_version >= 14, f"PostgreSQL version {major_version} is less than 14" + assert ( + major_version >= 14 + ), f"PostgreSQL version {major_version} is less than 14" else: assert False, "Could not parse PostgreSQL version number" else: print(f"\nFailed to get PostgreSQL version: {result['stderr']}") assert False, "Failed to get PostgreSQL version" - + # 
Also get the version from the command line - result = run_ssh_command(host['ssh'], "sudo -u postgres psql --version") - if result['succeeded']: + result = run_ssh_command(host["ssh"], "sudo -u postgres psql --version") + if result["succeeded"]: print(f"PostgreSQL Client Version: {result['stdout'].strip()}") else: print(f"Failed to get PostgreSQL client version: {result['stderr']}") - + print("✓ PostgreSQL version is >= 14") def test_libpq5_version(host): """Print the libpq5 version installed and ensure it's >= 14.""" # Try different package managers to find libpq5 - result = run_ssh_command(host['ssh'], "dpkg -l | grep libpq5 || true") - if result['succeeded'] and result['stdout'].strip(): + result = run_ssh_command(host["ssh"], "dpkg -l | grep libpq5 || true") + if result["succeeded"] and result["stdout"].strip(): print(f"\nlibpq5 package info:\n{result['stdout']}") # Extract version from dpkg output (format: ii libpq5:arm64 17.5-1.pgdg20.04+1) import re - version_match = re.search(r'libpq5[^ ]* +(\d+)\.', result['stdout']) + + version_match = re.search(r"libpq5[^ ]* +(\d+)\.", result["stdout"]) if version_match: major_version = int(version_match.group(1)) print(f"libpq5 major version: {major_version}") - assert major_version >= 14, f"libpq5 version {major_version} is less than 14" + assert ( + major_version >= 14 + ), f"libpq5 version {major_version} is less than 14" else: print("Could not parse libpq5 version from dpkg output") else: print("\nlibpq5 not found via dpkg") - + # Also try to find libpq.so files - result = run_ssh_command(host['ssh'], "find /usr -name '*libpq*' -type f 2>/dev/null | head -10") - if result['succeeded'] and result['stdout'].strip(): + result = run_ssh_command( + host["ssh"], "find /usr -name '*libpq*' -type f 2>/dev/null | head -10" + ) + if result["succeeded"] and result["stdout"].strip(): print(f"\nlibpq files found:\n{result['stdout']}") else: print("\nNo libpq files found") - + # Check if we can get version from a libpq file - 
result = run_ssh_command(host['ssh'], "ldd /usr/bin/psql | grep libpq || true") - if result['succeeded'] and result['stdout'].strip(): + result = run_ssh_command(host["ssh"], "ldd /usr/bin/psql | grep libpq || true") + if result["succeeded"] and result["stdout"].strip(): print(f"\npsql libpq dependency:\n{result['stdout']}") else: print("\nCould not find libpq dependency for psql") - + # Try to get version from libpq directly - result = run_ssh_command(host['ssh'], "psql --version 2>&1 | head -1") - if result['succeeded'] and result['stdout'].strip(): + result = run_ssh_command(host["ssh"], "psql --version 2>&1 | head -1") + if result["succeeded"] and result["stdout"].strip(): print(f"\npsql version output: {result['stdout'].strip()}") # The psql version should match the libpq version import re - version_match = re.search(r'psql \(PostgreSQL\) (\d+)\.', result['stdout']) + + version_match = re.search(r"psql \(PostgreSQL\) (\d+)\.", result["stdout"]) if version_match: major_version = int(version_match.group(1)) print(f"psql/libpq major version: {major_version}") - assert major_version >= 14, f"psql/libpq version {major_version} is less than 14" + assert ( + major_version >= 14 + ), f"psql/libpq version {major_version} is less than 14" else: print("Could not parse psql version") - + print("✓ libpq5 version is >= 14") def test_postgrest_read_only_session_attrs(host): """Test PostgREST with target_session_attrs=read-only and check for session errors.""" # First, check if PostgreSQL is configured for read-only mode - result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") - if result['succeeded']: - default_read_only = result['stdout'].strip() + result = run_ssh_command( + host["ssh"], 'sudo -u postgres psql -c "SHOW default_transaction_read_only;"' + ) + if result["succeeded"]: + default_read_only = result["stdout"].strip() print(f"PostgreSQL default_transaction_read_only: {default_read_only}") else: print("Could not 
check PostgreSQL read-only setting") default_read_only = "unknown" - + # Check if PostgreSQL is in recovery mode (standby) - result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SELECT pg_is_in_recovery();\"") - if result['succeeded']: - in_recovery = result['stdout'].strip() + result = run_ssh_command( + host["ssh"], 'sudo -u postgres psql -c "SELECT pg_is_in_recovery();"' + ) + if result["succeeded"]: + in_recovery = result["stdout"].strip() print(f"PostgreSQL pg_is_in_recovery: {in_recovery}") else: print("Could not check PostgreSQL recovery status") in_recovery = "unknown" - + # Find PostgreSQL configuration file - result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW config_file;\"") - if result['succeeded']: - config_file = result['stdout'].strip().split('\n')[2].strip() # Skip header and get the actual path + result = run_ssh_command( + host["ssh"], 'sudo -u postgres psql -c "SHOW config_file;"' + ) + if result["succeeded"]: + config_file = ( + result["stdout"].strip().split("\n")[2].strip() + ) # Skip header and get the actual path print(f"PostgreSQL config file: {config_file}") else: print("Could not find PostgreSQL config file") config_file = "/etc/postgresql/15/main/postgresql.conf" # Default fallback - + # Backup PostgreSQL config - result = run_ssh_command(host['ssh'], f"sudo cp {config_file} {config_file}.backup") - assert result['succeeded'], "Failed to backup PostgreSQL config" - + result = run_ssh_command(host["ssh"], f"sudo cp {config_file} {config_file}.backup") + assert result["succeeded"], "Failed to backup PostgreSQL config" + # Add read-only setting to PostgreSQL config - result = run_ssh_command(host['ssh'], f"echo 'default_transaction_read_only = on' | sudo tee -a {config_file}") - assert result['succeeded'], "Failed to add read-only setting to PostgreSQL config" - + result = run_ssh_command( + host["ssh"], + f"echo 'default_transaction_read_only = on' | sudo tee -a {config_file}", + ) + assert 
result["succeeded"], "Failed to add read-only setting to PostgreSQL config" + # Restart PostgreSQL to apply the new configuration - result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql") - assert result['succeeded'], "Failed to restart PostgreSQL" - + result = run_ssh_command(host["ssh"], "sudo systemctl restart postgresql") + assert result["succeeded"], "Failed to restart PostgreSQL" + # Wait for PostgreSQL to start up sleep(5) - + # Verify the change took effect - result = run_ssh_command(host['ssh'], "sudo -u postgres psql -c \"SHOW default_transaction_read_only;\"") - if result['succeeded']: - new_default_read_only = result['stdout'].strip() - print(f"PostgreSQL default_transaction_read_only after change: {new_default_read_only}") + result = run_ssh_command( + host["ssh"], 'sudo -u postgres psql -c "SHOW default_transaction_read_only;"' + ) + if result["succeeded"]: + new_default_read_only = result["stdout"].strip() + print( + f"PostgreSQL default_transaction_read_only after change: {new_default_read_only}" + ) else: print("Could not verify PostgreSQL read-only setting change") - + # First, backup the current PostgREST config - result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf /etc/postgrest/base.conf.backup") - assert result['succeeded'], "Failed to backup PostgREST config" - + result = run_ssh_command( + host["ssh"], "sudo cp /etc/postgrest/base.conf /etc/postgrest/base.conf.backup" + ) + assert result["succeeded"], "Failed to backup PostgREST config" + try: # Read the current config to get the db-uri - result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'") - assert result['succeeded'], "Failed to read current db-uri" - - current_db_uri = result['stdout'].strip() + result = run_ssh_command( + host["ssh"], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'" + ) + assert result["succeeded"], "Failed to read current db-uri" + + current_db_uri = result["stdout"].strip() 
print(f"Current db-uri: {current_db_uri}") - + # Extract just the URI part (remove the db-uri = " prefix and trailing quote) uri_start = current_db_uri.find('"') + 1 uri_end = current_db_uri.rfind('"') base_uri = current_db_uri[uri_start:uri_end] - + # Modify the URI to add target_session_attrs=read-only - if '?' in base_uri: + if "?" in base_uri: # URI already has parameters, add target_session_attrs modified_uri = base_uri + "&target_session_attrs=read-only" else: # URI has no parameters, add target_session_attrs modified_uri = base_uri + "?target_session_attrs=read-only" - + print(f"Modified URI: {modified_uri}") - + # Use awk to replace the db-uri line more reliably - result = run_ssh_command(host['ssh'], f"sudo awk '{{if ($1 == \"db-uri\") print \"db-uri = \\\"{modified_uri}\\\"\"; else print $0}}' /etc/postgrest/base.conf > /tmp/new_base.conf && sudo mv /tmp/new_base.conf /etc/postgrest/base.conf") - assert result['succeeded'], "Failed to update db-uri in config" - + result = run_ssh_command( + host["ssh"], + f'sudo awk \'{{if ($1 == "db-uri") print "db-uri = \\"{modified_uri}\\""; else print $0}}\' /etc/postgrest/base.conf > /tmp/new_base.conf && sudo mv /tmp/new_base.conf /etc/postgrest/base.conf', + ) + assert result["succeeded"], "Failed to update db-uri in config" + # Verify the change was made correctly - result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'") + result = run_ssh_command( + host["ssh"], "sudo cat /etc/postgrest/base.conf | grep '^db-uri'" + ) print(f"Updated db-uri line: {result['stdout'].strip()}") - + # Also show the full config to debug - result = run_ssh_command(host['ssh'], "sudo cat /etc/postgrest/base.conf") + result = run_ssh_command(host["ssh"], "sudo cat /etc/postgrest/base.conf") print(f"Full config after change:\n{result['stdout']}") - + # Restart PostgREST to apply the new configuration - result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest") - assert 
result['succeeded'], "Failed to restart PostgREST" - + result = run_ssh_command(host["ssh"], "sudo systemctl restart postgrest") + assert result["succeeded"], "Failed to restart PostgREST" + # Wait a moment for PostgREST to start up sleep(5) - + # Check if PostgREST is running - result = run_ssh_command(host['ssh'], "sudo systemctl is-active postgrest") - if not (result['succeeded'] and result['stdout'].strip() == 'active'): + result = run_ssh_command(host["ssh"], "sudo systemctl is-active postgrest") + if not (result["succeeded"] and result["stdout"].strip() == "active"): # If PostgREST failed to start, check the logs to see why - log_result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' --no-pager") + log_result = run_ssh_command( + host["ssh"], + "sudo journalctl -u postgrest --since '5 seconds ago' --no-pager", + ) print(f"PostgREST failed to start. Recent logs:\n{log_result['stdout']}") assert False, "PostgREST failed to start after config change" - + # Make a test request to trigger any potential session errors try: response = requests.get( f"http://{host['ip']}/rest/v1/", headers={"apikey": anon_key, "authorization": f"Bearer {anon_key}"}, - timeout=10 + timeout=10, ) print(f"Test request status: {response.status_code}") except Exception as e: print(f"Test request failed: {str(e)}") - + # Check PostgREST logs for "session is not read-only" errors - result = run_ssh_command(host['ssh'], "sudo journalctl -u postgrest --since '5 seconds ago' | grep -i 'session is not read-only' || true") - - if result['stdout'].strip(): - print(f"\nFound 'session is not read-only' errors in PostgREST logs:\n{result['stdout']}") + result = run_ssh_command( + host["ssh"], + "sudo journalctl -u postgrest --since '5 seconds ago' | grep -i 'session is not read-only' || true", + ) + + if result["stdout"].strip(): + print( + f"\nFound 'session is not read-only' errors in PostgREST logs:\n{result['stdout']}" + ) assert False, "PostgREST logs 
contain 'session is not read-only' errors even though PostgreSQL is configured for read-only mode" else: print("\nNo 'session is not read-only' errors found in PostgREST logs") - + finally: # Restore the original configuration - result = run_ssh_command(host['ssh'], "sudo cp /etc/postgrest/base.conf.backup /etc/postgrest/base.conf") - if result['succeeded']: - result = run_ssh_command(host['ssh'], "sudo systemctl restart postgrest") - if result['succeeded']: + result = run_ssh_command( + host["ssh"], + "sudo cp /etc/postgrest/base.conf.backup /etc/postgrest/base.conf", + ) + if result["succeeded"]: + result = run_ssh_command(host["ssh"], "sudo systemctl restart postgrest") + if result["succeeded"]: print("Restored original PostgREST configuration") else: print("Warning: Failed to restart PostgREST after restoring config") else: print("Warning: Failed to restore original PostgREST configuration") - + # Restore PostgreSQL to original configuration - result = run_ssh_command(host['ssh'], f"sudo cp {config_file}.backup {config_file}") - if result['succeeded']: - result = run_ssh_command(host['ssh'], "sudo systemctl restart postgresql") - if result['succeeded']: + result = run_ssh_command( + host["ssh"], f"sudo cp {config_file}.backup {config_file}" + ) + if result["succeeded"]: + result = run_ssh_command(host["ssh"], "sudo systemctl restart postgresql") + if result["succeeded"]: print("Restored PostgreSQL to original configuration") else: print("Warning: Failed to restart PostgreSQL after restoring config") else: print("Warning: Failed to restore PostgreSQL configuration") -