Compare commits
19 commits
b4347cc8a2
87a06dac66
366ceece24
8b2425d16d
101bf971a7
c4a43b82d3
1f285f6492
2d71a4a132
264b30e8a2
7f56a3db56
9d02a2d90b
3c13eb0d6b
e9302c51be
45630f7326
6c61adb1c7
8066c34eb5
867daaa375
0fccef973f
89b682935b
163 changed files with 6793 additions and 6206 deletions
.github/workflows/docs.yml (vendored): 3 changes

@@ -35,7 +35,8 @@ jobs:
         run: mkdocs build

       - name: Copy files to the s3 website content bucket
-        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
+        # for the time being, let's just always deploy the docs
+        # if: ${{ startsWith(github.ref, 'refs/tags/v') }}
         run: rclone sync site/ HCLOUD:/1661580-supabase-operator-docs/
         env:
           RCLONE_CONFIG_HCLOUD_TYPE: s3
.github/workflows/lint.yml (vendored): 2 changes

@@ -28,4 +28,4 @@ jobs:
       - name: Run linter
         uses: golangci/golangci-lint-action@v6
         with:
-          version: v1.61
+          version: v1.63.4
.github/workflows/postgres.yml (vendored, new file): 80 additions

@@ -0,0 +1,80 @@
name: Postgres image

on:
  schedule:
    # every Thursday 2:30am
    - cron: "30 2 * * 2"
  push:
    paths:
      - .github/workflows/postgres.yml
      - postgres/**
    branches:
      - main
    tags:
      - "v*"

env:
  MINOR_VERSIONS: '{"15":"12","17":"4"}'

jobs:
  build:
    strategy:
      matrix:
        arch:
          - arm64
          - amd64
        postgres_major:
          - "15"
          - "17"
    runs-on: ubuntu-latest-${{ matrix.arch }}
    steps:
      - uses: actions/checkout@v4

      - name: Login to container registry
        uses: docker/login-action@v3
        with:
          registry: registry.icb4dc0.de
          username: ${{ secrets.HARBOR_USER }}
          password: ${{ secrets.HARBOR_TOKEN }}

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          file: postgres/Dockerfile
          push: true
          tags: registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-${{ matrix.arch }}
          build-args: |
            POSTGRES_MAJOR=${{ matrix.postgres_major }}
            POSTGRES_MINOR=${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}

  manifest:
    strategy:
      matrix:
        postgres_major:
          - "15"
          - "17"
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - name: Login to container registry
        uses: docker/login-action@v3
        with:
          registry: registry.icb4dc0.de
          username: ${{ secrets.HARBOR_USER }}
          password: ${{ secrets.HARBOR_TOKEN }}

      - name: Install skopeo
        run: |
          apt-get update
          apt-get install -y skopeo

      - name: Create manifest
        run: |
          docker buildx imagetools create \
            -t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }} \
            -t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }} \
            registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64 \
            registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64

          skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64
          skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
.github/workflows/release.yml (vendored): 7 changes

@@ -22,9 +22,9 @@ jobs:
       - name: Login to container registry
         uses: docker/login-action@v3
         with:
-          registry: code.icb4dc0.de
-          username: prskr
-          password: ${{ secrets.RELEASE_TOKEN }}
+          registry: registry.icb4dc0.de
+          username: ${{ secrets.HARBOR_USER }}
+          password: ${{ secrets.HARBOR_TOKEN }}

       - name: Setup Go
         uses: actions/setup-go@v5
@@ -36,7 +36,6 @@ jobs:
       - name: Init go
         run: |
           go mod download
-          go mod download -modfile tools/go.mod

       - name: Snapshot release
         uses: goreleaser/goreleaser-action@v6
.github/workflows/test-e2e.yml (vendored): 5 changes

@@ -43,3 +43,8 @@ jobs:
         run: |
           go mod tidy
           make test-e2e
+
+      - name: Cleanup kind cluster
+        if: always()
+        run: |
+          kind delete cluster
.golangci.yml: 110 changes

@@ -1,36 +1,18 @@
+version: "2"
 run:
-  timeout: 5m
   allow-parallel-runners: true

-issues:
-  # don't skip warning about doc comments
-  # don't exclude the default set of lint
-  exclude-use-default: false
-  # restore some of the defaults
-  # (fill in the rest as needed)
-  exclude-rules:
-    - path: "api/*"
-      linters:
-        - lll
-    - path: "internal/*"
-      linters:
-        - dupl
-        - lll
 linters:
-  disable-all: true
+  default: none
   enable:
+    - copyloopvar
     - dupl
     - errcheck
-    - copyloopvar
     - ginkgolinter
     - goconst
     - gocyclo
-    - gofmt
-    - goimports
-    - goheader
-    - gosimple
     # enable when the TODOs are fixed
     # - godox
+    - goheader
     - govet
     - ineffassign
     - lll
@@ -39,43 +21,65 @@ linters:
     - prealloc
     - revive
     - staticcheck
-    - typecheck
     - unconvert
     - unparam
     - unused
+  settings:
+    goheader:
+      values:
+        const:
+          AUTHOR: Peter Kurfer
+      template-path: hack/header.tmpl
+    importas:
+      alias:
+        - pkg: k8s.io/api/(\w+)/(v[\w\d]+)
+          alias: $1$2
+        - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+          alias: metav1
+      no-unaliased: true
+      no-extra-aliases: true
+    revive:
+      rules:
+        - name: comment-spacings
+  exclusions:
+    generated: lax
     rules:
-linters-settings:
-  revive:
-      - name: comment-spacings
-  gci:
-    sections:
-      - standard
-      - default
-      - prefix(code.icb4dc0.de/prskr/supabase-operator)
-      - alias
-      - blank
-      - dot
-  goimports:
-    local-prefixes: code.icb4dc0.de/prskr/supabase-operator
-  goheader:
-    values:
-      const:
-        AUTHOR: Peter Kurfer
-    template-path: hack/header.tmpl
-  importas:
-    no-unaliased: true
-    no-extra-aliases: true
-    alias:
-      - pkg: k8s.io/api/(\w+)/(v[\w\d]+)
-        alias: $1$2
-      - pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
-        alias: metav1
+      - linters:
+          - lll
+        path: api/*
+      - linters:
+          - dupl
+          - lll
+        path: internal/*
+    paths:
+      - third_party$
+      - builtin$
+      - examples$

 severity:
-  default-severity: error
+  default: error
   rules:
     - linters:
         - godox
       severity: info
+
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - prefix(code.icb4dc0.de/prskr/supabase-operator)
+        - alias
+        - blank
+        - dot
+    goimports:
+      local-prefixes:
+        - code.icb4dc0.de/prskr/supabase-operator
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
@@ -8,7 +8,7 @@ before:
   - go mod tidy
   - go run mage.go GenerateAll
   - mkdir -p out
-  - sh -c "cd config/release/default && kustomize edit set image supabase-operator=code.icb4dc0.de/prskr/supabase-operator:{{.Version}}"
+  - sh -c "cd config/release/default && kustomize edit set image supabase-operator=registry.icb4dc0.de/supabase-operator/controller:{{.Version}}"
   - sh -c "kustomize build config/release/default > out/operator_manifest.yaml"

 builds:
@@ -28,7 +28,7 @@ kos:
   base_image: gcr.io/distroless/static:nonroot
   user: "65532:65532"
   repositories:
-    - code.icb4dc0.de/prskr/supabase-operator
+    - registry.icb4dc0.de/supabase-operator/controller
   platforms:
     - linux/amd64
     - linux/arm64
@@ -2,8 +2,7 @@

 # git hook pre commit
 pre-commit = [
-  "go mod tidy -go=1.23.5",
-  "go run mage.go GenerateAll",
+  "go mod tidy -go=1.24",
   "husky lint-staged",
   # "golangci-lint run",
 ]
@@ -4,6 +4,19 @@
       "initialization_options": {
         "local": "code.icb4dc0.de/prskr/supabase-operator"
       }
+    },
+    "golangci-lint": {
+      "initialization_options": {
+        "command": [
+          "go",
+          "tool",
+          "golangci-lint",
+          "run",
+          "--output.json.path",
+          "stderr",
+          "--issues-exit-code=1"
+        ]
+      }
     }
   }
 }
.zed/tasks.json (new file): 47 additions

@@ -0,0 +1,47 @@
[
  {
    "label": "Tilt Up",
    "command": "tilt",
    "args": ["up"],
    "use_new_terminal": false,
    "allow_concurrent_runs": false
  },
  {
    "label": "Generate CRDs",
    "command": "go",
    "args": ["run", "mage.go", "CRDs"],
    "use_new_terminal": false,
    "allow_concurrent_runs": false,
    "tags": ["mage", "generate"]
  },
  {
    "label": "Generate CRD docs",
    "command": "go",
    "args": ["run", "mage.go", "CRDDocs"],
    "use_new_terminal": false,
    "allow_concurrent_runs": false,
    "tags": ["mage", "generate"]
  },
  {
    "label": "Update image meta",
    "command": "go",
    "args": ["run", "mage.go", "FetchImageMeta"],
    "use_new_terminal": false,
    "allow_concurrent_runs": false,
    "tags": ["mage"]
  },
  {
    "label": "Update DB migrations",
    "command": "go",
    "args": ["run", "mage.go", "FetchMigrations"],
    "use_new_terminal": false,
    "allow_concurrent_runs": false,
    "tags": ["mage"]
  },
  {
    "label": "Run all Go tests",
    "command": "go tool gotestsum ./...",
    "use_new_terminal": false,
    "allow_concurrent_runs": false
  }
]
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.23.4 AS builder
+FROM golang:1.24-alpine AS builder
 ARG TARGETOS
 ARG TARGETARCH

@@ -16,10 +16,7 @@ COPY [ "go.*", "./" ]
 COPY [ "api", "api" ]
 COPY [ "assets/migrations", "assets/migrations" ]
 COPY [ "cmd", "cmd" ]
-COPY [ "infrastructure", "infrastructure" ]
 COPY [ "internal", "internal" ]
-COPY [ "magefiles", "magefiles" ]
-COPY [ "tools", "tools" ]

 # Build
 # the GOARCH has not a default value to allow the binary be built according to the host where the command
Makefile: 64 changes

@@ -44,11 +44,11 @@ help: ## Display this help.
 ##@ Development

 .PHONY: manifests
-manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
+manifests: ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
 	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases

 .PHONY: generate
-generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+generate: ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
 	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

 .PHONY: fmt
@@ -60,7 +60,7 @@ vet: ## Run go vet against code.
 	go vet ./...

 .PHONY: test
-test: manifests generate fmt vet envtest ## Run tests.
+test: manifests generate fmt vet ## Run tests.
 	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

 # TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.
@@ -81,11 +81,11 @@ test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated
 	go test ./test/e2e/ -v -ginkgo.v

 .PHONY: lint
-lint: golangci-lint ## Run golangci-lint linter
+lint: ## Run golangci-lint linter
 	$(GOLANGCI_LINT) run

 .PHONY: lint-fix
-lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
+lint-fix: ## Run golangci-lint linter and perform fixes
 	$(GOLANGCI_LINT) run --fix

 ##@ Build
@@ -127,7 +127,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
 	rm Dockerfile.cross

 .PHONY: build-installer
-build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
+build-installer: manifests generate ## Generate a consolidated YAML with CRDs and deployment.
 	mkdir -p dist
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
 	$(KUSTOMIZE) build config/default > dist/install.yaml
@@ -148,7 +148,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified

 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	cd config/manager && $(KUSTOMIZE) edit set image supabase-operator=${IMG}
 	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -

 .PHONY: undeploy
@@ -164,49 +164,7 @@ $(LOCALBIN):

 ## Tool Binaries
 KUBECTL ?= kubectl
-KUSTOMIZE ?= $(LOCALBIN)/kustomize
-CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
-ENVTEST ?= $(LOCALBIN)/setup-envtest
-GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
+KUSTOMIZE ?= go tool kustomize
+CONTROLLER_GEN ?= go tool controller-gen
+ENVTEST ?= go tool setup-envtest
+GOLANGCI_LINT = go tool golangci-lint

-## Tool Versions
-KUSTOMIZE_VERSION ?= v5.5.0
-CONTROLLER_TOOLS_VERSION ?= v0.16.4
-ENVTEST_VERSION ?= release-0.19
-GOLANGCI_LINT_VERSION ?= v1.61.0
-
-.PHONY: kustomize
-kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
-$(KUSTOMIZE): $(LOCALBIN)
-	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
-
-.PHONY: controller-gen
-controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
-$(CONTROLLER_GEN): $(LOCALBIN)
-	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
-
-.PHONY: envtest
-envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
-$(ENVTEST): $(LOCALBIN)
-	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
-
-.PHONY: golangci-lint
-golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
-$(GOLANGCI_LINT): $(LOCALBIN)
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
-
-# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
-# $1 - target path with name of binary
-# $2 - package url which can be installed
-# $3 - specific version of package
-define go-install-tool
-@[ -f "$(1)-$(3)" ] || { \
-set -e; \
-package=$(2)@$(3) ;\
-echo "Downloading $${package}" ;\
-rm -f $(1) || true ;\
-GOBIN=$(LOCALBIN) go install $${package} ;\
-mv $(1) $(1)-$(3) ;\
-} ;\
-ln -sf $(1)-$(3) $(1)
-endef
@@ -37,7 +37,7 @@ This operator tries to be as un-opionionated as possible and thereofore does not
 ## Getting Started

 ### Prerequisites
-- go version v1.23.x+
+- go version v1.24.x+
 - docker version 27.+.
 - kubectl version v1.30.0+.
 - Access to a Kubernetes v1.30.+ cluster.
Tiltfile: 16 changes

@@ -4,7 +4,6 @@ load('ext://restart_process', 'docker_build_with_restart')
 allow_k8s_contexts('kind-kind')

 k8s_yaml(kustomize('config/dev'))
-k8s_yaml(kustomize('config/samples'))

 compile_cmd = 'CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/supabase-operator ./cmd/'

@@ -23,8 +22,10 @@ local_resource(
   resource_deps=[]
 )

+k8s_kind('Cluster', api_version='postgresql.cnpg.io/v1')
+
 docker_build_with_restart(
-  'supabase-operator',
+  'controller',
   '.',
   entrypoint=['/app/bin/supabase-operator'],
   dockerfile='dev/Dockerfile',
@@ -40,10 +41,11 @@ k8s_resource('supabase-controller-manager')
 k8s_resource(
   workload='supabase-control-plane',
   port_forwards=18000,
+  resource_deps=[]
 )

 k8s_resource(
-  objects=["cluster-example:Cluster:supabase-demo"],
+  workload='cluster-example',
   new_name='Postgres cluster',
   port_forwards=5432
 )
@@ -62,7 +64,12 @@ k8s_resource(
 k8s_resource(
   objects=["gateway-sample:APIGateway:supabase-demo"],
   extra_pod_selectors={"app.kubernetes.io/component": "api-gateway"},
-  port_forwards=[8000, 19000],
+  port_forwards=[3000, 8000, 19000],
+  links=[
+    link("https://localhost:3000", "Studio"),
+    link("http://localhost:8000", "API"),
+    link("http://localhost:19000", "Envoy Admin Interface")
+  ],
   new_name='API Gateway',
   resource_deps=[
     'supabase-controller-manager'
@@ -73,7 +80,6 @@ k8s_resource(
   objects=["dashboard-sample:Dashboard:supabase-demo"],
   extra_pod_selectors={"app.kubernetes.io/component": "dashboard", "app.kubernetes.io/name": "studio"},
   discovery_strategy="selectors-only",
-  port_forwards=[3000],
   new_name='Dashboard',
   resource_deps=[
     'supabase-controller-manager'
|
@ -19,6 +19,7 @@ package v1alpha1
|
||||||
import (
|
import (
|
||||||
"iter"
|
"iter"
|
||||||
"maps"
|
"maps"
|
||||||
|
"strings"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
@ -37,6 +38,40 @@ type ControlPlaneSpec struct {
|
||||||
Port uint16 `json:"port"`
|
Port uint16 `json:"port"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EnvoyLogLevel string
|
||||||
|
|
||||||
|
type EnvoyComponentLogLevel struct {
|
||||||
|
// Component - the component to set the log level for
|
||||||
|
// the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36)
|
||||||
|
Component string `json:"component"`
|
||||||
|
// Level - the log level to set for the component
|
||||||
|
// +kubebuilder:validation:Enum=trace;debug;info;warning;error;critical;off
|
||||||
|
Level EnvoyLogLevel `json:"level"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EnvoyDebuggingOptions struct {
|
||||||
|
ComponentLogLevels []EnvoyComponentLogLevel `json:"componentLogLevels,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *EnvoyDebuggingOptions) DebugLogging() string {
|
||||||
|
if o == nil || len(o.ComponentLogLevels) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var builder strings.Builder
|
||||||
|
for i, lvl := range o.ComponentLogLevels {
|
||||||
|
if i > 0 {
|
||||||
|
builder.WriteString(",")
|
||||||
|
}
|
||||||
|
|
||||||
|
builder.WriteString(lvl.Component)
|
||||||
|
builder.WriteRune(':')
|
||||||
|
builder.WriteString(string(lvl.Level))
|
||||||
|
}
|
||||||
|
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
type EnvoySpec struct {
|
type EnvoySpec struct {
|
||||||
// NodeName - identifies the Envoy cluster within the current namespace
|
// NodeName - identifies the Envoy cluster within the current namespace
|
||||||
// if not set, the name of the APIGateway resource will be used
|
// if not set, the name of the APIGateway resource will be used
|
||||||
|
@ -45,15 +80,137 @@ type EnvoySpec struct {
|
||||||
// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
|
// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
|
||||||
ControlPlane *ControlPlaneSpec `json:"controlPlane"`
|
ControlPlane *ControlPlaneSpec `json:"controlPlane"`
|
||||||
// WorkloadTemplate - customize the Envoy deployment
|
// WorkloadTemplate - customize the Envoy deployment
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
|
// DisableIPv6 - disable IPv6 for the Envoy instance
|
||||||
|
// this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
|
||||||
|
DisableIPv6 bool `json:"disableIPv6,omitempty"`
|
||||||
|
Debugging *EnvoyDebuggingOptions `json:"debugging,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TlsCertRef struct {
|
||||||
|
SecretName string `json:"secretName"`
|
||||||
|
// ServerCertKey - key in the secret that contains the server certificate
|
||||||
|
// +kubebuilder:default="tls.crt"
|
||||||
|
ServerCertKey string `json:"serverCertKey"`
|
||||||
|
// ServerKeyKey - key in the secret that contains the server private key
|
||||||
|
// +kubebuilder:default="tls.key"
|
||||||
|
ServerKeyKey string `json:"serverKeyKey"`
|
||||||
|
// CaCertKey - key in the secret that contains the CA certificate
|
||||||
|
// +kubebuilder:default="ca.crt"
|
||||||
|
CaCertKey string `json:"caCertKey,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EndpointTlsSpec struct {
|
||||||
|
Cert *TlsCertRef `json:"cert"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ApiEndpointSpec struct {
|
type ApiEndpointSpec struct {
|
||||||
// JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs
|
// JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs
|
||||||
JWKSSelector *corev1.SecretKeySelector `json:"jwks"`
|
JWKSSelector *corev1.SecretKeySelector `json:"jwks"`
|
||||||
|
// TLS - enable and configure TLS for the API endpoint
|
||||||
|
TLS *EndpointTlsSpec `json:"tls,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type DashboardEndpointSpec struct{}
|
func (s *ApiEndpointSpec) TLSSpec() *EndpointTlsSpec {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.TLS
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardAuthType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
DashboardAuthTypeNone DashboardAuthType = "none"
|
||||||
|
DashboardAuthTypeOAuth2 DashboardAuthType = "oauth2"
|
||||||
|
DashboardAuthTypeBasic DashboardAuthType = "basic"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DashboardOAuth2Spec struct {
|
||||||
|
// OpenIDIssuer - if set the defaulter will fetch the discovery document and fill
|
||||||
|
// TokenEndpoint and AuthorizationEndpoint based on the discovery document
|
||||||
|
OpenIDIssuer string `json:"openIdIssuer,omitempty"`
|
||||||
|
// TokenEndpoint - endpoint where Envoy will retrieve the OAuth2 access and identity token from
|
||||||
|
TokenEndpoint string `json:"tokenEndpoint,omitempty"`
|
||||||
|
// AuthorizationEndpoint - endpoint where the user will be redirected to authenticate
|
||||||
|
AuthorizationEndpoint string `json:"authorizationEndpoint,omitempty"`
|
||||||
|
// ClientID - client ID to authenticate with the OAuth2 provider
|
||||||
|
ClientID string `json:"clientId"`
|
||||||
|
// Scopes - scopes to request from the OAuth2 provider (e.g. "openid", "profile", ...) - optional
|
||||||
|
Scopes []string `json:"scopes,omitempty"`
|
||||||
|
// Resources - resources to request from the OAuth2 provider (e.g. "user", "email", ...) - optional
|
||||||
|
Resources []string `json:"resources,omitempty"`
|
||||||
|
// ClientSecretRef - reference to the secret that contains the client secret
|
||||||
|
ClientSecretRef *corev1.SecretKeySelector `json:"clientSecretRef"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardBasicAuthSpec struct {
|
||||||
|
// UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)
|
||||||
|
// +kubebuilder:validation:items:Pattern="^[\\w_.]+:\\{SHA\\}[A-z0-9]+=*$"
|
||||||
|
UsersInline []string `json:"usersInline,omitempty"`
|
||||||
|
// PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form
|
||||||
|
// if not empty, credentials will be merged with inline users
|
||||||
|
PlaintextUsersSecretRef string `json:"plaintextUsersSecretRef,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardAuthSpec struct {
|
||||||
|
// OAuth2 - configure oauth2 authentication for the dashhboard listener
|
||||||
|
// if configured, will be preferred over Basic authentication configuration
|
||||||
|
// effectively disabling basic auth
|
||||||
|
OAuth2 *DashboardOAuth2Spec `json:"oauth2,omitempty"`
|
||||||
|
// Basic - HTTP basic auth configuration, this should only be used in exceptions
|
||||||
|
// e.g. during evaluations or for local development
|
||||||
|
// only used if no other authentication is configured
|
||||||
|
Basic *DashboardBasicAuthSpec `json:"basic,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardEndpointSpec struct {
|
||||||
|
// Auth - configure authentication for the dashboard endpoint
|
||||||
|
Auth *DashboardAuthSpec `json:"auth,omitempty"`
|
||||||
|
// TLS - enable and configure TLS for the Dashboard endpoint
|
||||||
|
TLS *EndpointTlsSpec `json:"tls,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DashboardEndpointSpec) TLSSpec() *EndpointTlsSpec {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.TLS
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DashboardEndpointSpec) AuthType() DashboardAuthType {
|
||||||
|
if s == nil || s.Auth == nil {
|
||||||
|
return DashboardAuthTypeNone
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Auth.OAuth2 != nil {
|
||||||
|
return DashboardAuthTypeOAuth2
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Auth.Basic != nil {
|
||||||
|
return DashboardAuthTypeBasic
|
||||||
|
}
|
||||||
|
|
||||||
|
return DashboardAuthTypeNone
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DashboardEndpointSpec) OAuth2() *DashboardOAuth2Spec {
|
||||||
|
if s == nil || s.Auth == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.Auth.OAuth2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DashboardEndpointSpec) Basic() *DashboardBasicAuthSpec {
|
||||||
|
if s == nil || s.Auth == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.Auth.Basic
|
||||||
|
}
|
||||||
|
|
||||||
// APIGatewaySpec defines the desired state of APIGateway.
|
// APIGatewaySpec defines the desired state of APIGateway.
|
||||||
type APIGatewaySpec struct {
|
type APIGatewaySpec struct {
|
||||||
|
@ -74,8 +231,7 @@ type APIGatewaySpec struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type EnvoyStatus struct {
|
type EnvoyStatus struct {
|
||||||
ConfigVersion string `json:"configVersion,omitempty"`
|
ResourceHash []byte `json:"resourceHash,omitempty"`
|
||||||
ResourceHash []byte `json:"resourceHash,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// APIGatewayStatus defines the observed state of APIGateway.
|
// APIGatewayStatus defines the observed state of APIGateway.
|
||||||
|
@ -88,7 +244,7 @@ type APIGatewayStatus struct {
|
||||||
// +kubebuilder:subresource:status
|
// +kubebuilder:subresource:status
|
||||||
|
|
||||||
// APIGateway is the Schema for the apigateways API.
|
// APIGateway is the Schema for the apigateways API.
|
||||||
// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.configVersion`
|
// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.resourceHash`
|
||||||
type APIGateway struct {
|
type APIGateway struct {
|
||||||
metav1.TypeMeta `json:",inline"`
|
metav1.TypeMeta `json:",inline"`
|
||||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||||
|
|
|
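A quick way to read the new dashboard auth API: `DashboardEndpointSpec.AuthType()` is nil-safe and resolves the configured mode with OAuth2 taking precedence over basic auth, exactly as the doc comments in the hunk above describe. The following is a minimal, illustrative sketch; it re-declares trimmed copies of the types so it runs standalone (the real definitions live in the operator's `v1alpha1` package), and the client ID and htpasswd entry are made-up values.

```go
package main

import "fmt"

// Trimmed, illustrative copies of the v1alpha1 types from the hunk above;
// only the fields needed to demonstrate AuthType() are included.
type DashboardAuthType string

const (
	DashboardAuthTypeNone   DashboardAuthType = "none"
	DashboardAuthTypeOAuth2 DashboardAuthType = "oauth2"
	DashboardAuthTypeBasic  DashboardAuthType = "basic"
)

type DashboardOAuth2Spec struct{ ClientID string }

type DashboardBasicAuthSpec struct{ UsersInline []string }

type DashboardAuthSpec struct {
	OAuth2 *DashboardOAuth2Spec
	Basic  *DashboardBasicAuthSpec
}

type DashboardEndpointSpec struct{ Auth *DashboardAuthSpec }

// AuthType mirrors the helper added in the diff: nil receivers are tolerated,
// and OAuth2 wins over basic auth when both are configured.
func (s *DashboardEndpointSpec) AuthType() DashboardAuthType {
	if s == nil || s.Auth == nil {
		return DashboardAuthTypeNone
	}
	if s.Auth.OAuth2 != nil {
		return DashboardAuthTypeOAuth2
	}
	if s.Auth.Basic != nil {
		return DashboardAuthTypeBasic
	}
	return DashboardAuthTypeNone
}

func main() {
	var unset *DashboardEndpointSpec
	both := &DashboardEndpointSpec{Auth: &DashboardAuthSpec{
		OAuth2: &DashboardOAuth2Spec{ClientID: "dashboard"},
		Basic:  &DashboardBasicAuthSpec{UsersInline: []string{"admin:{SHA}..."}},
	}}

	fmt.Println(unset.AuthType()) // none: unset endpoints fall back to no auth
	fmt.Println(both.AuthType())  // oauth2: OAuth2 is preferred over basic auth
}
```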
@@ -91,16 +91,16 @@ type ContainerTemplate struct {
 	AdditionalEnv []corev1.EnvVar `json:"additionalEnv,omitempty"`
 }

-type WorkloadTemplate struct {
+type WorkloadSpec struct {
 	Replicas *int32 `json:"replicas,omitempty"`
 	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
 	AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
-	// Workload - customize the container template of the workload
-	Workload *ContainerTemplate `json:"workload,omitempty"`
+	// ContainerSpec - customize the container template of the workload
+	ContainerSpec *ContainerTemplate `json:"container,omitempty"`
 	AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
 }

-func (t *WorkloadTemplate) ReplicaCount() *int32 {
+func (t *WorkloadSpec) ReplicaCount() *int32 {
 	if t != nil && t.Replicas != nil {
 		return t.Replicas
 	}
@@ -108,20 +108,20 @@ func (t *WorkloadTemplate) ReplicaCount() *int32 {
 	return nil
 }

-func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
-	if t == nil || t.Workload == nil || len(t.Workload.AdditionalEnv) == 0 {
+func (t *WorkloadSpec) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
+	if t == nil || t.ContainerSpec == nil || len(t.ContainerSpec.AdditionalEnv) == 0 {
 		return basicEnv
 	}

-	existingKeys := make(map[string]bool, len(basicEnv)+len(t.Workload.AdditionalEnv))
+	existingKeys := make(map[string]bool, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv))

-	merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.Workload.AdditionalEnv)), basicEnv...)
+	merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv)), basicEnv...)

 	for _, v := range basicEnv {
 		existingKeys[v.Name] = true
 	}

-	for _, v := range t.Workload.AdditionalEnv {
+	for _, v := range t.ContainerSpec.AdditionalEnv {
 		if _, alreadyPresent := existingKeys[v.Name]; alreadyPresent {
 			continue
 		}
@@ -132,7 +132,7 @@ func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
 	return merged
 }

-func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
+func (t *WorkloadSpec) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
 	result := make(map[string]string)

 	maps.Copy(result, initial)
@@ -156,47 +156,47 @@ func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...ma
 	return result
 }

-func (t *WorkloadTemplate) Image(defaultImage string) string {
-	if t != nil && t.Workload != nil && t.Workload.Image != "" {
-		return t.Workload.Image
+func (t *WorkloadSpec) Image(defaultImage string) string {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.Image != "" {
+		return t.ContainerSpec.Image
 	}

 	return defaultImage
 }

-func (t *WorkloadTemplate) ImagePullPolicy() corev1.PullPolicy {
-	if t != nil && t.Workload != nil && t.Workload.PullPolicy != "" {
-		return t.Workload.PullPolicy
+func (t *WorkloadSpec) ImagePullPolicy() corev1.PullPolicy {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.PullPolicy != "" {
+		return t.ContainerSpec.PullPolicy
 	}

 	return corev1.PullIfNotPresent
 }

-func (t *WorkloadTemplate) PullSecrets() []corev1.LocalObjectReference {
-	if t != nil && t.Workload != nil && len(t.Workload.ImagePullSecrets) > 0 {
-		return t.Workload.ImagePullSecrets
+func (t *WorkloadSpec) PullSecrets() []corev1.LocalObjectReference {
+	if t != nil && t.ContainerSpec != nil && len(t.ContainerSpec.ImagePullSecrets) > 0 {
+		return t.ContainerSpec.ImagePullSecrets
 	}

 	return nil
 }

-func (t *WorkloadTemplate) Resources() corev1.ResourceRequirements {
-	if t != nil && t.Workload != nil {
-		return t.Workload.Resources
+func (t *WorkloadSpec) Resources() corev1.ResourceRequirements {
+	if t != nil && t.ContainerSpec != nil {
+		return t.ContainerSpec.Resources
 	}

 	return corev1.ResourceRequirements{}
 }

-func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
-	if t != nil && t.Workload != nil {
-		return append(defaultMounts, t.Workload.VolumeMounts...)
+func (t *WorkloadSpec) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
+	if t != nil && t.ContainerSpec != nil {
+		return append(defaultMounts, t.ContainerSpec.VolumeMounts...)
 	}

 	return defaultMounts
 }

-func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
+func (t *WorkloadSpec) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
 	if t == nil {
 		return defaultVolumes
 	}
@@ -204,7 +204,7 @@ func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Vol
 	return append(defaultVolumes, t.AdditionalVolumes...)
 }

-func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
+func (t *WorkloadSpec) PodSecurityContext() *corev1.PodSecurityContext {
 	if t != nil && t.SecurityContext != nil {
 		return t.SecurityContext
 	}
@@ -214,9 +214,9 @@ func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
 	}
 }

-func (t *WorkloadTemplate) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
-	if t != nil && t.Workload != nil && t.Workload.SecurityContext != nil {
-		return t.Workload.SecurityContext
+func (t *WorkloadSpec) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.SecurityContext != nil {
+		return t.ContainerSpec.SecurityContext
 	}

 	return &corev1.SecurityContext{
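The rename from `WorkloadTemplate`/`Workload` to `WorkloadSpec`/`ContainerSpec` keeps the merge semantics intact: operator-managed environment variables always win, and user-supplied ones are only appended when the name is not already taken. The hunk cuts off before the final append inside `MergeEnv`, so the sketch below fills that step in as an assumption; the variable names and values in `main` are illustrative only.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Trimmed, illustrative copies of the renamed types from the hunk above,
// reduced to the fields MergeEnv needs.
type ContainerTemplate struct {
	AdditionalEnv []corev1.EnvVar
}

type WorkloadSpec struct {
	ContainerSpec *ContainerTemplate
}

// MergeEnv follows the diff: defaults provided by the operator are kept as-is,
// and additional variables are appended only if their name is still unused.
// The final append is not visible in the hunk and is assumed here.
func (t *WorkloadSpec) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
	if t == nil || t.ContainerSpec == nil || len(t.ContainerSpec.AdditionalEnv) == 0 {
		return basicEnv
	}

	existing := make(map[string]bool, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv))
	merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv)), basicEnv...)

	for _, v := range basicEnv {
		existing[v.Name] = true
	}
	for _, v := range t.ContainerSpec.AdditionalEnv {
		if existing[v.Name] {
			continue // never override an operator-managed variable
		}
		merged = append(merged, v)
	}
	return merged
}

func main() {
	spec := &WorkloadSpec{ContainerSpec: &ContainerTemplate{AdditionalEnv: []corev1.EnvVar{
		{Name: "MANAGED_VAR", Value: "user-override"}, // ignored, already managed
		{Name: "MY_EXTRA_FLAG", Value: "on"},          // appended
	}}}

	defaults := []corev1.EnvVar{{Name: "MANAGED_VAR", Value: "operator-default"}}
	for _, v := range spec.MergeEnv(defaults) {
		fmt.Printf("%s=%s\n", v.Name, v.Value)
	}
}
```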
@@ -17,6 +17,7 @@ limitations under the License.
 package v1alpha1

 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -25,6 +26,7 @@ import (
 	"slices"
 	"strconv"
 	"strings"
+	"time"

 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -59,7 +61,7 @@ type DatabaseRoles struct {

 type Database struct {
 	DSN *string `json:"dsn,omitempty"`
-	DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef,omitempty"`
+	DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef"`
 	Roles DatabaseRoles `json:"roles,omitempty"`
 }

@@ -167,8 +169,8 @@ type PostgrestSpec struct {
 	// MaxRows - maximum number of rows PostgREST will load at a time
 	// +kubebuilder:default=1000
 	MaxRows int `json:"maxRows,omitempty"`
-	// WorkloadTemplate - customize the PostgREST workload
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	// WorkloadSpec - customize the PostgREST workload
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 type AuthProviderMeta struct {
@@ -365,12 +367,12 @@ func (p *AuthProviders) Vars(apiExternalURL string) []corev1.EnvVar {
 }

 type AuthSpec struct {
 	AdditionalRedirectUrls []string `json:"additionalRedirectUrls,omitempty"`
 	DisableSignup *bool `json:"disableSignup,omitempty"`
 	AnonymousUsersEnabled *bool `json:"anonymousUsersEnabled,omitempty"`
 	Providers *AuthProviders `json:"providers,omitempty"`
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadTemplate *WorkloadSpec `json:"workloadTemplate,omitempty"`
 	EmailSignupDisabled *bool `json:"emailSignupDisabled,omitempty"`
 }

 // CoreSpec defines the desired state of Core.
@@ -387,20 +389,89 @@ type CoreSpec struct {
 	Auth *AuthSpec `json:"auth,omitempty"`
 }

-type MigrationStatus map[string]metav1.Time
+type MigrationConditionStatus string

-func (s MigrationStatus) IsApplied(name string) bool {
-	_, ok := s[name]
-	return ok
-}
+const (
+	MigrationConditionStatusApplied MigrationConditionStatus = "Applied"
+	MigrationConditionStatusFailed  MigrationConditionStatus = "Failed"
+)

-func (s MigrationStatus) Record(name string) {
-	s[name] = metav1.Now()
+type MigrationScriptCondition struct {
+	// Name - file name of the migration script
+	Name string `json:"name"`
+	// Hash - SHA256 hash of the script when it was last successfully applied
+	Hash []byte `json:"hash"`
+	// Status - whether the migration was applied or not
+	// +kubebuilder:validation:Enum=Applied;Failed
+	Status MigrationConditionStatus `json:"status"`
+	// LastProbeTime - last time the operator tried to execute the migration script
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// LastTransitionTime - last time the condition transitioned from one status to another
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	// Reason - one-word, CamcelCase reason for the condition's last transition
+	Reason string `json:"reason,omitempty"`
+	// Message - human-readable message indicating details about the last transition
+	Message string `json:"message,omitempty"`
 }

 type DatabaseStatus struct {
-	AppliedMigrations MigrationStatus `json:"appliedMigrations,omitempty"`
+	MigrationConditions []MigrationScriptCondition `json:"migrationConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
 	Roles map[string][]byte `json:"roles,omitempty"`
+}
+
+func (s DatabaseStatus) IsMigrationUpToDate(name string, hash []byte) (found bool, upToDate bool) {
+	for _, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			return true, bytes.Equal(cond.Hash, hash)
+		}
+	}
+
+	return false, false
+}
+
+func (s *DatabaseStatus) RecordMigrationCondition(name string, hash []byte, err error) error {
+	var (
+		now                = time.Now()
+		newStatus          = MigrationConditionStatusApplied
+		lastProbeTime      = metav1.NewTime(now)
+		lastTransitionTime = metav1.NewTime(now)
+		message            string
+	)
+
+	if err != nil {
+		newStatus = MigrationConditionStatusFailed
+		message = err.Error()
+	}
+
+	for idx, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			lastTransitionTime = cond.LastTransitionTime
+			if cond.Status != newStatus {
+				lastTransitionTime = metav1.NewTime(now)
+			}
+
+			cond.Hash = hash
+			cond.Status = newStatus
+			cond.LastProbeTime = lastProbeTime
+			cond.LastTransitionTime = lastTransitionTime
+			cond.Reason = "Outdated"
+			cond.Message = message
+
+			s.MigrationConditions[idx] = cond
+			return err
+		}
+	}
+
+	s.MigrationConditions = append(s.MigrationConditions, MigrationScriptCondition{
+		Name:               name,
+		Hash:               hash,
+		Status:             newStatus,
+		LastProbeTime:      lastProbeTime,
+		LastTransitionTime: lastTransitionTime,
+		Message:            message,
+	})
+
+	return err
 }

 type CoreConditionType string
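The move from the `AppliedMigrations` timestamp map to per-script `MigrationConditions` means a migration is re-run whenever its content hash changes, not just when its name is new. The doc comment in the hunk says the hash is a SHA256 of the script, so the sketch below uses `crypto/sha256`; the script name, SQL content, and trimmed type copies are illustrative assumptions, not taken from the repository.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// Trimmed, illustrative copies of the status types from the hunk above.
type MigrationScriptCondition struct {
	Name string
	Hash []byte
}

type DatabaseStatus struct {
	MigrationConditions []MigrationScriptCondition
}

// IsMigrationUpToDate mirrors the diff: report whether a script is already
// recorded and whether its stored hash still matches the current content.
func (s DatabaseStatus) IsMigrationUpToDate(name string, hash []byte) (found, upToDate bool) {
	for _, cond := range s.MigrationConditions {
		if cond.Name == name {
			return true, bytes.Equal(cond.Hash, hash)
		}
	}
	return false, false
}

func main() {
	// Hypothetical migration script and file name.
	script := []byte("create table if not exists demo (id serial primary key);")
	sum := sha256.Sum256(script)

	status := DatabaseStatus{MigrationConditions: []MigrationScriptCondition{
		{Name: "0001_init.sql", Hash: sum[:]},
	}}

	found, upToDate := status.IsMigrationUpToDate("0001_init.sql", sum[:])
	fmt.Println(found, upToDate) // true true: the script can be skipped

	edited := sha256.Sum256(append(script, []byte(" -- edited")...))
	_, upToDate = status.IsMigrationUpToDate("0001_init.sql", edited[:])
	fmt.Println(upToDate) // false: content changed, re-apply and record again
}
```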
@@ -24,7 +24,7 @@ import (
 type StudioSpec struct {
 	JWT *JwtSpec `json:"jwt,omitempty"`
 	// WorkloadTemplate - customize the studio deployment
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 	// GatewayServiceSelector - selector to find the service for the API gateway
 	// Required to configure the API URL in the studio deployment
 	// If you don't run multiple APIGateway instances in the same namespaces, the default will be fine
@@ -37,7 +37,7 @@ type StudioSpec struct {

 type PGMetaSpec struct {
 	// WorkloadTemplate - customize the pg-meta deployment
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 type DbCredentialsReference struct {
@@ -191,7 +191,7 @@ type StorageApiSpec struct {
 	// UploadTemp - configure the emptyDir for storing intermediate files during uploads
 	UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
 	// WorkloadTemplate - customize the Storage API workload
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 type ImageProxySpec struct {
@@ -199,7 +199,7 @@ type ImageProxySpec struct {
 	Enable bool `json:"enable,omitempty"`
 	EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
 	// WorkloadTemplate - customize the image proxy workload
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 // StorageSpec defines the desired state of Storage.
@@ -101,7 +101,7 @@ func (in *APIGatewaySpec) DeepCopyInto(out *APIGatewaySpec) {
 	if in.DashboardEndpoint != nil {
 		in, out := &in.DashboardEndpoint, &out.DashboardEndpoint
 		*out = new(DashboardEndpointSpec)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	if in.ServiceSelector != nil {
 		in, out := &in.ServiceSelector, &out.ServiceSelector
@@ -160,6 +160,11 @@ func (in *ApiEndpointSpec) DeepCopyInto(out *ApiEndpointSpec) {
 		*out = new(v1.SecretKeySelector)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(EndpointTlsSpec)
+		(*in).DeepCopyInto(*out)
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiEndpointSpec.
@@ -247,7 +252,7 @@ func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
 	}
 	if in.WorkloadTemplate != nil {
 		in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
-		*out = new(WorkloadTemplate)
+		*out = new(WorkloadSpec)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.EmailSignupDisabled != nil {
@@ -490,6 +495,51 @@ func (in *Dashboard) DeepCopyObject() runtime.Object {
 	return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardAuthSpec) DeepCopyInto(out *DashboardAuthSpec) {
+	*out = *in
+	if in.OAuth2 != nil {
+		in, out := &in.OAuth2, &out.OAuth2
+		*out = new(DashboardOAuth2Spec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Basic != nil {
+		in, out := &in.Basic, &out.Basic
+		*out = new(DashboardBasicAuthSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardAuthSpec.
+func (in *DashboardAuthSpec) DeepCopy() *DashboardAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardBasicAuthSpec) DeepCopyInto(out *DashboardBasicAuthSpec) {
+	*out = *in
+	if in.UsersInline != nil {
+		in, out := &in.UsersInline, &out.UsersInline
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardBasicAuthSpec.
+func (in *DashboardBasicAuthSpec) DeepCopy() *DashboardBasicAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardBasicAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardDbSpec) DeepCopyInto(out *DashboardDbSpec) {
 	*out = *in
@@ -513,6 +563,16 @@ func (in *DashboardDbSpec) DeepCopy() *DashboardDbSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardEndpointSpec) DeepCopyInto(out *DashboardEndpointSpec) {
 	*out = *in
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(DashboardAuthSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(EndpointTlsSpec)
+		(*in).DeepCopyInto(*out)
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardEndpointSpec.
@@ -557,6 +617,36 @@ func (in *DashboardList) DeepCopyObject() runtime.Object {
 	return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardOAuth2Spec) DeepCopyInto(out *DashboardOAuth2Spec) {
+	*out = *in
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientSecretRef != nil {
+		in, out := &in.ClientSecretRef, &out.ClientSecretRef
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOAuth2Spec.
+func (in *DashboardOAuth2Spec) DeepCopy() *DashboardOAuth2Spec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardOAuth2Spec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) {
 	*out = *in
@@ -662,11 +752,11 @@ func (in *DatabaseRolesSecrets) DeepCopy() *DatabaseRolesSecrets {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
 	*out = *in
-	if in.AppliedMigrations != nil {
-		in, out := &in.AppliedMigrations, &out.AppliedMigrations
-		*out = make(MigrationStatus, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
+	if in.MigrationConditions != nil {
+		in, out := &in.MigrationConditions, &out.MigrationConditions
+		*out = make([]MigrationScriptCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
 	if in.Roles != nil {
@@ -768,6 +858,61 @@ func (in *EmailAuthSmtpSpec) DeepCopy() *EmailAuthSmtpSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *EndpointTlsSpec) DeepCopyInto(out *EndpointTlsSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.Cert != nil {
|
||||||
|
in, out := &in.Cert, &out.Cert
|
||||||
|
*out = new(TlsCertRef)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointTlsSpec.
|
||||||
|
func (in *EndpointTlsSpec) DeepCopy() *EndpointTlsSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(EndpointTlsSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *EnvoyComponentLogLevel) DeepCopyInto(out *EnvoyComponentLogLevel) {
|
||||||
|
*out = *in
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyComponentLogLevel.
|
||||||
|
func (in *EnvoyComponentLogLevel) DeepCopy() *EnvoyComponentLogLevel {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(EnvoyComponentLogLevel)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *EnvoyDebuggingOptions) DeepCopyInto(out *EnvoyDebuggingOptions) {
|
||||||
|
*out = *in
|
||||||
|
if in.ComponentLogLevels != nil {
|
||||||
|
in, out := &in.ComponentLogLevels, &out.ComponentLogLevels
|
||||||
|
*out = make([]EnvoyComponentLogLevel, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyDebuggingOptions.
|
||||||
|
func (in *EnvoyDebuggingOptions) DeepCopy() *EnvoyDebuggingOptions {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(EnvoyDebuggingOptions)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
@ -776,9 +921,14 @@ func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
||||||
*out = new(ControlPlaneSpec)
|
*out = new(ControlPlaneSpec)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Debugging != nil {
|
||||||
|
in, out := &in.Debugging, &out.Debugging
|
||||||
|
*out = new(EnvoyDebuggingOptions)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -848,9 +998,9 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
|
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -896,24 +1046,25 @@ func (in *JwtSpec) DeepCopy() *JwtSpec {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in MigrationStatus) DeepCopyInto(out *MigrationStatus) {
|
func (in *MigrationScriptCondition) DeepCopyInto(out *MigrationScriptCondition) {
|
||||||
{
|
*out = *in
|
||||||
in := &in
|
if in.Hash != nil {
|
||||||
*out = make(MigrationStatus, len(*in))
|
in, out := &in.Hash, &out.Hash
|
||||||
for key, val := range *in {
|
*out = make([]byte, len(*in))
|
||||||
(*out)[key] = *val.DeepCopy()
|
copy(*out, *in)
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
|
||||||
|
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationStatus.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationScriptCondition.
|
||||||
func (in MigrationStatus) DeepCopy() MigrationStatus {
|
func (in *MigrationScriptCondition) DeepCopy() *MigrationScriptCondition {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(MigrationStatus)
|
out := new(MigrationScriptCondition)
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return *out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
@ -939,9 +1090,9 @@ func (in *OAuthProvider) DeepCopy() *OAuthProvider {
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -985,9 +1136,9 @@ func (in *PostgrestSpec) DeepCopyInto(out *PostgrestSpec) {
|
||||||
*out = make([]string, len(*in))
|
*out = make([]string, len(*in))
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1144,9 +1295,9 @@ func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
|
||||||
*out = new(UploadTempSpec)
|
*out = new(UploadTempSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1237,9 +1388,9 @@ func (in *StudioSpec) DeepCopyInto(out *StudioSpec) {
|
||||||
*out = new(JwtSpec)
|
*out = new(JwtSpec)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.GatewayServiceMatchLabels != nil {
|
if in.GatewayServiceMatchLabels != nil {
|
||||||
|
@ -1261,6 +1412,21 @@ func (in *StudioSpec) DeepCopy() *StudioSpec {
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *TlsCertRef) DeepCopyInto(out *TlsCertRef) {
|
||||||
|
*out = *in
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TlsCertRef.
|
||||||
|
func (in *TlsCertRef) DeepCopy() *TlsCertRef {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(TlsCertRef)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
|
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
@ -1282,7 +1448,7 @@ func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
func (in *WorkloadSpec) DeepCopyInto(out *WorkloadSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.Replicas != nil {
|
if in.Replicas != nil {
|
||||||
in, out := &in.Replicas, &out.Replicas
|
in, out := &in.Replicas, &out.Replicas
|
||||||
|
@ -1301,8 +1467,8 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||||
(*out)[key] = val
|
(*out)[key] = val
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if in.Workload != nil {
|
if in.ContainerSpec != nil {
|
||||||
in, out := &in.Workload, &out.Workload
|
in, out := &in.ContainerSpec, &out.ContainerSpec
|
||||||
*out = new(ContainerTemplate)
|
*out = new(ContainerTemplate)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
@ -1315,12 +1481,12 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpec.
|
||||||
func (in *WorkloadTemplate) DeepCopy() *WorkloadTemplate {
|
func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(WorkloadTemplate)
|
out := new(WorkloadSpec)
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
|
@@ -18,7 +18,15 @@ grant pg_read_all_data to supabase_read_only_user;
 create schema if not exists extensions;
 create extension if not exists "uuid-ossp" with schema extensions;
 create extension if not exists pgcrypto with schema extensions;
-create extension if not exists pgjwt with schema extensions;
+do $$
+begin
+    if exists (select 1 from pg_available_extensions where name = 'pgjwt') then
+        if not exists (select 1 from pg_extension where extname = 'pgjwt') then
+            create extension if not exists pgjwt with schema "extensions" cascade;
+        end if;
+    end if;
+end $$;
+
 
 -- Set up auth roles for the developer
 create role anon nologin noinherit;

@@ -17,6 +17,7 @@ limitations under the License.
 package migrations
 
 import (
+	"crypto/sha256"
 	"embed"
 	"fmt"
 	"io/fs"
@@ -32,6 +33,7 @@ var migrationsFS embed.FS
 type Script struct {
 	FileName string
 	Content  string
+	Hash     []byte
 }
 
 func InitScripts() iter.Seq2[Script, error] {
@@ -49,10 +51,18 @@ func RoleCreationScript(roleName string) (Script, error) {
 		return Script{}, err
 	}
 
-	return Script{fileName, string(content)}, nil
+	hash := sha256.New()
+	_, _ = hash.Write(content)
+
+	return Script{
+		FileName: fileName,
+		Content:  string(content),
+		Hash:     hash.Sum(nil),
+	}, nil
 }
 
 func readScripts(dir string) iter.Seq2[Script, error] {
+	hash := sha256.New()
 	return func(yield func(Script, error) bool) {
 		files, err := migrationsFS.ReadDir(dir)
 		if err != nil {
@@ -76,11 +86,16 @@ func readScripts(dir string) iter.Seq2[Script, error] {
 			}
 		}
 
+		_, _ = hash.Write(content)
+
 		s := Script{
 			FileName: file.Name(),
 			Content:  string(content),
+			Hash:     hash.Sum(nil),
 		}
 
+		hash.Reset()
+
 		if !yield(s, nil) {
 			return
 		}

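Note: the new Hash field carries a SHA-256 digest of each migration script, and the MigrationScriptCondition status type shown earlier stores the same digest. A minimal, self-contained sketch of how such a digest can be used to decide whether a script needs to be (re)applied — the condition type below is a simplified stand-in, not the operator's real API type:

	package main

	import (
		"bytes"
		"crypto/sha256"
		"fmt"
	)

	// scriptCondition is a simplified stand-in for MigrationScriptCondition,
	// which additionally tracks probe/transition timestamps.
	type scriptCondition struct {
		Name string
		Hash []byte
	}

	// needsApply reports whether a script is unknown or its content changed
	// since the recorded condition was written.
	func needsApply(conds []scriptCondition, name string, content []byte) bool {
		sum := sha256.Sum256(content)
		for _, c := range conds {
			if c.Name == name {
				return !bytes.Equal(c.Hash, sum[:])
			}
		}
		return true
	}

	func main() {
		content := []byte("create table if not exists demo (id int);")
		sum := sha256.Sum256(content)
		conds := []scriptCondition{{Name: "0001_demo.sql", Hash: sum[:]}}

		fmt.Println(needsApply(conds, "0001_demo.sql", content))            // false: unchanged
		fmt.Println(needsApply(conds, "0001_demo.sql", append(content, ' '))) // true: content drifted
	}
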
@@ -1,19 +0,0 @@
--- migrate:up
-
--- demote postgres user
-GRANT ALL ON DATABASE postgres TO postgres;
-GRANT ALL ON SCHEMA auth TO postgres;
-GRANT ALL ON SCHEMA extensions TO postgres;
-GRANT ALL ON SCHEMA storage TO postgres;
-GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres;
-GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres;
-GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres;
-GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres;
-GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres;
-GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres;
-GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres;
-GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres;
-GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres;
-ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS;
-
--- migrate:down

@@ -5,34 +5,44 @@ DECLARE
 	pgsodium_exists boolean;
 	vault_exists boolean;
 BEGIN
-	pgsodium_exists = (
-		select count(*) = 1
-		from pg_available_extensions
-		where name = 'pgsodium'
-		and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9')
-	);
-
-	vault_exists = (
-		select count(*) = 1
-		from pg_available_extensions
-		where name = 'supabase_vault'
-	);
-
-	IF pgsodium_exists
-	THEN
-		create extension if not exists pgsodium;
-
-		grant pgsodium_keyiduser to postgres with admin option;
-		grant pgsodium_keyholder to postgres with admin option;
-		grant pgsodium_keymaker to postgres with admin option;
-
-		grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role;
-		grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role;
-		grant execute on function pgsodium.crypto_aead_det_keygen to service_role;
-
-		IF vault_exists
-		THEN
-			create extension if not exists supabase_vault;
-		END IF;
-	END IF;
+	IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'supabase_vault' AND default_version != '0.2.8') THEN
+		CREATE EXTENSION IF NOT EXISTS supabase_vault;
+
+		-- for some reason extension custom scripts aren't run during AMI build, so
+		-- we manually run it here
+		GRANT USAGE ON SCHEMA vault TO postgres WITH GRANT OPTION;
+		GRANT SELECT, DELETE ON vault.secrets, vault.decrypted_secrets TO postgres WITH GRANT OPTION;
+		GRANT EXECUTE ON FUNCTION vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt TO postgres WITH GRANT OPTION;
+	ELSE
+		pgsodium_exists = (
+			select count(*) = 1
+			from pg_available_extensions
+			where name = 'pgsodium'
+			and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9')
+		);
+
+		vault_exists = (
+			select count(*) = 1
+			from pg_available_extensions
+			where name = 'supabase_vault'
+		);
+
+		IF pgsodium_exists
+		THEN
+			create extension if not exists pgsodium;
+
+			grant pgsodium_keyiduser to postgres with admin option;
+			grant pgsodium_keyholder to postgres with admin option;
+			grant pgsodium_keymaker to postgres with admin option;
+
+			grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role;
+			grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role;
+			grant execute on function pgsodium.crypto_aead_det_keygen to service_role;
+
+			IF vault_exists
+			THEN
+				create extension if not exists supabase_vault;
+			END IF;
+		END IF;
+	END IF;
 END $$;

assets/migrations/migrations/20221207154255_create_vault.sql (new file, 17 lines)
@@ -0,0 +1,17 @@
+-- migrate:up
+
+DO $$
+BEGIN
+	IF EXISTS (select from pg_available_extensions where name = 'supabase_vault')
+	THEN
+		create extension if not exists supabase_vault;
+
+		-- for some reason extension custom scripts aren't run during AMI build, so
+		-- we manually run it here
+		grant usage on schema vault to postgres with grant option;
+		grant select, delete on vault.secrets, vault.decrypted_secrets to postgres with grant option;
+		grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option;
+	END IF;
+END $$;
+
+-- migrate:down

@@ -4,7 +4,12 @@ ALTER ROLE authenticated inherit;
 ALTER ROLE anon inherit;
 ALTER ROLE service_role inherit;
 
-GRANT pgsodium_keyholder to service_role;
+DO $$
+BEGIN
+	IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN
+		GRANT pgsodium_keyholder to service_role;
+	END IF;
+END $$;
 
 -- migrate:down
 

@@ -0,0 +1,6 @@
+-- migrate:up
+alter role supabase_admin set log_statement = none;
+alter role supabase_auth_admin set log_statement = none;
+alter role supabase_storage_admin set log_statement = none;
+
+-- migrate:down

@@ -0,0 +1,26 @@
+-- migrate:up
+do $$
+declare
+    ext_schema text;
+    extensions_schema_exists boolean;
+begin
+    -- check if the "extensions" schema exists
+    select exists (
+        select 1 from pg_namespace where nspname = 'extensions'
+    ) into extensions_schema_exists;
+
+    if extensions_schema_exists then
+        -- check if the "orioledb" extension is in the "public" schema
+        select nspname into ext_schema
+        from pg_extension e
+        join pg_namespace n on e.extnamespace = n.oid
+        where extname = 'orioledb';
+
+        if ext_schema = 'public' then
+            execute 'alter extension orioledb set schema extensions';
+        end if;
+    end if;
+end $$;
+
+-- migrate:down
+

@@ -0,0 +1,31 @@
+-- migrate:up
+
+DO $$
+BEGIN
+	IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pgsodium') THEN
+		CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text)
+		RETURNS void
+		LANGUAGE plpgsql
+		SECURITY DEFINER
+		SET search_path TO ''
+		AS $function$
+		BEGIN
+			EXECUTE format(
+				'GRANT SELECT ON pgsodium.key TO %s',
+				masked_role);
+
+			EXECUTE format(
+				'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s',
+				masked_role);
+
+			EXECUTE format(
+				'GRANT ALL ON %I TO %s',
+				view_name,
+				masked_role);
+			RETURN;
+		END
+		$function$;
+	END IF;
+END $$;
+
+-- migrate:down

@@ -0,0 +1,64 @@
+-- migrate:up
+CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
+RETURNS event_trigger
+LANGUAGE plpgsql
+AS $$
+BEGIN
+	IF EXISTS (
+		SELECT 1
+		FROM pg_event_trigger_ddl_commands() AS ev
+		JOIN pg_extension AS ext
+		ON ev.objid = ext.oid
+		WHERE ext.extname = 'pg_net'
+	)
+	THEN
+		IF NOT EXISTS (
+			SELECT 1
+			FROM pg_roles
+			WHERE rolname = 'supabase_functions_admin'
+		)
+		THEN
+			CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
+		END IF;
+
+		GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+
+		IF EXISTS (
+			SELECT FROM pg_extension
+			WHERE extname = 'pg_net'
+			-- all versions in use on existing projects as of 2025-02-20
+			-- version 0.12.0 onwards don't need these applied
+			AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0')
+		) THEN
+			ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+			ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+
+			ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+			ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+
+			REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+			REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+
+			GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+			GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+		END IF;
+	END IF;
+END;
+$$;
+
+DO $$
+BEGIN
+	IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pg_net')
+	THEN
+		ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER;
+		ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER;
+
+		REVOKE EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role;
+		REVOKE EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role;
+
+		GRANT ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC;
+		GRANT ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC;
+	END IF;
+END $$;
+
+-- migrate:down

assets/migrations/setup/realtime.sql (new file, 3 lines)
@@ -0,0 +1,3 @@
+create schema if not exists _realtime;
+
+alter schema _realtime owner to supabase_admin;

@@ -19,10 +19,13 @@ package main
 import (
 	"context"
 	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"net"
+	"strings"
 	"time"
 
+	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 	clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
 	discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
 	endpointservice "github.com/envoyproxy/go-control-plane/envoy/service/endpoint/v3"
@@ -31,31 +34,53 @@ import (
 	runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
 	secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
 	cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
+	"github.com/envoyproxy/go-control-plane/pkg/log"
 	"github.com/envoyproxy/go-control-plane/pkg/server/v3"
+	"google.golang.org/grpc/credentials"
+
+	"github.com/go-logr/logr"
 	"google.golang.org/grpc"
 	grpchealth "google.golang.org/grpc/health"
 	"google.golang.org/grpc/health/grpc_health_v1"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/reflection"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	mgr "sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
+	"code.icb4dc0.de/prskr/supabase-operator/internal/certs"
 	"code.icb4dc0.de/prskr/supabase-operator/internal/controlplane"
+	"code.icb4dc0.de/prskr/supabase-operator/internal/health"
 )
 
 //nolint:lll // flag declaration with struct tags is as long as it is
 type controlPlane struct {
-	ListenAddr string `name:"listen-address" default:":18000" help:"The address the control plane binds to."`
+	caCert tls.Certificate `kong:"-"`
+
+	ListenAddr string `name:"listen-address" default:":18000" help:"The address the control plane binds to."`
+	Tls struct {
+		CA struct {
+			Cert FileContent `env:"CERT" name:"server-cert" required:"" help:"The path to the server certificate file."`
+			Key  FileContent `env:"KEY" name:"server-key" required:"" help:"The path to the server key file."`
+		} `embed:"" prefix:"ca." envprefix:"CA_"`
+		ServerSecretName string `name:"server-secret-name" help:"The name of the secret containing the server certificate and key." default:"control-plane-xds-tls"`
+	} `embed:"" prefix:"tls." envprefix:"TLS_"`
 	MetricsAddr string `name:"metrics-bind-address" default:"0" help:"The address the metrics endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service."`
 	EnableLeaderElection bool `name:"leader-elect" default:"false" help:"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager."`
 	ProbeAddr string `name:"health-probe-bind-address" default:":8081" help:"The address the probe endpoint binds to."`
 	SecureMetrics bool `name:"metrics-secure" default:"true" help:"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead."`
 	EnableHTTP2 bool `name:"enable-http2" default:"false" help:"If set, HTTP/2 will be enabled for the metrics and webhook servers"`
+	ServiceName string `name:"service-name" env:"CONTROL_PLANE_SERVICE_NAME" default:"" required:"" help:"The name of the control plane service."`
+	Namespace string `name:"namespace" env:"CONTROL_PLANE_NAMESPACE" default:"" required:"" help:"Namespace where the controller is running, ideally set via downward API"`
 }
 
-func (cp controlPlane) Run(ctx context.Context) error {
+func (cp *controlPlane) Run(ctx context.Context, logger logr.Logger) error {
 	var tlsOpts []func(*tls.Config)
 
 	// if the enable-http2 flag is false (the default), http/2 should be disabled
@@ -91,6 +116,11 @@ func (cp controlPlane) Run(ctx context.Context) error {
 		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
 	}
 
+	bootstrapClient, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
+	if err != nil {
+		return fmt.Errorf("unable to create bootstrap client: %w", err)
+	}
+
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
 		Scheme:  scheme,
 		Metrics: metricsServerOptions,
@@ -104,9 +134,15 @@ func (cp controlPlane) Run(ctx context.Context) error {
 		return fmt.Errorf("unable to start control plane: %w", err)
 	}
 
-	envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, nil)
+	cacheLoggerInst := cacheLogger(logger.WithName("envoy-snapshot-cache"))
+	envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, cacheLoggerInst)
 
-	envoySrv, err := cp.envoyServer(ctx, envoySnapshotCache)
+	serverCert, err := cp.ensureControlPlaneTlsCert(ctx, bootstrapClient)
+	if err != nil {
+		return fmt.Errorf("failed to ensure control plane TLS cert: %w", err)
+	}
+
+	envoySrv, err := cp.envoyServer(ctx, logger, envoySnapshotCache, serverCert)
 	if err != nil {
 		return err
 	}
@@ -123,6 +159,18 @@ func (cp controlPlane) Run(ctx context.Context) error {
 		return fmt.Errorf("unable to create controller Core DB: %w", err)
 	}
 
+	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+		return fmt.Errorf("unable to set up health check: %w", err)
+	}
+
+	if err := mgr.AddHealthzCheck("server-cert", health.CertValidCheck(serverCert)); err != nil {
+		return fmt.Errorf("unable to set up health check: %w", err)
+	}
+
+	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+		return fmt.Errorf("unable to set up ready check: %w", err)
+	}
+
 	setupLog.Info("starting manager")
 	if err := mgr.Start(ctx); err != nil {
 		return fmt.Errorf("problem running manager: %w", err)
@@ -131,9 +179,20 @@ func (cp controlPlane) Run(ctx context.Context) error {
 	return nil
 }
 
-func (cp controlPlane) envoyServer(
+func (cp *controlPlane) AfterApply() (err error) {
+	cp.caCert, err = tls.X509KeyPair(cp.Tls.CA.Cert, cp.Tls.CA.Key)
+	if err != nil {
+		return fmt.Errorf("failed to parse server certificate: %w", err)
+	}
+
+	return nil
+}
+
+func (cp *controlPlane) envoyServer(
 	ctx context.Context,
+	logger logr.Logger,
 	cache cachev3.SnapshotCache,
+	serverCert tls.Certificate,
 ) (runnable mgr.Runnable, err error) {
 	const (
 		grpcKeepaliveTime = 30 * time.Second
@@ -141,11 +200,8 @@ func (cp controlPlane) envoyServer(
 		grpcKeepaliveMinTime = 30 * time.Second
 		grpcMaxConcurrentStreams = 1000000
 	)
-
-	var (
-		logger = ctrl.Log.WithName("control-plane")
-		srv    = server.NewServer(ctx, cache, nil)
-	)
+	srv := server.NewServer(ctx, cache, xdsServerCallbacks(logger))
+	logger = logger.WithName("control-plane")
 
 	// gRPC golang library sets a very small upper bound for the number gRPC/h2
 	// streams over a single TCP connection. If a proxy multiplexes requests over
@@ -153,7 +209,13 @@ func (cp controlPlane) envoyServer(
 	// availability problems. Keepalive timeouts based on connection_keepalive parameter
 	// https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/examples#dynamic
 
+	tlsCfg, err := cp.tlsConfig(serverCert)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create TLS config: %w", err)
+	}
+
 	grpcOptions := append(make([]grpc.ServerOption, 0, 4),
+		grpc.Creds(credentials.NewTLS(tlsCfg)),
 		grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams),
 		grpc.KeepaliveParams(keepalive.ServerParameters{
 			Time: grpcKeepaliveTime,
@@ -195,3 +257,169 @@ func (cp controlPlane) envoyServer(
 		return grpcServer.Serve(lis)
 	}), nil
 }
+
+func (cp *controlPlane) ensureControlPlaneTlsCert(
+	ctx context.Context,
+	k8sClient client.Client,
+) (tls.Certificate, error) {
+	var (
+		controlPlaneServerCert = &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      cp.Tls.ServerSecretName,
+				Namespace: cp.Namespace,
+			},
+		}
+		serverCert tls.Certificate
+	)
+
+	_, err := controllerutil.CreateOrUpdate(ctx, k8sClient, controlPlaneServerCert, func() (err error) {
+		controlPlaneServerCert.Type = corev1.SecretTypeTLS
+
+		if controlPlaneServerCert.Data == nil {
+			controlPlaneServerCert.Data = make(map[string][]byte, 3)
+		}
+
+		var (
+			cert       = controlPlaneServerCert.Data[corev1.TLSCertKey]
+			privateKey = controlPlaneServerCert.Data[corev1.TLSPrivateKeyKey]
+		)
+
+		var requireRenewal bool
+		if cert != nil && privateKey != nil {
+			if serverCert, err = tls.X509KeyPair(cert, privateKey); err != nil {
+				return fmt.Errorf("failed to parse server certificate: %w", err)
+			}
+
+			renewGracePeriod := time.Duration(float64(serverCert.Leaf.NotAfter.Sub(serverCert.Leaf.NotBefore)) * 0.1)
+			if serverCert.Leaf.NotAfter.Before(time.Now().Add(-renewGracePeriod)) {
+				requireRenewal = true
+			}
+		} else {
+			requireRenewal = true
+		}
+
+		if requireRenewal {
+			dnsNames := []string{
+				strings.Join([]string{cp.ServiceName, cp.Namespace, "svc"}, "."),
+				strings.Join([]string{cp.ServiceName, cp.Namespace, "svc", "cluster", "local"}, "."),
+			}
+			if certResult, err := certs.ServerCert("supabase-control-plane", dnsNames, cp.caCert); err != nil {
+				return fmt.Errorf("failed to generate server certificate: %w", err)
+			} else {
+				serverCert = certResult.ServerCert
+				controlPlaneServerCert.Data[corev1.TLSCertKey] = certResult.PublicKey
+				controlPlaneServerCert.Data[corev1.TLSPrivateKeyKey] = certResult.PrivateKey
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return tls.Certificate{}, fmt.Errorf("failed to create or update control plane server certificate: %w", err)
+	}
+
+	return serverCert, nil
+}
+
+func (cp *controlPlane) tlsConfig(serverCert tls.Certificate) (*tls.Config, error) {
+	tlsCfg := &tls.Config{
+		RootCAs:    x509.NewCertPool(),
+		ClientCAs:  x509.NewCertPool(),
+		ClientAuth: tls.RequireAndVerifyClientCert,
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, serverCert)
+	if !tlsCfg.RootCAs.AppendCertsFromPEM(cp.Tls.CA.Cert) {
+		return nil, fmt.Errorf("failed to parse CA certificate")
+	}
+
+	if !tlsCfg.ClientCAs.AppendCertsFromPEM(cp.Tls.CA.Cert) {
+		return nil, fmt.Errorf("failed to parse client CA certificate")
+	}
+
+	return tlsCfg, nil
+}
+
+func xdsServerCallbacks(logger logr.Logger) server.Callbacks {
+	return server.CallbackFuncs{
+		StreamOpenFunc: func(ctx context.Context, streamId int64, nodeId string) error {
+			logger.Info("Stream opened", "stream-id", streamId, "node-id", nodeId)
+			return nil
+		},
+		StreamClosedFunc: func(streamId int64, node *corev3.Node) {
+			logger.Info("Stream closed", "stream-id", streamId,
+				"node.id", node.Id,
+				"node.cluster", node.Cluster,
+			)
+		},
+		StreamRequestFunc: func(streamId int64, request *discoverygrpc.DiscoveryRequest) error {
+			logger.Info("Stream request",
+				"stream-id", streamId,
+				"request.node.id", request.Node.Id,
+				"request.node.cluster", request.Node.Cluster,
+				"request.version", request.VersionInfo,
+				"request.error", request.ErrorDetail,
+			)
+			return nil
+		},
+		StreamResponseFunc: func(
+			ctx context.Context,
+			streamId int64,
+			request *discoverygrpc.DiscoveryRequest,
+			response *discoverygrpc.DiscoveryResponse,
+		) {
+			logger.Info("Stream delta response",
+				"stream-id", streamId,
+				"request.node.id", request.Node.Id,
+				"request.node.cluster", request.Node.Cluster,
+			)
+		},
+		DeltaStreamOpenFunc: func(ctx context.Context, streamId int64, nodeId string) error {
+			logger.Info("Delta stream opened", "stream-id", streamId, "node-id", nodeId)
+			return nil
+		},
+
+		DeltaStreamClosedFunc: func(streamId int64, node *corev3.Node) {
+			logger.Info("Delta stream closed",
+				"stream-id", streamId,
+				"node.id", node.Id,
+				"node.cluster", node.Cluster,
+			)
+		},
+		StreamDeltaRequestFunc: func(i int64, request *discoverygrpc.DeltaDiscoveryRequest) error {
+			logger.Info("Stream delta request",
+				"stream-id", i,
+				"request.node.id", request.Node.Id,
+				"request.node.cluster", request.Node.Cluster,
+				"request.error", request.ErrorDetail,
+			)
+			return nil
+		},
+		StreamDeltaResponseFunc: func(
+			i int64,
+			request *discoverygrpc.DeltaDiscoveryRequest,
+			response *discoverygrpc.DeltaDiscoveryResponse,
+		) {
+			logger.Info("Stream delta response",
+				"stream-id", i,
+				"request.node", request.Node,
+				"response.resources", response.Resources,
+			)
+		},
+	}
+}
+
+func cacheLogger(logger logr.Logger) log.Logger {
+	wrapper := func(delegate func(msg string, keysAndValues ...any)) func(string, ...any) {
+		return func(s string, i ...any) {
+			delegate(fmt.Sprintf(s, i...))
+		}
+	}
+
+	return log.LoggerFuncs{
+		DebugFunc: nil, // enable for debug info
+		InfoFunc:  wrapper(logger.Info),
+		WarnFunc:  wrapper(logger.Info),
+		ErrorFunc: wrapper(logger.Info),
+	}
+}

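Note: the Run function above registers a `server-cert` health check via health.CertValidCheck(serverCert); the internal/health package itself is not part of this change set. A hedged sketch of what a compatible checker could look like — assuming it only verifies the leaf certificate's expiry and parses the leaf on demand; the real implementation may differ:

	package main

	import (
		"crypto/tls"
		"crypto/x509"
		"errors"
		"net/http"
		"time"
	)

	// certValidCheck returns a function with the controller-runtime
	// healthz.Checker signature that fails once the certificate has expired.
	// Sketch only, not the operator's internal/health code.
	func certValidCheck(cert tls.Certificate) func(*http.Request) error {
		return func(_ *http.Request) error {
			leaf := cert.Leaf
			if leaf == nil {
				if len(cert.Certificate) == 0 {
					return errors.New("no certificate data present")
				}
				parsed, err := x509.ParseCertificate(cert.Certificate[0])
				if err != nil {
					return err
				}
				leaf = parsed
			}
			if time.Now().After(leaf.NotAfter) {
				return errors.New("server certificate has expired")
			}
			return nil
		}
	}

	func main() {
		cert, err := tls.LoadX509KeyPair("tls.crt", "tls.key") // placeholder paths
		if err != nil {
			panic(err)
		}
		check := certValidCheck(cert)
		_ = check // would be passed to mgr.AddHealthzCheck("server-cert", check)
	}
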
cmd/flags.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+/*
+Copyright 2025 Peter Kurfer.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/alecthomas/kong"
+)
+
+var _ kong.MapperValue = (*FileContent)(nil)
+
+type FileContent []byte
+
+func (f *FileContent) Decode(ctx *kong.DecodeContext) (err error) {
+	var filePath string
+	if err := ctx.Scan.PopValueInto("file-content", &filePath); err != nil {
+		return err
+	}
+
+	if *f, err = os.ReadFile(filePath); err != nil {
+		return fmt.Errorf("failed to read file: %w", err)
+	}
+
+	return nil
+}

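Note: because FileContent implements kong.MapperValue, any flag declared with this type ends up holding the referenced file's bytes rather than the raw path string. An illustrative usage sketch — the CLI struct here is hypothetical, not the operator's real flag set:

	package main

	import (
		"fmt"
		"os"

		"github.com/alecthomas/kong"
	)

	// FileContent mirrors the type introduced in cmd/flags.go: the flag value
	// on the command line is a path; Decode swaps it for the file's contents.
	type FileContent []byte

	func (f *FileContent) Decode(ctx *kong.DecodeContext) (err error) {
		var filePath string
		if err := ctx.Scan.PopValueInto("file-content", &filePath); err != nil {
			return err
		}
		if *f, err = os.ReadFile(filePath); err != nil {
			return fmt.Errorf("failed to read file: %w", err)
		}
		return nil
	}

	// Hypothetical flag set for demonstration only.
	var cli struct {
		CACert FileContent `name:"ca-cert" help:"Path to a CA certificate; the bytes end up in CACert."`
	}

	func main() {
		kong.Parse(&cli)
		fmt.Printf("loaded %d bytes of CA material\n", len(cli.CACert))
	}
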
@@ -40,6 +40,10 @@ type manager struct {
 	SecureMetrics bool `name:"metrics-secure" default:"true" help:"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead."`
 	EnableHTTP2 bool `name:"enable-http2" default:"false" help:"If set, HTTP/2 will be enabled for the metrics and webhook servers"`
 	Namespace string `name:"controller-namespace" env:"CONTROLLER_NAMESPACE" default:"" help:"Namespace where the controller is running, ideally set via downward API"`
+	Tls struct {
+		CACert FileContent `env:"CA_CERT" name:"ca-cert" required:"" help:"The path to the CA certificate file."`
+		CAKey  FileContent `env:"CA_KEY" name:"ca-key" required:"" help:"The path to the CA key file."`
+	} `embed:"" prefix:"tls." envprefix:"TLS_"`
 }
 
 func (m manager) Run(ctx context.Context) error {
@@ -68,6 +72,11 @@ func (m manager) Run(ctx context.Context) error {
 		TLSOpts: tlsOpts,
 	})
 
+	caCert, err := tls.X509KeyPair(m.Tls.CACert, m.Tls.CAKey)
+	if err != nil {
+		return fmt.Errorf("unable to load CA cert: %w", err)
+	}
+
 	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
 	// More info:
 	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
@@ -145,6 +154,7 @@ func (m manager) Run(ctx context.Context) error {
 	if err = (&controller.APIGatewayReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
+		CACert: caCert,
 	}).SetupWithManager(ctx, mgr); err != nil {
 		return fmt.Errorf("unable to create controller APIGateway: %w", err)
 	}

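Note: with the control plane requiring and verifying client certificates (tls.RequireAndVerifyClientCert above) and the manager now holding the CA key pair, any xDS client has to present a certificate signed by that CA. A generic sketch of building such client-side gRPC credentials — file names and the target address are placeholders, not values taken from this repository:

	package main

	import (
		"crypto/tls"
		"crypto/x509"
		"log"
		"os"

		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials"
	)

	func main() {
		caPEM, err := os.ReadFile("ca.crt") // placeholder path
		if err != nil {
			log.Fatal(err)
		}
		caPool := x509.NewCertPool()
		if !caPool.AppendCertsFromPEM(caPEM) {
			log.Fatal("failed to parse CA certificate")
		}

		// Client certificate issued by the same CA the control plane trusts.
		clientCert, err := tls.LoadX509KeyPair("client.crt", "client.key") // placeholder paths
		if err != nil {
			log.Fatal(err)
		}

		creds := credentials.NewTLS(&tls.Config{
			Certificates: []tls.Certificate{clientCert},
			RootCAs:      caPool,
		})

		// Placeholder service address; the real name depends on the deployment.
		conn, err := grpc.NewClient("control-plane.supabase-system.svc:18000", grpc.WithTransportCredentials(creds))
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}
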
config/control-plane/cert-ca.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  labels:
+    app.kubernetes.io/name: supabase-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: cp-selfsigned-issuer
+  namespace: system
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  labels:
+    app.kubernetes.io/name: certificate
+    app.kubernetes.io/instance: serving-cert
+    app.kubernetes.io/component: certificate
+    app.kubernetes.io/created-by: supabase-operator
+    app.kubernetes.io/part-of: supabase-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: cp-ca-cert
+  namespace: supabase-system
+spec:
+  commonName: control-plane-ca
+  privateKey:
+    algorithm: ECDSA
+  issuerRef:
+    kind: Issuer
+    name: selfsigned-issuer
+  secretName: control-plane-ca-cert-tls
+  isCA: true

@ -18,40 +18,26 @@ spec:
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/name: control-plane
|
app.kubernetes.io/name: control-plane
|
||||||
spec:
|
spec:
|
||||||
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
|
|
||||||
# according to the platforms which are supported by your solution.
|
|
||||||
# It is considered best practice to support multiple architectures. You can
|
|
||||||
# build your manager image using the makefile target docker-buildx.
|
|
||||||
# affinity:
|
|
||||||
# nodeAffinity:
|
|
||||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
# nodeSelectorTerms:
|
|
||||||
# - matchExpressions:
|
|
||||||
# - key: kubernetes.io/arch
|
|
||||||
# operator: In
|
|
||||||
# values:
|
|
||||||
# - amd64
|
|
||||||
# - arm64
|
|
||||||
# - ppc64le
|
|
||||||
# - s390x
|
|
||||||
# - key: kubernetes.io/os
|
|
||||||
# operator: In
|
|
||||||
# values:
|
|
||||||
# - linux
|
|
||||||
securityContext:
|
securityContext:
|
||||||
runAsNonRoot: true
|
runAsNonRoot: true
|
||||||
# TODO(user): For common cases that do not require escalating privileges
|
seccompProfile:
|
||||||
# it is recommended to ensure that all your Pods/Containers are restrictive.
|
type: RuntimeDefault
|
||||||
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
|
|
||||||
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
|
|
||||||
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
|
|
||||||
# seccompProfile:
|
|
||||||
# type: RuntimeDefault
|
|
||||||
containers:
|
containers:
|
||||||
- args:
|
- args:
|
||||||
- control-plane
|
- control-plane
|
||||||
image: supabase-operator:latest
|
image: controller:latest
|
||||||
name: control-plane
|
name: control-plane
|
||||||
|
env:
|
||||||
|
- name: CONTROL_PLANE_NAMESPACE
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: metadata.namespace
|
||||||
|
- name: CONTROL_PLANE_SERVICE_NAME
|
||||||
|
value: control-plane
|
||||||
|
- name: TLS_CA_CERT
|
||||||
|
value: /etc/supabase/control-plane/certs/tls.crt
|
||||||
|
- name: TLS_CA_KEY
|
||||||
|
value: /etc/supabase/control-plane/certs/tls.key
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 18000
|
- containerPort: 18000
|
||||||
name: grpc
|
name: grpc
|
||||||
|
@ -62,17 +48,17 @@ spec:
|
||||||
drop:
|
drop:
|
||||||
- "ALL"
|
- "ALL"
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
grpc:
|
httpGet:
|
||||||
port: 18000
|
path: /healthz
|
||||||
|
port: 8081
|
||||||
initialDelaySeconds: 15
|
initialDelaySeconds: 15
|
||||||
periodSeconds: 20
|
periodSeconds: 20
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
grpc:
|
httpGet:
|
||||||
port: 18000
|
path: /readyz
|
||||||
|
port: 8081
|
||||||
initialDelaySeconds: 5
|
initialDelaySeconds: 5
|
||||||
periodSeconds: 10
|
periodSeconds: 10
|
||||||
# TODO(user): Configure the resources accordingly based on the project requirements.
|
|
||||||
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
cpu: 150m
|
cpu: 150m
|
||||||
|
@ -80,5 +66,12 @@ spec:
|
||||||
requests:
|
requests:
|
||||||
cpu: 50m
|
cpu: 50m
|
||||||
memory: 64Mi
|
memory: 64Mi
|
||||||
|
volumeMounts:
|
||||||
|
- name: tls-certs
|
||||||
|
mountPath: /etc/supabase/control-plane/certs
|
||||||
|
volumes:
|
||||||
|
- name: tls-certs
|
||||||
|
secret:
|
||||||
|
secretName: control-plane-ca-cert-tls
|
||||||
serviceAccountName: control-plane
|
serviceAccountName: control-plane
|
||||||
terminationGracePeriodSeconds: 10
|
terminationGracePeriodSeconds: 10
|
||||||
|
|
|
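The tls-certs volume above mounts the secret that cert-manager renders from the cp-ca-cert Certificate earlier in this change. A minimal sketch of that secret's shape (data values elided; key names are cert-manager's defaults, which is why TLS_CA_CERT and TLS_CA_KEY point at tls.crt and tls.key; ca.crt is only present when the issuer provides it):

apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
  name: control-plane-ca-cert-tls
  namespace: supabase-system
data:
  ca.crt: <base64 PEM>   # CA certificate, if populated by the issuer
  tls.crt: <base64 PEM>  # certificate, mounted at /etc/supabase/control-plane/certs/tls.crt
  tls.key: <base64 PEM>  # private key, mounted at /etc/supabase/control-plane/certs/tls.key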
@@ -2,5 +2,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 
 resources:
+- cert-ca.yaml
 - control-plane.yaml
 - service.yaml
+
+configurations:
+- kustomizeconfig.yaml

8
config/control-plane/kustomizeconfig.yaml
Normal file
@@ -0,0 +1,8 @@
+# This configuration is for teaching kustomize how to update name ref substitution
+nameReference:
+- kind: Issuer
+  group: cert-manager.io
+  fieldSpecs:
+  - kind: Certificate
+    group: cert-manager.io
+    path: spec/issuerRef/name
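The nameReference entry matters once kustomize rewrites resource names. A minimal sketch of the effect, assuming a hypothetical overlay that prefixes all names (paths and prefix are illustrative):

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: dev-
resources:
- ../../config/control-plane
# With the nameReference configuration, kustomize renames the Issuer to
# "dev-selfsigned-issuer" and also rewrites the Certificate's
# spec/issuerRef/name to match; without it, the issuerRef would keep
# pointing at the old, now non-existent name.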
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.17.1
+    controller-gen.kubebuilder.io/version: v0.17.2
   name: apigateways.supabase.k8s.icb4dc0.de
 spec:
   group: supabase.k8s.icb4dc0.de
@@ -15,7 +15,7 @@ spec:
   scope: Namespaced
   versions:
   - additionalPrinterColumns:
-    - jsonPath: .status.envoy.configVersion
+    - jsonPath: .status.envoy.resourceHash
       name: EnvoyConfigVersion
       type: string
     name: v1alpha1
@@ -73,6 +73,36 @@ spec:
                     - key
                     type: object
                     x-kubernetes-map-type: atomic
+                  tls:
+                    description: TLS - enable and configure TLS for the API endpoint
+                    properties:
+                      cert:
+                        properties:
+                          caCertKey:
+                            default: ca.crt
+                            description: CaCertKey - key in the secret that contains
+                              the CA certificate
+                            type: string
+                          secretName:
+                            type: string
+                          serverCertKey:
+                            default: tls.crt
+                            description: ServerCertKey - key in the secret that contains
+                              the server certificate
+                            type: string
+                          serverKeyKey:
+                            default: tls.key
+                            description: ServerKeyKey - key in the secret that contains
+                              the server private key
+                            type: string
+                        required:
+                        - secretName
+                        - serverCertKey
+                        - serverKeyKey
+                        type: object
+                    required:
+                    - cert
+                    type: object
                 required:
                 - jwks
                 type: object
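Read back from the schema above, a minimal sketch of the new TLS block as it would appear in an APIGateway resource (secret name is hypothetical; the parent API endpoint field is extended by this hunk but not named in it):

tls:
  cert:
    secretName: api-endpoint-cert   # hypothetical kubernetes.io/tls secret
    # caCertKey, serverCertKey and serverKeyKey fall back to their defaults:
    # ca.crt, tls.crt and tls.key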
@@ -85,6 +115,125 @@ spec:
                 description: |-
                   DashboardEndpoint - Configure the endpoint for the Supabase dashboard (studio)
                   this includes optional authentication (basic or Oauth2) for the dashboard
+                properties:
+                  auth:
+                    description: Auth - configure authentication for the dashboard
+                      endpoint
+                    properties:
+                      basic:
+                        description: |-
+                          Basic - HTTP basic auth configuration, this should only be used in exceptions
+                          e.g. during evaluations or for local development
+                          only used if no other authentication is configured
+                        properties:
+                          plaintextUsersSecretRef:
+                            description: |-
+                              PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form
+                              if not empty, credentials will be merged with inline users
+                            type: string
+                          usersInline:
+                            description: UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)
+                            items:
+                              pattern: ^[\w_.]+:\{SHA\}[A-z0-9]+=*$
+                              type: string
+                            type: array
+                        type: object
+                      oauth2:
+                        description: |-
+                          OAuth2 - configure oauth2 authentication for the dashhboard listener
+                          if configured, will be preferred over Basic authentication configuration
+                          effectively disabling basic auth
+                        properties:
+                          authorizationEndpoint:
+                            description: AuthorizationEndpoint - endpoint where the
+                              user will be redirected to authenticate
+                            type: string
+                          clientId:
+                            description: ClientID - client ID to authenticate with
+                              the OAuth2 provider
+                            type: string
+                          clientSecretRef:
+                            description: ClientSecretRef - reference to the secret
+                              that contains the client secret
+                            properties:
+                              key:
+                                description: The key of the secret to select from. Must
+                                  be a valid secret key.
+                                type: string
+                              name:
+                                default: ""
+                                description: |-
+                                  Name of the referent.
+                                  This field is effectively required, but due to backwards compatibility is
+                                  allowed to be empty. Instances of this type with an empty value here are
+                                  almost certainly wrong.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          openIdIssuer:
+                            description: |-
+                              OpenIDIssuer - if set the defaulter will fetch the discovery document and fill
+                              TokenEndpoint and AuthorizationEndpoint based on the discovery document
+                            type: string
+                          resources:
+                            description: Resources - resources to request from the
+                              OAuth2 provider (e.g. "user", "email", ...) - optional
+                            items:
+                              type: string
+                            type: array
+                          scopes:
+                            description: Scopes - scopes to request from the OAuth2
+                              provider (e.g. "openid", "profile", ...) - optional
+                            items:
+                              type: string
+                            type: array
+                          tokenEndpoint:
+                            description: TokenEndpoint - endpoint where Envoy will
+                              retrieve the OAuth2 access and identity token from
+                            type: string
+                        required:
+                        - clientId
+                        - clientSecretRef
+                        type: object
+                    type: object
+                  tls:
+                    description: TLS - enable and configure TLS for the Dashboard
+                      endpoint
+                    properties:
+                      cert:
+                        properties:
+                          caCertKey:
+                            default: ca.crt
+                            description: CaCertKey - key in the secret that contains
+                              the CA certificate
+                            type: string
+                          secretName:
+                            type: string
+                          serverCertKey:
+                            default: tls.crt
+                            description: ServerCertKey - key in the secret that contains
+                              the server certificate
+                            type: string
+                          serverKeyKey:
+                            default: tls.key
+                            description: ServerKeyKey - key in the secret that contains
+                              the server private key
+                            type: string
+                        required:
+                        - secretName
+                        - serverCertKey
+                        - serverKeyKey
+                        type: object
+                    required:
+                    - cert
+                    type: object
                 type: object
               envoy:
                 description: Envoy - configure the envoy instance and most importantly
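A minimal sketch of what the new dashboard auth block could look like in an APIGateway resource (the parent field name, issuer URL, client ID and secret name are all hypothetical; per the description above, openIdIssuer lets the defaulter fill tokenEndpoint and authorizationEndpoint from the discovery document):

dashboard:
  auth:
    oauth2:
      openIdIssuer: https://idp.example.com/realms/dev   # hypothetical issuer
      clientId: supabase-dashboard                       # hypothetical client ID
      clientSecretRef:
        name: dashboard-oauth2                           # hypothetical secret
        key: clientSecret
      scopes:
        - openid
        - profile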
@@ -108,13 +257,45 @@ spec:
                     - host
                     - port
                     type: object
+                  debugging:
+                    properties:
+                      componentLogLevels:
+                        items:
+                          properties:
+                            component:
+                              description: |-
+                                Component - the component to set the log level for
+                                the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36)
+                              type: string
+                            level:
+                              description: Level - the log level to set for the component
+                              enum:
+                              - trace
+                              - debug
+                              - info
+                              - warning
+                              - error
+                              - critical
+                              - "off"
+                              type: string
+                          required:
+                          - component
+                          - level
+                          type: object
+                        type: array
+                    type: object
+                  disableIPv6:
+                    description: |-
+                      DisableIPv6 - disable IPv6 for the Envoy instance
+                      this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
+                    type: boolean
                   nodeName:
                     description: |-
                       NodeName - identifies the Envoy cluster within the current namespace
                       if not set, the name of the APIGateway resource will be used
                       The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit.
                     type: string
-                  workloadTemplate:
+                  workloadSpec:
                     description: WorkloadTemplate - customize the Envoy deployment
                     properties:
                       additionalLabels:
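For the new debugging and disableIPv6 fields, a minimal sketch of how they could be set on an APIGateway resource (component IDs are taken from Envoy's logger list linked above; the chosen components and levels are purely illustrative):

envoy:
  debugging:
    componentLogLevels:
      - component: http        # illustrative Envoy logger component
        level: debug
      - component: upstream    # illustrative Envoy logger component
        level: trace
  disableIPv6: true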
@@ -1921,248 +2102,9 @@ spec:
                         - name
                         type: object
                       type: array
-                      replicas:
-                        format: int32
-                        type: integer
+                      container:
+                        description: ContainerSpec - customize the container template
+                          of the workload
-                      securityContext:
-                        description: |-
-                          PodSecurityContext holds pod-level security attributes and common container settings.
-                          Some fields are also present in container.securityContext. Field values of
-                          container.securityContext take precedence over field values of PodSecurityContext.
-                        properties:
-                          appArmorProfile:
-                            description: |-
-                              appArmorProfile is the AppArmor options to use by the containers in this pod.
-                              Note that this field cannot be set when spec.os.name is windows.
-                            properties:
-                              localhostProfile:
-                                description: |-
-                                  localhostProfile indicates a profile loaded on the node that should be used.
-                                  The profile must be preconfigured on the node to work.
-                                  Must match the loaded name of the profile.
-                                  Must be set if and only if type is "Localhost".
-                                type: string
-                              type:
-                                description: |-
-                                  type indicates which kind of AppArmor profile will be applied.
-                                  Valid options are:
-                                    Localhost - a profile pre-loaded on the node.
-                                    RuntimeDefault - the container runtime's default profile.
-                                    Unconfined - no AppArmor enforcement.
-                                type: string
-                            required: