Compare commits
21 commits
Commits (SHA1; author and date cells were empty in this view):

87a06dac66
366ceece24
8b2425d16d
101bf971a7
c4a43b82d3
1f285f6492
2d71a4a132
264b30e8a2
7f56a3db56
9d02a2d90b
3c13eb0d6b
e9302c51be
45630f7326
6c61adb1c7
8066c34eb5
867daaa375
0fccef973f
89b682935b
c0cbd22bb0
0014927ca9
604525de38
163 changed files with 22161 additions and 6345 deletions
.github/workflows/docs.yml (vendored) — 3 changes

@@ -35,7 +35,8 @@ jobs:
       run: mkdocs build

     - name: Copy files to the s3 website content bucket
-      if: ${{ startsWith(github.ref, 'refs/tags/v') }}
+      # for the time being, let's just always deploy the docs
+      # if: ${{ startsWith(github.ref, 'refs/tags/v') }}
       run: rclone sync site/ HCLOUD:/1661580-supabase-operator-docs/
       env:
         RCLONE_CONFIG_HCLOUD_TYPE: s3
.github/workflows/lint.yml (vendored) — 2 changes

@@ -28,4 +28,4 @@ jobs:
     - name: Run linter
       uses: golangci/golangci-lint-action@v6
       with:
-        version: v1.61
+        version: v1.63.4
.github/workflows/postgres.yml (vendored, new file) — 80 additions

@@ -0,0 +1,80 @@
+name: Postgres image
+on:
+  schedule:
+    # every Thursday 2:30am
+    - cron: "30 2 * * 2"
+  push:
+    paths:
+      - .github/workflows/postgres.yml
+      - postgres/**
+    branches:
+      - main
+    tags:
+      - "v*"
+
+env:
+  MINOR_VERSIONS: '{"15": "12","17": "4"}'
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        arch:
+          - arm64
+          - amd64
+        postgres_major:
+          - "15"
+          - "17"
+    runs-on: ubuntu-latest-${{ matrix.arch }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Login to container registry
+        uses: docker/login-action@v3
+        with:
+          registry: registry.icb4dc0.de
+          username: ${{ secrets.HARBOR_USER }}
+          password: ${{ secrets.HARBOR_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        with:
+          file: postgres/Dockerfile
+          push: true
+          tags: registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-${{ matrix.arch }}
+          build-args: |
+            POSTGRES_MAJOR=${{ matrix.postgres_major }}
+            POSTGRES_MINOR=${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}
+
+  manifest:
+    strategy:
+      matrix:
+        postgres_major:
+          - "15"
+          - "17"
+    runs-on: ubuntu-latest
+    needs:
+      - build
+    steps:
+      - name: Login to container registry
+        uses: docker/login-action@v3
+        with:
+          registry: registry.icb4dc0.de
+          username: ${{ secrets.HARBOR_USER }}
+          password: ${{ secrets.HARBOR_TOKEN }}
+
+      - name: Install skopeo
+        run: |
+          apt-get update
+          apt-get install -y skopeo
+
+      - name: Create manifest
+        run: |
+          docker buildx imagetools create \
+            -t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }} \
+            -t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }} \
+            registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64 \
+            registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
+
+          skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64
+          skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
.github/workflows/release.yml (vendored) — 1 change

@@ -36,7 +36,6 @@ jobs:
     - name: Init go
       run: |
         go mod download
-        go mod download -modfile tools/go.mod

     - name: Snapshot release
       uses: goreleaser/goreleaser-action@v6
.github/workflows/test-e2e.yml (vendored) — 5 changes

@@ -43,3 +43,8 @@ jobs:
       run: |
         go mod tidy
         make test-e2e
+
+    - name: Cleanup kind cluster
+      if: always()
+      run: |
+        kind delete cluster
.github/workflows/test.yml (vendored) — 2 changes

@@ -22,4 +22,4 @@ jobs:
     - name: Running Tests
       run: |
         go mod tidy
-        make test
+        go run mage.go Test
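The test step now dispatches to Mage instead of Make: `go run mage.go Test` compiles the repository's magefiles and runs their `Test` target. A hypothetical sketch of such a target (the real magefiles are not shown in this comparison):

//go:build mage

package main

import "github.com/magefile/mage/sh"

// Test is a stand-in for the Mage target the workflow invokes via
// `go run mage.go Test`; it simply shells out to the Go test runner.
func Test() error {
	return sh.RunV("go", "test", "./...")
}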
.golangci.yml — 111 changes (migration to the golangci-lint v2 config schema)

@@ -1,35 +1,18 @@
+version: "2"
 run:
   timeout: 5m
   allow-parallel-runners: true
-
-issues:
-  # don't skip warning about doc comments
-  # don't exclude the default set of lint
-  exclude-use-default: false
-  # restore some of the defaults
-  # (fill in the rest as needed)
-  exclude-rules:
-    - path: "api/*"
-      linters:
-        - lll
-    - path: "internal/*"
-      linters:
-        - dupl
-        - lll
 linters:
-  disable-all: true
+  default: none
   enable:
+    - copyloopvar
     - dupl
    - errcheck
-    - copyloopvar
     - ginkgolinter
     - goconst
     - gocyclo
-    - gofmt
-    - goimports
-    # enable when the TODOs are fixed
-    # - godox
     - goheader
-    - gosimple
+    - godox
     - govet
     - ineffassign
     - lll

@@ -38,43 +21,65 @@ linters:
     - prealloc
     - revive
     - staticcheck
-    - typecheck
     - unconvert
     - unparam
     - unused
-
-linters-settings:
-  revive:
-    rules:
-      - name: comment-spacings
-  gci:
-    sections:
-      - standard
-      - default
-      - prefix(code.icb4dc0.de/prskr/supabase-operator)
-      - alias
-      - blank
-      - dot
-  goimports:
-    local-prefixes: code.icb4dc0.de/prskr/supabase-operator
-  goheader:
-    values:
-      const:
-        AUTHOR: Peter Kurfer
-    template-path: hack/header.tmpl
-
-  importas:
-    no-unaliased: true
-    no-extra-aliases: true
-    alias:
-      - pkg: k8s.io/api/(\w+)/(v[\w\d]+)
-        alias: $1$2
-      - pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
-        alias: metav1
-
+  settings:
+    goheader:
+      values:
+        const:
+          AUTHOR: Peter Kurfer
+      template-path: hack/header.tmpl
+    importas:
+      alias:
+        - pkg: k8s.io/api/(\w+)/(v[\w\d]+)
+          alias: $1$2
+        - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+          alias: metav1
+      no-unaliased: true
+      no-extra-aliases: true
+    revive:
+      rules:
+        - name: comment-spacings
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - lll
+        path: api/*
+      - linters:
+          - dupl
+          - lll
+        path: internal/*
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 severity:
-  default-severity: error
+  default: error
   rules:
     - linters:
         - godox
       severity: info
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - prefix(code.icb4dc0.de/prskr/supabase-operator)
+        - alias
+        - blank
+        - dot
+    goimports:
+      local-prefixes:
+        - code.icb4dc0.de/prskr/supabase-operator
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
(file name not shown in this view; a git-hooks configuration)

@@ -2,7 +2,7 @@
 # git hook pre commit
 pre-commit = [
-  "go mod tidy -go=1.23.5",
+  "go mod tidy -go=1.24",
   "go run mage.go GenerateAll",
   "husky lint-staged",
   # "golangci-lint run",
(file name not shown in this view; an editor/LSP settings JSON)

@@ -4,6 +4,19 @@
     "initialization_options": {
       "local": "code.icb4dc0.de/prskr/supabase-operator"
     }
   },
+  "golangci-lint": {
+    "initialization_options": {
+      "command": [
+        "go",
+        "tool",
+        "golangci-lint",
+        "run",
+        "--output.json.path",
+        "stderr",
+        "--issues-exit-code=1"
+      ]
+    }
+  }
  }
 }
(file name not shown in this view; appears to be the manager Dockerfile)

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.23.4 AS builder
+FROM golang:1.24-alpine AS builder
 ARG TARGETOS
 ARG TARGETARCH

@@ -16,10 +16,7 @@ COPY [ "go.*", "./" ]
 COPY [ "api", "api" ]
 COPY [ "assets/migrations", "assets/migrations" ]
 COPY [ "cmd", "cmd" ]
-COPY [ "infrastructure", "infrastructure" ]
 COPY [ "internal", "internal" ]
-COPY [ "magefiles", "magefiles" ]
-COPY [ "tools", "tools" ]

 # Build
 # the GOARCH has not a default value to allow the binary be built according to the host where the command
Makefile — 64 changes

@@ -44,11 +44,11 @@ help: ## Display this help.
 ##@ Development

 .PHONY: manifests
-manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
+manifests: ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases

 .PHONY: generate
-generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+generate: ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

 .PHONY: fmt

@@ -60,7 +60,7 @@ vet: ## Run go vet against code.
	go vet ./...

 .PHONY: test
-test: manifests generate fmt vet envtest ## Run tests.
+test: manifests generate fmt vet ## Run tests.
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

 # TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.

@@ -81,11 +81,11 @@ test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated
	go test ./test/e2e/ -v -ginkgo.v

 .PHONY: lint
-lint: golangci-lint ## Run golangci-lint linter
+lint: ## Run golangci-lint linter
	$(GOLANGCI_LINT) run

 .PHONY: lint-fix
-lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
+lint-fix: ## Run golangci-lint linter and perform fixes
	$(GOLANGCI_LINT) run --fix

 ##@ Build

@@ -127,7 +127,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
	rm Dockerfile.cross

 .PHONY: build-installer
-build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
+build-installer: manifests generate ## Generate a consolidated YAML with CRDs and deployment.
	mkdir -p dist
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default > dist/install.yaml

@@ -148,7 +148,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified

 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	cd config/manager && $(KUSTOMIZE) edit set image supabase-operator=${IMG}
	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -

 .PHONY: undeploy

@@ -164,49 +164,7 @@ $(LOCALBIN):

 ## Tool Binaries
 KUBECTL ?= kubectl
-KUSTOMIZE ?= $(LOCALBIN)/kustomize
-CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
-ENVTEST ?= $(LOCALBIN)/setup-envtest
-GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
-
-## Tool Versions
-KUSTOMIZE_VERSION ?= v5.5.0
-CONTROLLER_TOOLS_VERSION ?= v0.16.4
-ENVTEST_VERSION ?= release-0.19
-GOLANGCI_LINT_VERSION ?= v1.61.0
-
-.PHONY: kustomize
-kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
-$(KUSTOMIZE): $(LOCALBIN)
-	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
-
-.PHONY: controller-gen
-controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
-$(CONTROLLER_GEN): $(LOCALBIN)
-	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
-
-.PHONY: envtest
-envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
-$(ENVTEST): $(LOCALBIN)
-	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
-
-.PHONY: golangci-lint
-golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
-$(GOLANGCI_LINT): $(LOCALBIN)
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
-
-# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
-# $1 - target path with name of binary
-# $2 - package url which can be installed
-# $3 - specific version of package
-define go-install-tool
-@[ -f "$(1)-$(3)" ] || { \
-set -e; \
-package=$(2)@$(3) ;\
-echo "Downloading $${package}" ;\
-rm -f $(1) || true ;\
-GOBIN=$(LOCALBIN) go install $${package} ;\
-mv $(1) $(1)-$(3) ;\
-} ;\
-ln -sf $(1)-$(3) $(1)
-endef
+KUSTOMIZE ?= go tool kustomize
+CONTROLLER_GEN ?= go tool controller-gen
+ENVTEST ?= go tool setup-envtest
+GOLANGCI_LINT = go tool golangci-lint
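For context on the rewritten tool-binaries block: Go 1.24 can track tool dependencies directly in go.mod via the `tool` directive, which is what makes `go tool kustomize` and friends work without the deleted go-install-tool machinery. A minimal go.mod sketch under that assumption (the entries are illustrative, not taken from this diff):

module code.icb4dc0.de/prskr/supabase-operator

go 1.24

tool (
	sigs.k8s.io/controller-runtime/tools/setup-envtest
	sigs.k8s.io/controller-tools/cmd/controller-gen
	sigs.k8s.io/kustomize/kustomize/v5
)

New entries are added with `go get -tool <module>@<version>`; `go tool <name>` then builds and runs the version pinned in go.mod, so tool versions travel with the module instead of a bin/ directory of symlinks.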
(file name not shown in this view; the project README)

@@ -37,7 +37,7 @@ This operator tries to be as un-opinionated as possible and therefore does not
 ## Getting Started

 ### Prerequisites
-- go version v1.23.x+
+- go version v1.24.x+
 - docker version 27.+.
 - kubectl version v1.30.0+.
 - Access to a Kubernetes v1.30.+ cluster.
Tiltfile — 17 changes

@@ -3,9 +3,7 @@ load('ext://restart_process', 'docker_build_with_restart')
 allow_k8s_contexts('kind-kind')

-local('./dev/prepare-dev-cluster.sh')
 k8s_yaml(kustomize('config/dev'))
-k8s_yaml(kustomize('config/samples'))

 compile_cmd = 'CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/supabase-operator ./cmd/'

@@ -24,6 +22,8 @@ local_resource(
     resource_deps=[]
 )

+k8s_kind('Cluster', api_version='postgresql.cnpg.io/v1')
+
 docker_build_with_restart(
     'supabase-operator',
     '.',

@@ -41,14 +41,17 @@ k8s_resource('supabase-controller-manager')
 k8s_resource(
     workload='supabase-control-plane',
     port_forwards=18000,
     resource_deps=[]
 )

 k8s_resource(
-    objects=["cluster-example:Cluster:supabase-demo"],
+    workload='cluster-example',
     new_name='Postgres cluster',
     port_forwards=5432
 )

 k8s_resource(workload='minio', port_forwards=[9000,9090])

 k8s_resource(
     objects=["core-sample:Core:supabase-demo"],
     new_name='Supabase Core',

@@ -61,7 +64,12 @@ k8s_resource(
 k8s_resource(
     objects=["gateway-sample:APIGateway:supabase-demo"],
     extra_pod_selectors={"app.kubernetes.io/component": "api-gateway"},
-    port_forwards=[8000, 19000],
+    port_forwards=[3000, 8000, 19000],
+    links=[
+        link("https://localhost:3000", "Studio"),
+        link("http://localhost:8000", "API"),
+        link("http://localhost:19000", "Envoy Admin Interface")
+    ],
     new_name='API Gateway',
     resource_deps=[
         'supabase-controller-manager'

@@ -72,7 +80,6 @@ k8s_resource(
     objects=["dashboard-sample:Dashboard:supabase-demo"],
     extra_pod_selectors={"app.kubernetes.io/component": "dashboard", "app.kubernetes.io/name": "studio"},
     discovery_strategy="selectors-only",
-    port_forwards=[3000],
     new_name='Dashboard',
     resource_deps=[
         'supabase-controller-manager'
(file name not shown in this view; the APIGateway API types in api/v1alpha1)

@@ -19,6 +19,7 @@ package v1alpha1
 import (
+	"iter"
	"maps"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -37,6 +38,40 @@ type ControlPlaneSpec struct {
	Port uint16 `json:"port"`
 }

+type EnvoyLogLevel string
+
+type EnvoyComponentLogLevel struct {
+	// Component - the component to set the log level for
+	// the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36)
+	Component string `json:"component"`
+	// Level - the log level to set for the component
+	// +kubebuilder:validation:Enum=trace;debug;info;warning;error;critical;off
+	Level EnvoyLogLevel `json:"level"`
+}
+
+type EnvoyDebuggingOptions struct {
+	ComponentLogLevels []EnvoyComponentLogLevel `json:"componentLogLevels,omitempty"`
+}
+
+func (o *EnvoyDebuggingOptions) DebugLogging() string {
+	if o == nil || len(o.ComponentLogLevels) == 0 {
+		return ""
+	}
+
+	var builder strings.Builder
+	for i, lvl := range o.ComponentLogLevels {
+		if i > 0 {
+			builder.WriteString(",")
+		}
+
+		builder.WriteString(lvl.Component)
+		builder.WriteRune(':')
+		builder.WriteString(string(lvl.Level))
+	}
+
+	return builder.String()
+}
+
 type EnvoySpec struct {
	// NodeName - identifies the Envoy cluster within the current namespace
	// if not set, the name of the APIGateway resource will be used
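As a quick illustration of the new helper above: DebugLogging joins the configured entries into the comma-separated component:level list Envoy accepts (e.g. for its --component-log-level flag). A minimal Go sketch with made-up values:

opts := &EnvoyDebuggingOptions{
	ComponentLogLevels: []EnvoyComponentLogLevel{
		{Component: "http", Level: "debug"},
		{Component: "upstream", Level: "trace"},
	},
}

// prints "http:debug,upstream:trace"
fmt.Println(opts.DebugLogging())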
@@ -45,15 +80,148 @@ type EnvoySpec struct {
	// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
	ControlPlane *ControlPlaneSpec `json:"controlPlane"`
	// WorkloadTemplate - customize the Envoy deployment
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
	// DisableIPv6 - disable IPv6 for the Envoy instance
	// this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
	DisableIPv6 bool `json:"disableIPv6,omitempty"`
+	Debugging *EnvoyDebuggingOptions `json:"debugging,omitempty"`
 }

+type TlsCertRef struct {
+	SecretName string `json:"secretName"`
+	// ServerCertKey - key in the secret that contains the server certificate
+	// +kubebuilder:default="tls.crt"
+	ServerCertKey string `json:"serverCertKey"`
+	// ServerKeyKey - key in the secret that contains the server private key
+	// +kubebuilder:default="tls.key"
+	ServerKeyKey string `json:"serverKeyKey"`
+	// CaCertKey - key in the secret that contains the CA certificate
+	// +kubebuilder:default="ca.crt"
+	CaCertKey string `json:"caCertKey,omitempty"`
+}
+
+type EndpointTlsSpec struct {
+	Cert *TlsCertRef `json:"cert"`
+}
+
+type ApiEndpointSpec struct {
+	// JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs
+	JWKSSelector *corev1.SecretKeySelector `json:"jwks"`
+	// TLS - enable and configure TLS for the API endpoint
+	TLS *EndpointTlsSpec `json:"tls,omitempty"`
+}
+
+func (s *ApiEndpointSpec) TLSSpec() *EndpointTlsSpec {
+	if s == nil {
+		return nil
+	}
+
+	return s.TLS
+}
+
+type DashboardAuthType string
+
+const (
+	DashboardAuthTypeNone   DashboardAuthType = "none"
+	DashboardAuthTypeOAuth2 DashboardAuthType = "oauth2"
+	DashboardAuthTypeBasic  DashboardAuthType = "basic"
+)
+
+type DashboardOAuth2Spec struct {
+	// OpenIDIssuer - if set the defaulter will fetch the discovery document and fill
+	// TokenEndpoint and AuthorizationEndpoint based on the discovery document
+	OpenIDIssuer string `json:"openIdIssuer,omitempty"`
+	// TokenEndpoint - endpoint where Envoy will retrieve the OAuth2 access and identity token from
+	TokenEndpoint string `json:"tokenEndpoint,omitempty"`
+	// AuthorizationEndpoint - endpoint where the user will be redirected to authenticate
+	AuthorizationEndpoint string `json:"authorizationEndpoint,omitempty"`
+	// ClientID - client ID to authenticate with the OAuth2 provider
+	ClientID string `json:"clientId"`
+	// Scopes - scopes to request from the OAuth2 provider (e.g. "openid", "profile", ...) - optional
+	Scopes []string `json:"scopes,omitempty"`
+	// Resources - resources to request from the OAuth2 provider (e.g. "user", "email", ...) - optional
+	Resources []string `json:"resources,omitempty"`
+	// ClientSecretRef - reference to the secret that contains the client secret
+	ClientSecretRef *corev1.SecretKeySelector `json:"clientSecretRef"`
+}
+
+type DashboardBasicAuthSpec struct {
+	// UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)
+	// +kubebuilder:validation:items:Pattern="^[\\w_.]+:\\{SHA\\}[A-z0-9]+=*$"
+	UsersInline []string `json:"usersInline,omitempty"`
+	// PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form
+	// if not empty, credentials will be merged with inline users
+	PlaintextUsersSecretRef string `json:"plaintextUsersSecretRef,omitempty"`
+}
+
+type DashboardAuthSpec struct {
+	// OAuth2 - configure OAuth2 authentication for the dashboard listener
+	// if configured, will be preferred over Basic authentication configuration
+	// effectively disabling basic auth
+	OAuth2 *DashboardOAuth2Spec `json:"oauth2,omitempty"`
+	// Basic - HTTP basic auth configuration, this should only be used in exceptions
+	// e.g. during evaluations or for local development
+	// only used if no other authentication is configured
+	Basic *DashboardBasicAuthSpec `json:"basic,omitempty"`
+}
+
+type DashboardEndpointSpec struct {
+	// Auth - configure authentication for the dashboard endpoint
+	Auth *DashboardAuthSpec `json:"auth,omitempty"`
+	// TLS - enable and configure TLS for the Dashboard endpoint
+	TLS *EndpointTlsSpec `json:"tls,omitempty"`
+}
+
+func (s *DashboardEndpointSpec) TLSSpec() *EndpointTlsSpec {
+	if s == nil {
+		return nil
+	}
+
+	return s.TLS
+}
+
+func (s *DashboardEndpointSpec) AuthType() DashboardAuthType {
+	if s == nil || s.Auth == nil {
+		return DashboardAuthTypeNone
+	}
+
+	if s.Auth.OAuth2 != nil {
+		return DashboardAuthTypeOAuth2
+	}
+
+	if s.Auth.Basic != nil {
+		return DashboardAuthTypeBasic
+	}
+
+	return DashboardAuthTypeNone
+}
+
+func (s *DashboardEndpointSpec) OAuth2() *DashboardOAuth2Spec {
+	if s == nil || s.Auth == nil {
+		return nil
+	}
+
+	return s.Auth.OAuth2
+}
+
+func (s *DashboardEndpointSpec) Basic() *DashboardBasicAuthSpec {
+	if s == nil || s.Auth == nil {
+		return nil
+	}
+
+	return s.Auth.Basic
+}
+
 // APIGatewaySpec defines the desired state of APIGateway.
 type APIGatewaySpec struct {
	// Envoy - configure the envoy instance and most importantly the control-plane
	Envoy *EnvoySpec `json:"envoy"`
-	// JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs
-	JWKSSelector *corev1.SecretKeySelector `json:"jwks"`
+	// ApiEndpoint - Configure the endpoint for all API routes
+	// this includes the JWT configuration
+	ApiEndpoint *ApiEndpointSpec `json:"apiEndpoint,omitempty"`
+	// DashboardEndpoint - Configure the endpoint for the Supabase dashboard (studio)
+	// this includes optional authentication (basic or OAuth2) for the dashboard
+	DashboardEndpoint *DashboardEndpointSpec `json:"dashboardEndpoint,omitempty"`
	// ServiceSelector - selector to match all Supabase services (or in fact EndpointSlices) that should be considered for this APIGateway
	// +kubebuilder:default={"matchExpressions":{{"key": "app.kubernetes.io/part-of", "operator":"In", "values":{"supabase"}},{"key":"supabase.k8s.icb4dc0.de/api-gateway-target","operator":"Exists"}}}
	ServiceSelector *metav1.LabelSelector `json:"serviceSelector"`

@@ -63,8 +231,7 @@ type APIGatewaySpec struct {
 }

 type EnvoyStatus struct {
-	ConfigVersion string `json:"configVersion,omitempty"`
-	ResourceHash  []byte `json:"resourceHash,omitempty"`
+	ResourceHash []byte `json:"resourceHash,omitempty"`
 }

 // APIGatewayStatus defines the observed state of APIGateway.

@@ -77,7 +244,7 @@ type APIGatewayStatus struct {
 // +kubebuilder:subresource:status

 // APIGateway is the Schema for the apigateways API.
-// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.configVersion`
+// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.resourceHash`
 type APIGateway struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

@@ -88,7 +255,7 @@ type APIGateway struct {
 func (g APIGateway) JwksSecretMeta() metav1.ObjectMeta {
	return metav1.ObjectMeta{
-		Name:      g.Spec.JWKSSelector.Name,
+		Name:      g.Spec.ApiEndpoint.JWKSSelector.Name,
		Namespace: g.Namespace,
		Labels:    maps.Clone(g.Labels),
	}
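A note on the UsersInline pattern introduced above: entries must be htpasswd SHA records, i.e. user:{SHA} followed by the base64-encoded SHA-1 of the password, per the linked htpasswd documentation. A minimal, self-contained sketch for producing a matching entry (user and password are made up):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// htpasswd's {SHA} scheme: base64(sha1(password))
	sum := sha1.Sum([]byte("s3cr3t"))
	entry := "admin:{SHA}" + base64.StdEncoding.EncodeToString(sum[:])
	fmt.Println(entry) // matches the UsersInline validation pattern
}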
(file name not shown in this view; shared workload/JWT types in api/v1alpha1)

@@ -48,6 +48,15 @@ func (s JwtSpec) SecretKeySelector() *corev1.SecretKeySelector {
	}
 }

+func (s JwtSpec) JwksKeySelector() *corev1.SecretKeySelector {
+	return &corev1.SecretKeySelector{
+		LocalObjectReference: corev1.LocalObjectReference{
+			Name: s.SecretName,
+		},
+		Key: s.JwksKey,
+	}
+}
+
 func (s JwtSpec) AnonKeySelector() *corev1.SecretKeySelector {
	return &corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{

@@ -74,22 +83,24 @@ type ImageSpec struct {
 type ContainerTemplate struct {
	ImageSpec        `json:",inline"`
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
-	// SecurityContext -
+	// SecurityContext - override the container SecurityContext
+	// use with caution, by default the operator already uses sane defaults
	SecurityContext *corev1.SecurityContext     `json:"securityContext,omitempty"`
	Resources       corev1.ResourceRequirements `json:"resources,omitempty"`
	VolumeMounts    []corev1.VolumeMount        `json:"volumeMounts,omitempty"`
	AdditionalEnv   []corev1.EnvVar             `json:"additionalEnv,omitempty"`
 }

-type WorkloadTemplate struct {
-	Replicas        *int32                     `json:"replicas,omitempty"`
-	SecurityContext *corev1.PodSecurityContext `json:"securityContext"`
+type WorkloadSpec struct {
+	Replicas         *int32                     `json:"replicas,omitempty"`
+	SecurityContext  *corev1.PodSecurityContext `json:"securityContext,omitempty"`
	AdditionalLabels map[string]string          `json:"additionalLabels,omitempty"`
-	// Workload - customize the container template of the workload
-	Workload *ContainerTemplate `json:"workload,omitempty"`
+	// ContainerSpec - customize the container template of the workload
+	ContainerSpec     *ContainerTemplate `json:"container,omitempty"`
+	AdditionalVolumes []corev1.Volume    `json:"additionalVolumes,omitempty"`
 }

-func (t *WorkloadTemplate) ReplicaCount() *int32 {
+func (t *WorkloadSpec) ReplicaCount() *int32 {
	if t != nil && t.Replicas != nil {
		return t.Replicas
	}

@@ -97,20 +108,20 @@ func (t *WorkloadTemplate) ReplicaCount() *int32 {
	return nil
 }

-func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
-	if t == nil || t.Workload == nil || len(t.Workload.AdditionalEnv) == 0 {
+func (t *WorkloadSpec) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
+	if t == nil || t.ContainerSpec == nil || len(t.ContainerSpec.AdditionalEnv) == 0 {
		return basicEnv
	}

-	existingKeys := make(map[string]bool, len(basicEnv)+len(t.Workload.AdditionalEnv))
+	existingKeys := make(map[string]bool, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv))

-	merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.Workload.AdditionalEnv)), basicEnv...)
+	merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv)), basicEnv...)

	for _, v := range basicEnv {
		existingKeys[v.Name] = true
	}

-	for _, v := range t.Workload.AdditionalEnv {
+	for _, v := range t.ContainerSpec.AdditionalEnv {
		if _, alreadyPresent := existingKeys[v.Name]; alreadyPresent {
			continue
		}
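As the loop above shows, MergeEnv gives the operator-provided base env precedence: an AdditionalEnv entry whose name already exists is skipped, everything else is appended. A minimal sketch with hypothetical values:

spec := &WorkloadSpec{
	ContainerSpec: &ContainerTemplate{
		AdditionalEnv: []corev1.EnvVar{
			{Name: "PGRST_DB_URI", Value: "postgres://user-supplied"}, // skipped: name collides with base env
			{Name: "LOG_LEVEL", Value: "debug"},                       // appended
		},
	},
}

base := []corev1.EnvVar{{Name: "PGRST_DB_URI", Value: "postgres://operator-managed"}}
merged := spec.MergeEnv(base) // PGRST_DB_URI keeps the operator value; LOG_LEVEL is added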
@@ -121,7 +132,7 @@ func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
	return merged
 }

-func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
+func (t *WorkloadSpec) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
	result := make(map[string]string)

	maps.Copy(result, initial)

@@ -145,47 +156,55 @@ func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...ma
	return result
 }

-func (t *WorkloadTemplate) Image(defaultImage string) string {
-	if t != nil && t.Workload != nil && t.Workload.Image != "" {
-		return t.Workload.Image
+func (t *WorkloadSpec) Image(defaultImage string) string {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.Image != "" {
+		return t.ContainerSpec.Image
	}

	return defaultImage
 }

-func (t *WorkloadTemplate) ImagePullPolicy() corev1.PullPolicy {
-	if t != nil && t.Workload != nil && t.Workload.PullPolicy != "" {
-		return t.Workload.PullPolicy
+func (t *WorkloadSpec) ImagePullPolicy() corev1.PullPolicy {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.PullPolicy != "" {
+		return t.ContainerSpec.PullPolicy
	}

	return corev1.PullIfNotPresent
 }

-func (t *WorkloadTemplate) PullSecrets() []corev1.LocalObjectReference {
-	if t != nil && t.Workload != nil && len(t.Workload.ImagePullSecrets) > 0 {
-		return t.Workload.ImagePullSecrets
+func (t *WorkloadSpec) PullSecrets() []corev1.LocalObjectReference {
+	if t != nil && t.ContainerSpec != nil && len(t.ContainerSpec.ImagePullSecrets) > 0 {
+		return t.ContainerSpec.ImagePullSecrets
	}

	return nil
 }

-func (t *WorkloadTemplate) Resources() corev1.ResourceRequirements {
-	if t != nil && t.Workload != nil {
-		return t.Workload.Resources
+func (t *WorkloadSpec) Resources() corev1.ResourceRequirements {
+	if t != nil && t.ContainerSpec != nil {
+		return t.ContainerSpec.Resources
	}

	return corev1.ResourceRequirements{}
 }

-func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
-	if t != nil && t.Workload != nil {
-		return append(defaultMounts, t.Workload.VolumeMounts...)
+func (t *WorkloadSpec) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
+	if t != nil && t.ContainerSpec != nil {
+		return append(defaultMounts, t.ContainerSpec.VolumeMounts...)
	}

	return defaultMounts
 }

-func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
+func (t *WorkloadSpec) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
+	if t == nil {
+		return defaultVolumes
+	}
+
+	return append(defaultVolumes, t.AdditionalVolumes...)
+}
+
+func (t *WorkloadSpec) PodSecurityContext() *corev1.PodSecurityContext {
	if t != nil && t.SecurityContext != nil {
		return t.SecurityContext
	}

@@ -195,9 +214,9 @@ func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
	}
 }

-func (t *WorkloadTemplate) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
-	if t != nil && t.Workload != nil && t.Workload.SecurityContext != nil {
-		return t.Workload.SecurityContext
+func (t *WorkloadSpec) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
+	if t != nil && t.ContainerSpec != nil && t.ContainerSpec.SecurityContext != nil {
+		return t.ContainerSpec.SecurityContext
	}

	return &corev1.SecurityContext{
(file name not shown in this view; the Core API types in api/v1alpha1)

@@ -17,6 +17,7 @@ limitations under the License.
 package v1alpha1

 import (
+	"bytes"
	"context"
	"errors"
	"fmt"

@@ -25,6 +26,7 @@ import (
	"slices"
	"strconv"
	"strings"
+	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -59,7 +61,7 @@ type DatabaseRoles struct {
 type Database struct {
	DSN *string `json:"dsn,omitempty"`
-	DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef,omitempty"`
+	DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef"`
	Roles DatabaseRoles `json:"roles,omitempty"`
 }

@@ -167,8 +169,8 @@ type PostgrestSpec struct {
	// MaxRows - maximum number of rows PostgREST will load at a time
	// +kubebuilder:default=1000
	MaxRows int `json:"maxRows,omitempty"`
-	// WorkloadTemplate - customize the PostgREST workload
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	// WorkloadSpec - customize the PostgREST workload
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 type AuthProviderMeta struct {

@@ -365,12 +367,12 @@ func (p *AuthProviders) Vars(apiExternalURL string) []corev1.EnvVar {
 }

 type AuthSpec struct {
-	AdditionalRedirectUrls []string          `json:"additionalRedirectUrls,omitempty"`
-	DisableSignup          *bool             `json:"disableSignup,omitempty"`
-	AnonymousUsersEnabled  *bool             `json:"anonymousUsersEnabled,omitempty"`
-	Providers              *AuthProviders    `json:"providers,omitempty"`
-	WorkloadTemplate       *WorkloadTemplate `json:"workloadTemplate,omitempty"`
-	EmailSignupDisabled    *bool             `json:"emailSignupDisabled,omitempty"`
+	AdditionalRedirectUrls []string       `json:"additionalRedirectUrls,omitempty"`
+	DisableSignup          *bool          `json:"disableSignup,omitempty"`
+	AnonymousUsersEnabled  *bool          `json:"anonymousUsersEnabled,omitempty"`
+	Providers              *AuthProviders `json:"providers,omitempty"`
+	WorkloadTemplate       *WorkloadSpec  `json:"workloadTemplate,omitempty"`
+	EmailSignupDisabled    *bool          `json:"emailSignupDisabled,omitempty"`
 }

 // CoreSpec defines the desired state of Core.

@@ -387,20 +389,89 @@ type CoreSpec struct {
	Auth *AuthSpec `json:"auth,omitempty"`
 }

-type MigrationStatus map[string]metav1.Time
-
-func (s MigrationStatus) IsApplied(name string) bool {
-	_, ok := s[name]
-	return ok
-}
-
-func (s MigrationStatus) Record(name string) {
-	s[name] = metav1.Now()
-}
+type MigrationConditionStatus string
+
+const (
+	MigrationConditionStatusApplied MigrationConditionStatus = "Applied"
+	MigrationConditionStatusFailed  MigrationConditionStatus = "Failed"
+)
+
+type MigrationScriptCondition struct {
+	// Name - file name of the migration script
+	Name string `json:"name"`
+	// Hash - SHA256 hash of the script when it was last successfully applied
+	Hash []byte `json:"hash"`
+	// Status - whether the migration was applied or not
+	// +kubebuilder:validation:Enum=Applied;Failed
+	Status MigrationConditionStatus `json:"status"`
+	// LastProbeTime - last time the operator tried to execute the migration script
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// LastTransitionTime - last time the condition transitioned from one status to another
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	// Reason - one-word, CamelCase reason for the condition's last transition
+	Reason string `json:"reason,omitempty"`
+	// Message - human-readable message indicating details about the last transition
+	Message string `json:"message,omitempty"`
+}

 type DatabaseStatus struct {
-	AppliedMigrations MigrationStatus   `json:"appliedMigrations,omitempty"`
-	Roles             map[string][]byte `json:"roles,omitempty"`
+	MigrationConditions []MigrationScriptCondition `json:"migrationConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+	Roles               map[string][]byte          `json:"roles,omitempty"`
 }

+func (s DatabaseStatus) IsMigrationUpToDate(name string, hash []byte) (found bool, upToDate bool) {
+	for _, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			return true, bytes.Equal(cond.Hash, hash)
+		}
+	}
+
+	return false, false
+}
+
+func (s *DatabaseStatus) RecordMigrationCondition(name string, hash []byte, err error) error {
+	var (
+		now                = time.Now()
+		newStatus          = MigrationConditionStatusApplied
+		lastProbeTime      = metav1.NewTime(now)
+		lastTransitionTime = metav1.NewTime(now)
+		message            string
+	)
+
+	if err != nil {
+		newStatus = MigrationConditionStatusFailed
+		message = err.Error()
+	}
+
+	for idx, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			lastTransitionTime = cond.LastTransitionTime
+			if cond.Status != newStatus {
+				lastTransitionTime = metav1.NewTime(now)
+			}
+
+			cond.Hash = hash
+			cond.Status = newStatus
+			cond.LastProbeTime = lastProbeTime
+			cond.LastTransitionTime = lastTransitionTime
+			cond.Reason = "Outdated"
+			cond.Message = message
+
+			s.MigrationConditions[idx] = cond
+			return err
+		}
+	}
+
+	s.MigrationConditions = append(s.MigrationConditions, MigrationScriptCondition{
+		Name:               name,
+		Hash:               hash,
+		Status:             newStatus,
+		LastProbeTime:      lastProbeTime,
+		LastTransitionTime: lastTransitionTime,
+		Message:            message,
+	})
+
+	return err
+}
+
 type CoreConditionType string
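Taken together, the two new helpers support hash-based migration tracking instead of the old applied/not-applied map. A minimal sketch of how a reconciler might use them (script name, helper, and error handling are illustrative):

sum := sha256.Sum256(script) // script: the migration file's contents

if found, upToDate := status.IsMigrationUpToDate("0001_init.sql", sum[:]); found && upToDate {
	return nil // already applied with identical content - skip
}

applyErr := applyMigration(ctx, script) // hypothetical helper
// records Applied/Failed plus probe/transition timestamps, and passes applyErr through
return status.RecordMigrationCondition("0001_init.sql", sum[:], applyErr)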
(file name not shown in this view; the Studio/PGMeta API types in api/v1alpha1)

@@ -24,7 +24,7 @@ import (
 type StudioSpec struct {
	JWT *JwtSpec `json:"jwt,omitempty"`
	// WorkloadTemplate - customize the studio deployment
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
	// GatewayServiceSelector - selector to find the service for the API gateway
	// Required to configure the API URL in the studio deployment
	// If you don't run multiple APIGateway instances in the same namespaces, the default will be fine

@@ -37,7 +37,7 @@ type StudioSpec struct {
 type PGMetaSpec struct {
	// WorkloadTemplate - customize the pg-meta deployment
-	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
 }

 type DbCredentialsReference struct {
(file name not shown in this view; the Storage API types in api/v1alpha1)

@@ -18,14 +18,17 @@ package v1alpha1
 import (
	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
 )

-type StorageBackend string
+type BackendStorageType string

 const (
-	StorageBackendFile StorageBackend = "file"
-	StorageBackendS3   StorageBackend = "s3"
+	BackendStorageTypeFile BackendStorageType = "file"
+	BackendStorageTypeS3   BackendStorageType = "s3"
 )

 type StorageApiDbSpec struct {

@@ -67,11 +70,25 @@ type S3CredentialsRef struct {
	AccessSecretKeyKey string `json:"accessSecretKeyKey,omitempty"`
 }

-type S3ProtocolSpec struct {
-	// Region - S3 region to use in the API
-	// +kubebuilder:default="us-east-1"
-	Region string `json:"region,omitempty"`
+func (r S3CredentialsRef) AccessKeyIdSelector() *corev1.SecretKeySelector {
+	return &corev1.SecretKeySelector{
+		LocalObjectReference: corev1.LocalObjectReference{
+			Name: r.SecretName,
+		},
+		Key: r.AccessKeyIdKey,
+	}
+}
+
+func (r S3CredentialsRef) AccessSecretKeySelector() *corev1.SecretKeySelector {
+	return &corev1.SecretKeySelector{
+		LocalObjectReference: corev1.LocalObjectReference{
+			Name: r.SecretName,
+		},
+		Key: r.AccessSecretKeyKey,
+	}
+}
+
+type S3ProtocolSpec struct {
	// AllowForwardedHeader
	// +kubebuilder:default=true
	AllowForwardedHeader bool `json:"allowForwardedHeader,omitempty"`

@@ -80,11 +97,85 @@ type S3ProtocolSpec struct {
	CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef,omitempty"`
 }

-// StorageSpec defines the desired state of Storage.
-type StorageSpec struct {
-	// BackendType - backend storage type to use
-	// +kubebuilder:validation:Enum={s3,file}
-	BackendType StorageBackend `json:"backendType"`
+type FileBackendSpec struct {
+	// Path - path to where files will be stored
+	Path string `json:"path"`
+}
+
+func (s *FileBackendSpec) Env() []corev1.EnvVar {
+	if s == nil {
+		return nil
+	}
+
+	svcCfg := supabase.ServiceConfig.Storage
+
+	return []corev1.EnvVar{
+		svcCfg.EnvKeys.StorageBackend.Var("file"),
+		svcCfg.EnvKeys.TenantID.Var(),
+		svcCfg.EnvKeys.FileStorageBackendPath.Var(s.Path),
+		svcCfg.EnvKeys.StorageS3Region.Var("local"),
+		svcCfg.EnvKeys.StorageS3Bucket.Var("stub"),
+	}
+}
+
+type S3BackendSpec struct {
+	// Region - S3 region of the backend
+	Region string `json:"region"`
+	// Endpoint - hostname and port **with** http/https
+	Endpoint string `json:"endpoint"`
+	// ForcePathStyle - whether to use path style (e.g. for MinIO) or domain style
+	// for bucket addressing
+	ForcePathStyle bool `json:"forcePathStyle,omitempty"`
+	// Bucket - bucket to use, if file backend is used, default value is sufficient
+	// +kubebuilder:default="stub"
+	Bucket string `json:"bucket"`
+
+	// CredentialsSecretRef - reference to the Secret where access key id and access secret key are stored
+	CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef"`
+}
+
+func (s *S3BackendSpec) Env() []corev1.EnvVar {
+	if s == nil {
+		return nil
+	}
+
+	svcCfg := supabase.ServiceConfig.Storage
+
+	return []corev1.EnvVar{
+		svcCfg.EnvKeys.StorageBackend.Var("s3"),
+		svcCfg.EnvKeys.StorageS3Endpoint.Var(s.Endpoint),
+		svcCfg.EnvKeys.StorageS3ForcePathStyle.Var(s.ForcePathStyle),
+		svcCfg.EnvKeys.StorageS3Bucket.Var(s.Bucket),
+		svcCfg.EnvKeys.StorageS3Region.Var(s.Region),
+		svcCfg.EnvKeys.StorageS3AccessKeyId.Var(s.CredentialsSecretRef.AccessKeyIdSelector()),
+		svcCfg.EnvKeys.StorageS3AccessSecretKey.Var(s.CredentialsSecretRef.AccessSecretKeySelector()),
+	}
+}
+
+type UploadTempSpec struct {
+	// Medium of the empty dir to cache uploads
+	Medium corev1.StorageMedium `json:"medium,omitempty"`
+	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
+}
+
+func (s *UploadTempSpec) VolumeSource() *corev1.EmptyDirVolumeSource {
+	if s == nil {
+		return &corev1.EmptyDirVolumeSource{
+			Medium: corev1.StorageMediumDefault,
+		}
+	}
+
+	return &corev1.EmptyDirVolumeSource{
+		Medium:    s.Medium,
+		SizeLimit: s.SizeLimit,
+	}
+}
+
+type StorageApiSpec struct {
+	S3Backend *S3BackendSpec `json:"s3Backend,omitempty"`
+	// FileBackend - configure the file backend
+	// either S3 or file backend **MUST** be configured
+	FileBackend *FileBackendSpec `json:"fileBackend,omitempty"`
	// FileSizeLimit - maximum file upload size in bytes
	// +kubebuilder:default=52428800
	FileSizeLimit uint64 `json:"fileSizeLimit,omitempty"`

@@ -95,11 +186,30 @@ type StorageSpec struct {
	// DBSpec - Configure access to the Postgres database
	// In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource
	DBSpec StorageApiDbSpec `json:"db"`
-	// S3 - Configure S3 protocol
-	S3 *S3ProtocolSpec `json:"s3,omitempty"`
-	// EnableImageTransformation - whether to deploy the image proxy
-	// the image proxy scales images to lower resolutions on demand to reduce traffic for instance for mobile devices
-	EnableImageTransformation bool `json:"enableImageTransformation,omitempty"`
+	// S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client
+	S3Protocol *S3ProtocolSpec `json:"s3,omitempty"`
+	// UploadTemp - configure the emptyDir for storing intermediate files during uploads
+	UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
+	// WorkloadSpec - customize the Storage API workload
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
+}
+
+type ImageProxySpec struct {
+	// Enable - whether to deploy the image proxy or not
+	Enable bool `json:"enable,omitempty"`
+	EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
+	// WorkloadSpec - customize the image proxy workload
+	WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
+}
+
+// StorageSpec defines the desired state of Storage.
+type StorageSpec struct {
+	// Api - configure the Storage API
+	Api StorageApiSpec `json:"api,omitempty"`
+
+	// ImageProxy - optionally enable and configure the image proxy
+	// the image proxy scales images to lower resolutions on demand to reduce traffic for instance for mobile devices
+	ImageProxy *ImageProxySpec `json:"imageProxy,omitempty"`
 }

 // StorageStatus defines the observed state of Storage.
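Since both Env helpers above are written to be nil-receiver safe, backend selection can stay terse. A minimal sketch (the helper name is hypothetical):

// storageEnv picks the backend env for the Storage API deployment:
// S3 wins when configured; a nil *S3BackendSpec simply returns nil.
func storageEnv(api StorageApiSpec) []corev1.EnvVar {
	if env := api.S3Backend.Env(); env != nil {
		return env
	}
	return api.FileBackend.Env()
}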
(file name not shown in this view; the generated deepcopy code, zz_generated.deepcopy.go)

@@ -21,7 +21,7 @@ limitations under the License.
 package v1alpha1

 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
 )

@@ -93,9 +93,14 @@ func (in *APIGatewaySpec) DeepCopyInto(out *APIGatewaySpec) {
		*out = new(EnvoySpec)
		(*in).DeepCopyInto(*out)
	}
-	if in.JWKSSelector != nil {
-		in, out := &in.JWKSSelector, &out.JWKSSelector
-		*out = new(v1.SecretKeySelector)
+	if in.ApiEndpoint != nil {
+		in, out := &in.ApiEndpoint, &out.ApiEndpoint
+		*out = new(ApiEndpointSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DashboardEndpoint != nil {
+		in, out := &in.DashboardEndpoint, &out.DashboardEndpoint
+		*out = new(DashboardEndpointSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceSelector != nil {

@@ -147,6 +152,31 @@ func (in *APIGatewayStatus) DeepCopy() *APIGatewayStatus {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiEndpointSpec) DeepCopyInto(out *ApiEndpointSpec) {
+	*out = *in
+	if in.JWKSSelector != nil {
+		in, out := &in.JWKSSelector, &out.JWKSSelector
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(EndpointTlsSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiEndpointSpec.
+func (in *ApiEndpointSpec) DeepCopy() *ApiEndpointSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ApiEndpointSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AuthProviderMeta) DeepCopyInto(out *AuthProviderMeta) {
	*out = *in

@@ -222,7 +252,7 @@ func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
	}
	if in.WorkloadTemplate != nil {
		in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
-		*out = new(WorkloadTemplate)
+		*out = new(WorkloadSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.EmailSignupDisabled != nil {

@@ -465,6 +495,51 @@ func (in *Dashboard) DeepCopyObject() runtime.Object {
	return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardAuthSpec) DeepCopyInto(out *DashboardAuthSpec) {
+	*out = *in
+	if in.OAuth2 != nil {
+		in, out := &in.OAuth2, &out.OAuth2
+		*out = new(DashboardOAuth2Spec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Basic != nil {
+		in, out := &in.Basic, &out.Basic
+		*out = new(DashboardBasicAuthSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardAuthSpec.
+func (in *DashboardAuthSpec) DeepCopy() *DashboardAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardBasicAuthSpec) DeepCopyInto(out *DashboardBasicAuthSpec) {
+	*out = *in
+	if in.UsersInline != nil {
+		in, out := &in.UsersInline, &out.UsersInline
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardBasicAuthSpec.
+func (in *DashboardBasicAuthSpec) DeepCopy() *DashboardBasicAuthSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardBasicAuthSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardDbSpec) DeepCopyInto(out *DashboardDbSpec) {
	*out = *in

@@ -485,6 +560,31 @@ func (in *DashboardDbSpec) DeepCopy() *DashboardDbSpec {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardEndpointSpec) DeepCopyInto(out *DashboardEndpointSpec) {
+	*out = *in
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(DashboardAuthSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(EndpointTlsSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardEndpointSpec.
+func (in *DashboardEndpointSpec) DeepCopy() *DashboardEndpointSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardEndpointSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardList) DeepCopyInto(out *DashboardList) {
	*out = *in

@@ -517,6 +617,36 @@ func (in *DashboardList) DeepCopyObject() runtime.Object {
	return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardOAuth2Spec) DeepCopyInto(out *DashboardOAuth2Spec) {
+	*out = *in
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientSecretRef != nil {
+		in, out := &in.ClientSecretRef, &out.ClientSecretRef
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOAuth2Spec.
+func (in *DashboardOAuth2Spec) DeepCopy() *DashboardOAuth2Spec {
+	if in == nil {
+		return nil
+	}
+	out := new(DashboardOAuth2Spec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) {
	*out = *in

@@ -622,11 +752,11 @@ func (in *DatabaseRolesSecrets) DeepCopy() *DatabaseRolesSecrets {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
	*out = *in
-	if in.AppliedMigrations != nil {
-		in, out := &in.AppliedMigrations, &out.AppliedMigrations
-		*out = make(MigrationStatus, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
+	if in.MigrationConditions != nil {
+		in, out := &in.MigrationConditions, &out.MigrationConditions
+		*out = make([]MigrationScriptCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Roles != nil {

@@ -728,6 +858,61 @@ func (in *EmailAuthSmtpSpec) DeepCopy() *EmailAuthSmtpSpec {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointTlsSpec) DeepCopyInto(out *EndpointTlsSpec) {
+	*out = *in
+	if in.Cert != nil {
+		in, out := &in.Cert, &out.Cert
+		*out = new(TlsCertRef)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointTlsSpec.
+func (in *EndpointTlsSpec) DeepCopy() *EndpointTlsSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointTlsSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvoyComponentLogLevel) DeepCopyInto(out *EnvoyComponentLogLevel) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyComponentLogLevel.
+func (in *EnvoyComponentLogLevel) DeepCopy() *EnvoyComponentLogLevel {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvoyComponentLogLevel)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvoyDebuggingOptions) DeepCopyInto(out *EnvoyDebuggingOptions) {
+	*out = *in
+	if in.ComponentLogLevels != nil {
+		in, out := &in.ComponentLogLevels, &out.ComponentLogLevels
+		*out = make([]EnvoyComponentLogLevel, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyDebuggingOptions.
+func (in *EnvoyDebuggingOptions) DeepCopy() *EnvoyDebuggingOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvoyDebuggingOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
	*out = *in

@@ -736,9 +921,14 @@ func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
		*out = new(ControlPlaneSpec)
		**out = **in
	}
-	if in.WorkloadTemplate != nil {
-		in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
-		*out = new(WorkloadTemplate)
+	if in.WorkloadSpec != nil {
+		in, out := &in.WorkloadSpec, &out.WorkloadSpec
+		*out = new(WorkloadSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Debugging != nil {
+		in, out := &in.Debugging, &out.Debugging
+		*out = new(EnvoyDebuggingOptions)
		(*in).DeepCopyInto(*out)
	}
 }

@@ -773,6 +963,21 @@ func (in *EnvoyStatus) DeepCopy() *EnvoyStatus {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileBackendSpec) DeepCopyInto(out *FileBackendSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileBackendSpec.
+func (in *FileBackendSpec) DeepCopy() *FileBackendSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FileBackendSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *GithubAuthProvider) DeepCopyInto(out *GithubAuthProvider) {
	*out = *in

@@ -790,6 +995,26 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
+	*out = *in
+	if in.WorkloadSpec != nil {
+		in, out := &in.WorkloadSpec, &out.WorkloadSpec
+		*out = new(WorkloadSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageProxySpec.
+func (in *ImageProxySpec) DeepCopy() *ImageProxySpec {
+	if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImageProxySpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
|
||||
*out = *in
|
||||
|
@ -821,24 +1046,25 @@ func (in *JwtSpec) DeepCopy() *JwtSpec {
|
|||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in MigrationStatus) DeepCopyInto(out *MigrationStatus) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(MigrationStatus, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
func (in *MigrationScriptCondition) DeepCopyInto(out *MigrationScriptCondition) {
|
||||
*out = *in
|
||||
if in.Hash != nil {
|
||||
in, out := &in.Hash, &out.Hash
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationStatus.
|
||||
func (in MigrationStatus) DeepCopy() MigrationStatus {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationScriptCondition.
|
||||
func (in *MigrationScriptCondition) DeepCopy() *MigrationScriptCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MigrationStatus)
|
||||
out := new(MigrationScriptCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
|
@ -864,9 +1090,9 @@ func (in *OAuthProvider) DeepCopy() *OAuthProvider {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
||||
*out = *in
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -910,9 +1136,9 @@ func (in *PostgrestSpec) DeepCopyInto(out *PostgrestSpec) {
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -927,6 +1153,26 @@ func (in *PostgrestSpec) DeepCopy() *PostgrestSpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *S3BackendSpec) DeepCopyInto(out *S3BackendSpec) {
|
||||
*out = *in
|
||||
if in.CredentialsSecretRef != nil {
|
||||
in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef
|
||||
*out = new(S3CredentialsRef)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackendSpec.
|
||||
func (in *S3BackendSpec) DeepCopy() *S3BackendSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(S3BackendSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *S3CredentialsRef) DeepCopyInto(out *S3CredentialsRef) {
|
||||
*out = *in
|
||||
|
@ -1024,6 +1270,48 @@ func (in *StorageApiDbSpec) DeepCopy() *StorageApiDbSpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
|
||||
*out = *in
|
||||
if in.S3Backend != nil {
|
||||
in, out := &in.S3Backend, &out.S3Backend
|
||||
*out = new(S3BackendSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FileBackend != nil {
|
||||
in, out := &in.FileBackend, &out.FileBackend
|
||||
*out = new(FileBackendSpec)
|
||||
**out = **in
|
||||
}
|
||||
out.JwtAuth = in.JwtAuth
|
||||
in.DBSpec.DeepCopyInto(&out.DBSpec)
|
||||
if in.S3Protocol != nil {
|
||||
in, out := &in.S3Protocol, &out.S3Protocol
|
||||
*out = new(S3ProtocolSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.UploadTemp != nil {
|
||||
in, out := &in.UploadTemp, &out.UploadTemp
|
||||
*out = new(UploadTempSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageApiSpec.
|
||||
func (in *StorageApiSpec) DeepCopy() *StorageApiSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StorageApiSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StorageList) DeepCopyInto(out *StorageList) {
|
||||
*out = *in
|
||||
|
@ -1059,11 +1347,10 @@ func (in *StorageList) DeepCopyObject() runtime.Object {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
|
||||
*out = *in
|
||||
out.JwtAuth = in.JwtAuth
|
||||
in.DBSpec.DeepCopyInto(&out.DBSpec)
|
||||
if in.S3 != nil {
|
||||
in, out := &in.S3, &out.S3
|
||||
*out = new(S3ProtocolSpec)
|
||||
in.Api.DeepCopyInto(&out.Api)
|
||||
if in.ImageProxy != nil {
|
||||
in, out := &in.ImageProxy, &out.ImageProxy
|
||||
*out = new(ImageProxySpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -1101,9 +1388,9 @@ func (in *StudioSpec) DeepCopyInto(out *StudioSpec) {
|
|||
*out = new(JwtSpec)
|
||||
**out = **in
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.GatewayServiceMatchLabels != nil {
|
||||
|
@ -1126,7 +1413,42 @@ func (in *StudioSpec) DeepCopy() *StudioSpec {
|
|||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||
func (in *TlsCertRef) DeepCopyInto(out *TlsCertRef) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TlsCertRef.
|
||||
func (in *TlsCertRef) DeepCopy() *TlsCertRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TlsCertRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
|
||||
*out = *in
|
||||
if in.SizeLimit != nil {
|
||||
in, out := &in.SizeLimit, &out.SizeLimit
|
||||
x := (*in).DeepCopy()
|
||||
*out = &x
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadTempSpec.
|
||||
func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UploadTempSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadSpec) DeepCopyInto(out *WorkloadSpec) {
|
||||
*out = *in
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
|
@ -1145,19 +1467,26 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
|||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Workload != nil {
|
||||
in, out := &in.Workload, &out.Workload
|
||||
if in.ContainerSpec != nil {
|
||||
in, out := &in.ContainerSpec, &out.ContainerSpec
|
||||
*out = new(ContainerTemplate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.AdditionalVolumes != nil {
|
||||
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
|
||||
*out = make([]v1.Volume, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.
|
||||
func (in *WorkloadTemplate) DeepCopy() *WorkloadTemplate {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpec.
|
||||
func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadTemplate)
|
||||
out := new(WorkloadSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
|
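For readers unfamiliar with the generated pattern, every DeepCopyInto above has the same shape: a shallow struct assignment followed by explicit re-allocation of each pointer, slice, and map field, so the copy shares no memory with the original held in the informer cache. A self-contained toy version of the pattern (ToySpec is invented for illustration and is not part of this API):

package main

import "fmt"

// ToySpec mirrors the generated pattern: DeepCopyInto re-allocates every
// reference field so the clone can be mutated safely.
type ToySpec struct {
    Scopes []string
    Labels map[string]string
}

func (in *ToySpec) DeepCopyInto(out *ToySpec) {
    *out = *in
    if in.Scopes != nil {
        out.Scopes = make([]string, len(in.Scopes))
        copy(out.Scopes, in.Scopes)
    }
    if in.Labels != nil {
        out.Labels = make(map[string]string, len(in.Labels))
        for k, v := range in.Labels {
            out.Labels[k] = v
        }
    }
}

func (in *ToySpec) DeepCopy() *ToySpec {
    if in == nil {
        return nil
    }
    out := new(ToySpec)
    in.DeepCopyInto(out)
    return out
}

func main() {
    orig := &ToySpec{Scopes: []string{"openid"}, Labels: map[string]string{"app": "studio"}}
    clone := orig.DeepCopy()
    clone.Scopes[0] = "email" // does not affect orig
    fmt.Println(orig.Scopes[0], clone.Scopes[0]) // prints: openid email
}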
@@ -17,6 +17,7 @@ limitations under the License.
package migrations

import (
    "crypto/sha256"
    "embed"
    "fmt"
    "io/fs"

@@ -32,6 +33,7 @@ var migrationsFS embed.FS
type Script struct {
    FileName string
    Content  string
    Hash     []byte
}

func InitScripts() iter.Seq2[Script, error] {

@@ -49,10 +51,18 @@ func RoleCreationScript(roleName string) (Script, error) {
        return Script{}, err
    }

    return Script{fileName, string(content)}, nil
    hash := sha256.New()
    _, _ = hash.Write(content)

    return Script{
        FileName: fileName,
        Content:  string(content),
        Hash:     hash.Sum(nil),
    }, nil
}

func readScripts(dir string) iter.Seq2[Script, error] {
    hash := sha256.New()
    return func(yield func(Script, error) bool) {
        files, err := migrationsFS.ReadDir(dir)
        if err != nil {

@@ -76,11 +86,16 @@ func readScripts(dir string) iter.Seq2[Script, error] {
            }
        }

        _, _ = hash.Write(content)

        s := Script{
            FileName: file.Name(),
            Content:  string(content),
            Hash:     hash.Sum(nil),
        }

        hash.Reset()

        if !yield(s, nil) {
            return
        }
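The change above threads a single sha256 hasher through the read loop, writing each script's bytes and calling Reset after Sum so every Script carries a digest of exactly its own content. A standalone sketch of the same change-detection idea (the function name and sample inputs are invented):

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// scriptDigest mirrors the hashing added to readScripts: one sha256 digest
// per script body, suitable for comparison against the Hash recorded in a
// MigrationScriptCondition (see the deepcopy section above).
func scriptDigest(content []byte) []byte {
    hash := sha256.New()
    _, _ = hash.Write(content) // hash.Write never returns an error
    return hash.Sum(nil)
}

func main() {
    v1 := scriptDigest([]byte("create table foo();"))
    v2 := scriptDigest([]byte("create table foo(); alter table foo add column bar int;"))
    // A changed digest flags that an already-applied migration script was edited.
    fmt.Printf("script changed: %v\n", !bytes.Equal(v1, v2))
}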
@@ -1,19 +0,0 @@
-- migrate:up

-- demote postgres user
GRANT ALL ON DATABASE postgres TO postgres;
GRANT ALL ON SCHEMA auth TO postgres;
GRANT ALL ON SCHEMA extensions TO postgres;
GRANT ALL ON SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres;
ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS;

-- migrate:down
@@ -5,34 +5,44 @@ DECLARE
    pgsodium_exists boolean;
    vault_exists boolean;
BEGIN
    pgsodium_exists = (
        select count(*) = 1
        from pg_available_extensions
        where name = 'pgsodium'
        and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9')
    );

    vault_exists = (
    IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'supabase_vault' AND default_version != '0.2.8') THEN
        CREATE EXTENSION IF NOT EXISTS supabase_vault;

        -- for some reason extension custom scripts aren't run during AMI build, so
        -- we manually run it here
        GRANT USAGE ON SCHEMA vault TO postgres WITH GRANT OPTION;
        GRANT SELECT, DELETE ON vault.secrets, vault.decrypted_secrets TO postgres WITH GRANT OPTION;
        GRANT EXECUTE ON FUNCTION vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt TO postgres WITH GRANT OPTION;
    ELSE
        pgsodium_exists = (
            select count(*) = 1
            from pg_available_extensions
            where name = 'supabase_vault'
        );

        IF pgsodium_exists
        THEN
            create extension if not exists pgsodium;

            grant pgsodium_keyiduser to postgres with admin option;
            grant pgsodium_keyholder to postgres with admin option;
            grant pgsodium_keymaker to postgres with admin option;

            grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role;
            grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role;
            grant execute on function pgsodium.crypto_aead_det_keygen to service_role;

            IF vault_exists
        where name = 'pgsodium'
        and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9')
    );

    vault_exists = (
        select count(*) = 1
        from pg_available_extensions
        where name = 'supabase_vault'
    );

    IF pgsodium_exists
    THEN
        create extension if not exists supabase_vault;
        create extension if not exists pgsodium;

        grant pgsodium_keyiduser to postgres with admin option;
        grant pgsodium_keyholder to postgres with admin option;
        grant pgsodium_keymaker to postgres with admin option;

        grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role;
        grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role;
        grant execute on function pgsodium.crypto_aead_det_keygen to service_role;

        IF vault_exists
        THEN
            create extension if not exists supabase_vault;
        END IF;
    END IF;
    END IF;
END $$;
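As far as the flattened old/new view above can be read: the rewritten guard now prefers supabase_vault whenever a version other than 0.2.8 is available (creating the extension and applying the vault grants directly), and only falls back to the legacy pgsodium-based setup, with its version pinning to 3.1.6 through 3.1.9, when it is not.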
17 assets/migrations/migrations/20221207154255_create_vault.sql Normal file
@@ -0,0 +1,17 @@
-- migrate:up

DO $$
BEGIN
    IF EXISTS (select from pg_available_extensions where name = 'supabase_vault')
    THEN
        create extension if not exists supabase_vault;

        -- for some reason extension custom scripts aren't run during AMI build, so
        -- we manually run it here
        grant usage on schema vault to postgres with grant option;
        grant select, delete on vault.secrets, vault.decrypted_secrets to postgres with grant option;
        grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option;
    END IF;
END $$;

-- migrate:down
@@ -4,7 +4,12 @@ ALTER ROLE authenticated inherit;
ALTER ROLE anon inherit;
ALTER ROLE service_role inherit;

GRANT pgsodium_keyholder to service_role;
DO $$
BEGIN
    IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN
        GRANT pgsodium_keyholder to service_role;
    END IF;
END $$;

-- migrate:down
@@ -0,0 +1,6 @@
-- migrate:up
alter role supabase_admin set log_statement = none;
alter role supabase_auth_admin set log_statement = none;
alter role supabase_storage_admin set log_statement = none;

-- migrate:down
3 assets/migrations/setup/realtime.sql Normal file
@@ -0,0 +1,3 @@
create schema if not exists _realtime;

alter schema _realtime owner to supabase_admin;
@@ -19,10 +19,13 @@ package main
import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "net"
    "strings"
    "time"

    corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
    discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
    endpointservice "github.com/envoyproxy/go-control-plane/envoy/service/endpoint/v3"

@@ -31,31 +34,53 @@ import (
    runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
    secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
    cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
    "github.com/envoyproxy/go-control-plane/pkg/log"
    "github.com/envoyproxy/go-control-plane/pkg/server/v3"
    "google.golang.org/grpc/credentials"

    "github.com/go-logr/logr"
    "google.golang.org/grpc"
    grpchealth "google.golang.org/grpc/health"
    "google.golang.org/grpc/health/grpc_health_v1"
    "google.golang.org/grpc/keepalive"
    "google.golang.org/grpc/reflection"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    mgr "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
    metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

    "code.icb4dc0.de/prskr/supabase-operator/internal/certs"
    "code.icb4dc0.de/prskr/supabase-operator/internal/controlplane"
    "code.icb4dc0.de/prskr/supabase-operator/internal/health"
)

//nolint:lll // flag declaration with struct tags is as long as it is
type controlPlane struct {
    ListenAddr string `name:"listen-address" default:":18000" help:"The address the control plane binds to."`
    caCert tls.Certificate `kong:"-"`

    ListenAddr string `name:"listen-address" default:":18000" help:"The address the control plane binds to."`
    Tls struct {
        CA struct {
            Cert FileContent `env:"CERT" name:"server-cert" required:"" help:"The path to the server certificate file."`
            Key  FileContent `env:"KEY" name:"server-key" required:"" help:"The path to the server key file."`
        } `embed:"" prefix:"ca." envprefix:"CA_"`
        ServerSecretName string `name:"server-secret-name" help:"The name of the secret containing the server certificate and key." default:"control-plane-xds-tls"`
    } `embed:"" prefix:"tls." envprefix:"TLS_"`
    MetricsAddr          string `name:"metrics-bind-address" default:"0" help:"The address the metrics endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service."`
    EnableLeaderElection bool   `name:"leader-elect" default:"false" help:"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager."`
    ProbeAddr            string `name:"health-probe-bind-address" default:":8081" help:"The address the probe endpoint binds to."`
    SecureMetrics        bool   `name:"metrics-secure" default:"true" help:"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead."`
    EnableHTTP2          bool   `name:"enable-http2" default:"false" help:"If set, HTTP/2 will be enabled for the metrics and webhook servers"`
    ServiceName          string `name:"service-name" env:"CONTROL_PLANE_SERVICE_NAME" default:"" required:"" help:"The name of the control plane service."`
    Namespace            string `name:"namespace" env:"CONTROL_PLANE_NAMESPACE" default:"" required:"" help:"Namespace where the controller is running, ideally set via downward API"`
}
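FileContent is defined elsewhere in this repository; judging by its use with tls.X509KeyPair in AfterApply at the end of this diff, the field must end up holding raw PEM bytes even though the flag value is a path. One plausible minimal shape for such a type (an assumption for illustration, not the repo's actual code):

package main

import (
    "fmt"
    "os"
)

// FileContent (hypothetical sketch): the CLI layer passes the flag or env
// value to UnmarshalText, which loads the file so downstream code can treat
// the field as the file's contents.
type FileContent []byte

func (f *FileContent) UnmarshalText(text []byte) error {
    content, err := os.ReadFile(string(text))
    if err != nil {
        return fmt.Errorf("reading %q: %w", string(text), err)
    }
    *f = content
    return nil
}

func main() {
    var cert FileContent
    if err := cert.UnmarshalText([]byte("/etc/ssl/certs/ca-certificates.crt")); err != nil {
        fmt.Println("load failed:", err)
        return
    }
    fmt.Printf("loaded %d bytes\n", len(cert))
}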
func (cp controlPlane) Run(ctx context.Context) error {
func (cp *controlPlane) Run(ctx context.Context, logger logr.Logger) error {
    var tlsOpts []func(*tls.Config)

    // if the enable-http2 flag is false (the default), http/2 should be disabled

@@ -91,6 +116,11 @@ func (cp controlPlane) Run(ctx context.Context) error {
        metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
    }

    bootstrapClient, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
    if err != nil {
        return fmt.Errorf("unable to create bootstrap client: %w", err)
    }

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme:  scheme,
        Metrics: metricsServerOptions,

@@ -104,9 +134,15 @@ func (cp controlPlane) Run(ctx context.Context) error {
        return fmt.Errorf("unable to start control plane: %w", err)
    }

    envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, nil)
    cacheLoggerInst := cacheLogger(logger.WithName("envoy-snapshot-cache"))
    envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, cacheLoggerInst)

    envoySrv, err := cp.envoyServer(ctx, envoySnapshotCache)
    serverCert, err := cp.ensureControlPlaneTlsCert(ctx, bootstrapClient)
    if err != nil {
        return fmt.Errorf("failed to ensure control plane TLS cert: %w", err)
    }

    envoySrv, err := cp.envoyServer(ctx, logger, envoySnapshotCache, serverCert)
    if err != nil {
        return err
    }
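The cacheLogger helper referenced above is not shown in this diff. A plausible adapter (an assumption, not necessarily the repo's implementation) bridges the logr.Logger used by controller-runtime to the Debugf/Infof/Warnf/Errorf interface that go-control-plane's snapshot cache expects:

package controlplane

import (
    "fmt"

    "github.com/envoyproxy/go-control-plane/pkg/log"
    "github.com/go-logr/logr"
)

// snapshotCacheLogger adapts logr to go-control-plane's log.Logger interface.
type snapshotCacheLogger struct{ l logr.Logger }

func cacheLogger(l logr.Logger) log.Logger { return snapshotCacheLogger{l: l} }

func (s snapshotCacheLogger) Debugf(format string, args ...any) {
    s.l.V(1).Info(fmt.Sprintf(format, args...))
}

func (s snapshotCacheLogger) Infof(format string, args ...any) {
    s.l.Info(fmt.Sprintf(format, args...))
}

func (s snapshotCacheLogger) Warnf(format string, args ...any) {
    s.l.Info("warn: " + fmt.Sprintf(format, args...))
}

func (s snapshotCacheLogger) Errorf(format string, args ...any) {
    s.l.Error(nil, fmt.Sprintf(format, args...))
}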
@@ -123,6 +159,18 @@ func (cp controlPlane) Run(ctx context.Context) error {
        return fmt.Errorf("unable to create controller Core DB: %w", err)
    }

    if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
        return fmt.Errorf("unable to set up health check: %w", err)
    }

    if err := mgr.AddHealthzCheck("server-cert", health.CertValidCheck(serverCert)); err != nil {
        return fmt.Errorf("unable to set up health check: %w", err)
    }

    if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
        return fmt.Errorf("unable to set up ready check: %w", err)
    }

    setupLog.Info("starting manager")
    if err := mgr.Start(ctx); err != nil {
        return fmt.Errorf("problem running manager: %w", err)
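The health.CertValidCheck registered above lives in internal/health and is not part of this diff. A sketch of what such a checker plausibly does (names and behavior are assumptions): report unhealthy once the served certificate leaves its validity window, so the pod is restarted and picks up a renewed certificate. controller-runtime's healthz.Checker is simply func(*http.Request) error:

package health

import (
    "crypto/tls"
    "crypto/x509"
    "errors"
    "fmt"
    "net/http"
    "time"

    "sigs.k8s.io/controller-runtime/pkg/healthz"
)

// CertValidCheck (hypothetical sketch) fails the health probe whenever the
// configured server certificate is expired or not yet valid.
func CertValidCheck(cert tls.Certificate) healthz.Checker {
    return func(_ *http.Request) error {
        if len(cert.Certificate) == 0 {
            return errors.New("no server certificate configured")
        }
        leaf, err := x509.ParseCertificate(cert.Certificate[0])
        if err != nil {
            return fmt.Errorf("parsing server certificate: %w", err)
        }
        if now := time.Now(); now.Before(leaf.NotBefore) || now.After(leaf.NotAfter) {
            return fmt.Errorf("server certificate not valid at %s", now.Format(time.RFC3339))
        }
        return nil
    }
}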
@@ -131,9 +179,20 @@ func (cp controlPlane) Run(ctx context.Context) error {
    return nil
}

func (cp controlPlane) envoyServer(
func (cp *controlPlane) AfterApply() (err error) {
    cp.caCert, err = tls.X509KeyPair(cp.Tls.CA.Cert, cp.Tls.CA.Key)
    if err != nil {
        return