Compare commits
19 commits

b4347cc8a2
87a06dac66
366ceece24
8b2425d16d
101bf971a7
c4a43b82d3
1f285f6492
2d71a4a132
264b30e8a2
7f56a3db56
9d02a2d90b
3c13eb0d6b
e9302c51be
45630f7326
6c61adb1c7
8066c34eb5
867daaa375
0fccef973f
89b682935b

163 changed files with 6793 additions and 6206 deletions
.github/workflows/docs.yml (vendored, 3 changed lines)

@@ -35,7 +35,8 @@ jobs:
run: mkdocs build
- name: Copy files to the s3 website content bucket
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
# for the time being, let's just always deploy the docs
# if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: rclone sync site/ HCLOUD:/1661580-supabase-operator-docs/
env:
RCLONE_CONFIG_HCLOUD_TYPE: s3

.github/workflows/lint.yml (vendored, 2 changed lines)

@@ -28,4 +28,4 @@ jobs:
- name: Run linter
uses: golangci/golangci-lint-action@v6
with:
version: v1.61
version: v1.63.4

.github/workflows/postgres.yml (vendored, new file, 80 lines)

@@ -0,0 +1,80 @@
|
|||
name: Postgres image
|
||||
on:
|
||||
schedule:
|
||||
# every Tuesday 2:30am
|
||||
- cron: "30 2 * * 2"
|
||||
push:
|
||||
paths:
|
||||
- .github/workflows/postgres.yml
|
||||
- postgres/**
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- "v*"
|
||||
|
||||
env:
|
||||
MINOR_VERSIONS: '{"15":"12","17":"4"}'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
arch:
|
||||
- arm64
|
||||
- amd64
|
||||
postgres_major:
|
||||
- "15"
|
||||
- "17"
|
||||
runs-on: ubuntu-latest-${{ matrix.arch }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Login to container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: registry.icb4dc0.de
|
||||
username: ${{ secrets.HARBOR_USER }}
|
||||
password: ${{ secrets.HARBOR_TOKEN }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: postgres/Dockerfile
|
||||
push: true
|
||||
tags: registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-${{ matrix.arch }}
|
||||
build-args: |
|
||||
POSTGRES_MAJOR=${{ matrix.postgres_major }}
|
||||
POSTGRES_MINOR=${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}
|
||||
|
||||
manifest:
|
||||
strategy:
|
||||
matrix:
|
||||
postgres_major:
|
||||
- "15"
|
||||
- "17"
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build
|
||||
steps:
|
||||
- name: Login to container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: registry.icb4dc0.de
|
||||
username: ${{ secrets.HARBOR_USER }}
|
||||
password: ${{ secrets.HARBOR_TOKEN }}
|
||||
|
||||
- name: Install skopeo
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install -y skopeo
|
||||
|
||||
- name: Create manifest
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }} \
|
||||
-t registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }} \
|
||||
registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64 \
|
||||
registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
|
||||
|
||||
skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64
|
||||
skopeo delete docker://registry.icb4dc0.de/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
|
.github/workflows/release.yml (vendored, 7 changed lines)

@@ -22,9 +22,9 @@ jobs:
|
|||
- name: Login to container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: code.icb4dc0.de
|
||||
username: prskr
|
||||
password: ${{ secrets.RELEASE_TOKEN }}
|
||||
registry: registry.icb4dc0.de
|
||||
username: ${{ secrets.HARBOR_USER }}
|
||||
password: ${{ secrets.HARBOR_TOKEN }}
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
|
@ -36,7 +36,6 @@ jobs:
|
|||
- name: Init go
|
||||
run: |
|
||||
go mod download
|
||||
go mod download -modfile tools/go.mod
|
||||
|
||||
- name: Snapshot release
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
|
|
.github/workflows/test-e2e.yml (vendored, 5 changed lines)

@@ -43,3 +43,8 @@ jobs:
|
|||
run: |
|
||||
go mod tidy
|
||||
make test-e2e
|
||||
|
||||
- name: Cleanup kind cluster
|
||||
if: always()
|
||||
run: |
|
||||
kind delete cluster
|
||||
|
|
|
@ -1,36 +1,18 @@
|
|||
version: "2"
|
||||
run:
|
||||
timeout: 5m
|
||||
allow-parallel-runners: true
|
||||
|
||||
issues:
|
||||
# don't skip warning about doc comments
|
||||
# don't exclude the default set of lint
|
||||
exclude-use-default: false
|
||||
# restore some of the defaults
|
||||
# (fill in the rest as needed)
|
||||
exclude-rules:
|
||||
- path: "api/*"
|
||||
linters:
|
||||
- lll
|
||||
- path: "internal/*"
|
||||
linters:
|
||||
- dupl
|
||||
- lll
|
||||
linters:
|
||||
disable-all: true
|
||||
default: none
|
||||
enable:
|
||||
- copyloopvar
|
||||
- dupl
|
||||
- errcheck
|
||||
- copyloopvar
|
||||
- ginkgolinter
|
||||
- goconst
|
||||
- gocyclo
|
||||
- gofmt
|
||||
- goimports
|
||||
- goheader
|
||||
- gosimple
|
||||
# enable when the TODOs are fixed
|
||||
# - godox
|
||||
- goheader
|
||||
- govet
|
||||
- ineffassign
|
||||
- lll
|
||||
|
@ -39,15 +21,51 @@ linters:
|
|||
- prealloc
|
||||
- revive
|
||||
- staticcheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
|
||||
linters-settings:
|
||||
settings:
|
||||
goheader:
|
||||
values:
|
||||
const:
|
||||
AUTHOR: Peter Kurfer
|
||||
template-path: hack/header.tmpl
|
||||
importas:
|
||||
alias:
|
||||
- pkg: k8s.io/api/(\w+)/(v[\w\d]+)
|
||||
alias: $1$2
|
||||
- pkg: k8s.io/apimachinery/pkg/apis/meta/v1
|
||||
alias: metav1
|
||||
no-unaliased: true
|
||||
no-extra-aliases: true
|
||||
revive:
|
||||
rules:
|
||||
- name: comment-spacings
|
||||
exclusions:
|
||||
generated: lax
|
||||
rules:
|
||||
- linters:
|
||||
- lll
|
||||
path: api/*
|
||||
- linters:
|
||||
- dupl
|
||||
- lll
|
||||
path: internal/*
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
severity:
|
||||
default: error
|
||||
rules:
|
||||
- linters:
|
||||
- godox
|
||||
severity: info
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
|
@ -57,25 +75,11 @@ linters-settings:
|
|||
- blank
|
||||
- dot
|
||||
goimports:
|
||||
local-prefixes: code.icb4dc0.de/prskr/supabase-operator
|
||||
goheader:
|
||||
values:
|
||||
const:
|
||||
AUTHOR: Peter Kurfer
|
||||
template-path: hack/header.tmpl
|
||||
|
||||
importas:
|
||||
no-unaliased: true
|
||||
no-extra-aliases: true
|
||||
alias:
|
||||
- pkg: k8s.io/api/(\w+)/(v[\w\d]+)
|
||||
alias: $1$2
|
||||
- pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
alias: metav1
|
||||
|
||||
severity:
|
||||
default-severity: error
|
||||
rules:
|
||||
- linters:
|
||||
- godox
|
||||
severity: info
|
||||
local-prefixes:
|
||||
- code.icb4dc0.de/prskr/supabase-operator
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
|
|
@ -8,7 +8,7 @@ before:
|
|||
- go mod tidy
|
||||
- go run mage.go GenerateAll
|
||||
- mkdir -p out
|
||||
- sh -c "cd config/release/default && kustomize edit set image supabase-operator=code.icb4dc0.de/prskr/supabase-operator:{{.Version}}"
|
||||
- sh -c "cd config/release/default && kustomize edit set image supabase-operator=registry.icb4dc0.de/supabase-operator/controller:{{.Version}}"
|
||||
- sh -c "kustomize build config/release/default > out/operator_manifest.yaml"
|
||||
|
||||
builds:
|
||||
|
@ -28,7 +28,7 @@ kos:
|
|||
base_image: gcr.io/distroless/static:nonroot
|
||||
user: "65532:65532"
|
||||
repositories:
|
||||
- code.icb4dc0.de/prskr/supabase-operator
|
||||
- registry.icb4dc0.de/supabase-operator/controller
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
|
|
|
@ -2,8 +2,7 @@
|
|||
|
||||
# git hook pre commit
|
||||
pre-commit = [
|
||||
"go mod tidy -go=1.23.5",
|
||||
"go run mage.go GenerateAll",
|
||||
"go mod tidy -go=1.24",
|
||||
"husky lint-staged",
|
||||
# "golangci-lint run",
|
||||
]
|
||||
|
|
|
@ -4,6 +4,19 @@
|
|||
"initialization_options": {
|
||||
"local": "code.icb4dc0.de/prskr/supabase-operator"
|
||||
}
|
||||
},
|
||||
"golangci-lint": {
|
||||
"initialization_options": {
|
||||
"command": [
|
||||
"go",
|
||||
"tool",
|
||||
"golangci-lint",
|
||||
"run",
|
||||
"--output.json.path",
|
||||
"stderr",
|
||||
"--issues-exit-code=1"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
.zed/tasks.json (new file, 47 lines)

@@ -0,0 +1,47 @@
|
|||
[
|
||||
{
|
||||
"label": "Tilt Up",
|
||||
"command": "tilt",
|
||||
"args": ["up"],
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false
|
||||
},
|
||||
{
|
||||
"label": "Generate CRDs",
|
||||
"command": "go",
|
||||
"args": ["run", "mage.go", "CRDs"],
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false,
|
||||
"tags": ["mage", "generate"]
|
||||
},
|
||||
{
|
||||
"label": "Generate CRD docs",
|
||||
"command": "go",
|
||||
"args": ["run", "mage.go", "CRDDocs"],
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false,
|
||||
"tags": ["mage", "generate"]
|
||||
},
|
||||
{
|
||||
"label": "Update image meta",
|
||||
"command": "go",
|
||||
"args": ["run", "mage.go", "FetchImageMeta"],
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false,
|
||||
"tags": ["mage"]
|
||||
},
|
||||
{
|
||||
"label": "Update DB migrations",
|
||||
"command": "go",
|
||||
"args": ["run", "mage.go", "FetchMigrations"],
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false,
|
||||
"tags": ["mage"]
|
||||
},
|
||||
{
|
||||
"label": "Run all Go tests",
|
||||
"command": "go tool gotestsum ./...",
|
||||
"use_new_terminal": false,
|
||||
"allow_concurrent_runs": false
|
||||
}
|
||||
]
|
|
@ -1,5 +1,5 @@
|
|||
# Build the manager binary
|
||||
FROM golang:1.23.4 AS builder
|
||||
FROM golang:1.24-alpine AS builder
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
||||
|
@ -16,10 +16,7 @@ COPY [ "go.*", "./" ]
|
|||
COPY [ "api", "api" ]
|
||||
COPY [ "assets/migrations", "assets/migrations" ]
|
||||
COPY [ "cmd", "cmd" ]
|
||||
COPY [ "infrastructure", "infrastructure" ]
|
||||
COPY [ "internal", "internal" ]
|
||||
COPY [ "magefiles", "magefiles" ]
|
||||
COPY [ "tools", "tools" ]
|
||||
|
||||
# Build
|
||||
# the GOARCH has not a default value to allow the binary be built according to the host where the command
|
||||
|
|
Makefile (64 changed lines)

@@ -44,11 +44,11 @@ help: ## Display this help.
|
|||
##@ Development
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
manifests: ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
|
||||
.PHONY: generate
|
||||
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
generate: ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
|
||||
|
||||
.PHONY: fmt
|
||||
|
@ -60,7 +60,7 @@ vet: ## Run go vet against code.
|
|||
go vet ./...
|
||||
|
||||
.PHONY: test
|
||||
test: manifests generate fmt vet envtest ## Run tests.
|
||||
test: manifests generate fmt vet ## Run tests.
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out
|
||||
|
||||
# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.
|
||||
|
@ -81,11 +81,11 @@ test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated
|
|||
go test ./test/e2e/ -v -ginkgo.v
|
||||
|
||||
.PHONY: lint
|
||||
lint: golangci-lint ## Run golangci-lint linter
|
||||
lint: ## Run golangci-lint linter
|
||||
$(GOLANGCI_LINT) run
|
||||
|
||||
.PHONY: lint-fix
|
||||
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
|
||||
lint-fix: ## Run golangci-lint linter and perform fixes
|
||||
$(GOLANGCI_LINT) run --fix
|
||||
|
||||
##@ Build
|
||||
|
@ -127,7 +127,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
|
|||
rm Dockerfile.cross
|
||||
|
||||
.PHONY: build-installer
|
||||
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
|
||||
build-installer: manifests generate ## Generate a consolidated YAML with CRDs and deployment.
|
||||
mkdir -p dist
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
$(KUSTOMIZE) build config/default > dist/install.yaml
|
||||
|
@ -148,7 +148,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
|
|||
|
||||
.PHONY: deploy
|
||||
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
cd config/manager && $(KUSTOMIZE) edit set image supabase-operator=${IMG}
|
||||
$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
|
||||
|
||||
.PHONY: undeploy
|
||||
|
@ -164,49 +164,7 @@ $(LOCALBIN):
|
|||
|
||||
## Tool Binaries
|
||||
KUBECTL ?= kubectl
|
||||
KUSTOMIZE ?= $(LOCALBIN)/kustomize
|
||||
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
|
||||
ENVTEST ?= $(LOCALBIN)/setup-envtest
|
||||
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
|
||||
|
||||
## Tool Versions
|
||||
KUSTOMIZE_VERSION ?= v5.5.0
|
||||
CONTROLLER_TOOLS_VERSION ?= v0.16.4
|
||||
ENVTEST_VERSION ?= release-0.19
|
||||
GOLANGCI_LINT_VERSION ?= v1.61.0
|
||||
|
||||
.PHONY: kustomize
|
||||
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
|
||||
$(KUSTOMIZE): $(LOCALBIN)
|
||||
$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
|
||||
|
||||
.PHONY: controller-gen
|
||||
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
|
||||
$(CONTROLLER_GEN): $(LOCALBIN)
|
||||
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
|
||||
|
||||
.PHONY: envtest
|
||||
envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
|
||||
$(ENVTEST): $(LOCALBIN)
|
||||
$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
|
||||
|
||||
.PHONY: golangci-lint
|
||||
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
|
||||
$(GOLANGCI_LINT): $(LOCALBIN)
|
||||
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
|
||||
|
||||
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
|
||||
# $1 - target path with name of binary
|
||||
# $2 - package url which can be installed
|
||||
# $3 - specific version of package
|
||||
define go-install-tool
|
||||
@[ -f "$(1)-$(3)" ] || { \
|
||||
set -e; \
|
||||
package=$(2)@$(3) ;\
|
||||
echo "Downloading $${package}" ;\
|
||||
rm -f $(1) || true ;\
|
||||
GOBIN=$(LOCALBIN) go install $${package} ;\
|
||||
mv $(1) $(1)-$(3) ;\
|
||||
} ;\
|
||||
ln -sf $(1)-$(3) $(1)
|
||||
endef
|
||||
KUSTOMIZE ?= go tool kustomize
|
||||
CONTROLLER_GEN ?= go tool controller-gen
|
||||
ENVTEST ?= go tool setup-envtest
|
||||
GOLANGCI_LINT = go tool golangci-lint
|
||||
|
|
|
@@ -37,7 +37,7 @@ This operator tries to be as un-opinionated as possible and therefore does not
|
|||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
- go version v1.23.x+
|
||||
- go version v1.24.x+
|
||||
- docker version 27.+.
|
||||
- kubectl version v1.30.0+.
|
||||
- Access to a Kubernetes v1.30.+ cluster.
|
||||
|
|
Tiltfile (16 changed lines)

@@ -4,7 +4,6 @@ load('ext://restart_process', 'docker_build_with_restart')
|
|||
allow_k8s_contexts('kind-kind')
|
||||
|
||||
k8s_yaml(kustomize('config/dev'))
|
||||
k8s_yaml(kustomize('config/samples'))
|
||||
|
||||
compile_cmd = 'CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/supabase-operator ./cmd/'
|
||||
|
||||
|
@ -23,8 +22,10 @@ local_resource(
|
|||
resource_deps=[]
|
||||
)
|
||||
|
||||
k8s_kind('Cluster', api_version='postgresql.cnpg.io/v1')
|
||||
|
||||
docker_build_with_restart(
|
||||
'supabase-operator',
|
||||
'controller',
|
||||
'.',
|
||||
entrypoint=['/app/bin/supabase-operator'],
|
||||
dockerfile='dev/Dockerfile',
|
||||
|
@ -40,10 +41,11 @@ k8s_resource('supabase-controller-manager')
|
|||
k8s_resource(
|
||||
workload='supabase-control-plane',
|
||||
port_forwards=18000,
|
||||
resource_deps=[]
|
||||
)
|
||||
|
||||
k8s_resource(
|
||||
objects=["cluster-example:Cluster:supabase-demo"],
|
||||
workload='cluster-example',
|
||||
new_name='Postgres cluster',
|
||||
port_forwards=5432
|
||||
)
|
||||
|
@ -62,7 +64,12 @@ k8s_resource(
|
|||
k8s_resource(
|
||||
objects=["gateway-sample:APIGateway:supabase-demo"],
|
||||
extra_pod_selectors={"app.kubernetes.io/component": "api-gateway"},
|
||||
port_forwards=[8000, 19000],
|
||||
port_forwards=[3000, 8000, 19000],
|
||||
links=[
|
||||
link("https://localhost:3000", "Studio"),
|
||||
link("http://localhost:8000", "API"),
|
||||
link("http://localhost:19000", "Envoy Admin Interface")
|
||||
],
|
||||
new_name='API Gateway',
|
||||
resource_deps=[
|
||||
'supabase-controller-manager'
|
||||
|
@ -73,7 +80,6 @@ k8s_resource(
|
|||
objects=["dashboard-sample:Dashboard:supabase-demo"],
|
||||
extra_pod_selectors={"app.kubernetes.io/component": "dashboard", "app.kubernetes.io/name": "studio"},
|
||||
discovery_strategy="selectors-only",
|
||||
port_forwards=[3000],
|
||||
new_name='Dashboard',
|
||||
resource_deps=[
|
||||
'supabase-controller-manager'
|
||||
|
|
|
@ -19,6 +19,7 @@ package v1alpha1
|
|||
import (
|
||||
"iter"
|
||||
"maps"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -37,6 +38,40 @@ type ControlPlaneSpec struct {
|
|||
Port uint16 `json:"port"`
|
||||
}
|
||||
|
||||
type EnvoyLogLevel string
|
||||
|
||||
type EnvoyComponentLogLevel struct {
|
||||
// Component - the component to set the log level for
|
||||
// the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36)
|
||||
Component string `json:"component"`
|
||||
// Level - the log level to set for the component
|
||||
// +kubebuilder:validation:Enum=trace;debug;info;warning;error;critical;off
|
||||
Level EnvoyLogLevel `json:"level"`
|
||||
}
|
||||
|
||||
type EnvoyDebuggingOptions struct {
|
||||
ComponentLogLevels []EnvoyComponentLogLevel `json:"componentLogLevels,omitempty"`
|
||||
}
|
||||
|
||||
func (o *EnvoyDebuggingOptions) DebugLogging() string {
|
||||
if o == nil || len(o.ComponentLogLevels) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
for i, lvl := range o.ComponentLogLevels {
|
||||
if i > 0 {
|
||||
builder.WriteString(",")
|
||||
}
|
||||
|
||||
builder.WriteString(lvl.Component)
|
||||
builder.WriteRune(':')
|
||||
builder.WriteString(string(lvl.Level))
|
||||
}
|
||||
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
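The DebugLogging helper above renders the configured component levels into Envoy's "component:level,component:level" syntax, which is the shape Envoy's --component-log-level startup flag consumes. A minimal, self-contained sketch of that rendering, using local copies of the types for illustration only (the actual flag wiring is not shown in this diff):

package main

import (
	"fmt"
	"strings"
)

// Local copies of the API types, for illustration only.
type EnvoyLogLevel string

type EnvoyComponentLogLevel struct {
	Component string
	Level     EnvoyLogLevel
}

type EnvoyDebuggingOptions struct {
	ComponentLogLevels []EnvoyComponentLogLevel
}

// DebugLogging is equivalent to the method added above: it joins the
// configured entries into Envoy's "component:level,..." syntax.
func (o *EnvoyDebuggingOptions) DebugLogging() string {
	if o == nil || len(o.ComponentLogLevels) == 0 {
		return ""
	}
	parts := make([]string, 0, len(o.ComponentLogLevels))
	for _, lvl := range o.ComponentLogLevels {
		parts = append(parts, lvl.Component+":"+string(lvl.Level))
	}
	return strings.Join(parts, ",")
}

func main() {
	opts := &EnvoyDebuggingOptions{ComponentLogLevels: []EnvoyComponentLogLevel{
		{Component: "http", Level: "debug"},
		{Component: "jwt", Level: "trace"},
	}}
	// Prints: --component-log-level http:debug,jwt:trace
	fmt.Println("--component-log-level", opts.DebugLogging())
}
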
type EnvoySpec struct {
|
||||
// NodeName - identifies the Envoy cluster within the current namespace
|
||||
// if not set, the name of the APIGateway resource will be used
|
||||
|
@ -45,15 +80,137 @@ type EnvoySpec struct {
|
|||
// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
|
||||
ControlPlane *ControlPlaneSpec `json:"controlPlane"`
|
||||
// WorkloadTemplate - customize the Envoy deployment
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
// DisableIPv6 - disable IPv6 for the Envoy instance
|
||||
// this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
|
||||
DisableIPv6 bool `json:"disableIPv6,omitempty"`
|
||||
Debugging *EnvoyDebuggingOptions `json:"debugging,omitempty"`
|
||||
}
|
||||
|
||||
type TlsCertRef struct {
|
||||
SecretName string `json:"secretName"`
|
||||
// ServerCertKey - key in the secret that contains the server certificate
|
||||
// +kubebuilder:default="tls.crt"
|
||||
ServerCertKey string `json:"serverCertKey"`
|
||||
// ServerKeyKey - key in the secret that contains the server private key
|
||||
// +kubebuilder:default="tls.key"
|
||||
ServerKeyKey string `json:"serverKeyKey"`
|
||||
// CaCertKey - key in the secret that contains the CA certificate
|
||||
// +kubebuilder:default="ca.crt"
|
||||
CaCertKey string `json:"caCertKey,omitempty"`
|
||||
}
|
||||
|
||||
type EndpointTlsSpec struct {
|
||||
Cert *TlsCertRef `json:"cert"`
|
||||
}
|
||||
|
||||
type ApiEndpointSpec struct {
|
||||
// JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs
|
||||
JWKSSelector *corev1.SecretKeySelector `json:"jwks"`
|
||||
// TLS - enable and configure TLS for the API endpoint
|
||||
TLS *EndpointTlsSpec `json:"tls,omitempty"`
|
||||
}
|
||||
|
||||
type DashboardEndpointSpec struct{}
|
||||
func (s *ApiEndpointSpec) TLSSpec() *EndpointTlsSpec {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.TLS
|
||||
}
|
||||
|
||||
type DashboardAuthType string
|
||||
|
||||
const (
|
||||
DashboardAuthTypeNone DashboardAuthType = "none"
|
||||
DashboardAuthTypeOAuth2 DashboardAuthType = "oauth2"
|
||||
DashboardAuthTypeBasic DashboardAuthType = "basic"
|
||||
)
|
||||
|
||||
type DashboardOAuth2Spec struct {
|
||||
// OpenIDIssuer - if set the defaulter will fetch the discovery document and fill
|
||||
// TokenEndpoint and AuthorizationEndpoint based on the discovery document
|
||||
OpenIDIssuer string `json:"openIdIssuer,omitempty"`
|
||||
// TokenEndpoint - endpoint where Envoy will retrieve the OAuth2 access and identity token from
|
||||
TokenEndpoint string `json:"tokenEndpoint,omitempty"`
|
||||
// AuthorizationEndpoint - endpoint where the user will be redirected to authenticate
|
||||
AuthorizationEndpoint string `json:"authorizationEndpoint,omitempty"`
|
||||
// ClientID - client ID to authenticate with the OAuth2 provider
|
||||
ClientID string `json:"clientId"`
|
||||
// Scopes - scopes to request from the OAuth2 provider (e.g. "openid", "profile", ...) - optional
|
||||
Scopes []string `json:"scopes,omitempty"`
|
||||
// Resources - resources to request from the OAuth2 provider (e.g. "user", "email", ...) - optional
|
||||
Resources []string `json:"resources,omitempty"`
|
||||
// ClientSecretRef - reference to the secret that contains the client secret
|
||||
ClientSecretRef *corev1.SecretKeySelector `json:"clientSecretRef"`
|
||||
}
|
||||
|
||||
type DashboardBasicAuthSpec struct {
|
||||
// UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)
|
||||
// +kubebuilder:validation:items:Pattern="^[\\w_.]+:\\{SHA\\}[A-z0-9]+=*$"
|
||||
UsersInline []string `json:"usersInline,omitempty"`
|
||||
// PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form
|
||||
// if not empty, credentials will be merged with inline users
|
||||
PlaintextUsersSecretRef string `json:"plaintextUsersSecretRef,omitempty"`
|
||||
}
|
||||
|
||||
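UsersInline entries follow htpasswd's SHA scheme: the username, the literal {SHA} marker, and the base64-encoded SHA-1 digest of the password. A small sketch for generating such an entry (the helper name is illustrative, not part of the API):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// htpasswdSHAEntry renders "user:{SHA}base64(sha1(password))", the shape the
// UsersInline validation pattern above expects.
func htpasswdSHAEntry(user, password string) string {
	sum := sha1.Sum([]byte(password))
	return fmt.Sprintf("%s:{SHA}%s", user, base64.StdEncoding.EncodeToString(sum[:]))
}

func main() {
	// Example output: admin:{SHA}<base64 of sha1("password")>
	fmt.Println(htpasswdSHAEntry("admin", "password"))
}
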
type DashboardAuthSpec struct {
|
||||
// OAuth2 - configure oauth2 authentication for the dashboard listener
|
||||
// if configured, will be preferred over Basic authentication configuration
|
||||
// effectively disabling basic auth
|
||||
OAuth2 *DashboardOAuth2Spec `json:"oauth2,omitempty"`
|
||||
// Basic - HTTP basic auth configuration, this should only be used in exceptions
|
||||
// e.g. during evaluations or for local development
|
||||
// only used if no other authentication is configured
|
||||
Basic *DashboardBasicAuthSpec `json:"basic,omitempty"`
|
||||
}
|
||||
|
||||
type DashboardEndpointSpec struct {
|
||||
// Auth - configure authentication for the dashboard endpoint
|
||||
Auth *DashboardAuthSpec `json:"auth,omitempty"`
|
||||
// TLS - enable and configure TLS for the Dashboard endpoint
|
||||
TLS *EndpointTlsSpec `json:"tls,omitempty"`
|
||||
}
|
||||
|
||||
func (s *DashboardEndpointSpec) TLSSpec() *EndpointTlsSpec {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.TLS
|
||||
}
|
||||
|
||||
func (s *DashboardEndpointSpec) AuthType() DashboardAuthType {
|
||||
if s == nil || s.Auth == nil {
|
||||
return DashboardAuthTypeNone
|
||||
}
|
||||
|
||||
if s.Auth.OAuth2 != nil {
|
||||
return DashboardAuthTypeOAuth2
|
||||
}
|
||||
|
||||
if s.Auth.Basic != nil {
|
||||
return DashboardAuthTypeBasic
|
||||
}
|
||||
|
||||
return DashboardAuthTypeNone
|
||||
}
|
||||
|
||||
func (s *DashboardEndpointSpec) OAuth2() *DashboardOAuth2Spec {
|
||||
if s == nil || s.Auth == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.Auth.OAuth2
|
||||
}
|
||||
|
||||
func (s *DashboardEndpointSpec) Basic() *DashboardBasicAuthSpec {
|
||||
if s == nil || s.Auth == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.Auth.Basic
|
||||
}
|
||||
|
||||
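Because OAuth2 takes precedence over Basic and every field is optional, the nil-safe accessors above let consumers branch on a single value. A hypothetical caller could look like this (the CRD import path is assumed from the repository module, and configureDashboardAuth is purely illustrative):

package gateway

import (
	"fmt"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1" // assumed path
)

// configureDashboardAuth dispatches on the effective auth type; a nil spec or
// nil auth section simply falls through to the unauthenticated case.
func configureDashboardAuth(ep *supabasev1alpha1.DashboardEndpointSpec) {
	switch ep.AuthType() {
	case supabasev1alpha1.DashboardAuthTypeOAuth2:
		fmt.Println("configuring OAuth2 filter for client", ep.OAuth2().ClientID)
	case supabasev1alpha1.DashboardAuthTypeBasic:
		fmt.Println("configuring basic auth with", len(ep.Basic().UsersInline), "inline users")
	default:
		fmt.Println("dashboard exposed without authentication")
	}
}
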
// APIGatewaySpec defines the desired state of APIGateway.
|
||||
type APIGatewaySpec struct {
|
||||
|
@ -74,7 +231,6 @@ type APIGatewaySpec struct {
|
|||
}
|
||||
|
||||
type EnvoyStatus struct {
|
||||
ConfigVersion string `json:"configVersion,omitempty"`
|
||||
ResourceHash []byte `json:"resourceHash,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -88,7 +244,7 @@ type APIGatewayStatus struct {
|
|||
// +kubebuilder:subresource:status
|
||||
|
||||
// APIGateway is the Schema for the apigateways API.
|
||||
// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.configVersion`
|
||||
// +kubebuilder:printcolumn:name="EnvoyConfigVersion",type=string,JSONPath=`.status.envoy.resourceHash`
|
||||
type APIGateway struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
|
|
@ -91,16 +91,16 @@ type ContainerTemplate struct {
|
|||
AdditionalEnv []corev1.EnvVar `json:"additionalEnv,omitempty"`
|
||||
}
|
||||
|
||||
type WorkloadTemplate struct {
|
||||
type WorkloadSpec struct {
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
|
||||
// Workload - customize the container template of the workload
|
||||
Workload *ContainerTemplate `json:"workload,omitempty"`
|
||||
// ContainerSpec - customize the container template of the workload
|
||||
ContainerSpec *ContainerTemplate `json:"container,omitempty"`
|
||||
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) ReplicaCount() *int32 {
|
||||
func (t *WorkloadSpec) ReplicaCount() *int32 {
|
||||
if t != nil && t.Replicas != nil {
|
||||
return t.Replicas
|
||||
}
|
||||
|
@ -108,20 +108,20 @@ func (t *WorkloadTemplate) ReplicaCount() *int32 {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
||||
if t == nil || t.Workload == nil || len(t.Workload.AdditionalEnv) == 0 {
|
||||
func (t *WorkloadSpec) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
||||
if t == nil || t.ContainerSpec == nil || len(t.ContainerSpec.AdditionalEnv) == 0 {
|
||||
return basicEnv
|
||||
}
|
||||
|
||||
existingKeys := make(map[string]bool, len(basicEnv)+len(t.Workload.AdditionalEnv))
|
||||
existingKeys := make(map[string]bool, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv))
|
||||
|
||||
merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.Workload.AdditionalEnv)), basicEnv...)
|
||||
merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv)), basicEnv...)
|
||||
|
||||
for _, v := range basicEnv {
|
||||
existingKeys[v.Name] = true
|
||||
}
|
||||
|
||||
for _, v := range t.Workload.AdditionalEnv {
|
||||
for _, v := range t.ContainerSpec.AdditionalEnv {
|
||||
if _, alreadyPresent := existingKeys[v.Name]; alreadyPresent {
|
||||
continue
|
||||
}
|
||||
|
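The MergeEnv method above gives the operator-provided variables precedence: names already present in basicEnv are kept, and only new names from the container template are appended. A short sketch of that behaviour (import path again assumed; PGRST_DB_SCHEMAS and LOG_LEVEL are just example variables):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1" // assumed path
)

func main() {
	spec := &supabasev1alpha1.WorkloadSpec{
		ContainerSpec: &supabasev1alpha1.ContainerTemplate{
			AdditionalEnv: []corev1.EnvVar{
				{Name: "PGRST_DB_SCHEMAS", Value: "public"}, // name collides -> ignored
				{Name: "LOG_LEVEL", Value: "debug"},         // new name -> appended
			},
		},
	}

	base := []corev1.EnvVar{{Name: "PGRST_DB_SCHEMAS", Value: "public,storage"}}

	for _, v := range spec.MergeEnv(base) {
		fmt.Printf("%s=%s\n", v.Name, v.Value)
	}
	// Output:
	// PGRST_DB_SCHEMAS=public,storage
	// LOG_LEVEL=debug
}
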
@ -132,7 +132,7 @@ func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
|||
return merged
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
|
||||
func (t *WorkloadSpec) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
|
||||
maps.Copy(result, initial)
|
||||
|
@ -156,47 +156,47 @@ func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...ma
|
|||
return result
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) Image(defaultImage string) string {
|
||||
if t != nil && t.Workload != nil && t.Workload.Image != "" {
|
||||
return t.Workload.Image
|
||||
func (t *WorkloadSpec) Image(defaultImage string) string {
|
||||
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.Image != "" {
|
||||
return t.ContainerSpec.Image
|
||||
}
|
||||
|
||||
return defaultImage
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) ImagePullPolicy() corev1.PullPolicy {
|
||||
if t != nil && t.Workload != nil && t.Workload.PullPolicy != "" {
|
||||
return t.Workload.PullPolicy
|
||||
func (t *WorkloadSpec) ImagePullPolicy() corev1.PullPolicy {
|
||||
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.PullPolicy != "" {
|
||||
return t.ContainerSpec.PullPolicy
|
||||
}
|
||||
|
||||
return corev1.PullIfNotPresent
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) PullSecrets() []corev1.LocalObjectReference {
|
||||
if t != nil && t.Workload != nil && len(t.Workload.ImagePullSecrets) > 0 {
|
||||
return t.Workload.ImagePullSecrets
|
||||
func (t *WorkloadSpec) PullSecrets() []corev1.LocalObjectReference {
|
||||
if t != nil && t.ContainerSpec != nil && len(t.ContainerSpec.ImagePullSecrets) > 0 {
|
||||
return t.ContainerSpec.ImagePullSecrets
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) Resources() corev1.ResourceRequirements {
|
||||
if t != nil && t.Workload != nil {
|
||||
return t.Workload.Resources
|
||||
func (t *WorkloadSpec) Resources() corev1.ResourceRequirements {
|
||||
if t != nil && t.ContainerSpec != nil {
|
||||
return t.ContainerSpec.Resources
|
||||
}
|
||||
|
||||
return corev1.ResourceRequirements{}
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
|
||||
if t != nil && t.Workload != nil {
|
||||
return append(defaultMounts, t.Workload.VolumeMounts...)
|
||||
func (t *WorkloadSpec) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
|
||||
if t != nil && t.ContainerSpec != nil {
|
||||
return append(defaultMounts, t.ContainerSpec.VolumeMounts...)
|
||||
}
|
||||
|
||||
return defaultMounts
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
|
||||
func (t *WorkloadSpec) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
|
||||
if t == nil {
|
||||
return defaultVolumes
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Vol
|
|||
return append(defaultVolumes, t.AdditionalVolumes...)
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
|
||||
func (t *WorkloadSpec) PodSecurityContext() *corev1.PodSecurityContext {
|
||||
if t != nil && t.SecurityContext != nil {
|
||||
return t.SecurityContext
|
||||
}
|
||||
|
@ -214,9 +214,9 @@ func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
|
|||
}
|
||||
}
|
||||
|
||||
func (t *WorkloadTemplate) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
|
||||
if t != nil && t.Workload != nil && t.Workload.SecurityContext != nil {
|
||||
return t.Workload.SecurityContext
|
||||
func (t *WorkloadSpec) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
|
||||
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.SecurityContext != nil {
|
||||
return t.ContainerSpec.SecurityContext
|
||||
}
|
||||
|
||||
return &corev1.SecurityContext{
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -25,6 +26,7 @@ import (
|
|||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -59,7 +61,7 @@ type DatabaseRoles struct {
|
|||
|
||||
type Database struct {
|
||||
DSN *string `json:"dsn,omitempty"`
|
||||
DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef,omitempty"`
|
||||
DSNSecretRef *corev1.SecretKeySelector `json:"dsnSecretRef"`
|
||||
Roles DatabaseRoles `json:"roles,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -167,8 +169,8 @@ type PostgrestSpec struct {
|
|||
// MaxRows - maximum number of rows PostgREST will load at a time
|
||||
// +kubebuilder:default=1000
|
||||
MaxRows int `json:"maxRows,omitempty"`
|
||||
// WorkloadTemplate - customize the PostgREST workload
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
// WorkloadSpec - customize the PostgREST workload
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
}
|
||||
|
||||
type AuthProviderMeta struct {
|
||||
|
@ -369,7 +371,7 @@ type AuthSpec struct {
|
|||
DisableSignup *bool `json:"disableSignup,omitempty"`
|
||||
AnonymousUsersEnabled *bool `json:"anonymousUsersEnabled,omitempty"`
|
||||
Providers *AuthProviders `json:"providers,omitempty"`
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadTemplate *WorkloadSpec `json:"workloadTemplate,omitempty"`
|
||||
EmailSignupDisabled *bool `json:"emailSignupDisabled,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -387,22 +389,91 @@ type CoreSpec struct {
|
|||
Auth *AuthSpec `json:"auth,omitempty"`
|
||||
}
|
||||
|
||||
type MigrationStatus map[string]metav1.Time
|
||||
type MigrationConditionStatus string
|
||||
|
||||
func (s MigrationStatus) IsApplied(name string) bool {
|
||||
_, ok := s[name]
|
||||
return ok
|
||||
}
|
||||
const (
|
||||
MigrationConditionStatusApplied MigrationConditionStatus = "Applied"
|
||||
MigrationConditionStatusFailed MigrationConditionStatus = "Failed"
|
||||
)
|
||||
|
||||
func (s MigrationStatus) Record(name string) {
|
||||
s[name] = metav1.Now()
|
||||
type MigrationScriptCondition struct {
|
||||
// Name - file name of the migration script
|
||||
Name string `json:"name"`
|
||||
// Hash - SHA256 hash of the script when it was last successfully applied
|
||||
Hash []byte `json:"hash"`
|
||||
// Status - whether the migration was applied or not
|
||||
// +kubebuilder:validation:Enum=Applied;Failed
|
||||
Status MigrationConditionStatus `json:"status"`
|
||||
// LastProbeTime - last time the operator tried to execute the migration script
|
||||
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
|
||||
// LastTransitionTime - last time the condition transitioned from one status to another
|
||||
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
|
||||
// Reason - one-word, CamelCase reason for the condition's last transition
|
||||
Reason string `json:"reason,omitempty"`
|
||||
// Message - human-readable message indicating details about the last transition
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
type DatabaseStatus struct {
|
||||
AppliedMigrations MigrationStatus `json:"appliedMigrations,omitempty"`
|
||||
MigrationConditions []MigrationScriptCondition `json:"migrationConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||
Roles map[string][]byte `json:"roles,omitempty"`
|
||||
}
|
||||
|
||||
func (s DatabaseStatus) IsMigrationUpToDate(name string, hash []byte) (found bool, upToDate bool) {
|
||||
for _, cond := range s.MigrationConditions {
|
||||
if cond.Name == name {
|
||||
return true, bytes.Equal(cond.Hash, hash)
|
||||
}
|
||||
}
|
||||
|
||||
return false, false
|
||||
}
|
||||
|
||||
func (s *DatabaseStatus) RecordMigrationCondition(name string, hash []byte, err error) error {
|
||||
var (
|
||||
now = time.Now()
|
||||
newStatus = MigrationConditionStatusApplied
|
||||
lastProbeTime = metav1.NewTime(now)
|
||||
lastTransitionTime = metav1.NewTime(now)
|
||||
message string
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
newStatus = MigrationConditionStatusFailed
|
||||
message = err.Error()
|
||||
}
|
||||
|
||||
for idx, cond := range s.MigrationConditions {
|
||||
if cond.Name == name {
|
||||
lastTransitionTime = cond.LastTransitionTime
|
||||
if cond.Status != newStatus {
|
||||
lastTransitionTime = metav1.NewTime(now)
|
||||
}
|
||||
|
||||
cond.Hash = hash
|
||||
cond.Status = newStatus
|
||||
cond.LastProbeTime = lastProbeTime
|
||||
cond.LastTransitionTime = lastTransitionTime
|
||||
cond.Reason = "Outdated"
|
||||
cond.Message = message
|
||||
|
||||
s.MigrationConditions[idx] = cond
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.MigrationConditions = append(s.MigrationConditions, MigrationScriptCondition{
|
||||
Name: name,
|
||||
Hash: hash,
|
||||
Status: newStatus,
|
||||
LastProbeTime: lastProbeTime,
|
||||
LastTransitionTime: lastTransitionTime,
|
||||
Message: message,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type CoreConditionType string
|
||||
|
||||
// CoreStatus defines the observed state of Core.
|
||||
|
|
|
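Replacing the plain applied-timestamp map with MigrationScriptCondition means each script is tracked by its content hash, so an edited script is re-applied and failures surface with probe time, reason and message. A sketch of how a reconciler could drive these helpers (both import paths are assumptions based on the repository layout; applyScript stands in for executing the SQL):

package reconciler

import (
	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1" // assumed path
	"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"             // assumed path
)

// applyScript is a stand-in for running the script against the target database.
func applyScript(s migrations.Script) error { return nil }

// applyPendingMigrations skips scripts whose recorded hash still matches and
// records the outcome (Applied or Failed) of every script it attempts.
func applyPendingMigrations(status *supabasev1alpha1.DatabaseStatus) error {
	for script, err := range migrations.InitScripts() {
		if err != nil {
			return err
		}

		if _, upToDate := status.IsMigrationUpToDate(script.FileName, script.Hash); upToDate {
			continue // unchanged since the last successful run
		}

		// RecordMigrationCondition stores the new hash and status and hands the
		// apply error back so the caller can requeue on failure.
		if err := status.RecordMigrationCondition(script.FileName, script.Hash, applyScript(script)); err != nil {
			return err
		}
	}

	return nil
}
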
@ -24,7 +24,7 @@ import (
|
|||
type StudioSpec struct {
|
||||
JWT *JwtSpec `json:"jwt,omitempty"`
|
||||
// WorkloadTemplate - customize the studio deployment
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
// GatewayServiceSelector - selector to find the service for the API gateway
|
||||
// Required to configure the API URL in the studio deployment
|
||||
// If you don't run multiple APIGateway instances in the same namespaces, the default will be fine
|
||||
|
@ -37,7 +37,7 @@ type StudioSpec struct {
|
|||
|
||||
type PGMetaSpec struct {
|
||||
// WorkloadTemplate - customize the pg-meta deployment
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
}
|
||||
|
||||
type DbCredentialsReference struct {
|
||||
|
|
|
@ -191,7 +191,7 @@ type StorageApiSpec struct {
|
|||
// UploadTemp - configure the emptyDir for storing intermediate files during uploads
|
||||
UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
|
||||
// WorkloadTemplate - customize the Storage API workload
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageProxySpec struct {
|
||||
|
@ -199,7 +199,7 @@ type ImageProxySpec struct {
|
|||
Enable bool `json:"enable,omitempty"`
|
||||
EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
|
||||
// WorkloadTemplate - customize the image proxy workload
|
||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
||||
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||
}
|
||||
|
||||
// StorageSpec defines the desired state of Storage.
|
||||
|
|
|
@ -101,7 +101,7 @@ func (in *APIGatewaySpec) DeepCopyInto(out *APIGatewaySpec) {
|
|||
if in.DashboardEndpoint != nil {
|
||||
in, out := &in.DashboardEndpoint, &out.DashboardEndpoint
|
||||
*out = new(DashboardEndpointSpec)
|
||||
**out = **in
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ServiceSelector != nil {
|
||||
in, out := &in.ServiceSelector, &out.ServiceSelector
|
||||
|
@ -160,6 +160,11 @@ func (in *ApiEndpointSpec) DeepCopyInto(out *ApiEndpointSpec) {
|
|||
*out = new(v1.SecretKeySelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.TLS != nil {
|
||||
in, out := &in.TLS, &out.TLS
|
||||
*out = new(EndpointTlsSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiEndpointSpec.
|
||||
|
@ -247,7 +252,7 @@ func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
|
|||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.EmailSignupDisabled != nil {
|
||||
|
@ -490,6 +495,51 @@ func (in *Dashboard) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardAuthSpec) DeepCopyInto(out *DashboardAuthSpec) {
|
||||
*out = *in
|
||||
if in.OAuth2 != nil {
|
||||
in, out := &in.OAuth2, &out.OAuth2
|
||||
*out = new(DashboardOAuth2Spec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Basic != nil {
|
||||
in, out := &in.Basic, &out.Basic
|
||||
*out = new(DashboardBasicAuthSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardAuthSpec.
|
||||
func (in *DashboardAuthSpec) DeepCopy() *DashboardAuthSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DashboardAuthSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardBasicAuthSpec) DeepCopyInto(out *DashboardBasicAuthSpec) {
|
||||
*out = *in
|
||||
if in.UsersInline != nil {
|
||||
in, out := &in.UsersInline, &out.UsersInline
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardBasicAuthSpec.
|
||||
func (in *DashboardBasicAuthSpec) DeepCopy() *DashboardBasicAuthSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DashboardBasicAuthSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardDbSpec) DeepCopyInto(out *DashboardDbSpec) {
|
||||
*out = *in
|
||||
|
@ -513,6 +563,16 @@ func (in *DashboardDbSpec) DeepCopy() *DashboardDbSpec {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardEndpointSpec) DeepCopyInto(out *DashboardEndpointSpec) {
|
||||
*out = *in
|
||||
if in.Auth != nil {
|
||||
in, out := &in.Auth, &out.Auth
|
||||
*out = new(DashboardAuthSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.TLS != nil {
|
||||
in, out := &in.TLS, &out.TLS
|
||||
*out = new(EndpointTlsSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardEndpointSpec.
|
||||
|
@ -557,6 +617,36 @@ func (in *DashboardList) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardOAuth2Spec) DeepCopyInto(out *DashboardOAuth2Spec) {
|
||||
*out = *in
|
||||
if in.Scopes != nil {
|
||||
in, out := &in.Scopes, &out.Scopes
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ClientSecretRef != nil {
|
||||
in, out := &in.ClientSecretRef, &out.ClientSecretRef
|
||||
*out = new(v1.SecretKeySelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOAuth2Spec.
|
||||
func (in *DashboardOAuth2Spec) DeepCopy() *DashboardOAuth2Spec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DashboardOAuth2Spec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) {
|
||||
*out = *in
|
||||
|
@ -662,11 +752,11 @@ func (in *DatabaseRolesSecrets) DeepCopy() *DatabaseRolesSecrets {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
|
||||
*out = *in
|
||||
if in.AppliedMigrations != nil {
|
||||
in, out := &in.AppliedMigrations, &out.AppliedMigrations
|
||||
*out = make(MigrationStatus, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
if in.MigrationConditions != nil {
|
||||
in, out := &in.MigrationConditions, &out.MigrationConditions
|
||||
*out = make([]MigrationScriptCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Roles != nil {
|
||||
|
@ -768,6 +858,61 @@ func (in *EmailAuthSmtpSpec) DeepCopy() *EmailAuthSmtpSpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointTlsSpec) DeepCopyInto(out *EndpointTlsSpec) {
|
||||
*out = *in
|
||||
if in.Cert != nil {
|
||||
in, out := &in.Cert, &out.Cert
|
||||
*out = new(TlsCertRef)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointTlsSpec.
|
||||
func (in *EndpointTlsSpec) DeepCopy() *EndpointTlsSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointTlsSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvoyComponentLogLevel) DeepCopyInto(out *EnvoyComponentLogLevel) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyComponentLogLevel.
|
||||
func (in *EnvoyComponentLogLevel) DeepCopy() *EnvoyComponentLogLevel {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvoyComponentLogLevel)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvoyDebuggingOptions) DeepCopyInto(out *EnvoyDebuggingOptions) {
|
||||
*out = *in
|
||||
if in.ComponentLogLevels != nil {
|
||||
in, out := &in.ComponentLogLevels, &out.ComponentLogLevels
|
||||
*out = make([]EnvoyComponentLogLevel, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyDebuggingOptions.
|
||||
func (in *EnvoyDebuggingOptions) DeepCopy() *EnvoyDebuggingOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvoyDebuggingOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
||||
*out = *in
|
||||
|
@ -776,9 +921,14 @@ func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
|||
*out = new(ControlPlaneSpec)
|
||||
**out = **in
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Debugging != nil {
|
||||
in, out := &in.Debugging, &out.Debugging
|
||||
*out = new(EnvoyDebuggingOptions)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -848,9 +998,9 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
|
||||
*out = *in
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -896,24 +1046,25 @@ func (in *JwtSpec) DeepCopy() *JwtSpec {
|
|||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in MigrationStatus) DeepCopyInto(out *MigrationStatus) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(MigrationStatus, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
func (in *MigrationScriptCondition) DeepCopyInto(out *MigrationScriptCondition) {
|
||||
*out = *in
|
||||
if in.Hash != nil {
|
||||
in, out := &in.Hash, &out.Hash
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationStatus.
|
||||
func (in MigrationStatus) DeepCopy() MigrationStatus {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationScriptCondition.
|
||||
func (in *MigrationScriptCondition) DeepCopy() *MigrationScriptCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MigrationStatus)
|
||||
out := new(MigrationScriptCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
|
@ -939,9 +1090,9 @@ func (in *OAuthProvider) DeepCopy() *OAuthProvider {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
||||
*out = *in
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -985,9 +1136,9 @@ func (in *PostgrestSpec) DeepCopyInto(out *PostgrestSpec) {
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -1144,9 +1295,9 @@ func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
|
|||
*out = new(UploadTempSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
@ -1237,9 +1388,9 @@ func (in *StudioSpec) DeepCopyInto(out *StudioSpec) {
|
|||
*out = new(JwtSpec)
|
||||
**out = **in
|
||||
}
|
||||
if in.WorkloadTemplate != nil {
|
||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||
*out = new(WorkloadTemplate)
|
||||
if in.WorkloadSpec != nil {
|
||||
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||
*out = new(WorkloadSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.GatewayServiceMatchLabels != nil {
|
||||
|
@ -1261,6 +1412,21 @@ func (in *StudioSpec) DeepCopy() *StudioSpec {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TlsCertRef) DeepCopyInto(out *TlsCertRef) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TlsCertRef.
|
||||
func (in *TlsCertRef) DeepCopy() *TlsCertRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TlsCertRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
|
||||
*out = *in
|
||||
|
@ -1282,7 +1448,7 @@ func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
|
|||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||
func (in *WorkloadSpec) DeepCopyInto(out *WorkloadSpec) {
|
||||
*out = *in
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
|
@ -1301,8 +1467,8 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
|||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Workload != nil {
|
||||
in, out := &in.Workload, &out.Workload
|
||||
if in.ContainerSpec != nil {
|
||||
in, out := &in.ContainerSpec, &out.ContainerSpec
|
||||
*out = new(ContainerTemplate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
|
@ -1315,12 +1481,12 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
|||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.
|
||||
func (in *WorkloadTemplate) DeepCopy() *WorkloadTemplate {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpec.
|
||||
func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadTemplate)
|
||||
out := new(WorkloadSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
|
@ -18,7 +18,15 @@ grant pg_read_all_data to supabase_read_only_user;
create schema if not exists extensions;
create extension if not exists "uuid-ossp" with schema extensions;
create extension if not exists pgcrypto with schema extensions;
create extension if not exists pgjwt with schema extensions;
do $$
begin
if exists (select 1 from pg_available_extensions where name = 'pgjwt') then
if not exists (select 1 from pg_extension where extname = 'pgjwt') then
create extension if not exists pgjwt with schema "extensions" cascade;
end if;
end if;
end $$;

-- Set up auth roles for the developer
create role anon nologin noinherit;
@ -17,6 +17,7 @@ limitations under the License.
package migrations

import (
	"crypto/sha256"
	"embed"
	"fmt"
	"io/fs"

@ -32,6 +33,7 @@ var migrationsFS embed.FS

type Script struct {
	FileName string
	Content  string
	Hash     []byte
}

func InitScripts() iter.Seq2[Script, error] {

@ -49,10 +51,18 @@ func RoleCreationScript(roleName string) (Script, error) {
		return Script{}, err
	}

	return Script{fileName, string(content)}, nil
	hash := sha256.New()
	_, _ = hash.Write(content)

	return Script{
		FileName: fileName,
		Content:  string(content),
		Hash:     hash.Sum(nil),
	}, nil
}

func readScripts(dir string) iter.Seq2[Script, error] {
	hash := sha256.New()
	return func(yield func(Script, error) bool) {
		files, err := migrationsFS.ReadDir(dir)
		if err != nil {

@ -76,11 +86,16 @@ func readScripts(dir string) iter.Seq2[Script, error] {
			}
		}

		_, _ = hash.Write(content)

		s := Script{
			FileName: file.Name(),
			Content:  string(content),
			Hash:     hash.Sum(nil),
		}

		hash.Reset()

		if !yield(s, nil) {
			return
		}
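Each script now carries a SHA-256 digest of its content, which makes change detection cheap. A minimal sketch (a hypothetical helper in the same package, not part of this change set) of how a caller could skip scripts whose hash has not moved since the last apply:

```go
package migrations

import "bytes"

// pendingScripts is a sketch only: "applied" (file name -> hash recorded after
// the previous run) is an assumed input, not state the operator keeps under
// this name.
func pendingScripts(applied map[string][]byte) ([]Script, error) {
	var pending []Script
	for script, err := range InitScripts() {
		if err != nil {
			return nil, err
		}
		if prev, ok := applied[script.FileName]; ok && bytes.Equal(prev, script.Hash) {
			continue // content unchanged since the last apply
		}
		pending = append(pending, script)
	}
	return pending, nil
}
```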
@ -1,19 +0,0 @@
-- migrate:up

-- demote postgres user
GRANT ALL ON DATABASE postgres TO postgres;
GRANT ALL ON SCHEMA auth TO postgres;
GRANT ALL ON SCHEMA extensions TO postgres;
GRANT ALL ON SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres;
ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS;

-- migrate:down
@ -5,6 +5,15 @@ DECLARE
pgsodium_exists boolean;
vault_exists boolean;
BEGIN
IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'supabase_vault' AND default_version != '0.2.8') THEN
CREATE EXTENSION IF NOT EXISTS supabase_vault;

-- for some reason extension custom scripts aren't run during AMI build, so
-- we manually run it here
GRANT USAGE ON SCHEMA vault TO postgres WITH GRANT OPTION;
GRANT SELECT, DELETE ON vault.secrets, vault.decrypted_secrets TO postgres WITH GRANT OPTION;
GRANT EXECUTE ON FUNCTION vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt TO postgres WITH GRANT OPTION;
ELSE
pgsodium_exists = (
select count(*) = 1
from pg_available_extensions

@ -35,6 +44,7 @@ BEGIN
create extension if not exists supabase_vault;
END IF;
END IF;
END IF;
END $$;

-- migrate:down
17 assets/migrations/migrations/20221207154255_create_vault.sql Normal file

@ -0,0 +1,17 @@
-- migrate:up

DO $$
BEGIN
IF EXISTS (select from pg_available_extensions where name = 'supabase_vault')
THEN
create extension if not exists supabase_vault;

-- for some reason extension custom scripts aren't run during AMI build, so
-- we manually run it here
grant usage on schema vault to postgres with grant option;
grant select, delete on vault.secrets, vault.decrypted_secrets to postgres with grant option;
grant execute on function vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt to postgres with grant option;
END IF;
END $$;

-- migrate:down
@ -4,7 +4,12 @@ ALTER ROLE authenticated inherit;
ALTER ROLE anon inherit;
ALTER ROLE service_role inherit;

GRANT pgsodium_keyholder to service_role;
DO $$
BEGIN
IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN
GRANT pgsodium_keyholder to service_role;
END IF;
END $$;

-- migrate:down

@ -0,0 +1,6 @@
-- migrate:up
alter role supabase_admin set log_statement = none;
alter role supabase_auth_admin set log_statement = none;
alter role supabase_storage_admin set log_statement = none;

-- migrate:down
@ -0,0 +1,26 @@
-- migrate:up
do $$
declare
ext_schema text;
extensions_schema_exists boolean;
begin
-- check if the "extensions" schema exists
select exists (
select 1 from pg_namespace where nspname = 'extensions'
) into extensions_schema_exists;

if extensions_schema_exists then
-- check if the "orioledb" extension is in the "public" schema
select nspname into ext_schema
from pg_extension e
join pg_namespace n on e.extnamespace = n.oid
where extname = 'orioledb';

if ext_schema = 'public' then
execute 'alter extension orioledb set schema extensions';
end if;
end if;
end $$;

-- migrate:down
@ -0,0 +1,31 @@
-- migrate:up

DO $$
BEGIN
IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pgsodium') THEN
CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text)
RETURNS void
LANGUAGE plpgsql
SECURITY DEFINER
SET search_path TO ''
AS $function$
BEGIN
EXECUTE format(
'GRANT SELECT ON pgsodium.key TO %s',
masked_role);

EXECUTE format(
'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s',
masked_role);

EXECUTE format(
'GRANT ALL ON %I TO %s',
view_name,
masked_role);
RETURN;
END
$function$;
END IF;
END $$;

-- migrate:down
@ -0,0 +1,64 @@
|
|||
-- migrate:up
|
||||
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
|
||||
RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
JOIN pg_extension AS ext
|
||||
ON ev.objid = ext.oid
|
||||
WHERE ext.extname = 'pg_net'
|
||||
)
|
||||
THEN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_functions_admin'
|
||||
)
|
||||
THEN
|
||||
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
|
||||
END IF;
|
||||
|
||||
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
|
||||
IF EXISTS (
|
||||
SELECT FROM pg_extension
|
||||
WHERE extname = 'pg_net'
|
||||
-- all versions in use on existing projects as of 2025-02-20
|
||||
-- version 0.12.0 onwards don't need these applied
|
||||
AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0')
|
||||
) THEN
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
|
||||
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pg_net')
|
||||
THEN
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER;
|
||||
|
||||
REVOKE EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
REVOKE EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
|
||||
GRANT ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC;
|
||||
GRANT ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
|
3 assets/migrations/setup/realtime.sql Normal file

@ -0,0 +1,3 @@
create schema if not exists _realtime;

alter schema _realtime owner to supabase_admin;
@ -19,10 +19,13 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
|
||||
discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
|
||||
endpointservice "github.com/envoyproxy/go-control-plane/envoy/service/endpoint/v3"
|
||||
|
@ -31,31 +34,53 @@ import (
|
|||
runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
|
||||
secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
|
||||
cachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/log"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/server/v3"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"google.golang.org/grpc"
|
||||
grpchealth "google.golang.org/grpc/health"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/reflection"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
mgr "sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"code.icb4dc0.de/prskr/supabase-operator/internal/certs"
|
||||
"code.icb4dc0.de/prskr/supabase-operator/internal/controlplane"
|
||||
"code.icb4dc0.de/prskr/supabase-operator/internal/health"
|
||||
)
|
||||
|
||||
//nolint:lll // flag declaration with struct tags is as long as it is
|
||||
type controlPlane struct {
|
||||
caCert tls.Certificate `kong:"-"`
|
||||
|
||||
ListenAddr string `name:"listen-address" default:":18000" help:"The address the control plane binds to."`
|
||||
Tls struct {
|
||||
CA struct {
|
||||
Cert FileContent `env:"CERT" name:"server-cert" required:"" help:"The path to the server certificate file."`
|
||||
Key FileContent `env:"KEY" name:"server-key" required:"" help:"The path to the server key file."`
|
||||
} `embed:"" prefix:"ca." envprefix:"CA_"`
|
||||
ServerSecretName string `name:"server-secret-name" help:"The name of the secret containing the server certificate and key." default:"control-plane-xds-tls"`
|
||||
} `embed:"" prefix:"tls." envprefix:"TLS_"`
|
||||
MetricsAddr string `name:"metrics-bind-address" default:"0" help:"The address the metrics endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service."`
|
||||
EnableLeaderElection bool `name:"leader-elect" default:"false" help:"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager."`
|
||||
ProbeAddr string `name:"health-probe-bind-address" default:":8081" help:"The address the probe endpoint binds to."`
|
||||
SecureMetrics bool `name:"metrics-secure" default:"true" help:"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead."`
|
||||
EnableHTTP2 bool `name:"enable-http2" default:"false" help:"If set, HTTP/2 will be enabled for the metrics and webhook servers"`
|
||||
ServiceName string `name:"service-name" env:"CONTROL_PLANE_SERVICE_NAME" default:"" required:"" help:"The name of the control plane service."`
|
||||
Namespace string `name:"namespace" env:"CONTROL_PLANE_NAMESPACE" default:"" required:"" help:"Namespace where the controller is running, ideally set via downward API"`
|
||||
}
|
||||
|
||||
func (cp controlPlane) Run(ctx context.Context) error {
|
||||
func (cp *controlPlane) Run(ctx context.Context, logger logr.Logger) error {
|
||||
var tlsOpts []func(*tls.Config)
|
||||
|
||||
// if the enable-http2 flag is false (the default), http/2 should be disabled
|
||||
|
@ -91,6 +116,11 @@ func (cp controlPlane) Run(ctx context.Context) error {
|
|||
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
|
||||
}
|
||||
|
||||
bootstrapClient, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create bootstrap client: %w", err)
|
||||
}
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Metrics: metricsServerOptions,
|
||||
|
@ -104,9 +134,15 @@ func (cp controlPlane) Run(ctx context.Context) error {
|
|||
return fmt.Errorf("unable to start control plane: %w", err)
|
||||
}
|
||||
|
||||
envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, nil)
|
||||
cacheLoggerInst := cacheLogger(logger.WithName("envoy-snapshot-cache"))
|
||||
envoySnapshotCache := cachev3.NewSnapshotCache(false, cachev3.IDHash{}, cacheLoggerInst)
|
||||
|
||||
envoySrv, err := cp.envoyServer(ctx, envoySnapshotCache)
|
||||
serverCert, err := cp.ensureControlPlaneTlsCert(ctx, bootstrapClient)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure control plane TLS cert: %w", err)
|
||||
}
|
||||
|
||||
envoySrv, err := cp.envoyServer(ctx, logger, envoySnapshotCache, serverCert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -123,6 +159,18 @@ func (cp controlPlane) Run(ctx context.Context) error {
|
|||
return fmt.Errorf("unable to create controller Core DB: %w", err)
|
||||
}
|
||||
|
||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
return fmt.Errorf("unable to set up health check: %w", err)
|
||||
}
|
||||
|
||||
if err := mgr.AddHealthzCheck("server-cert", health.CertValidCheck(serverCert)); err != nil {
|
||||
return fmt.Errorf("unable to set up health check: %w", err)
|
||||
}
|
||||
|
||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||
return fmt.Errorf("unable to set up ready check: %w", err)
|
||||
}
|
||||
|
||||
setupLog.Info("starting manager")
|
||||
if err := mgr.Start(ctx); err != nil {
|
||||
return fmt.Errorf("problem running manager: %w", err)
|
||||
|
@ -131,9 +179,20 @@ func (cp controlPlane) Run(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cp controlPlane) envoyServer(
|
||||
func (cp *controlPlane) AfterApply() (err error) {
|
||||
cp.caCert, err = tls.X509KeyPair(cp.Tls.CA.Cert, cp.Tls.CA.Key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse server certificate: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *controlPlane) envoyServer(
|
||||
ctx context.Context,
|
||||
logger logr.Logger,
|
||||
cache cachev3.SnapshotCache,
|
||||
serverCert tls.Certificate,
|
||||
) (runnable mgr.Runnable, err error) {
|
||||
const (
|
||||
grpcKeepaliveTime = 30 * time.Second
|
||||
|
@ -141,11 +200,8 @@ func (cp controlPlane) envoyServer(
|
|||
grpcKeepaliveMinTime = 30 * time.Second
|
||||
grpcMaxConcurrentStreams = 1000000
|
||||
)
|
||||
|
||||
var (
|
||||
logger = ctrl.Log.WithName("control-plane")
|
||||
srv = server.NewServer(ctx, cache, nil)
|
||||
)
|
||||
srv := server.NewServer(ctx, cache, xdsServerCallbacks(logger))
|
||||
logger = logger.WithName("control-plane")
|
||||
|
||||
// gRPC golang library sets a very small upper bound for the number gRPC/h2
|
||||
// streams over a single TCP connection. If a proxy multiplexes requests over
|
||||
|
@ -153,7 +209,13 @@ func (cp controlPlane) envoyServer(
|
|||
// availability problems. Keepalive timeouts based on connection_keepalive parameter
|
||||
// https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/examples#dynamic
|
||||
|
||||
tlsCfg, err := cp.tlsConfig(serverCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create TLS config: %w", err)
|
||||
}
|
||||
|
||||
grpcOptions := append(make([]grpc.ServerOption, 0, 4),
|
||||
grpc.Creds(credentials.NewTLS(tlsCfg)),
|
||||
grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams),
|
||||
grpc.KeepaliveParams(keepalive.ServerParameters{
|
||||
Time: grpcKeepaliveTime,
|
||||
|
@ -195,3 +257,169 @@ func (cp controlPlane) envoyServer(
|
|||
return grpcServer.Serve(lis)
|
||||
}), nil
|
||||
}

func (cp *controlPlane) ensureControlPlaneTlsCert(
	ctx context.Context,
	k8sClient client.Client,
) (tls.Certificate, error) {
	var (
		controlPlaneServerCert = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      cp.Tls.ServerSecretName,
				Namespace: cp.Namespace,
			},
		}
		serverCert tls.Certificate
	)

	_, err := controllerutil.CreateOrUpdate(ctx, k8sClient, controlPlaneServerCert, func() (err error) {
		controlPlaneServerCert.Type = corev1.SecretTypeTLS

		if controlPlaneServerCert.Data == nil {
			controlPlaneServerCert.Data = make(map[string][]byte, 3)
		}

		var (
			cert       = controlPlaneServerCert.Data[corev1.TLSCertKey]
			privateKey = controlPlaneServerCert.Data[corev1.TLSPrivateKeyKey]
		)

		var requireRenewal bool
		if cert != nil && privateKey != nil {
			if serverCert, err = tls.X509KeyPair(cert, privateKey); err != nil {
				return fmt.Errorf("failed to parse server certificate: %w", err)
			}

			renewGracePeriod := time.Duration(float64(serverCert.Leaf.NotAfter.Sub(serverCert.Leaf.NotBefore)) * 0.1)
			if serverCert.Leaf.NotAfter.Before(time.Now().Add(-renewGracePeriod)) {
				requireRenewal = true
			}
		} else {
			requireRenewal = true
		}

		if requireRenewal {
			dnsNames := []string{
				strings.Join([]string{cp.ServiceName, cp.Namespace, "svc"}, "."),
				strings.Join([]string{cp.ServiceName, cp.Namespace, "svc", "cluster", "local"}, "."),
			}
			if certResult, err := certs.ServerCert("supabase-control-plane", dnsNames, cp.caCert); err != nil {
				return fmt.Errorf("failed to generate server certificate: %w", err)
			} else {
				serverCert = certResult.ServerCert
				controlPlaneServerCert.Data[corev1.TLSCertKey] = certResult.PublicKey
				controlPlaneServerCert.Data[corev1.TLSPrivateKeyKey] = certResult.PrivateKey
			}
		}

		return nil
	})
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("failed to create or update control plane server certificate: %w", err)
	}

	return serverCert, nil
}

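For comparison, a compact sketch of a typical "renew inside the final ~10% of validity" check, stated on its own (sketch only, not taken from the diff; it assumes the parsed certificate's Leaf field is populated):

```go
import (
	"crypto/x509"
	"time"
)

// needsRenewal is a sketch only (not part of the diff): it reports whether less
// than roughly 10% of the certificate's validity period remains at "now".
func needsRenewal(leaf *x509.Certificate, now time.Time) bool {
	validity := leaf.NotAfter.Sub(leaf.NotBefore)
	grace := time.Duration(float64(validity) * 0.1)
	return now.After(leaf.NotAfter.Add(-grace))
}
```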
func (cp *controlPlane) tlsConfig(serverCert tls.Certificate) (*tls.Config, error) {
	tlsCfg := &tls.Config{
		RootCAs:    x509.NewCertPool(),
		ClientCAs:  x509.NewCertPool(),
		ClientAuth: tls.RequireAndVerifyClientCert,
	}

	tlsCfg.Certificates = append(tlsCfg.Certificates, serverCert)
	if !tlsCfg.RootCAs.AppendCertsFromPEM(cp.Tls.CA.Cert) {
		return nil, fmt.Errorf("failed to parse CA certificate")
	}

	if !tlsCfg.ClientCAs.AppendCertsFromPEM(cp.Tls.CA.Cert) {
		return nil, fmt.Errorf("failed to parse client CA certificate")
	}

	return tlsCfg, nil
}

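Because the xDS listener now requires and verifies client certificates, anything that connects to it needs a certificate signed by the same CA. A minimal client-side sketch; the file paths and the target address are assumptions for illustration, not values defined by this change set:

```go
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialControlPlane is a sketch only: mount paths and the in-cluster address are
// assumed; the control plane listens on :18000 by default.
func dialControlPlane() (*grpc.ClientConn, error) {
	caPEM, err := os.ReadFile("/etc/envoy/certs/ca.crt") // assumed mount path
	if err != nil {
		return nil, err
	}

	clientCert, err := tls.LoadX509KeyPair("/etc/envoy/certs/tls.crt", "/etc/envoy/certs/tls.key") // assumed
	if err != nil {
		return nil, err
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("failed to parse CA certificate")
	}

	creds := credentials.NewTLS(&tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	})

	return grpc.NewClient("control-plane.supabase-system.svc:18000", grpc.WithTransportCredentials(creds))
}
```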
func xdsServerCallbacks(logger logr.Logger) server.Callbacks {
|
||||
return server.CallbackFuncs{
|
||||
StreamOpenFunc: func(ctx context.Context, streamId int64, nodeId string) error {
|
||||
logger.Info("Stream opened", "stream-id", streamId, "node-id", nodeId)
|
||||
return nil
|
||||
},
|
||||
StreamClosedFunc: func(streamId int64, node *corev3.Node) {
|
||||
logger.Info("Stream closed", "stream-id", streamId,
|
||||
"node.id", node.Id,
|
||||
"node.cluster", node.Cluster,
|
||||
)
|
||||
},
|
||||
StreamRequestFunc: func(streamId int64, request *discoverygrpc.DiscoveryRequest) error {
|
||||
logger.Info("Stream request",
|
||||
"stream-id", streamId,
|
||||
"request.node.id", request.Node.Id,
|
||||
"request.node.cluster", request.Node.Cluster,
|
||||
"request.version", request.VersionInfo,
|
||||
"request.error", request.ErrorDetail,
|
||||
)
|
||||
return nil
|
||||
},
|
||||
StreamResponseFunc: func(
|
||||
ctx context.Context,
|
||||
streamId int64,
|
||||
request *discoverygrpc.DiscoveryRequest,
|
||||
response *discoverygrpc.DiscoveryResponse,
|
||||
) {
|
||||
logger.Info("Stream delta response",
|
||||
"stream-id", streamId,
|
||||
"request.node.id", request.Node.Id,
|
||||
"request.node.cluster", request.Node.Cluster,
|
||||
)
|
||||
},
|
||||
DeltaStreamOpenFunc: func(ctx context.Context, streamId int64, nodeId string) error {
|
||||
logger.Info("Delta stream opened", "stream-id", streamId, "node-id", nodeId)
|
||||
return nil
|
||||
},
|
||||
|
||||
DeltaStreamClosedFunc: func(streamId int64, node *corev3.Node) {
|
||||
logger.Info("Delta stream closed",
|
||||
"stream-id", streamId,
|
||||
"node.id", node.Id,
|
||||
"node.cluster", node.Cluster,
|
||||
)
|
||||
},
|
||||
StreamDeltaRequestFunc: func(i int64, request *discoverygrpc.DeltaDiscoveryRequest) error {
|
||||
logger.Info("Stream delta request",
|
||||
"stream-id", i,
|
||||
"request.node.id", request.Node.Id,
|
||||
"request.node.cluster", request.Node.Cluster,
|
||||
"request.error", request.ErrorDetail,
|
||||
)
|
||||
return nil
|
||||
},
|
||||
StreamDeltaResponseFunc: func(
|
||||
i int64,
|
||||
request *discoverygrpc.DeltaDiscoveryRequest,
|
||||
response *discoverygrpc.DeltaDiscoveryResponse,
|
||||
) {
|
||||
logger.Info("Stream delta response",
|
||||
"stream-id", i,
|
||||
"request.node", request.Node,
|
||||
"response.resources", response.Resources,
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func cacheLogger(logger logr.Logger) log.Logger {
|
||||
wrapper := func(delegate func(msg string, keysAndValues ...any)) func(string, ...any) {
|
||||
return func(s string, i ...any) {
|
||||
delegate(fmt.Sprintf(s, i...))
|
||||
}
|
||||
}
|
||||
|
||||
return log.LoggerFuncs{
|
||||
DebugFunc: nil, // enable for debug info
|
||||
InfoFunc: wrapper(logger.Info),
|
||||
WarnFunc: wrapper(logger.Info),
|
||||
ErrorFunc: wrapper(logger.Info),
|
||||
}
|
||||
}
|
||||
|
|
41 cmd/flags.go Normal file

@ -0,0 +1,41 @@
/*
Copyright 2025 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kong"
)

var _ kong.MapperValue = (*FileContent)(nil)

type FileContent []byte

func (f *FileContent) Decode(ctx *kong.DecodeContext) (err error) {
	var filePath string
	if err := ctx.Scan.PopValueInto("file-content", &filePath); err != nil {
		return err
	}

	if *f, err = os.ReadFile(filePath); err != nil {
		return fmt.Errorf("failed to read file: %w", err)
	}

	return nil
}
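FileContent lets a flag accept a file path on the command line while the Go field ends up holding the file's bytes. A usage sketch; the surrounding CLI struct and flag name are assumptions for illustration:

```go
import "fmt"

// Sketch only: serveCmd and its flag name are hypothetical; FileContent is the
// mapper type introduced above (same package).
type serveCmd struct {
	// Invoked as: app serve --tls-cert=/path/to/tls.crt
	// By the time Run is called, TLSCert already holds the file's PEM bytes.
	TLSCert FileContent `name:"tls-cert" help:"Path to a PEM encoded certificate."`
}

func (c serveCmd) Run() error {
	fmt.Printf("loaded %d bytes of certificate data\n", len(c.TLSCert))
	return nil
}
```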
@ -40,6 +40,10 @@ type manager struct {
	SecureMetrics        bool   `name:"metrics-secure" default:"true" help:"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead."`
	EnableHTTP2          bool   `name:"enable-http2" default:"false" help:"If set, HTTP/2 will be enabled for the metrics and webhook servers"`
	Namespace            string `name:"controller-namespace" env:"CONTROLLER_NAMESPACE" default:"" help:"Namespace where the controller is running, ideally set via downward API"`
	Tls struct {
		CACert FileContent `env:"CA_CERT" name:"ca-cert" required:"" help:"The path to the CA certificate file."`
		CAKey  FileContent `env:"CA_KEY" name:"ca-key" required:"" help:"The path to the CA key file."`
	} `embed:"" prefix:"tls." envprefix:"TLS_"`
}

func (m manager) Run(ctx context.Context) error {

@ -68,6 +72,11 @@ func (m manager) Run(ctx context.Context) error {
		TLSOpts: tlsOpts,
	})

	caCert, err := tls.X509KeyPair(m.Tls.CACert, m.Tls.CAKey)
	if err != nil {
		return fmt.Errorf("unable to load CA cert: %w", err)
	}

	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
	// More info:
	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server

@ -145,6 +154,7 @@ func (m manager) Run(ctx context.Context) error {
	if err = (&controller.APIGatewayReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		CACert: caCert,
	}).SetupWithManager(ctx, mgr); err != nil {
		return fmt.Errorf("unable to create controller APIGateway: %w", err)
	}

33 config/control-plane/cert-ca.yaml Normal file

@ -0,0 +1,33 @@
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: cp-selfsigned-issuer
  namespace: system
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  labels:
    app.kubernetes.io/name: certificate
    app.kubernetes.io/instance: serving-cert
    app.kubernetes.io/component: certificate
    app.kubernetes.io/created-by: supabase-operator
    app.kubernetes.io/part-of: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: cp-ca-cert
  namespace: supabase-system
spec:
  commonName: control-plane-ca
  privateKey:
    algorithm: ECDSA
  issuerRef:
    kind: Issuer
    name: selfsigned-issuer
  secretName: control-plane-ca-cert-tls
  isCA: true
@ -18,40 +18,26 @@ spec:
|
|||
labels:
|
||||
app.kubernetes.io/name: control-plane
|
||||
spec:
|
||||
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
|
||||
# according to the platforms which are supported by your solution.
|
||||
# It is considered best practice to support multiple architectures. You can
|
||||
# build your manager image using the makefile target docker-buildx.
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: kubernetes.io/arch
|
||||
# operator: In
|
||||
# values:
|
||||
# - amd64
|
||||
# - arm64
|
||||
# - ppc64le
|
||||
# - s390x
|
||||
# - key: kubernetes.io/os
|
||||
# operator: In
|
||||
# values:
|
||||
# - linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
# TODO(user): For common cases that do not require escalating privileges
|
||||
# it is recommended to ensure that all your Pods/Containers are restrictive.
|
||||
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
|
||||
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
|
||||
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- args:
|
||||
- control-plane
|
||||
image: supabase-operator:latest
|
||||
image: controller:latest
|
||||
name: control-plane
|
||||
env:
|
||||
- name: CONTROL_PLANE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: CONTROL_PLANE_SERVICE_NAME
|
||||
value: control-plane
|
||||
- name: TLS_CA_CERT
|
||||
value: /etc/supabase/control-plane/certs/tls.crt
|
||||
- name: TLS_CA_KEY
|
||||
value: /etc/supabase/control-plane/certs/tls.key
|
||||
ports:
|
||||
- containerPort: 18000
|
||||
name: grpc
|
||||
|
@ -62,17 +48,17 @@ spec:
|
|||
drop:
|
||||
- "ALL"
|
||||
livenessProbe:
|
||||
grpc:
|
||||
port: 18000
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
readinessProbe:
|
||||
grpc:
|
||||
port: 18000
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 8081
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
# TODO(user): Configure the resources accordingly based on the project requirements.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
resources:
|
||||
limits:
|
||||
cpu: 150m
|
||||
|
@ -80,5 +66,12 @@ spec:
|
|||
requests:
|
||||
cpu: 50m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: tls-certs
|
||||
mountPath: /etc/supabase/control-plane/certs
|
||||
volumes:
|
||||
- name: tls-certs
|
||||
secret:
|
||||
secretName: control-plane-ca-cert-tls
|
||||
serviceAccountName: control-plane
|
||||
terminationGracePeriodSeconds: 10
|
||||
|
|
|
@ -2,5 +2,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- cert-ca.yaml
- control-plane.yaml
- service.yaml

configurations:
- kustomizeconfig.yaml

8 config/control-plane/kustomizeconfig.yaml Normal file

@ -0,0 +1,8 @@
# This configuration is for teaching kustomize how to update name ref substitution
nameReference:
- kind: Issuer
  group: cert-manager.io
  fieldSpecs:
  - kind: Certificate
    group: cert-manager.io
    path: spec/issuerRef/name
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.17.1
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
name: apigateways.supabase.k8s.icb4dc0.de
|
||||
spec:
|
||||
group: supabase.k8s.icb4dc0.de
|
||||
|
@ -15,7 +15,7 @@ spec:
|
|||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .status.envoy.configVersion
|
||||
- jsonPath: .status.envoy.resourceHash
|
||||
name: EnvoyConfigVersion
|
||||
type: string
|
||||
name: v1alpha1
|
||||
|
@ -73,6 +73,36 @@ spec:
|
|||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
tls:
|
||||
description: TLS - enable and configure TLS for the API endpoint
|
||||
properties:
|
||||
cert:
|
||||
properties:
|
||||
caCertKey:
|
||||
default: ca.crt
|
||||
description: CaCertKey - key in the secret that contains
|
||||
the CA certificate
|
||||
type: string
|
||||
secretName:
|
||||
type: string
|
||||
serverCertKey:
|
||||
default: tls.crt
|
||||
description: ServerCertKey - key in the secret that contains
|
||||
the server certificate
|
||||
type: string
|
||||
serverKeyKey:
|
||||
default: tls.key
|
||||
description: ServerKeyKey - key in the secret that contains
|
||||
the server private key
|
||||
type: string
|
||||
required:
|
||||
- secretName
|
||||
- serverCertKey
|
||||
- serverKeyKey
|
||||
type: object
|
||||
required:
|
||||
- cert
|
||||
type: object
|
||||
required:
|
||||
- jwks
|
||||
type: object
|
||||
|
@ -85,6 +115,125 @@ spec:
|
|||
description: |-
|
||||
DashboardEndpoint - Configure the endpoint for the Supabase dashboard (studio)
|
||||
this includes optional authentication (basic or Oauth2) for the dashboard
|
||||
properties:
|
||||
auth:
|
||||
description: Auth - configure authentication for the dashboard
|
||||
endpoint
|
||||
properties:
|
||||
basic:
|
||||
description: |-
|
||||
Basic - HTTP basic auth configuration, this should only be used in exceptions
|
||||
e.g. during evaluations or for local development
|
||||
only used if no other authentication is configured
|
||||
properties:
|
||||
plaintextUsersSecretRef:
|
||||
description: |-
|
||||
PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form
|
||||
if not empty, credentials will be merged with inline users
|
||||
type: string
|
||||
usersInline:
|
||||
description: UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html)
|
||||
items:
|
||||
pattern: ^[\w_.]+:\{SHA\}[A-z0-9]+=*$
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
oauth2:
|
||||
description: |-
|
||||
OAuth2 - configure oauth2 authentication for the dashhboard listener
|
||||
if configured, will be preferred over Basic authentication configuration
|
||||
effectively disabling basic auth
|
||||
properties:
|
||||
authorizationEndpoint:
|
||||
description: AuthorizationEndpoint - endpoint where the
|
||||
user will be redirected to authenticate
|
||||
type: string
|
||||
clientId:
|
||||
description: ClientID - client ID to authenticate with
|
||||
the OAuth2 provider
|
||||
type: string
|
||||
clientSecretRef:
|
||||
description: ClientSecretRef - reference to the secret
|
||||
that contains the client secret
|
||||
properties:
|
||||
key:
|
||||
description: The key of the secret to select from. Must
|
||||
be a valid secret key.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key
|
||||
must be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
openIdIssuer:
|
||||
description: |-
|
||||
OpenIDIssuer - if set the defaulter will fetch the discovery document and fill
|
||||
TokenEndpoint and AuthorizationEndpoint based on the discovery document
|
||||
type: string
|
||||
resources:
|
||||
description: Resources - resources to request from the
|
||||
OAuth2 provider (e.g. "user", "email", ...) - optional
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
scopes:
|
||||
description: Scopes - scopes to request from the OAuth2
|
||||
provider (e.g. "openid", "profile", ...) - optional
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
tokenEndpoint:
|
||||
description: TokenEndpoint - endpoint where Envoy will
|
||||
retrieve the OAuth2 access and identity token from
|
||||
type: string
|
||||
required:
|
||||
- clientId
|
||||
- clientSecretRef
|
||||
type: object
|
||||
type: object
|
||||
tls:
|
||||
description: TLS - enable and configure TLS for the Dashboard
|
||||
endpoint
|
||||
properties:
|
||||
cert:
|
||||
properties:
|
||||
caCertKey:
|
||||
default: ca.crt
|
||||
description: CaCertKey - key in the secret that contains
|
||||
the CA certificate
|
||||
type: string
|
||||
secretName:
|
||||
type: string
|
||||
serverCertKey:
|
||||
default: tls.crt
|
||||
description: ServerCertKey - key in the secret that contains
|
||||
the server certificate
|
||||
type: string
|
||||
serverKeyKey:
|
||||
default: tls.key
|
||||
description: ServerKeyKey - key in the secret that contains
|
||||
the server private key
|
||||
type: string
|
||||
required:
|
||||
- secretName
|
||||
- serverCertKey
|
||||
- serverKeyKey
|
||||
type: object
|
||||
required:
|
||||
- cert
|
||||
type: object
|
||||
type: object
|
||||
envoy:
|
||||
description: Envoy - configure the envoy instance and most importantly
|
||||
|
@ -108,13 +257,45 @@ spec:
|
|||
- host
|
||||
- port
|
||||
type: object
|
||||
debugging:
|
||||
properties:
|
||||
componentLogLevels:
|
||||
items:
|
||||
properties:
|
||||
component:
|
||||
description: |-
|
||||
Component - the component to set the log level for
|
||||
the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36)
|
||||
type: string
|
||||
level:
|
||||
description: Level - the log level to set for the component
|
||||
enum:
|
||||
- trace
|
||||
- debug
|
||||
- info
|
||||
- warning
|
||||
- error
|
||||
- critical
|
||||
- "off"
|
||||
type: string
|
||||
required:
|
||||
- component
|
||||
- level
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
disableIPv6:
|
||||
description: |-
|
||||
DisableIPv6 - disable IPv6 for the Envoy instance
|
||||
this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
|
||||
type: boolean
|
||||
nodeName:
|
||||
description: |-
|
||||
NodeName - identifies the Envoy cluster within the current namespace
|
||||
if not set, the name of the APIGateway resource will be used
|
||||
The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit.
|
||||
type: string
|
||||
workloadTemplate:
|
||||
workloadSpec:
|
||||
description: WorkloadTemplate - customize the Envoy deployment
|
||||
properties:
|
||||
additionalLabels:
|
||||
|
@ -1921,248 +2102,9 @@ spec:
|
|||
- name
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
format: int32
|
||||
type: integer
|
||||
securityContext:
|
||||
description: |-
|
||||
PodSecurityContext holds pod-level security attributes and common container settings.
|
||||
Some fields are also present in container.securityContext. Field values of
|
||||
container.securityContext take precedence over field values of PodSecurityContext.
|
||||
properties:
|
||||
appArmorProfile:
|
||||
description: |-
|
||||
appArmorProfile is the AppArmor options to use by the containers in this pod.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
properties:
|
||||
localhostProfile:
|
||||
description: |-
|
||||
localhostProfile indicates a profile loaded on the node that should be used.
|
||||
The profile must be preconfigured on the node to work.
|
||||
Must match the loaded name of the profile.
|
||||
Must be set if and only if type is "Localhost".
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
type indicates which kind of AppArmor profile will be applied.
|
||||
Valid options are:
|
||||
Localhost - a profile pre-loaded on the node.
|
||||
RuntimeDefault - the container runtime's default profile.
|
||||
Unconfined - no AppArmor enforcement.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
fsGroup:
|
||||
description: |-
|
||||
A special supplemental group that applies to all containers in a pod.
|
||||
Some volume types allow the Kubelet to change the ownership of that volume
|
||||
to be owned by the pod:
|
||||
|
||||
1. The owning GID will be the FSGroup
|
||||
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
|
||||
3. The permission bits are OR'd with rw-rw----
|
||||
|
||||
If unset, the Kubelet will not modify the ownership and permissions of any volume.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
format: int64
|
||||
type: integer
|
||||
fsGroupChangePolicy:
|
||||
description: |-
|
||||
fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
|
||||
before being exposed inside Pod. This field will only apply to
|
||||
volume types which support fsGroup based ownership(and permissions).
|
||||
It will have no effect on ephemeral volume types such as: secret, configmaps
|
||||
and emptydir.
|
||||
Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
type: string
|
||||
runAsGroup:
|
||||
description: |-
|
||||
The GID to run the entrypoint of the container process.
|
||||
Uses runtime default if unset.
|
||||
May also be set in SecurityContext. If set in both SecurityContext and
|
||||
PodSecurityContext, the value specified in SecurityContext takes precedence
|
||||
for that container.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
format: int64
|
||||
type: integer
|
||||
runAsNonRoot:
|
||||
description: |-
|
||||
Indicates that the container must run as a non-root user.
|
||||
If true, the Kubelet will validate the image at runtime to ensure that it
|
||||
does not run as UID 0 (root) and fail to start the container if it does.
|
||||
If unset or false, no such validation will be performed.
|
||||
May also be set in SecurityContext. If set in both SecurityContext and
|
||||
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
type: boolean
|
||||
runAsUser:
|
||||
description: |-
|
||||
The UID to run the entrypoint of the container process.
|
||||
Defaults to user specified in image metadata if unspecified.
|
||||
May also be set in SecurityContext. If set in both SecurityContext and
|
||||
PodSecurityContext, the value specified in SecurityContext takes precedence
|
||||
for that container.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
format: int64
|
||||
type: integer
|
||||
seLinuxChangePolicy:
|
||||
description: |-
|
||||
seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
|
||||
It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
|
||||
Valid values are "MountOption" and "Recursive".
|
||||
|
||||
"Recursive" means relabeling of all files on all Pod volumes by the container runtime.
|
||||
This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
|
||||
|
||||
"MountOption" mounts all eligible Pod volumes with `-o context` mount option.
|
||||
This requires all Pods that share the same volume to use the same SELinux label.
|
||||
It is not possible to share the same volume among privileged and unprivileged Pods.
|
||||
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
|
||||
whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
|
||||
CSIDriver instance. Other volumes are always re-labelled recursively.
|
||||
"MountOption" value is allowed only when SELinuxMount feature gate is enabled.
|
||||
|
||||
If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
|
||||
If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
|
||||
and "Recursive" for all other volumes.
|
||||
|
||||
This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
|
||||
|
||||
All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
type: string
|
||||
seLinuxOptions:
|
||||
description: |-
|
||||
The SELinux context to be applied to all containers.
|
||||
If unspecified, the container runtime will allocate a random SELinux context for each
|
||||
container. May also be set in SecurityContext. If set in
|
||||
both SecurityContext and PodSecurityContext, the value specified in SecurityContext
|
||||
takes precedence for that container.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
properties:
|
||||
level:
|
||||
description: Level is SELinux level label that applies
|
||||
to the container.
|
||||
type: string
|
||||
role:
|
||||
description: Role is a SELinux role label that applies
|
||||
to the container.
|
||||
type: string
|
||||
type:
|
||||
description: Type is a SELinux type label that applies
|
||||
to the container.
|
||||
type: string
|
||||
user:
|
||||
description: User is a SELinux user label that applies
|
||||
to the container.
|
||||
type: string
|
||||
type: object
|
||||
seccompProfile:
|
||||
description: |-
|
||||
The seccomp options to use by the containers in this pod.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
properties:
|
||||
localhostProfile:
|
||||
description: |-
|
||||
localhostProfile indicates a profile defined in a file on the node should be used.
|
||||
The profile must be preconfigured on the node to work.
|
||||
Must be a descending path, relative to the kubelet's configured seccomp profile location.
|
||||
Must be set if type is "Localhost". Must NOT be set for any other type.
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
type indicates which kind of seccomp profile will be applied.
|
||||
Valid options are:
|
||||
|
||||
Localhost - a profile defined in a file on the node should be used.
|
||||
RuntimeDefault - the container runtime default profile should be used.
|
||||
Unconfined - no profile should be applied.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
supplementalGroups:
|
||||
description: |-
|
||||
A list of groups applied to the first process run in each container, in
|
||||
addition to the container's primary GID and fsGroup (if specified). If
|
||||
the SupplementalGroupsPolicy feature is enabled, the
|
||||
supplementalGroupsPolicy field determines whether these are in addition
|
||||
to or instead of any group memberships defined in the container image.
|
||||
If unspecified, no additional groups are added, though group memberships
|
||||
defined in the container image may still be used, depending on the
|
||||
supplementalGroupsPolicy field.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
format: int64
|
||||
type: integer
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
supplementalGroupsPolicy:
|
||||
description: |-
|
||||
Defines how supplemental groups of the first container processes are calculated.
|
||||
Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
|
||||
(Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
|
||||
and the container runtime must implement support for this feature.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
type: string
|
||||
sysctls:
|
||||
description: |-
|
||||
Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
|
||||
sysctls (by the container runtime) might fail to launch.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
items:
|
||||
description: Sysctl defines a kernel parameter to be
|
||||
set
|
||||
properties:
|
||||
name:
|
||||
description: Name of a property to set
|
||||
type: string
|
||||
value:
|
||||
description: Value of a property to set
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
- value
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
windowsOptions:
|
||||
description: |-
|
||||
The Windows specific settings applied to all containers.
|
||||
If unspecified, the options within a container's SecurityContext will be used.
|
||||
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
Note that this field cannot be set when spec.os.name is linux.
|
||||
properties:
|
||||
gmsaCredentialSpec:
|
||||
description: |-
|
||||
GMSACredentialSpec is where the GMSA admission webhook
|
||||
(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
|
||||
GMSA credential spec named by the GMSACredentialSpecName field.
|
||||
type: string
|
||||
gmsaCredentialSpecName:
|
||||
description: GMSACredentialSpecName is the name of
|
||||
the GMSA credential spec to use.
|
||||
type: string
|
||||
hostProcess:
|
||||
description: |-
|
||||
HostProcess determines if a container should be run as a 'Host Process' container.
|
||||
All of a Pod's containers must have the same effective HostProcess value
|
||||
(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
|
||||
In addition, if HostProcess is true then HostNetwork must also be set to true.
|
||||
type: boolean
|
||||
runAsUserName:
|
||||
description: |-
|
||||
The UserName in Windows to run the entrypoint of the container process.
|
||||
Defaults to the user specified in image metadata if unspecified.
|
||||
May also be set in PodSecurityContext. If set in both SecurityContext and
|
||||
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
workload:
|
||||
description: Workload - customize the container template of
|
||||
the workload
|
||||
container:
|
||||
description: ContainerSpec - customize the container template
|
||||
of the workload
|
||||
properties:
|
||||
additionalEnv:
|
||||
items:
|
||||
|
@ -2627,6 +2569,245 @@ spec:
|
|||
type: object
|
||||
type: array
|
||||
type: object
|
||||
replicas:
|
||||
format: int32
|
||||
type: integer
|
||||
securityContext:
|
||||
description: |-
|
||||
PodSecurityContext holds pod-level security attributes and common container settings.
|
||||
Some fields are also present in container.securityContext. Field values of
|
||||
container.securityContext take precedence over field values of PodSecurityContext.
|
||||
properties:
|
||||
appArmorProfile:
|
||||
description: |-
|
||||
appArmorProfile is the AppArmor options to use by the containers in this pod.
|
||||
Note that this field cannot be set when spec.os.name is windows.
|
||||
properties:
|
||||
localhostProfile:
|
||||
description: |-
|
||||
localhostProfile indicates a profile loaded on the node that should be used.
|
||||
The profile must be preconfigured on the node to work.
|
||||
Must match the loaded name of the profile.
|
||||
Must be set if and only if type is "Localhost".
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
type indicates which kind of AppArmor profile will be applied.
|
||||
Valid options are:
|
||||
Localhost - a profile pre-loaded on the node.
|
||||
RuntimeDefault - the container runtime's default profile.
|
||||
Unconfined - no AppArmor enforcement.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
fsGroup:
|
||||
description: |-
|
||||
A special supplemental group that applies to all containers in a pod.
|
||||
Some volume types allow the Kubelet to change the ownership of that volume
|
||||
to be owned by the pod:
|
||||
|
||||
1. The owning GID will be the FSGroup
|
||||
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
|
||||
3. The permission bits are OR'd with rw-rw----
|
||||
|
||||
        If unset, the Kubelet will not modify the ownership and permissions of any volume.
        Note that this field cannot be set when spec.os.name is windows.
      format: int64
      type: integer
    fsGroupChangePolicy:
      description: |-
        fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir.
        Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
        Note that this field cannot be set when spec.os.name is windows.
      type: string
    runAsGroup:
      description: |-
        The GID to run the entrypoint of the container process. Uses runtime default if unset.
        May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
        Note that this field cannot be set when spec.os.name is windows.
      format: int64
      type: integer
    runAsNonRoot:
      description: |-
        Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed.
        May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
      type: boolean
    runAsUser:
      description: |-
        The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified.
        May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
        Note that this field cannot be set when spec.os.name is windows.
      format: int64
      type: integer
    seLinuxChangePolicy:
      description: |-
        seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
        It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
        Valid values are "MountOption" and "Recursive".

        "Recursive" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.

        "MountOption" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled.

        If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes and "Recursive" for all other volumes.

        This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.

        All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
        Note that this field cannot be set when spec.os.name is windows.
      type: string
    seLinuxOptions:
      description: |-
        The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
        Note that this field cannot be set when spec.os.name is windows.
      properties:
        level:
          description: Level is SELinux level label that applies to the container.
          type: string
        role:
          description: Role is a SELinux role label that applies to the container.
          type: string
        type:
          description: Type is a SELinux type label that applies to the container.
          type: string
        user:
          description: User is a SELinux user label that applies to the container.
          type: string
      type: object
    seccompProfile:
      description: |-
        The seccomp options to use by the containers in this pod.
        Note that this field cannot be set when spec.os.name is windows.
      properties:
        localhostProfile:
          description: |-
            localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.
          type: string
        type:
          description: |-
            type indicates which kind of seccomp profile will be applied. Valid options are:

            Localhost - a profile defined in a file on the node should be used.
            RuntimeDefault - the container runtime default profile should be used.
            Unconfined - no profile should be applied.
          type: string
      required:
        - type
      type: object
    supplementalGroups:
      description: |-
        A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field.
        Note that this field cannot be set when spec.os.name is windows.
      items:
        format: int64
        type: integer
      type: array
      x-kubernetes-list-type: atomic
    supplementalGroupsPolicy:
      description: |-
        Defines how supplemental groups of the first container processes are calculated. Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
        (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature.
        Note that this field cannot be set when spec.os.name is windows.
      type: string
    sysctls:
      description: |-
        Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.
        Note that this field cannot be set when spec.os.name is windows.
      items:
        description: Sysctl defines a kernel parameter to be set
        properties:
          name:
            description: Name of a property to set
            type: string
          value:
            description: Value of a property to set
            type: string
        required:
          - name
          - value
        type: object
      type: array
      x-kubernetes-list-type: atomic
    windowsOptions:
      description: |-
        The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
        Note that this field cannot be set when spec.os.name is linux.
      properties:
        gmsaCredentialSpec:
          description: |-
            GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
          type: string
        gmsaCredentialSpecName:
          description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
          type: string
        hostProcess:
          description: |-
            HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
          type: boolean
        runAsUserName:
          description: |-
            The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
          type: string
      type: object
  type: object
type: object
required:
  - controlPlane

@ -2695,8 +2876,6 @@ spec:
properties:
  envoy:
    properties:
      configVersion:
        type: string
      resourceHash:
        format: byte
        type: string

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
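For orientation only: the PodSecurityContext fields documented in the CRD excerpt above surface through the operator's `workloadSpec.securityContext` (see the WorkloadSpec reference later in this change set). The snippet below is a minimal sketch with assumed values, not operator defaults.

```yaml
spec:
  envoy:
    workloadSpec:
      securityContext:
        runAsNonRoot: true            # kubelet refuses to start containers running as UID 0
        runAsUser: 1000               # assumed UID; pick one matching the image in use
        fsGroup: 1000                 # assumed GID applied to supporting volumes
        fsGroupChangePolicy: OnRootMismatch
        seccompProfile:
          type: RuntimeDefault        # use the container runtime's default seccomp profile
```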
|
@ -23,7 +23,6 @@ resources:
- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics.

@ -44,19 +43,31 @@ patches:
# crd/kustomization.yaml
- path: manager_webhook_patch.yaml

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
# Uncomment the following replacements to add the cert-manager CA injection annotations
replacements:
  - source: # Uncomment the following block if you have any webhook
  - source:
      kind: Service
      version: v1
      name: control-plane
      fieldPath: .metadata.name
    targets:
      - select:
          kind: Deployment
          group: apps
          version: v1
          name: control-plane
        fieldPaths:
          - .spec.template.spec.containers.*.env.[name=CONTROL_PLANE_SERVICE_NAME].value
  - source:
      kind: Service
      version: v1
      name: webhook-service
      fieldPath: .metadata.name # Name of the service
      fieldPath: .metadata.name
    targets:
      - select:
          kind: Certificate
          group: cert-manager.io
          version: v1
          name: serving-cert
        fieldPaths:
          - .spec.dnsNames.0
          - .spec.dnsNames.1

@ -74,6 +85,7 @@ replacements:
        kind: Certificate
        group: cert-manager.io
        version: v1
        name: serving-cert
      fieldPaths:
        - .spec.dnsNames.0
        - .spec.dnsNames.1
1 config/dev/.gitignore vendored Normal file

@ -0,0 +1 @@
studio-credentials-secret.yaml
55 config/dev/apigateway.yaml Normal file

@ -0,0 +1,55 @@
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: APIGateway
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: gateway-sample
  namespace: supabase-demo
spec:
  envoy:
    disableIPv6: true
    workloadSpec:
      replicas: 2
  apiEndpoint:
    jwks:
      name: core-sample-jwt
      key: jwks.json
  dashboardEndpoint:
    tls:
      cert:
        secretName: dashboard-tls-cert
    auth:
      oauth2:
        openIdIssuer: "https://login.microsoftonline.com/f4e80111-1571-477a-b56d-c5fe517676b7/"
        clientId: 3528016b-f6e3-49be-8fb3-f9a9a2ab6c3f
        scopes:
          - openid
          - profile
          - email
        clientSecretRef:
          name: studio-sample-oauth2
          key: clientSecret
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  labels:
    app.kubernetes.io/name: certificate
    app.kubernetes.io/instance: dashboard-tls
    app.kubernetes.io/component: certificate
    app.kubernetes.io/created-by: supabase-operator
    app.kubernetes.io/part-of: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: dashboard-tls
  namespace: supabase-demo
spec:
  dnsNames:
    - gateway-sample-envoy.supabase-demo.svc
    - gateway-sample-envoy.supabase-demo.svc.cluster.local
    - localhost:3000
  issuerRef:
    kind: ClusterIssuer
    name: cluster-pki
  secretName: dashboard-tls-cert
36 config/dev/ca.yaml Normal file

@ -0,0 +1,36 @@
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: cluster-pki-bootstrapper
  namespace: cert-manager
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: cluster-pki-ca
  namespace: cert-manager
spec:
  commonName: cluster-pki
  isCA: true
  issuerRef:
    kind: Issuer
    name: cluster-pki-bootstrapper
  secretName: cluster-pki-ca-cert
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: cluster-pki
  namespace: cert-manager
spec:
  ca:
    secretName: cluster-pki-ca-cert
@ -3,6 +3,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
  name: pgsodium-config
  namespace: supabase-demo
data:
  pgsodium_getkey.sh: |
    #!/bin/bash

@ -18,6 +19,7 @@ apiVersion: v1
kind: Secret
metadata:
  name: pgsodium-key
  namespace: supabase-demo
data:
  # Generate a 32-byte key
  # head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' | base64

@ -27,6 +29,7 @@ apiVersion: v1
kind: Secret
metadata:
  name: supabase-admin-credentials
  namespace: supabase-demo
  labels:
    cnpg.io/reload: "true"
type: kubernetes.io/basic-auth

@ -38,16 +41,18 @@ apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
  namespace: supabase-demo
spec:
  instances: 1
  imageName: ghcr.io/supabase/postgres:15.8.1.021
  postgresUID: 105
  postgresGID: 106
  imageName: registry.icb4dc0.de/supabase-operator/postgres:17.4
  imagePullPolicy: Always
  postgresUID: 26
  postgresGID: 102

  bootstrap:
    initdb:
      database: app
      owner: setup
      owner: supabase_admin
      postInitSQL:
        - drop publication if exists supabase_realtime;
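The `pgsodium-key` Secret above only carries the generation command as a comment; a filled-in sketch could look as follows. The data key name (`pgsodium_root.key`) is a placeholder, since the real key name is not visible in this hunk.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: pgsodium-key
  namespace: supabase-demo
data:
  # value generated with: head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' | base64
  pgsodium_root.key: "<base64-encoded 64 hex characters>"   # placeholder key name and value
```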
44 config/dev/core.yaml Normal file

@ -0,0 +1,44 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: supabase-demo-credentials
  namespace: supabase-demo
stringData:
  url: postgresql://supabase_admin:1n1t-R00t!@cluster-example-rw.supabase-demo:5432/app
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Core
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: core-sample
  namespace: supabase-demo
spec:
  # public URL of the Supabase instance (API)
  # normally the Ingress/HTTPRoute endpoint
  externalUrl: http://localhost:8000/

  # public URL of the frontend
  # could be the same as the externalUrl if you're using one Ingress/HTTPRoute for both
  # or a different one if you prefer to separate API and frontend URLs
  # will be used by Supabase Auth to redirect users after login
  siteUrl: http://localhost:3000/
  database:
    # this field is write-only, it can be used to configure the DSN without having to create the secret manually
    # dsn: postgresql://supabase_admin:1n1t-R00t!@cluster-example-rw.supabase-demo:5432/app
    dsnSecretRef:
      name: supabase-demo-credentials
      key: url
  auth:
    disableSignup: false
    enableEmailAutoconfirm: true
    providers: {}
  postgrest:
    maxRows: 1000
  jwt:
    expiry: 3600
    # name of the secret containing the JWT secret
    # will be created if not found, make sure to reference this secret in the APIGateway, Dashboard and Storage
    secretName: core-sample-jwt
19 config/dev/dashboard.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Dashboard
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: dashboard-sample
  namespace: supabase-demo
spec:
  db:
    host: cluster-example-rw.supabase-demo.svc
    dbName: app
    dbCredentialsRef:
      secretName: core-sample-db-creds-supabase-admin
  studio:
    externalUrl: http://localhost:8000
    jwt:
      secretName: core-sample-jwt
@ -2,10 +2,19 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
  - https://github.com/cert-manager/cert-manager/releases/download/v1.17.1/cert-manager.yaml
  - https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/cnpg-1.25.0.yaml
  - resources/minio.yaml
  - ca.yaml
  - namespace.yaml
  - cnpg-cluster.yaml
  - minio.yaml
  - ../default
  - studio-plaintext-users.yaml
  - studio-credentials-secret.yaml
  - core.yaml
  - apigateway.yaml
  - dashboard.yaml
  - storage.yaml

patches:
  - path: manager_dev_settings.yaml

@ -16,3 +25,6 @@ patches:
    target:
      kind: Deployment
      labelSelector: app.kubernetes.io/name=control-plane

configurations:
  - kustomizeconfig/cnpg-cluster.yaml
4 config/dev/kustomizeconfig/cnpg-cluster.yaml Normal file

@ -0,0 +1,4 @@
images:
  - kind: Cluster
    group: postgresql.cnpg.io
    path: spec/imageName

@ -1,3 +0,0 @@
- op: replace
  path: /spec/replicas
  value: 1
@ -1,4 +1,4 @@
# Deploys a new Namespace for the MinIO Pod
---
apiVersion: v1
kind: Namespace
metadata:

4 config/dev/namespace.yaml Normal file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: supabase-demo
43 config/dev/storage.yaml Normal file

@ -0,0 +1,43 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: storage-s3-credentials
  namespace: supabase-demo
stringData:
  accessKeyId: FPxTAFL7NaubjPgIGBo3
  secretAccessKey: 7F437pPe84QcoocD3MWdAIVBU3oXonhVHxK645tm
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Storage
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: storage-sample
  namespace: supabase-demo
spec:
  api:
    s3Backend:
      endpoint: http://minio.minio-dev.svc:9000
      region: us-east-1
      forcePathStyle: true
      bucket: test
      credentialsSecretRef:
        secretName: storage-s3-credentials
    s3: {}
    db:
      host: cluster-example-rw.supabase-demo.svc
      dbName: app
      dbCredentialsRef:
        # will be created by Core resource operator if not present
        # just make sure the secret name is either based on the name of the core resource or explicitly set
        # format <core-resource-name>-db-creds-supabase-storage-admin
        secretName: core-sample-db-creds-supabase-storage-admin
    enableImageTransformation: true
    jwtAuth:
      # will be created by Core resource operator if not present
      # just make sure the secret name is either based on the name of the core resource or explicitly set
      secretName: core-sample-jwt
  imageProxy:
    enable: true
8 config/dev/studio-credentials-secret.yaml Normal file

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: studio-sample-oauth2
  namespace: supabase-demo
stringData:
  clientSecret: "G9r8Q~o4LJRlTQwPpdCBaZLsWdhUxM_02Y_XBcEr"

8 config/dev/studio-plaintext-users.yaml Normal file

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: studio-sample-basic-auth
  namespace: supabase-demo
stringData:
  ted: not_admin
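The `studio-sample-basic-auth` Secret above is consumed by the gateway's dashboard listener; the sample manifests later in this change set wire it up roughly like this:

```yaml
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: APIGateway
metadata:
  name: gateway-sample
  namespace: supabase-demo
spec:
  dashboardEndpoint:
    auth:
      basic:
        # plaintext key-value credentials, merged with any inline htpasswd users
        plaintextUsersSecretRef: studio-sample-basic-auth
```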
@ -20,26 +20,6 @@ spec:
|
|||
labels:
|
||||
control-plane: controller-manager
|
||||
spec:
|
||||
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
|
||||
# according to the platforms which are supported by your solution.
|
||||
# It is considered best practice to support multiple architectures. You can
|
||||
# build your manager image using the makefile target docker-buildx.
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: kubernetes.io/arch
|
||||
# operator: In
|
||||
# values:
|
||||
# - amd64
|
||||
# - arm64
|
||||
# - ppc64le
|
||||
# - s390x
|
||||
# - key: kubernetes.io/os
|
||||
# operator: In
|
||||
# values:
|
||||
# - linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
|
@ -49,13 +29,17 @@ spec:
|
|||
- manager
|
||||
- --leader-elect
|
||||
- --health-probe-bind-address=:8081
|
||||
image: supabase-operator:latest
|
||||
image: controller:latest
|
||||
name: manager
|
||||
env:
|
||||
- name: CONTROLLER_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: TLS_CA_CERT
|
||||
value: /etc/supabase/operator/certs/tls.crt
|
||||
- name: TLS_CA_KEY
|
||||
value: /etc/supabase/operator/certs/tls.key
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
|
@ -73,8 +57,6 @@ spec:
|
|||
port: 8081
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
# TODO(user): Configure the resources accordingly based on the project requirements.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
resources:
|
||||
limits:
|
||||
cpu: 150m
|
||||
|
@ -82,5 +64,12 @@ spec:
|
|||
requests:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: tls-certs
|
||||
mountPath: /etc/supabase/operator/certs
|
||||
volumes:
|
||||
- name: tls-certs
|
||||
secret:
|
||||
secretName: control-plane-ca-cert-tls
|
||||
serviceAccountName: controller-manager
|
||||
terminationGracePeriodSeconds: 10
|
||||
|
|
|
@ -4,6 +4,17 @@ kind: ClusterRole
|
|||
metadata:
|
||||
name: control-plane-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
- apiGroups:
|
||||
- supabase.k8s.icb4dc0.de
|
||||
resources:
|
||||
|
@ -12,6 +23,8 @@ rules:
|
|||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
|
||||
- apiGroups:
|
||||
- supabase.k8s.icb4dc0.de
|
||||
resources:
|
||||
|
|
|
@ -2,9 +2,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
|||
kind: Kustomization
|
||||
|
||||
images:
|
||||
- name: supabase-operator
|
||||
newName: code.icb4dc0.de/prskr/supabase-operator
|
||||
- name: controller
|
||||
newName: registry.icb4dc0.de/supabase-operator/controller
|
||||
newTag: 0.1.0-SNAPSHOT-e6c7d68
|
||||
|
||||
resources:
|
||||
- ../../default
|
||||
- ../../default
|
||||
|
|
|
@ -4,7 +4,6 @@ namespace: supabase-demo
|
|||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- cnpg-cluster.yaml
|
||||
- supabase_v1alpha1_core.yaml
|
||||
- supabase_v1alpha1_apigateway.yaml
|
||||
- supabase_v1alpha1_dashboard.yaml
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
---
|
||||
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
|
||||
kind: APIGateway
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: gateway-sample
|
||||
spec:
|
||||
apiEndpoint:
|
||||
jwks:
|
||||
# will be created by Core resource operator if not present
|
||||
# just make sure the secret name is either based on the name of the core resource or explicitly set
|
||||
name: core-sample-jwt
|
||||
key: jwks.json
|
||||
dashboardEndpoint:
|
||||
auth:
|
||||
basic:
|
||||
usersInline:
|
||||
# admin:admin
|
||||
- admin:{SHA}0DPiKuNIrrVmD8IUCuw1hQxNqZc=
|
||||
plaintextUsersSecretRef: studio-sample-basic-auth
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: studio-sample-basic-auth
|
||||
namespace: supabase-demo
|
||||
stringData:
|
||||
ted: not_admin
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
|
||||
kind: APIGateway
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: gateway-sample
|
||||
spec:
|
||||
apiEndpoint:
|
||||
jwks:
|
||||
# will be created by Core resource operator if not present
|
||||
# just make sure the secret name is either based on the name of the core resource or explicitly set
|
||||
name: core-sample-jwt
|
||||
key: jwks.json
|
||||
dashboardEndpoint:
|
||||
auth:
|
||||
oauth2:
|
||||
openIdIssuer: "https://idp.your-domain.com/"
|
||||
clientId: "<your-client-id>"
|
||||
# if not set, 'user' will be used
|
||||
scopes:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
clientSecretRef:
|
||||
name: studio-sample-oauth2
|
||||
key: clientSecret
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: studio-sample-oauth2
|
||||
namespace: supabase-demo
|
||||
stringData:
|
||||
clientSecret: "<your-client-secret>"
|
|
@ -1,17 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: supabase-demo-credentials
|
||||
stringData:
|
||||
url: postgresql://supabase_admin:1n1t-R00t!@cluster-example-rw.supabase-demo:5432/app
|
||||
---
|
||||
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
|
||||
kind: Core
|
||||
metadata:
|
||||
name: core-sample
|
||||
labels:
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: core-sample
|
||||
spec:
|
||||
# public URL of the Supabase instance (API)
|
||||
# normally the Ingress/HTTPRoute endpoint
|
||||
|
@ -24,10 +18,10 @@ spec:
|
|||
siteUrl: http://localhost:3000/
|
||||
database:
|
||||
dsnSecretRef:
|
||||
# name of the secret containing the database DSN
|
||||
name: supabase-demo-credentials
|
||||
key: url
|
||||
auth:
|
||||
disableSignup: false
|
||||
enableEmailAutoconfirm: true
|
||||
providers: {}
|
||||
postgrest:
|
||||
|
@ -35,5 +29,8 @@ spec:
|
|||
jwt:
|
||||
expiry: 3600
|
||||
# name of the secret containing the JWT secret
# will be created if not found, make sure to reference this secret in the APIGateway, Dashboard and Storage
# will be created if not found, make sure to reference this secret in:
# - APIGateway
# - Dashboard
# - Storage
secretName: core-sample-jwt
|
||||
|
|
|
@ -1,12 +1,4 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: storage-s3-credentials
|
||||
stringData:
|
||||
accessKeyId: FPxTAFL7NaubjPgIGBo3
|
||||
secretAccessKey: 7F437pPe84QcoocD3MWdAIVBU3oXonhVHxK645tm
|
||||
---
|
||||
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
|
||||
kind: Storage
|
||||
metadata:
|
||||
|
@ -23,7 +15,7 @@ spec:
|
|||
bucket: test
|
||||
credentialsSecretRef:
|
||||
secretName: storage-s3-credentials
|
||||
s3Protocol: {}
|
||||
s3: {}
|
||||
db:
|
||||
host: cluster-example-rw.supabase-demo.svc
|
||||
dbName: app
|
||||
|
|
|
@ -1,9 +1,13 @@
apiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-registry
name: supabase-operator-registry
port: 5005
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
registry: supabase-operator-registry
kindV1Alpha4Cluster:
  name: supabase-operator-debug
  networking:
    ipFamily: dual
|
|
@ -93,6 +93,7 @@ _Appears in:_
|
|||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `jwks` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secretkeyselector-v1-core)_ | JWKSSelector - selector where the JWKS can be retrieved from to enable the API gateway to validate JWTs | | |
|
||||
| `tls` _[EndpointTlsSpec](#endpointtlsspec)_ | TLS - enable and configure TLS for the API endpoint | | |
|
||||
|
||||
|
||||
#### AuthProviderMeta
|
||||
|
@ -150,7 +151,7 @@ _Appears in:_
|
|||
| `disableSignup` _boolean_ | | | |
|
||||
| `anonymousUsersEnabled` _boolean_ | | | |
|
||||
| `providers` _[AuthProviders](#authproviders)_ | | | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | | | |
|
||||
| `workloadTemplate` _[WorkloadSpec](#workloadspec)_ | | | |
|
||||
| `emailSignupDisabled` _boolean_ | | | |
|
||||
|
||||
|
||||
|
@ -184,7 +185,7 @@ _Appears in:_
|
|||
|
||||
|
||||
_Appears in:_
|
||||
- [WorkloadTemplate](#workloadtemplate)
|
||||
- [WorkloadSpec](#workloadspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
|
@ -317,6 +318,42 @@ _Appears in:_
|
|||
| `spec` _[DashboardSpec](#dashboardspec)_ | | | |
|
||||
|
||||
|
||||
#### DashboardAuthSpec
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [DashboardEndpointSpec](#dashboardendpointspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `oauth2` _[DashboardOAuth2Spec](#dashboardoauth2spec)_ | OAuth2 - configure oauth2 authentication for the dashboard listener<br />if configured, will be preferred over Basic authentication configuration<br />effectively disabling basic auth | | |
|
||||
| `basic` _[DashboardBasicAuthSpec](#dashboardbasicauthspec)_ | Basic - HTTP basic auth configuration, this should only be used in exceptions<br />e.g. during evaluations or for local development<br />only used if no other authentication is configured | | |
|
||||
|
||||
|
||||
|
||||
|
||||
#### DashboardBasicAuthSpec
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [DashboardAuthSpec](#dashboardauthspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `usersInline` _string array_ | UsersInline - [htpasswd format](https://httpd.apache.org/docs/2.4/programs/htpasswd.html) | | items:Pattern: ^[\w_.]+:\\{SHA\\}[A-z0-9]+=*$ <br /> |
|
||||
| `plaintextUsersSecretRef` _string_ | PlaintextUsersSecretRef - name of a secret that contains plaintext credentials in key-value form<br />if not empty, credentials will be merged with inline users | | |
|
||||
|
||||
|
||||
#### DashboardDbSpec
|
||||
|
||||
|
||||
|
@ -347,6 +384,10 @@ _Appears in:_
|
|||
_Appears in:_
|
||||
- [APIGatewaySpec](#apigatewayspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `auth` _[DashboardAuthSpec](#dashboardauthspec)_ | Auth - configure authentication for the dashboard endpoint | | |
|
||||
| `tls` _[EndpointTlsSpec](#endpointtlsspec)_ | TLS - enable and configure TLS for the Dashboard endpoint | | |
|
||||
|
||||
|
||||
#### DashboardList
|
||||
|
@ -367,6 +408,28 @@ DashboardList contains a list of Dashboard.
|
|||
| `items` _[Dashboard](#dashboard) array_ | | | |
|
||||
|
||||
|
||||
#### DashboardOAuth2Spec
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [DashboardAuthSpec](#dashboardauthspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `openIdIssuer` _string_ | OpenIDIssuer - if set the defaulter will fetch the discovery document and fill<br />TokenEndpoint and AuthorizationEndpoint based on the discovery document | | |
|
||||
| `tokenEndpoint` _string_ | TokenEndpoint - endpoint where Envoy will retrieve the OAuth2 access and identity token from | | |
|
||||
| `authorizationEndpoint` _string_ | AuthorizationEndpoint - endpoint where the user will be redirected to authenticate | | |
|
||||
| `clientId` _string_ | ClientID - client ID to authenticate with the OAuth2 provider | | |
|
||||
| `scopes` _string array_ | Scopes - scopes to request from the OAuth2 provider (e.g. "openid", "profile", ...) - optional | | |
|
||||
| `resources` _string array_ | Resources - resources to request from the OAuth2 provider (e.g. "user", "email", ...) - optional | | |
|
||||
| `clientSecretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secretkeyselector-v1-core)_ | ClientSecretRef - reference to the secret that contains the client secret | | |
|
||||
|
||||
|
||||
#### DashboardSpec
|
||||
|
||||
|
||||
|
@ -455,7 +518,7 @@ _Appears in:_
|
|||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `appliedMigrations` _[MigrationStatus](#migrationstatus)_ | | | |
|
||||
| `migrationConditions` _[MigrationScriptCondition](#migrationscriptcondition) array_ | | | |
|
||||
| `roles` _object (keys:string, values:integer array)_ | | | |
|
||||
|
||||
|
||||
|
@ -519,6 +582,69 @@ _Appears in:_
|
|||
| `credentialsRef` _[SmtpCredentialsReference](#smtpcredentialsreference)_ | | | |
|
||||
|
||||
|
||||
#### EndpointTlsSpec
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ApiEndpointSpec](#apiendpointspec)
|
||||
- [DashboardEndpointSpec](#dashboardendpointspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `cert` _[TlsCertRef](#tlscertref)_ | | | |
|
||||
|
||||
|
||||
#### EnvoyComponentLogLevel
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [EnvoyDebuggingOptions](#envoydebuggingoptions)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `component` _string_ | Component - the component to set the log level for<br />the component IDs can be found [here](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h#L36) | | |
|
||||
| `level` _[EnvoyLogLevel](#envoyloglevel)_ | Level - the log level to set for the component | | Enum: [trace debug info warning error critical off] <br /> |
|
||||
|
||||
|
||||
#### EnvoyDebuggingOptions

_Appears in:_
- [EnvoySpec](#envoyspec)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `componentLogLevels` _[EnvoyComponentLogLevel](#envoycomponentloglevel) array_ | | | |
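To illustrate the two types above, a hypothetical `APIGateway` snippet raising the log level of a single Envoy component could look like this (`http` is just one example of an Envoy logger ID):

```yaml
spec:
  envoy:
    debugging:
      componentLogLevels:
        - component: http   # any logger ID from Envoy's source/common/common/logger.h
          level: debug      # one of: trace, debug, info, warning, error, critical, off
```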
|
||||
|
||||
|
||||
#### EnvoyLogLevel
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [EnvoyComponentLogLevel](#envoycomponentloglevel)
|
||||
|
||||
|
||||
|
||||
#### EnvoySpec
|
||||
|
||||
|
||||
|
@ -534,7 +660,9 @@ _Appears in:_
|
|||
| --- | --- | --- | --- |
|
||||
| `nodeName` _string_ | NodeName - identifies the Envoy cluster within the current namespace<br />if not set, the name of the APIGateway resource will be used<br />The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit. | | |
|
||||
| `controlPlane` _[ControlPlaneSpec](#controlplanespec)_ | ControlPlane - configure the control plane where Envoy will retrieve its configuration from | | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the Envoy deployment | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the Envoy deployment | | |
|
||||
| `disableIPv6` _boolean_ | DisableIPv6 - disable IPv6 for the Envoy instance<br />this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint) | | |
|
||||
| `debugging` _[EnvoyDebuggingOptions](#envoydebuggingoptions)_ | | | |
|
||||
|
||||
|
||||
#### EnvoyStatus
|
||||
|
@ -550,7 +678,6 @@ _Appears in:_
|
|||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `configVersion` _string_ | | | |
|
||||
| `resourceHash` _integer array_ | | | |
|
||||
|
||||
|
||||
|
@ -604,7 +731,7 @@ _Appears in:_
|
|||
| --- | --- | --- | --- |
|
||||
| `enable` _boolean_ | Enable - whether to deploy the image proxy or not | | |
|
||||
| `enableWebPDetection` _boolean_ | | | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the image proxy workload | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the image proxy workload | | |
|
||||
|
||||
|
||||
#### ImageSpec
|
||||
|
@ -646,9 +773,11 @@ _Appears in:_
|
|||
| `serviceKey` _string_ | ServiceKey - key in secret where to read the service JWT from | service_key | |
|
||||
|
||||
|
||||
#### MigrationStatus
|
||||
|
||||
_Underlying type:_ _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_
|
||||
|
||||
#### MigrationScriptCondition
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -657,6 +786,14 @@ _Underlying type:_ _[Time](https://kubernetes.io/docs/reference/generated/kubern
|
|||
_Appears in:_
|
||||
- [DatabaseStatus](#databasestatus)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `name` _string_ | Name - file name of the migration script | | |
|
||||
| `hash` _integer array_ | Hash - SHA256 hash of the script when it was last successfully applied | | |
|
||||
| `lastProbeTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | LastProbeTime - last time the operator tried to execute the migration script | | |
|
||||
| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | LastTransitionTime - last time the condition transitioned from one status to another | | |
|
||||
| `reason` _string_ | Reason - one-word, CamelCase reason for the condition's last transition | | |
|
||||
| `message` _string_ | Message - human-readable message indicating details about the last transition | | |
|
||||
|
||||
|
||||
#### OAuthProvider
|
||||
|
@ -691,7 +828,7 @@ _Appears in:_
|
|||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the pg-meta deployment | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the pg-meta deployment | | |
|
||||
|
||||
|
||||
#### PhoneAuthProvider
|
||||
|
@ -727,7 +864,7 @@ _Appears in:_
|
|||
| `extraSearchPath` _string array_ | ExtraSearchPath - Extra schemas to add to the search_path of every request.<br />These schemas tables, views and functions don’t get API endpoints, they can only be referred from the database objects inside your db-schemas. | [public extensions] | |
|
||||
| `anonRole` _string_ | AnonRole - name of the anon role | anon | |
|
||||
| `maxRows` _integer_ | MaxRows - maximum number of rows PostgREST will load at a time | 1000 | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the PostgREST workload | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadSpec - customize the PostgREST workload | | |
|
||||
|
||||
|
||||
#### S3BackendSpec
|
||||
|
@ -862,7 +999,7 @@ _Appears in:_
|
|||
| `db` _[StorageApiDbSpec](#storageapidbspec)_ | DBSpec - Configure access to the Postgres database<br />In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource | | |
|
||||
| `s3` _[S3ProtocolSpec](#s3protocolspec)_ | S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client | | |
|
||||
| `uploadTemp` _[UploadTempSpec](#uploadtempspec)_ | UploadTemp - configure the emptyDir for storing intermediate files during uploads | | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the Storage API workload | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the Storage API workload | | |
|
||||
|
||||
|
||||
#### StorageList
|
||||
|
@ -916,11 +1053,30 @@ _Appears in:_
|
|||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `jwt` _[JwtSpec](#jwtspec)_ | | | |
|
||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the studio deployment | | |
|
||||
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the studio deployment | | |
|
||||
| `gatewayServiceSelector` _object (keys:string, values:string)_ | GatewayServiceSelector - selector to find the service for the API gateway<br />Required to configure the API URL in the studio deployment<br />If you don't run multiple APIGateway instances in the same namespaces, the default will be fine | \{ app.kubernetes.io/component:api-gateway app.kubernetes.io/name:envoy \} | |
|
||||
| `externalUrl` _string_ | APIExternalURL is referring to the URL where Supabase API will be available<br />Typically this is the ingress of the API gateway | | |
|
||||
|
||||
|
||||
#### TlsCertRef
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [EndpointTlsSpec](#endpointtlsspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretName` _string_ | | | |
|
||||
| `serverCertKey` _string_ | ServerCertKey - key in the secret that contains the server certificate | tls.crt | |
|
||||
| `serverKeyKey` _string_ | ServerKeyKey - key in the secret that contains the server private key | tls.key | |
|
||||
| `caCertKey` _string_ | CaCertKey - key in the secret that contains the CA certificate | ca.crt | |
|
||||
|
||||
|
||||
#### UploadTempSpec
|
||||
|
||||
|
||||
|
@ -938,7 +1094,7 @@ _Appears in:_
|
|||
| `sizeLimit` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#quantity-resource-api)_ | | | |
|
||||
|
||||
|
||||
#### WorkloadTemplate
#### WorkloadSpec

@ -960,7 +1116,7 @@ _Appears in:_

| `replicas` _integer_ | | | |
| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podsecuritycontext-v1-core)_ | | | |
| `additionalLabels` _object (keys:string, values:string)_ | | | |
| `workload` _[ContainerTemplate](#containertemplate)_ | Workload - customize the container template of the workload | | |
| `container` _[ContainerTemplate](#containertemplate)_ | ContainerSpec - customize the container template of the workload | | |
| `additionalVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volume-v1-core) array_ | | | |
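A minimal sketch of a `WorkloadSpec` using the fields above; the values are illustrative assumptions, and `container`/`additionalVolumes` are left empty because their sub-fields are documented elsewhere:

```yaml
workloadSpec:
  replicas: 2
  additionalLabels:
    app.kubernetes.io/part-of: supabase   # example label
  securityContext:
    runAsNonRoot: true
  container: {}          # ContainerTemplate - per-container customization
  additionalVolumes: []
```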
||||
|
||||
|
|
1
docs/auth/email.md
Normal file
1
docs/auth/email.md
Normal file
|
@ -0,0 +1 @@
|
|||
# Email
|
1
docs/auth/overview.md
Normal file
1
docs/auth/overview.md
Normal file
|
@ -0,0 +1 @@
|
|||
# Overview
|
1
docs/auth/providers/azure.md
Normal file
1
docs/auth/providers/azure.md
Normal file
|
@ -0,0 +1 @@
|
|||
# Providers
|
1
docs/components/apigateway.md
Normal file
1
docs/components/apigateway.md
Normal file
|
@ -0,0 +1 @@
|
|||
# APIGateway
|
|
@ -0,0 +1,14 @@
# Core

The `Core` resource configures the essential Supabase services:

- PostgREST
- Auth

It also manages the initial DB migrations that are not done by the services themselves and are part of the [supabase/postgres](https://github.com/supabase/postgres) repository.

To deploy a basic `Core` instance, you can use the following snippet as a 'template':

```yaml title="Basic 'Core' example" linenums="1"
--8<-- "config/samples/supabase_v1alpha1_core.yaml"
```
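If the referenced sample file is not at hand, a trimmed-down sketch derived from the dev manifests in this change set looks roughly like this:

```yaml
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Core
metadata:
  name: core-sample
  namespace: supabase-demo
spec:
  externalUrl: http://localhost:8000/
  siteUrl: http://localhost:3000/
  database:
    dsnSecretRef:
      name: supabase-demo-credentials   # Secret with a `url` key holding the DSN
      key: url
  jwt:
    secretName: core-sample-jwt         # created by the operator if it does not exist
```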
19 docs/development/tools.md Normal file

@ -0,0 +1,19 @@
# Tools

## Essentials

- Current Go version (see `go.mod`)
- Any Docker daemon[^1]
- kubectl (v1.30.0+)
- Access to a Kubernetes v1.30+ cluster[^2]

[^1]: Docker Desktop, Orbstack, Podman, ...
[^2]: kind, Orbstack, Docker Desktop, Rancher Desktop, k3s, microk8s, ...

## Recommended

- [husky](https://github.com/go-courier/husky)
- [tilt](https://tilt.dev/)
- [ctlptl](https://github.com/tilt-dev/ctlptl)
- [goreleaser](https://goreleaser.com/)
- [ko](https://ko.build/)
|
|
@ -1,5 +1,11 @@
# Getting Started

## Dependencies

The operator has the following dependencies:

- [cert-manager](https://cert-manager.io/) (to issue webhook certificates)

## Deploying the operator

The easiest way to deploy the operator is to fetch the manifest from the [releases](https://code.icb4dc0.de/prskr/supabase-operator/releases) and apply it like this:

@ -12,3 +18,12 @@ The manifest is rendered as part of the release workflow and based on [kustomize
If you want to customize the deployment, you can start from the [release/default](https://code.icb4dc0.de/prskr/supabase-operator/src/branch/main/config/release/default) layer and build your own manifest.

## Deploying a basic Supabase instance

As described in the [overview](./components/overview.md), the custom resources group the Supabase services into 'modules'.
A common basic instance requires:

- a [`Core`](./components/core.md) instance (PostgREST, Auth & DB migrations)
- an [`APIGateway`](./components/apigateway.md) instance (gateway to handle JWT auth and routing)

It is perfectly possible to deploy, for instance, only an `APIGateway` and a `Storage` instance if you don't need the API, or to manage your own API gateway if you prefer it that way.
The operator setup tries to be as 'unopinionated' as possible.
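For orientation, a minimal pairing of the two resources, adapted from the samples in this repository (names and URLs are placeholders), could look like this:

```yaml
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Core
metadata:
  name: core-sample
spec:
  externalUrl: http://localhost:8000/
  siteUrl: http://localhost:3000/
  database:
    dsnSecretRef:
      name: supabase-demo-credentials
      key: url
  jwt:
    secretName: core-sample-jwt
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: APIGateway
metadata:
  name: gateway-sample
spec:
  apiEndpoint:
    jwks:
      name: core-sample-jwt   # created by the Core operator if not present
      key: jwks.json
```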
|
|
@ -9,7 +9,7 @@ This project is not affiliated with the Supabase project or company in any way.
This is currently a work-in-progress experiment to replace existing Helm charts for Supabase, as they tend to be hard to deploy and manage, and the default Supabase stack - although working great as a single instance or in their SaaS instances - isn't a perfect fit for Kubernetes.
This operator replaces tedious Helm values files with a small set of custom resources that allow a user to quickly deploy a Supabase instance without having to know much (if anything) of the Supabase internals.

## Targets
## Goals

- Make it as easy as possible to deploy Supabase on a Kubernetes cluster
- Manage updates of components

@ -23,7 +23,7 @@ This operator replaces tedious Helm values files with a small set of custom reso
- ConfigMaps
- *soon*: NetworkPolicies

## Non-Targets
## Non-Goals

- Manage **all** Kubernetes aspects, it does **not** create:
  - PodDisruptionBudgets
|
301
go.mod
301
go.mod
|
@ -1,119 +1,326 @@
|
|||
module code.icb4dc0.de/prskr/supabase-operator
|
||||
|
||||
go 1.23.5
|
||||
go 1.24
|
||||
|
||||
toolchain go1.24.0
|
||||
|
||||
require (
|
||||
github.com/alecthomas/kong v1.6.0
|
||||
github.com/envoyproxy/go-control-plane v0.13.1
|
||||
github.com/jackc/pgx/v5 v5.7.1
|
||||
github.com/alecthomas/kong v1.7.0
|
||||
github.com/envoyproxy/go-control-plane v0.13.4
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4
|
||||
github.com/go-logr/logr v1.4.2
|
||||
github.com/jackc/pgx/v5 v5.7.2
|
||||
github.com/lestrrat-go/jwx/v2 v2.1.3
|
||||
github.com/magefile/mage v1.15.0
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
github.com/onsi/gomega v1.35.1
|
||||
github.com/onsi/ginkgo/v2 v2.22.2
|
||||
github.com/onsi/gomega v1.36.2
|
||||
github.com/stretchr/testify v1.10.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
google.golang.org/grpc v1.65.0
|
||||
google.golang.org/protobuf v1.36.3
|
||||
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
|
||||
google.golang.org/grpc v1.70.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.32.1
|
||||
k8s.io/apimachinery v0.32.1
|
||||
k8s.io/client-go v0.32.1
|
||||
sigs.k8s.io/controller-runtime v0.20.0
|
||||
sigs.k8s.io/controller-runtime v0.20.1
|
||||
)
|
||||
|
||||
tool (
|
||||
github.com/elastic/crd-ref-docs
|
||||
github.com/golangci/golangci-lint/v2/cmd/golangci-lint
|
||||
gotest.tools/gotestsum
|
||||
mvdan.cc/gofumpt
|
||||
sigs.k8s.io/controller-runtime/tools/setup-envtest
|
||||
sigs.k8s.io/controller-tools/cmd/controller-gen
|
||||
sigs.k8s.io/kustomize/kustomize/v5
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.18.0 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
4d63.com/gocheckcompilerdirectives v1.3.0 // indirect
|
||||
4d63.com/gochecknoglobals v0.2.2 // indirect
|
||||
cel.dev/expr v0.19.2 // indirect
|
||||
github.com/4meepo/tagalign v1.4.2 // indirect
|
||||
github.com/Abirdcfly/dupword v0.1.3 // indirect
|
||||
github.com/Antonboom/errname v1.1.0 // indirect
|
||||
github.com/Antonboom/nilnil v1.1.0 // indirect
|
||||
github.com/Antonboom/testifylint v1.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/Crocmagnon/fatcontext v0.7.1 // indirect
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
|
||||
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.1 // indirect
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
|
||||
github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect
|
||||
github.com/alecthomas/go-check-sumtype v0.3.1 // indirect
|
||||
github.com/alexkohler/nakedret/v2 v2.0.5 // indirect
|
||||
github.com/alexkohler/prealloc v1.0.0 // indirect
|
||||
github.com/alingse/asasalint v0.0.11 // indirect
|
||||
github.com/alingse/nilnesserr v0.1.2 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/ashanbrown/forbidigo v1.6.0 // indirect
|
||||
github.com/ashanbrown/makezero v1.2.0 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bitfield/gotestdox v0.2.2 // indirect
|
||||
github.com/bkielbasa/cyclop v1.2.3 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/blizzy78/varnamelen v0.8.0 // indirect
|
||||
github.com/bombsimon/wsl/v4 v4.6.0 // indirect
|
||||
github.com/breml/bidichk v0.3.3 // indirect
|
||||
github.com/breml/errchkjson v0.4.1 // indirect
|
||||
github.com/butuzov/ireturn v0.3.1 // indirect
|
||||
github.com/butuzov/mirror v1.3.0 // indirect
|
||||
github.com/catenacyber/perfsprint v0.9.1 // indirect
|
||||
github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
|
||||
github.com/charithe/durationcheck v0.0.10 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
|
||||
github.com/charmbracelet/lipgloss v1.1.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.8.0 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/chavacava/garif v0.1.0 // indirect
|
||||
github.com/ckaznocha/intrange v0.3.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect
|
||||
github.com/curioswitch/go-reassign v0.3.0 // indirect
|
||||
github.com/daixiang0/gci v0.13.6 // indirect
|
||||
github.com/dave/dst v0.27.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/denis-tingaikin/go-header v0.5.0 // indirect
|
||||
github.com/dnephin/pflag v1.0.7 // indirect
|
||||
github.com/elastic/crd-ref-docs v0.1.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/ettle/strcase v0.2.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/firefart/nonamedreturns v1.0.5 // indirect
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/fzipp/gocyclo v0.6.0 // indirect
|
||||
github.com/ghostiam/protogetter v0.3.12 // indirect
|
||||
github.com/go-critic/go-critic v0.13.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/go-toolsmith/astcast v1.1.0 // indirect
|
||||
github.com/go-toolsmith/astcopy v1.1.0 // indirect
|
||||
github.com/go-toolsmith/astequal v1.2.0 // indirect
|
||||
github.com/go-toolsmith/astfmt v1.1.0 // indirect
|
||||
github.com/go-toolsmith/astp v1.1.0 // indirect
|
||||
github.com/go-toolsmith/strparse v1.1.0 // indirect
|
||||
github.com/go-toolsmith/typep v1.1.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
|
||||
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.11.3 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect
github.com/golangci/go-printf-func-name v0.1.0 // indirect
github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect
github.com/golangci/golangci-lint/v2 v2.0.1 // indirect
github.com/golangci/golines v0.0.0-20250217232252-b35a6149b587 // indirect
github.com/golangci/misspell v0.6.0 // indirect
github.com/golangci/plugin-module-register v0.1.1 // indirect
github.com/golangci/revgrep v0.8.0 // indirect
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/gordonklaus/ineffassign v0.1.0 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.5.0 // indirect
github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jgautheron/goconst v1.7.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jjti/go-spancheck v0.6.4 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julz/importas v0.2.0 // indirect
github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect
github.com/kisielk/errcheck v1.9.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.6 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/kulti/thelper v0.6.3 // indirect
github.com/kunwardeep/paralleltest v1.0.12 // indirect
github.com/lasiar/canonicalheader v1.1.2 // indirect
github.com/ldez/exptostd v0.4.2 // indirect
github.com/ldez/gomoddirectives v0.6.1 // indirect
github.com/ldez/grignotin v0.9.0 // indirect
github.com/ldez/tagliatelle v0.7.1 // indirect
github.com/ldez/usetesting v0.4.2 // indirect
github.com/leonklingele/grouper v1.1.2 // indirect
github.com/lestrrat-go/blackmagic v1.0.2 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/httprc v1.0.6 // indirect
github.com/lestrrat-go/iter v1.0.2 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/macabu/inamedparam v0.2.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/maratori/testableexamples v1.0.0 // indirect
github.com/maratori/testpackage v1.1.1 // indirect
github.com/matoous/godox v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mgechev/revive v1.7.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/moricho/tparallel v0.3.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
github.com/nunnatsa/ginkgolinter v0.19.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polyfloyd/go-errorlint v1.7.1 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/quasilyte/go-ruleguard v0.4.4 // indirect
github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/raeperd/recvcheck v0.2.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/ryancurrah/gomodguard v1.4.1 // indirect
github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
github.com/sagikazarmark/locafero v0.8.0 // indirect
github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect
github.com/securego/gosec/v2 v2.22.2 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sivchari/containedctx v1.0.3 // indirect
github.com/sonatard/noctx v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/sourcegraph/go-diff v0.7.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/viper v1.20.0 // indirect
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tdakkota/asciicheck v0.4.1 // indirect
github.com/tetafro/godot v1.5.0 // indirect
github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect
github.com/timonwong/loggercheck v0.10.1 // indirect
github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
github.com/ultraware/funlen v0.2.0 // indirect
github.com/ultraware/whitespace v0.2.0 // indirect
github.com/uudashr/gocognit v1.2.0 // indirect
github.com/uudashr/iface v1.3.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
github.com/xen0n/gosmopolitan v1.3.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
github.com/yeya24/promlinter v0.3.0 // indirect
github.com/ykadowak/zerologlint v0.1.5 // indirect
gitlab.com/bosi/decorder v0.4.2 // indirect
go-simpler.org/musttag v0.13.0 // indirect
go-simpler.org/sloglint v0.9.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20250305212735-054e65f0b394 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/oauth2 v0.26.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.10.0 // indirect
golang.org/x/tools v0.31.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250127172529-29210b9bc287 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-