feat(db): prepare migrations and core CRD
parent 734e1b22f9
commit 12090c913a
105 changed files with 5910 additions and 54 deletions
25  .devcontainer/devcontainer.json  Normal file
@@ -0,0 +1,25 @@
{
  "name": "Kubebuilder DevContainer",
  "image": "golang:1.22",
  "features": {
    "ghcr.io/devcontainers/features/docker-in-docker:2": {},
    "ghcr.io/devcontainers/features/git:1": {}
  },

  "runArgs": ["--network=host"],

  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.shell.linux": "/bin/bash"
      },
      "extensions": [
        "ms-kubernetes-tools.vscode-kubernetes-tools",
        "ms-azuretools.vscode-docker"
      ]
    }
  },

  "onCreateCommand": "bash .devcontainer/post-install.sh"
}

23  .devcontainer/post-install.sh  Normal file
@@ -0,0 +1,23 @@
#!/bin/bash
set -x

curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64
chmod +x ./kind
mv ./kind /usr/local/bin/kind

curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/amd64
chmod +x kubebuilder
mv kubebuilder /usr/local/bin/

KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl"
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl

docker network create -d=bridge --subnet=172.19.0.0/24 kind

kind version
kubebuilder version
docker --version
go version
kubectl version --client

3  .dockerignore  Normal file
@@ -0,0 +1,3 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/

25  .github/workflows/lint.yml  vendored  Normal file
@@ -0,0 +1,25 @@
name: Lint

on:
  push:
  pull_request:

jobs:
  lint:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache-dependency-path: go.sum
          check-latest: true

      - name: Run linter
        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.61

37  .github/workflows/test-e2e.yml  vendored  Normal file
@@ -0,0 +1,37 @@
name: E2E Tests

on:
  push:
  pull_request:

jobs:
  test-e2e:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache-dependency-path: go.sum
          check-latest: true

      - name: Install the latest version of kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind

      - name: Verify kind installation
        run: kind version

      - name: Create kind cluster
        run: kind create cluster

      - name: Running Test e2e
        run: |
          go mod tidy
          make test-e2e

25  .github/workflows/test.yml  vendored  Normal file
@@ -0,0 +1,25 @@
name: Tests

on:
  push:
  pull_request:

jobs:
  test:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache-dependency-path: go.sum
          check-latest: true

      - name: Running Tests
        run: |
          go mod tidy
          make test

1  .gitignore  vendored  Normal file
@@ -0,0 +1 @@
bin/

65  .golangci.yml  Normal file
@@ -0,0 +1,65 @@
run:
  timeout: 5m
  allow-parallel-runners: true

issues:
  # don't skip warning about doc comments
  # don't exclude the default set of lint
  exclude-use-default: false
  # restore some of the defaults
  # (fill in the rest as needed)
  exclude-rules:
    - path: "api/*"
      linters:
        - lll
    - path: "internal/*"
      linters:
        - dupl
        - lll
linters:
  disable-all: true
  enable:
    - dupl
    - errcheck
    - copyloopvar
    - ginkgolinter
    - goconst
    - gocyclo
    - gofmt
    - goimports
    - gosimple
    - govet
    - ineffassign
    - lll
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - typecheck
    - unconvert
    - unparam
    - unused

linters-settings:
  revive:
    rules:
      - name: comment-spacings
  gci:
    sections:
      - standard
      - default
      - prefix(code.icb4dc0.de/prskr/supabase-operator)
      - alias
      - blank
      - dot
  goimports:
    local-prefixes: code.icb4dc0.de/prskr/supabase-operator
  importas:
    no-unaliased: true
    no-extra-aliases: true
    alias:
      - pkg: k8s.io/api/(\w+)/(v[\w\d]+)
        alias: $1$2
      - pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
        alias: metav1

9  .zed/settings.json  Normal file
@@ -0,0 +1,9 @@
{
  "lsp": {
    "gopls": {
      "initialization_options": {
        "local": "code.icb4dc0.de/prskr/supabase-operator"
      }
    }
  }
}

33  Dockerfile  Normal file
@@ -0,0 +1,33 @@
# Build the manager binary
FROM golang:1.22 AS builder
ARG TARGETOS
ARG TARGETARCH

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY cmd/main.go cmd/main.go
COPY api/ api/
COPY internal/ internal/

# Build
# GOARCH has no default value so that the binary is built for the platform of the host where the
# command was called. For example, `make docker-build` on Apple Silicon (M1) sets the docker
# BUILDPLATFORM arg to linux/arm64, while on Apple x86 it is linux/amd64. Therefore,
# by leaving it empty we ensure that the container and the binary shipped in it share the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

ENTRYPOINT ["/manager"]

212  Makefile  Normal file
@@ -0,0 +1,212 @@
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.31.0

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif

# CONTAINER_TOOL defines the container tool to be used for building images.
# Be aware that the target commands are only tested with Docker which is
# scaffolded by default. However, you might want to replace it to use other
# tools. (i.e. podman)
CONTAINER_TOOL ?= docker

# Setting SHELL to bash allows bash commands to be executed by recipes.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

.PHONY: all
all: build

##@ General

# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php

.PHONY: help
help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Development

.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases

.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

.PHONY: fmt
fmt: ## Run go fmt against code.
	go fmt ./...

.PHONY: vet
vet: ## Run go vet against code.
	go vet ./...

.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.
# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally.
# Prometheus and CertManager are installed by default; skip with:
# - PROMETHEUS_INSTALL_SKIP=true
# - CERT_MANAGER_INSTALL_SKIP=true
.PHONY: test-e2e
test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind.
	@command -v kind >/dev/null 2>&1 || { \
		echo "Kind is not installed. Please install Kind manually."; \
		exit 1; \
	}
	@kind get clusters | grep -q 'kind' || { \
		echo "No Kind cluster is running. Please start a Kind cluster before running the e2e tests."; \
		exit 1; \
	}
	go test ./test/e2e/ -v -ginkgo.v

.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
	$(GOLANGCI_LINT) run

.PHONY: lint-fix
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
	$(GOLANGCI_LINT) run --fix

##@ Build

.PHONY: build
build: manifests generate fmt vet ## Build manager binary.
	go build -o bin/manager cmd/main.go

.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
	go run ./cmd/main.go

# If you wish to build the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it.
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
.PHONY: docker-build
docker-build: ## Build docker image with the manager.
	$(CONTAINER_TOOL) build -t ${IMG} .

.PHONY: docker-push
docker-push: ## Push docker image with the manager.
	$(CONTAINER_TOOL) push ${IMG}

# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple
# architectures. (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
	- $(CONTAINER_TOOL) buildx create --name supabase-operator-builder
	$(CONTAINER_TOOL) buildx use supabase-operator-builder
	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
	- $(CONTAINER_TOOL) buildx rm supabase-operator-builder
	rm Dockerfile.cross

.PHONY: build-installer
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
	mkdir -p dist
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default > dist/install.yaml

##@ Deployment

ifndef ignore-not-found
  ignore-not-found = false
endif

.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -

.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
	$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -

.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
	$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

##@ Dependencies

## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
	mkdir -p $(LOCALBIN)

## Tool Binaries
KUBECTL ?= kubectl
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint

## Tool Versions
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.4
ENVTEST_VERSION ?= release-0.19
GOLANGCI_LINT_VERSION ?= v1.61.0

.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))

.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))

.PHONY: envtest
envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
$(ENVTEST): $(LOCALBIN)
	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))

.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))

# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary
# $2 - package url which can be installed
# $3 - specific version of package
define go-install-tool
@[ -f "$(1)-$(3)" ] || { \
set -e; \
package=$(2)@$(3) ;\
echo "Downloading $${package}" ;\
rm -f $(1) || true ;\
GOBIN=$(LOCALBIN) go install $${package} ;\
mv $(1) $(1)-$(3) ;\
} ;\
ln -sf $(1)-$(3) $(1)
endef

20  PROJECT  Normal file
@@ -0,0 +1,20 @@
# Code generated by tool. DO NOT EDIT.
# This file is used to track the info used to scaffold your project
# and allow the plugins properly work.
# More info: https://book.kubebuilder.io/reference/project-config.html
domain: k8s.icb4dc0.de
layout:
- go.kubebuilder.io/v4
projectName: supabase-operator
repo: code.icb4dc0.de/prskr/supabase-operator
resources:
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: k8s.icb4dc0.de
  group: supabase
  kind: Core
  path: code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1
  version: v1alpha1
version: "3"

114  README.md  Normal file
@@ -0,0 +1,114 @@
# supabase-operator
// TODO(user): Add simple overview of use/purpose

## Description
// TODO(user): An in-depth paragraph about your project and overview of use

## Getting Started

### Prerequisites
- go version v1.22.0+
- docker version 17.03+.
- kubectl version v1.11.3+.
- Access to a Kubernetes v1.11.3+ cluster.

### To Deploy on the cluster
**Build and push your image to the location specified by `IMG`:**

```sh
make docker-build docker-push IMG=<some-registry>/supabase-operator:tag
```

**NOTE:** This image must be published to the registry you specified,
and the working environment needs permission to pull it.
Make sure you have proper access to the registry if the above commands don't work.

**Install the CRDs into the cluster:**

```sh
make install
```

**Deploy the Manager to the cluster with the image specified by `IMG`:**

```sh
make deploy IMG=<some-registry>/supabase-operator:tag
```

> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
privileges or be logged in as admin.

**Create instances of your solution**
You can apply the samples (examples) from config/samples:

```sh
kubectl apply -k config/samples/
```

>**NOTE**: Ensure that the samples have sensible default values to test them out.

### To Uninstall
**Delete the instances (CRs) from the cluster:**

```sh
kubectl delete -k config/samples/
```

**Delete the APIs (CRDs) from the cluster:**

```sh
make uninstall
```

**Undeploy the controller from the cluster:**

```sh
make undeploy
```

## Project Distribution

Following are the steps to build the installer and distribute this project to users.

1. Build the installer for the image built and published in the registry:

```sh
make build-installer IMG=<some-registry>/supabase-operator:tag
```

NOTE: The makefile target mentioned above generates an 'install.yaml'
file in the dist directory. This file contains all the resources built
with Kustomize, which are necessary to install this project without
its dependencies.

2. Using the installer

Users can just run `kubectl apply -f <URL for YAML BUNDLE>` to install the project, e.g.:

```sh
kubectl apply -f https://raw.githubusercontent.com/<org>/supabase-operator/<tag or branch>/dist/install.yaml
```

## Contributing
// TODO(user): Add detailed information on how you would like others to contribute to this project

**NOTE:** Run `make help` for more information on all potential `make` targets

More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)

## License

Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

105  api/v1alpha1/core_types.go  Normal file
@@ -0,0 +1,105 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"context"
	"errors"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Database configures the Postgres connection, either as an inline DSN
// or as a reference to a key in a Secret.
type Database struct {
	DSN     *string                   `json:"dsn,omitempty"`
	DSNFrom *corev1.SecretKeySelector `json:"dsnFrom,omitempty"`
}

// GetDSN resolves the DSN, reading it from the referenced Secret when it is not set inline.
func (d Database) GetDSN(ctx context.Context, client client.Client) (string, error) {
	if d.DSN != nil {
		return *d.DSN, nil
	}

	if d.DSNFrom == nil {
		return "", errors.New("DSN not set")
	}

	var secret corev1.Secret
	if err := client.Get(ctx, types.NamespacedName{Name: d.DSNFrom.Name}, &secret); err != nil {
		return "", err
	}

	data, ok := secret.Data[d.DSNFrom.Key]
	if !ok {
		return "", errors.New("key not found in secret")
	}

	return string(data), nil
}

// CoreSpec defines the desired state of Core.
type CoreSpec struct {
	// Important: Run "make" to regenerate code after modifying this file

	Database Database `json:"database,omitempty"`
}

// MigrationStatus maps the name of an applied migration script to the
// Unix millisecond timestamp at which it was recorded.
type MigrationStatus map[string]int64

func (s MigrationStatus) IsApplied(name string) bool {
	_, ok := s[name]
	return ok
}

func (s MigrationStatus) Record(name string) {
	s[name] = time.Now().UTC().UnixMilli()
}

// CoreStatus defines the observed state of Core.
type CoreStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	AppliedMigrations MigrationStatus `json:"appliedMigrations,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Core is the Schema for the cores API.
type Core struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   CoreSpec   `json:"spec,omitempty"`
	Status CoreStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// CoreList contains a list of Core.
type CoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Core `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Core{}, &CoreList{})
}

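The `Database` and `MigrationStatus` types above are the public surface of this commit; the sketch below shows how a caller might wire them together. It is illustrative only and not part of the diff: the `example`/`supabase-db` names are hypothetical, and persisting the updated status is assumed to be the caller's responsibility.

```go
package example // illustrative sketch, not part of this commit

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)

// newExampleCore builds a Core object whose DSN is read from the "dsn" key of a Secret.
func newExampleCore() *supabasev1alpha1.Core {
	return &supabasev1alpha1.Core{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "supabase"},
		Spec: supabasev1alpha1.CoreSpec{
			Database: supabasev1alpha1.Database{
				DSNFrom: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{Name: "supabase-db"},
					Key:                  "dsn",
				},
			},
		},
	}
}

// resolveAndRecord resolves the DSN (inline or from the referenced Secret) and
// marks one of the embedded init scripts as applied in the status subresource.
func resolveAndRecord(ctx context.Context, c client.Client, core *supabasev1alpha1.Core) (string, error) {
	dsn, err := core.Spec.Database.GetDSN(ctx, c)
	if err != nil {
		return "", err
	}

	if core.Status.AppliedMigrations == nil {
		core.Status.AppliedMigrations = make(supabasev1alpha1.MigrationStatus)
	}
	core.Status.AppliedMigrations.Record("00000000000001-auth-schema.sql")

	// Persisting the change is left to the caller, e.g. c.Status().Update(ctx, core).
	return dsn, nil
}
```
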
36  api/v1alpha1/groupversion_info.go  Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the supabase v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=supabase.k8s.icb4dc0.de
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "supabase.k8s.icb4dc0.de", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

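For context, this group version is typically registered in the manager's scheme in `cmd/main.go`; that file is referenced by the Dockerfile and Makefile but is not part of this excerpt. A minimal sketch of the registration, assuming the standard controller-runtime setup:

```go
package example // illustrative sketch, not part of this commit

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)

// newScheme registers the built-in Kubernetes types and the
// supabase.k8s.icb4dc0.de/v1alpha1 types in a single runtime.Scheme.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := supabasev1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```
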
137  api/v1alpha1/zz_generated.deepcopy.go  Normal file
@@ -0,0 +1,137 @@
//go:build !ignore_autogenerated

/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Core) DeepCopyInto(out *Core) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Core.
func (in *Core) DeepCopy() *Core {
	if in == nil {
		return nil
	}
	out := new(Core)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Core) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreList) DeepCopyInto(out *CoreList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Core, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreList.
func (in *CoreList) DeepCopy() *CoreList {
	if in == nil {
		return nil
	}
	out := new(CoreList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CoreList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreSpec) DeepCopyInto(out *CoreSpec) {
	*out = *in
	out.Database = in.Database
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreSpec.
func (in *CoreSpec) DeepCopy() *CoreSpec {
	if in == nil {
		return nil
	}
	out := new(CoreSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreStatus) DeepCopyInto(out *CoreStatus) {
	*out = *in
	if in.AppliedMigrations != nil {
		in, out := &in.AppliedMigrations, &out.AppliedMigrations
		*out = make(MigrationStatus, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreStatus.
func (in *CoreStatus) DeepCopy() *CoreStatus {
	if in == nil {
		return nil
	}
	out := new(CoreStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
	if in == nil {
		return nil
	}
	out := new(Database)
	in.DeepCopyInto(out)
	return out
}

@@ -0,0 +1,57 @@
-- migrate:up

-- Set up realtime
-- defaults to empty publication
create publication supabase_realtime;

-- Supabase super admin
alter user supabase_admin with superuser createdb createrole replication bypassrls;

-- Supabase replication user
create user supabase_replication_admin with login replication;

-- Supabase read-only user
create role supabase_read_only_user with login bypassrls;
grant pg_read_all_data to supabase_read_only_user;

-- Extension namespacing
create schema if not exists extensions;
create extension if not exists "uuid-ossp" with schema extensions;
create extension if not exists pgcrypto with schema extensions;
create extension if not exists pgjwt with schema extensions;

-- Set up auth roles for the developer
create role anon nologin noinherit;
create role authenticated nologin noinherit; -- "logged in" user: web_user, app_user, etc
create role service_role nologin noinherit bypassrls; -- allow developers to create JWT's that bypass their policies

create user authenticator noinherit;
grant anon to authenticator;
grant authenticated to authenticator;
grant service_role to authenticator;
grant supabase_admin to authenticator;

grant usage on schema public to postgres, anon, authenticated, service_role;
alter default privileges in schema public grant all on tables to postgres, anon, authenticated, service_role;
alter default privileges in schema public grant all on functions to postgres, anon, authenticated, service_role;
alter default privileges in schema public grant all on sequences to postgres, anon, authenticated, service_role;

-- Allow Extensions to be used in the API
grant usage on schema extensions to postgres, anon, authenticated, service_role;

-- Set up namespacing
alter user supabase_admin SET search_path TO public, extensions; -- don't include the "auth" schema

-- These are required so that the users receive grants whenever "supabase_admin" creates tables/function
alter default privileges for user supabase_admin in schema public grant all
    on sequences to postgres, anon, authenticated, service_role;
alter default privileges for user supabase_admin in schema public grant all
    on tables to postgres, anon, authenticated, service_role;
alter default privileges for user supabase_admin in schema public grant all
    on functions to postgres, anon, authenticated, service_role;

-- Set short statement/query timeouts for API roles
alter role anon set statement_timeout = '3s';
alter role authenticated set statement_timeout = '8s';

-- migrate:down

123  assets/migrations/init-scripts/00000000000001-auth-schema.sql  Normal file
@@ -0,0 +1,123 @@
-- migrate:up

CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_admin;

-- auth.users definition

CREATE TABLE auth.users (
    instance_id uuid NULL,
    id uuid NOT NULL UNIQUE,
    aud varchar(255) NULL,
    "role" varchar(255) NULL,
    email varchar(255) NULL UNIQUE,
    encrypted_password varchar(255) NULL,
    confirmed_at timestamptz NULL,
    invited_at timestamptz NULL,
    confirmation_token varchar(255) NULL,
    confirmation_sent_at timestamptz NULL,
    recovery_token varchar(255) NULL,
    recovery_sent_at timestamptz NULL,
    email_change_token varchar(255) NULL,
    email_change varchar(255) NULL,
    email_change_sent_at timestamptz NULL,
    last_sign_in_at timestamptz NULL,
    raw_app_meta_data jsonb NULL,
    raw_user_meta_data jsonb NULL,
    is_super_admin bool NULL,
    created_at timestamptz NULL,
    updated_at timestamptz NULL,
    CONSTRAINT users_pkey PRIMARY KEY (id)
);
CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email);
CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id);
comment on table auth.users is 'Auth: Stores user login data within a secure schema.';

-- auth.refresh_tokens definition

CREATE TABLE auth.refresh_tokens (
    instance_id uuid NULL,
    id bigserial NOT NULL,
    "token" varchar(255) NULL,
    user_id varchar(255) NULL,
    revoked bool NULL,
    created_at timestamptz NULL,
    updated_at timestamptz NULL,
    CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id)
);
CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id);
CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id);
CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token);
comment on table auth.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.';

-- auth.instances definition

CREATE TABLE auth.instances (
    id uuid NOT NULL,
    uuid uuid NULL,
    raw_base_config text NULL,
    created_at timestamptz NULL,
    updated_at timestamptz NULL,
    CONSTRAINT instances_pkey PRIMARY KEY (id)
);
comment on table auth.instances is 'Auth: Manages users across multiple sites.';

-- auth.audit_log_entries definition

CREATE TABLE auth.audit_log_entries (
    instance_id uuid NULL,
    id uuid NOT NULL,
    payload json NULL,
    created_at timestamptz NULL,
    CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id)
);
CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id);
comment on table auth.audit_log_entries is 'Auth: Audit trail for user actions.';

-- auth.schema_migrations definition

CREATE TABLE auth.schema_migrations (
    "version" varchar(255) NOT NULL,
    CONSTRAINT schema_migrations_pkey PRIMARY KEY ("version")
);
comment on table auth.schema_migrations is 'Auth: Manages updates to the auth system.';

INSERT INTO auth.schema_migrations (version)
VALUES ('20171026211738'),
    ('20171026211808'),
    ('20171026211834'),
    ('20180103212743'),
    ('20180108183307'),
    ('20180119214651'),
    ('20180125194653');

-- Gets the User ID from the request cookie
create or replace function auth.uid() returns uuid as $$
    select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid;
$$ language sql stable;

-- Gets the User role from the request cookie
create or replace function auth.role() returns text as $$
    select nullif(current_setting('request.jwt.claim.role', true), '')::text;
$$ language sql stable;

-- Gets the User email
create or replace function auth.email() returns text as $$
    select nullif(current_setting('request.jwt.claim.email', true), '')::text;
$$ language sql stable;

-- usage on auth functions to API roles
GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role;

-- Supabase super admin
CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
GRANT ALL PRIVILEGES ON SCHEMA auth TO supabase_auth_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO supabase_auth_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO supabase_auth_admin;
ALTER USER supabase_auth_admin SET search_path = "auth";
ALTER table "auth".users OWNER TO supabase_auth_admin;
ALTER table "auth".refresh_tokens OWNER TO supabase_auth_admin;
ALTER table "auth".audit_log_entries OWNER TO supabase_auth_admin;
ALTER table "auth".instances OWNER TO supabase_auth_admin;
ALTER table "auth".schema_migrations OWNER TO supabase_auth_admin;

-- migrate:down

120  assets/migrations/init-scripts/00000000000002-storage-schema.sql  Normal file
@@ -0,0 +1,120 @@
-- migrate:up

CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION supabase_admin;

grant usage on schema storage to postgres, anon, authenticated, service_role;
alter default privileges in schema storage grant all on tables to postgres, anon, authenticated, service_role;
alter default privileges in schema storage grant all on functions to postgres, anon, authenticated, service_role;
alter default privileges in schema storage grant all on sequences to postgres, anon, authenticated, service_role;

CREATE TABLE "storage"."buckets" (
    "id" text not NULL,
    "name" text NOT NULL,
    "owner" uuid,
    "created_at" timestamptz DEFAULT now(),
    "updated_at" timestamptz DEFAULT now(),
    CONSTRAINT "buckets_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"),
    PRIMARY KEY ("id")
);
CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING BTREE ("name");

CREATE TABLE "storage"."objects" (
    "id" uuid NOT NULL DEFAULT extensions.uuid_generate_v4(),
    "bucket_id" text,
    "name" text,
    "owner" uuid,
    "created_at" timestamptz DEFAULT now(),
    "updated_at" timestamptz DEFAULT now(),
    "last_accessed_at" timestamptz DEFAULT now(),
    "metadata" jsonb,
    CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"),
    CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"),
    PRIMARY KEY ("id")
);
CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name");
CREATE INDEX name_prefix_search ON storage.objects(name text_pattern_ops);

ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY;

CREATE FUNCTION storage.foldername(name text)
RETURNS text[]
LANGUAGE plpgsql
AS $function$
DECLARE
    _parts text[];
BEGIN
    select string_to_array(name, '/') into _parts;
    return _parts[1:array_length(_parts,1)-1];
END
$function$;

CREATE FUNCTION storage.filename(name text)
RETURNS text
LANGUAGE plpgsql
AS $function$
DECLARE
    _parts text[];
BEGIN
    select string_to_array(name, '/') into _parts;
    return _parts[array_length(_parts,1)];
END
$function$;

CREATE FUNCTION storage.extension(name text)
RETURNS text
LANGUAGE plpgsql
AS $function$
DECLARE
    _parts text[];
    _filename text;
BEGIN
    select string_to_array(name, '/') into _parts;
    select _parts[array_length(_parts,1)] into _filename;
    -- @todo return the last part instead of 2
    return split_part(_filename, '.', 2);
END
$function$;

CREATE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0)
RETURNS TABLE (
    name text,
    id uuid,
    updated_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ,
    last_accessed_at TIMESTAMPTZ,
    metadata jsonb
)
LANGUAGE plpgsql
AS $function$
DECLARE
    _bucketId text;
BEGIN
    -- will be replaced by migrations when server starts
    -- saving space for cloud-init
END
$function$;

-- create migrations table
-- https://github.com/ThomWright/postgres-migrations/blob/master/src/migrations/0_create-migrations-table.sql
-- we add this table here and not let it be auto-created so that the permissions are properly applied to it
CREATE TABLE IF NOT EXISTS storage.migrations (
    id integer PRIMARY KEY,
    name varchar(100) UNIQUE NOT NULL,
    hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration
    executed_at timestamp DEFAULT current_timestamp
);

CREATE USER supabase_storage_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
GRANT ALL PRIVILEGES ON SCHEMA storage TO supabase_storage_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO supabase_storage_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO supabase_storage_admin;
ALTER USER supabase_storage_admin SET search_path = "storage";
ALTER table "storage".objects owner to supabase_storage_admin;
ALTER table "storage".buckets owner to supabase_storage_admin;
ALTER table "storage".migrations OWNER TO supabase_storage_admin;
ALTER function "storage".foldername(text) owner to supabase_storage_admin;
ALTER function "storage".filename(text) owner to supabase_storage_admin;
ALTER function "storage".extension(text) owner to supabase_storage_admin;
ALTER function "storage".search(text,text,int,int,int) owner to supabase_storage_admin;

-- migrate:down

119  assets/migrations/init-scripts/00000000000003-post-setup.sql  Normal file
@@ -0,0 +1,119 @@
-- migrate:up

ALTER ROLE supabase_admin SET search_path TO "\$user",public,auth,extensions;
ALTER ROLE postgres SET search_path TO "\$user",public,extensions;

-- Trigger for pg_cron
CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
DECLARE
  schema_is_cron bool;
BEGIN
  schema_is_cron = (
    SELECT n.nspname = 'cron'
    FROM pg_event_trigger_ddl_commands() AS ev
    LEFT JOIN pg_catalog.pg_namespace AS n
      ON ev.objid = n.oid
  );

  IF schema_is_cron
  THEN
    grant usage on schema cron to postgres with grant option;

    alter default privileges in schema cron grant all on tables to postgres with grant option;
    alter default privileges in schema cron grant all on functions to postgres with grant option;
    alter default privileges in schema cron grant all on sequences to postgres with grant option;

    alter default privileges for user supabase_admin in schema cron grant all
        on sequences to postgres with grant option;
    alter default privileges for user supabase_admin in schema cron grant all
        on tables to postgres with grant option;
    alter default privileges for user supabase_admin in schema cron grant all
        on functions to postgres with grant option;

    grant all privileges on all tables in schema cron to postgres with grant option;

  END IF;

END;
$$;
CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG in ('CREATE SCHEMA')
EXECUTE PROCEDURE extensions.grant_pg_cron_access();
COMMENT ON FUNCTION extensions.grant_pg_cron_access IS 'Grants access to pg_cron';

-- Event trigger for pg_net
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM pg_event_trigger_ddl_commands() AS ev
    JOIN pg_extension AS ext
      ON ev.objid = ext.oid
    WHERE ext.extname = 'pg_net'
  )
  THEN
    IF NOT EXISTS (
      SELECT 1
      FROM pg_roles
      WHERE rolname = 'supabase_functions_admin'
    )
    THEN
      CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
    END IF;

    GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;

    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;

    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;

    REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;

    GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
  END IF;
END;
$$;
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';

DO
$$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM pg_event_trigger
    WHERE evtname = 'issue_pg_net_access'
  ) THEN
    CREATE EVENT TRIGGER issue_pg_net_access
      ON ddl_command_end
      WHEN TAG IN ('CREATE EXTENSION')
      EXECUTE PROCEDURE extensions.grant_pg_net_access();
  END IF;
END
$$;

-- Supabase dashboard user
CREATE ROLE dashboard_user NOSUPERUSER CREATEDB CREATEROLE REPLICATION;
GRANT ALL ON DATABASE postgres TO dashboard_user;
GRANT ALL ON SCHEMA auth TO dashboard_user;
GRANT ALL ON SCHEMA extensions TO dashboard_user;
GRANT ALL ON SCHEMA storage TO dashboard_user;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO dashboard_user;
GRANT ALL ON ALL TABLES IN SCHEMA extensions TO dashboard_user;
-- GRANT ALL ON ALL TABLES IN SCHEMA storage TO dashboard_user;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO dashboard_user;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO dashboard_user;
GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO dashboard_user;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO dashboard_user;
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO dashboard_user;
GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO dashboard_user;

-- migrate:down

62  assets/migrations/migrations.go  Normal file
@@ -0,0 +1,62 @@
package migrations

import (
	"embed"
	"io/fs"
	"iter"
	"path"
	"slices"
	"strings"
)

//go:embed */*.sql
var migrationsFS embed.FS

// Script is a single embedded SQL file, identified by its file name.
type Script struct {
	FileName string
	Content  string
}

// InitScripts yields the embedded init scripts in lexical (file name) order.
func InitScripts() iter.Seq2[Script, error] {
	return readScripts(path.Join(".", "init-scripts"))
}

// MigrationScripts yields the embedded migration scripts in lexical (file name) order.
func MigrationScripts() iter.Seq2[Script, error] {
	return readScripts(path.Join(".", "migrations"))
}

func readScripts(dir string) iter.Seq2[Script, error] {
	return iter.Seq2[Script, error](func(yield func(Script, error) bool) {
		files, err := migrationsFS.ReadDir(dir)
		if err != nil {
			yield(Script{}, err)
			return
		}

		slices.SortFunc(files, func(a, b fs.DirEntry) int {
			return strings.Compare(a.Name(), b.Name())
		})

		for _, file := range files {
			if file.IsDir() {
				continue
			}

			content, err := migrationsFS.ReadFile(path.Join(dir, file.Name()))
			if err != nil {
				if !yield(Script{}, err) {
					return
				}
				// skip the unreadable file instead of yielding an empty script
				continue
			}

			s := Script{
				FileName: file.Name(),
				Content:  string(content),
			}

			if !yield(s, nil) {
				return
			}
		}
	})
}

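A sketch of how these iterators might be consumed; it is not part of the commit. The import path is derived from the module path recorded in `PROJECT`, and the `*sql.DB` handle stands in for whatever database client the operator ends up using. Worth noting: the `iter` package requires Go 1.23, while the Dockerfile and devcontainer above pin `golang:1.22`.

```go
package example // illustrative sketch, not part of this commit

import (
	"context"
	"database/sql"
	"fmt"

	"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
)

// applyInitScripts walks the embedded init scripts in lexical order and executes
// each one against the target database (assumed to be reachable via db).
func applyInitScripts(ctx context.Context, db *sql.DB) error {
	for script, err := range migrations.InitScripts() {
		if err != nil {
			return fmt.Errorf("reading init script: %w", err)
		}
		if _, err := db.ExecContext(ctx, script.Content); err != nil {
			return fmt.Errorf("applying %s: %w", script.FileName, err)
		}
	}
	return nil
}
```
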
@@ -0,0 +1,19 @@
-- migrate:up

-- demote postgres user
GRANT ALL ON DATABASE postgres TO postgres;
GRANT ALL ON SCHEMA auth TO postgres;
GRANT ALL ON SCHEMA extensions TO postgres;
GRANT ALL ON SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres;
GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres;
ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS;

-- migrate:down

@@ -0,0 +1,22 @@
-- migrate:up

-- update auth schema permissions
GRANT ALL PRIVILEGES ON SCHEMA auth TO supabase_auth_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO supabase_auth_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO supabase_auth_admin;

ALTER table IF EXISTS "auth".users OWNER TO supabase_auth_admin;
ALTER table IF EXISTS "auth".refresh_tokens OWNER TO supabase_auth_admin;
ALTER table IF EXISTS "auth".audit_log_entries OWNER TO supabase_auth_admin;
ALTER table IF EXISTS "auth".instances OWNER TO supabase_auth_admin;
ALTER table IF EXISTS "auth".schema_migrations OWNER TO supabase_auth_admin;

GRANT USAGE ON SCHEMA auth TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres, dashboard_user;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres, dashboard_user;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres, dashboard_user;
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_auth_admin IN SCHEMA auth GRANT ALL ON TABLES TO postgres, dashboard_user;
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_auth_admin IN SCHEMA auth GRANT ALL ON SEQUENCES TO postgres, dashboard_user;
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_auth_admin IN SCHEMA auth GRANT ALL ON ROUTINES TO postgres, dashboard_user;

-- migrate:down

@@ -0,0 +1,6 @@
-- migrate:up

-- create realtime schema for Realtime RLS (WALRUS)
CREATE SCHEMA IF NOT EXISTS realtime;

-- migrate:down

@@ -0,0 +1,9 @@
-- migrate:up

-- update realtime schema permissions
GRANT USAGE ON SCHEMA realtime TO postgres;
GRANT ALL ON ALL TABLES IN SCHEMA realtime TO postgres, dashboard_user;
GRANT ALL ON ALL SEQUENCES IN SCHEMA realtime TO postgres, dashboard_user;
GRANT ALL ON ALL ROUTINES IN SCHEMA realtime TO postgres, dashboard_user;

-- migrate:down

|
@ -0,0 +1,24 @@
|
|||
-- migrate:up
|
||||
|
||||
-- update owner for auth.uid, auth.role and auth.email functions
|
||||
DO $$
|
||||
BEGIN
|
||||
ALTER FUNCTION auth.uid owner to supabase_auth_admin;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RAISE WARNING 'Error encountered when changing owner of auth.uid to supabase_auth_admin';
|
||||
END $$;
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
ALTER FUNCTION auth.role owner to supabase_auth_admin;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RAISE WARNING 'Error encountered when changing owner of auth.role to supabase_auth_admin';
|
||||
END $$;
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
ALTER FUNCTION auth.email owner to supabase_auth_admin;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RAISE WARNING 'Error encountered when changing owner of auth.email to supabase_auth_admin';
|
||||
END $$;
|
||||
-- migrate:down
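-- Example (editor's sketch, not part of this migration): the effect of the owner
-- changes above can be verified from the catalog; each row is expected to list
-- supabase_auth_admin unless the corresponding ALTER was skipped with a warning.
SELECT p.proname, pg_get_userbyid(p.proowner) AS owner
FROM pg_proc p
WHERE p.pronamespace = 'auth'::regnamespace
  AND p.proname IN ('uid', 'role', 'email');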
|
|
@ -0,0 +1,8 @@
|
|||
-- migrate:up
|
||||
|
||||
-- Update future objects' permissions
|
||||
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_admin IN SCHEMA realtime GRANT ALL ON TABLES TO postgres, dashboard_user;
|
||||
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_admin IN SCHEMA realtime GRANT ALL ON SEQUENCES TO postgres, dashboard_user;
|
||||
ALTER DEFAULT PRIVILEGES FOR ROLE supabase_admin IN SCHEMA realtime GRANT ALL ON ROUTINES TO postgres, dashboard_user;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,4 @@
|
|||
-- migrate:up
|
||||
ALTER ROLE authenticator SET session_preload_libraries = 'safeupdate';
|
||||
|
||||
-- migrate:down
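-- Example (editor's sketch, not part of this migration): with safeupdate preloaded,
-- sessions running as authenticator reject UPDATE/DELETE statements that lack a
-- WHERE clause. "some_table" is a hypothetical table used purely for illustration.
-- SET ROLE authenticator;
-- DELETE FROM some_table;               -- rejected by safeupdate (no WHERE clause)
-- DELETE FROM some_table WHERE id = 1;  -- allowed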
|
|
@ -0,0 +1,70 @@
|
|||
-- migrate:up
|
||||
|
||||
drop event trigger if exists api_restart;
|
||||
drop function if exists extensions.notify_api_restart();
|
||||
|
||||
-- https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger
|
||||
-- watch create and alter
|
||||
CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$
|
||||
DECLARE
|
||||
cmd record;
|
||||
BEGIN
|
||||
FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands()
|
||||
LOOP
|
||||
IF cmd.command_tag IN (
|
||||
'CREATE SCHEMA', 'ALTER SCHEMA'
|
||||
, 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE'
|
||||
, 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE'
|
||||
, 'CREATE VIEW', 'ALTER VIEW'
|
||||
, 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW'
|
||||
, 'CREATE FUNCTION', 'ALTER FUNCTION'
|
||||
, 'CREATE TRIGGER'
|
||||
, 'CREATE TYPE'
|
||||
, 'CREATE RULE'
|
||||
, 'COMMENT'
|
||||
)
|
||||
-- don't notify in case of CREATE TEMP table or other objects created on pg_temp
|
||||
AND cmd.schema_name is distinct from 'pg_temp'
|
||||
THEN
|
||||
NOTIFY pgrst, 'reload schema';
|
||||
END IF;
|
||||
END LOOP;
|
||||
END; $$ LANGUAGE plpgsql;
|
||||
|
||||
-- watch drop
|
||||
CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$
|
||||
DECLARE
|
||||
obj record;
|
||||
BEGIN
|
||||
FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
|
||||
LOOP
|
||||
IF obj.object_type IN (
|
||||
'schema'
|
||||
, 'table'
|
||||
, 'foreign table'
|
||||
, 'view'
|
||||
, 'materialized view'
|
||||
, 'function'
|
||||
, 'trigger'
|
||||
, 'type'
|
||||
, 'rule'
|
||||
)
|
||||
AND obj.is_temporary IS false -- no pg_temp objects
|
||||
THEN
|
||||
NOTIFY pgrst, 'reload schema';
|
||||
END IF;
|
||||
END LOOP;
|
||||
END; $$ LANGUAGE plpgsql;
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch;
|
||||
CREATE EVENT TRIGGER pgrst_ddl_watch
|
||||
ON ddl_command_end
|
||||
EXECUTE PROCEDURE extensions.pgrst_ddl_watch();
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch;
|
||||
CREATE EVENT TRIGGER pgrst_drop_watch
|
||||
ON sql_drop
|
||||
EXECUTE PROCEDURE extensions.pgrst_drop_watch();
|
||||
|
||||
|
||||
-- migrate:down
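-- Example (editor's sketch, not part of this migration): the event triggers above
-- rely on PostgREST listening on the "pgrst" channel; the same schema-cache reload
-- can be requested manually from any SQL session, e.g. when debugging a stale cache.
NOTIFY pgrst, 'reload schema';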
|
|
@ -0,0 +1,21 @@
|
|||
-- migrate:up
|
||||
|
||||
-- Note: the supautils extension is not installed in the docker image.
|
||||
|
||||
DO $$
|
||||
DECLARE
|
||||
supautils_exists boolean;
|
||||
BEGIN
|
||||
supautils_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'supautils'
|
||||
);
|
||||
|
||||
IF supautils_exists
|
||||
THEN
|
||||
ALTER ROLE authenticator SET session_preload_libraries = supautils, safeupdate;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
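-- Example (editor's sketch, not part of this migration): whether the branch above
-- fires depends on what the server ships with, which can be inspected directly:
SELECT name, default_version, installed_version
FROM pg_available_extensions
WHERE name = 'supautils';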
|
148
assets/migrations/migrations/20220317095840_pg_graphql.sql
Normal file
|
@ -0,0 +1,148 @@
|
|||
-- migrate:up
|
||||
create schema if not exists graphql_public;
|
||||
|
||||
-- obsolete signature: https://github.com/supabase/infrastructure/pull/5524/files
|
||||
drop function if exists graphql_public.graphql(text, text, jsonb);
|
||||
-- GraphQL Placeholder Entrypoint
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language plpgsql
|
||||
as $$
|
||||
DECLARE
|
||||
server_version float;
|
||||
BEGIN
|
||||
server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float);
|
||||
|
||||
IF server_version >= 14 THEN
|
||||
RETURN jsonb_build_object(
|
||||
'data', null::jsonb,
|
||||
'errors', array['pg_graphql extension is not enabled.']
|
||||
);
|
||||
ELSE
|
||||
RETURN jsonb_build_object(
|
||||
'data', null::jsonb,
|
||||
'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.']
|
||||
);
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
grant usage on schema graphql_public to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql_public grant all on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql_public grant all on functions to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql_public grant all on sequences to postgres, anon, authenticated, service_role;
|
||||
|
||||
alter default privileges for user supabase_admin in schema graphql_public grant all
|
||||
on sequences to postgres, anon, authenticated, service_role;
|
||||
alter default privileges for user supabase_admin in schema graphql_public grant all
|
||||
on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges for user supabase_admin in schema graphql_public grant all
|
||||
on functions to postgres, anon, authenticated, service_role;
|
||||
|
||||
-- Trigger upon enabling pg_graphql
|
||||
CREATE OR REPLACE FUNCTION extensions.grant_pg_graphql_access()
|
||||
RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
func_is_graphql_resolve bool;
|
||||
BEGIN
|
||||
func_is_graphql_resolve = (
|
||||
SELECT n.proname = 'resolve'
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
LEFT JOIN pg_catalog.pg_proc AS n
|
||||
ON ev.objid = n.oid
|
||||
);
|
||||
|
||||
IF func_is_graphql_resolve
|
||||
THEN
|
||||
grant usage on schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant all on function graphql.resolve to postgres, anon, authenticated, service_role;
|
||||
|
||||
alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role;
|
||||
|
||||
DROP FUNCTION IF EXISTS graphql_public.graphql;
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language sql
|
||||
as $$
|
||||
SELECT graphql.resolve(query, coalesce(variables, '{}'));
|
||||
$$;
|
||||
|
||||
grant execute on function graphql.resolve to postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS issue_pg_graphql_access;
|
||||
CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end WHEN TAG in ('CREATE FUNCTION')
|
||||
EXECUTE PROCEDURE extensions.grant_pg_graphql_access();
|
||||
COMMENT ON FUNCTION extensions.grant_pg_graphql_access IS 'Grants access to pg_graphql';
|
||||
|
||||
-- Trigger upon dropping the pg_graphql extension
|
||||
CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder()
|
||||
RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
graphql_is_dropped bool;
|
||||
BEGIN
|
||||
graphql_is_dropped = (
|
||||
SELECT ev.schema_name = 'graphql_public'
|
||||
FROM pg_event_trigger_dropped_objects() AS ev
|
||||
WHERE ev.schema_name = 'graphql_public'
|
||||
);
|
||||
|
||||
IF graphql_is_dropped
|
||||
THEN
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language plpgsql
|
||||
as $$
|
||||
DECLARE
|
||||
server_version float;
|
||||
BEGIN
|
||||
server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float);
|
||||
|
||||
IF server_version >= 14 THEN
|
||||
RETURN jsonb_build_object(
|
||||
'data', null::jsonb,
|
||||
'errors', array['pg_graphql extension is not enabled.']
|
||||
);
|
||||
ELSE
|
||||
RETURN jsonb_build_object(
|
||||
'data', null::jsonb,
|
||||
'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.']
|
||||
);
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS issue_graphql_placeholder;
|
||||
CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop WHEN TAG in ('DROP EXTENSION')
|
||||
EXECUTE PROCEDURE extensions.set_graphql_placeholder();
|
||||
COMMENT ON FUNCTION extensions.set_graphql_placeholder IS 'Reintroduces placeholder function for graphql_public.graphql';
|
||||
|
||||
-- migrate:down
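-- Example (editor's sketch, not part of this migration): the placeholder (and, once
-- pg_graphql is enabled, the real wrapper) is invoked the same way; named arguments
-- work because every parameter has a default.
SELECT graphql_public.graphql(query := '{ __typename }');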
|
|
@ -0,0 +1,70 @@
|
|||
-- migrate:up
|
||||
|
||||
drop event trigger if exists api_restart;
|
||||
drop function if exists extensions.notify_api_restart();
|
||||
|
||||
-- https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger
|
||||
-- watch create and alter
|
||||
CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$
|
||||
DECLARE
|
||||
cmd record;
|
||||
BEGIN
|
||||
FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands()
|
||||
LOOP
|
||||
IF cmd.command_tag IN (
|
||||
'CREATE SCHEMA', 'ALTER SCHEMA'
|
||||
, 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE'
|
||||
, 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE'
|
||||
, 'CREATE VIEW', 'ALTER VIEW'
|
||||
, 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW'
|
||||
, 'CREATE FUNCTION', 'ALTER FUNCTION'
|
||||
, 'CREATE TRIGGER'
|
||||
, 'CREATE TYPE', 'ALTER TYPE'
|
||||
, 'CREATE RULE'
|
||||
, 'COMMENT'
|
||||
)
|
||||
-- don't notify in case of CREATE TEMP table or other objects created on pg_temp
|
||||
AND cmd.schema_name is distinct from 'pg_temp'
|
||||
THEN
|
||||
NOTIFY pgrst, 'reload schema';
|
||||
END IF;
|
||||
END LOOP;
|
||||
END; $$ LANGUAGE plpgsql;
|
||||
|
||||
-- watch drop
|
||||
CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$
|
||||
DECLARE
|
||||
obj record;
|
||||
BEGIN
|
||||
FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
|
||||
LOOP
|
||||
IF obj.object_type IN (
|
||||
'schema'
|
||||
, 'table'
|
||||
, 'foreign table'
|
||||
, 'view'
|
||||
, 'materialized view'
|
||||
, 'function'
|
||||
, 'trigger'
|
||||
, 'type'
|
||||
, 'rule'
|
||||
)
|
||||
AND obj.is_temporary IS false -- no pg_temp objects
|
||||
THEN
|
||||
NOTIFY pgrst, 'reload schema';
|
||||
END IF;
|
||||
END LOOP;
|
||||
END; $$ LANGUAGE plpgsql;
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch;
|
||||
CREATE EVENT TRIGGER pgrst_ddl_watch
|
||||
ON ddl_command_end
|
||||
EXECUTE PROCEDURE extensions.pgrst_ddl_watch();
|
||||
|
||||
DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch;
|
||||
CREATE EVENT TRIGGER pgrst_drop_watch
|
||||
ON sql_drop
|
||||
EXECUTE PROCEDURE extensions.pgrst_drop_watch();
|
||||
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,4 @@
|
|||
-- migrate:up
|
||||
ALTER ROLE supabase_auth_admin SET idle_in_transaction_session_timeout TO 60000;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,161 @@
|
|||
-- migrate:up
|
||||
|
||||
-- Update Trigger upon enabling pg_graphql
|
||||
create or replace function extensions.grant_pg_graphql_access()
|
||||
returns event_trigger
|
||||
language plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
func_is_graphql_resolve bool;
|
||||
BEGIN
|
||||
func_is_graphql_resolve = (
|
||||
SELECT n.proname = 'resolve'
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
LEFT JOIN pg_catalog.pg_proc AS n
|
||||
ON ev.objid = n.oid
|
||||
);
|
||||
|
||||
IF func_is_graphql_resolve
|
||||
THEN
|
||||
grant usage on schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant all on function graphql.resolve to postgres, anon, authenticated, service_role;
|
||||
|
||||
alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role;
|
||||
|
||||
-- Update public wrapper to pass all arguments through to the pg_graphql resolve func
|
||||
DROP FUNCTION IF EXISTS graphql_public.graphql;
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language sql
|
||||
as $$
|
||||
-- Changed from the previous wrapper: all arguments ("operationName", variables, extensions) are forwarded, not just query and variables
|
||||
select graphql.resolve(
|
||||
query := query,
|
||||
variables := coalesce(variables, '{}'),
|
||||
"operationName" := "operationName",
|
||||
extensions := extensions
|
||||
);
|
||||
$$;
|
||||
|
||||
grant execute on function graphql.resolve to postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder()
|
||||
RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
graphql_is_dropped bool;
|
||||
BEGIN
|
||||
graphql_is_dropped = (
|
||||
SELECT ev.schema_name = 'graphql_public'
|
||||
FROM pg_event_trigger_dropped_objects() AS ev
|
||||
WHERE ev.schema_name = 'graphql_public'
|
||||
);
|
||||
|
||||
IF graphql_is_dropped
|
||||
THEN
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language plpgsql
|
||||
as $$
|
||||
DECLARE
|
||||
server_version float;
|
||||
BEGIN
|
||||
server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float);
|
||||
|
||||
IF server_version >= 14 THEN
|
||||
RETURN jsonb_build_object(
|
||||
'errors', jsonb_build_array(
|
||||
jsonb_build_object(
|
||||
'message', 'pg_graphql extension is not enabled.'
|
||||
)
|
||||
)
|
||||
);
|
||||
ELSE
|
||||
RETURN jsonb_build_object(
|
||||
'errors', jsonb_build_array(
|
||||
jsonb_build_object(
|
||||
'message', 'pg_graphql is only available on projects running Postgres 14 onwards.'
|
||||
)
|
||||
)
|
||||
);
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
-- GraphQL Placeholder Entrypoint
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language plpgsql
|
||||
as $$
|
||||
DECLARE
|
||||
server_version float;
|
||||
BEGIN
|
||||
server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float);
|
||||
|
||||
IF server_version >= 14 THEN
|
||||
RETURN jsonb_build_object(
|
||||
'errors', jsonb_build_array(
|
||||
jsonb_build_object(
|
||||
'message', 'pg_graphql extension is not enabled.'
|
||||
)
|
||||
)
|
||||
);
|
||||
ELSE
|
||||
RETURN jsonb_build_object(
|
||||
'errors', jsonb_build_array(
|
||||
jsonb_build_object(
|
||||
'message', 'pg_graphql is only available on projects running Postgres 14 onwards.'
|
||||
)
|
||||
)
|
||||
);
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
|
||||
drop extension if exists pg_graphql;
|
||||
-- Avoids limitation of only being able to load the extension via dashboard
|
||||
-- Only install it here as well if the extension is actually available on this server
|
||||
DO $$
|
||||
DECLARE
|
||||
graphql_exists boolean;
|
||||
BEGIN
|
||||
graphql_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pg_graphql'
|
||||
);
|
||||
|
||||
IF graphql_exists
|
||||
THEN
|
||||
create extension if not exists pg_graphql;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,10 @@
|
|||
-- migrate:up
|
||||
|
||||
-- This is done so that the `postgres` role can manage auth tables triggers,
|
||||
-- storage tables policies, etc. which unblocks the revocation of superuser
|
||||
-- access.
|
||||
--
|
||||
-- More context: https://www.notion.so/supabase/RFC-Postgres-Permissions-I-40cb4f61bd4145fd9e75ce657c0e31dd#bf5d853436384e6e8e339d0a2e684cbb
|
||||
grant supabase_auth_admin, supabase_storage_admin to postgres;
|
||||
|
||||
-- migrate:down
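-- Example (editor's sketch, not part of this migration): the grant can be confirmed
-- via role-membership checks.
SELECT pg_has_role('postgres', 'supabase_auth_admin', 'member')    AS member_of_auth_admin,
       pg_has_role('postgres', 'supabase_storage_admin', 'member') AS member_of_storage_admin;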
|
|
@ -0,0 +1,74 @@
|
|||
-- migrate:up
|
||||
|
||||
create or replace function extensions.grant_pg_graphql_access()
|
||||
returns event_trigger
|
||||
language plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
func_is_graphql_resolve bool;
|
||||
BEGIN
|
||||
func_is_graphql_resolve = (
|
||||
SELECT n.proname = 'resolve'
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
LEFT JOIN pg_catalog.pg_proc AS n
|
||||
ON ev.objid = n.oid
|
||||
);
|
||||
|
||||
IF func_is_graphql_resolve
|
||||
THEN
|
||||
-- Update public wrapper to pass all arguments through to the pg_graphql resolve func
|
||||
DROP FUNCTION IF EXISTS graphql_public.graphql;
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language sql
|
||||
as $$
|
||||
select graphql.resolve(
|
||||
query := query,
|
||||
variables := coalesce(variables, '{}'),
|
||||
"operationName" := "operationName",
|
||||
extensions := extensions
|
||||
);
|
||||
$$;
|
||||
|
||||
-- This hook executes when `graphql.resolve` is created. That is not necessarily the last
|
||||
-- function in the extension so we need to grant permissions on existing entities AND
|
||||
-- update default permissions to any others that are created after `graphql.resolve`
|
||||
grant usage on schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant select on all tables in schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
-- Cycle the extension off and back on to apply the permissions update.
|
||||
|
||||
drop extension if exists pg_graphql;
|
||||
-- Avoids limitation of only being able to load the extension via dashboard
|
||||
-- Only install it here as well if the extension is actually available on this server
|
||||
DO $$
|
||||
DECLARE
|
||||
graphql_exists boolean;
|
||||
BEGIN
|
||||
graphql_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pg_graphql'
|
||||
);
|
||||
|
||||
IF graphql_exists
|
||||
THEN
|
||||
create extension if not exists pg_graphql;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,74 @@
|
|||
-- migrate:up
|
||||
DO $$
|
||||
DECLARE
|
||||
pg_cron_installed boolean;
|
||||
BEGIN
|
||||
-- checks if pg_cron is enabled
|
||||
pg_cron_installed = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pg_cron'
|
||||
and installed_version is not null
|
||||
);
|
||||
|
||||
IF pg_cron_installed
|
||||
THEN
|
||||
grant usage on schema cron to postgres with grant option;
|
||||
grant all on all functions in schema cron to postgres with grant option;
|
||||
|
||||
alter default privileges in schema cron grant all on tables to postgres with grant option;
|
||||
alter default privileges in schema cron grant all on functions to postgres with grant option;
|
||||
alter default privileges in schema cron grant all on sequences to postgres with grant option;
|
||||
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on sequences to postgres with grant option;
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on tables to postgres with grant option;
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on functions to postgres with grant option;
|
||||
|
||||
grant all privileges on all tables in schema cron to postgres with grant option;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$
|
||||
DECLARE
|
||||
pg_net_installed boolean;
|
||||
BEGIN
|
||||
-- checks if pg_net is enabled
|
||||
pg_net_installed = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pg_net'
|
||||
and installed_version is not null
|
||||
|
||||
);
|
||||
|
||||
IF pg_net_installed
|
||||
THEN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_functions_admin'
|
||||
)
|
||||
THEN
|
||||
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
|
||||
END IF;
|
||||
|
||||
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
|
||||
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
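-- Example (editor's sketch, not part of this migration): once pg_net is installed and
-- the grants above are in place, any of the granted roles can queue an asynchronous
-- HTTP request; the call returns a request id that pg_net processes in the background.
-- The URL is a placeholder.
SELECT net.http_get('https://example.com/health') AS request_id;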
|
|
@ -0,0 +1,5 @@
|
|||
-- migrate:up
|
||||
alter role authenticator set statement_timeout = '8s';
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
-- migrate:up
|
||||
revoke supabase_admin from authenticator;
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
-- migrate:up
|
||||
|
||||
DO $$
|
||||
DECLARE
|
||||
pgsodium_exists boolean;
|
||||
vault_exists boolean;
|
||||
BEGIN
|
||||
pgsodium_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pgsodium'
|
||||
and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9')
|
||||
);
|
||||
|
||||
vault_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'supabase_vault'
|
||||
);
|
||||
|
||||
IF pgsodium_exists
|
||||
THEN
|
||||
create extension if not exists pgsodium;
|
||||
|
||||
grant pgsodium_keyiduser to postgres with admin option;
|
||||
grant pgsodium_keyholder to postgres with admin option;
|
||||
grant pgsodium_keymaker to postgres with admin option;
|
||||
|
||||
grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role;
|
||||
grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role;
|
||||
grant execute on function pgsodium.crypto_aead_det_keygen to service_role;
|
||||
|
||||
IF vault_exists
|
||||
THEN
|
||||
create extension if not exists supabase_vault;
|
||||
END IF;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
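-- Example (editor's sketch, not part of this migration): which of the optional
-- extensions actually ended up installed can be checked afterwards:
SELECT extname, extversion
FROM pg_extension
WHERE extname IN ('pgsodium', 'supabase_vault');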
|
|
@ -0,0 +1,5 @@
|
|||
-- migrate:up
|
||||
grant anon, authenticated, service_role to postgres;
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
-- migrate:up
|
||||
grant all privileges on all tables in schema extensions to postgres with grant option;
|
||||
grant all privileges on all routines in schema extensions to postgres with grant option;
|
||||
grant all privileges on all sequences in schema extensions to postgres with grant option;
|
||||
alter default privileges in schema extensions grant all on tables to postgres with grant option;
|
||||
alter default privileges in schema extensions grant all on routines to postgres with grant option;
|
||||
alter default privileges in schema extensions grant all on sequences to postgres with grant option;
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
-- migrate:up
|
||||
grant pg_monitor to postgres;
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
-- migrate:up
|
||||
grant anon, authenticated, service_role to supabase_storage_admin;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,10 @@
|
|||
-- migrate:up
|
||||
|
||||
ALTER ROLE authenticated inherit;
|
||||
ALTER ROLE anon inherit;
|
||||
ALTER ROLE service_role inherit;
|
||||
|
||||
GRANT pgsodium_keyholder to service_role;
|
||||
|
||||
-- migrate:down
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
-- migrate:up
|
||||
grant authenticator to supabase_storage_admin;
|
||||
revoke anon, authenticated, service_role from supabase_storage_admin;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,78 @@
|
|||
-- migrate:up
|
||||
|
||||
create or replace function extensions.grant_pg_graphql_access()
|
||||
returns event_trigger
|
||||
language plpgsql
|
||||
AS $func$
|
||||
DECLARE
|
||||
func_is_graphql_resolve bool;
|
||||
BEGIN
|
||||
func_is_graphql_resolve = (
|
||||
SELECT n.proname = 'resolve'
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
LEFT JOIN pg_catalog.pg_proc AS n
|
||||
ON ev.objid = n.oid
|
||||
);
|
||||
|
||||
IF func_is_graphql_resolve
|
||||
THEN
|
||||
-- Update public wrapper to pass all arguments through to the pg_graphql resolve func
|
||||
DROP FUNCTION IF EXISTS graphql_public.graphql;
|
||||
create or replace function graphql_public.graphql(
|
||||
"operationName" text default null,
|
||||
query text default null,
|
||||
variables jsonb default null,
|
||||
extensions jsonb default null
|
||||
)
|
||||
returns jsonb
|
||||
language sql
|
||||
as $$
|
||||
select graphql.resolve(
|
||||
query := query,
|
||||
variables := coalesce(variables, '{}'),
|
||||
"operationName" := "operationName",
|
||||
extensions := extensions
|
||||
);
|
||||
$$;
|
||||
|
||||
-- This hook executes when `graphql.resolve` is created. That is not necessarily the last
|
||||
-- function in the extension so we need to grant permissions on existing entities AND
|
||||
-- update default permissions to any others that are created after `graphql.resolve`
|
||||
grant usage on schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant select on all tables in schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role;
|
||||
grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role;
|
||||
alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role;
|
||||
|
||||
-- Allow the postgres role to grant usage on the graphql and graphql_public schemas to custom roles
|
||||
grant usage on schema graphql_public to postgres with grant option;
|
||||
grant usage on schema graphql to postgres with grant option;
|
||||
END IF;
|
||||
|
||||
END;
|
||||
$func$;
|
||||
|
||||
-- Cycle the extension off and back on to apply the permissions update.
|
||||
|
||||
drop extension if exists pg_graphql;
|
||||
-- Avoids limitation of only being able to load the extension via dashboard
|
||||
-- Only install it here as well if the extension is actually available on this server
|
||||
DO $$
|
||||
DECLARE
|
||||
graphql_exists boolean;
|
||||
BEGIN
|
||||
graphql_exists = (
|
||||
select count(*) = 1
|
||||
from pg_available_extensions
|
||||
where name = 'pg_graphql'
|
||||
);
|
||||
|
||||
IF graphql_exists
|
||||
THEN
|
||||
create extension if not exists pg_graphql;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,47 @@
|
|||
-- migrate:up
|
||||
do $$
|
||||
begin
|
||||
if exists (select from pg_extension where extname = 'pg_cron') then
|
||||
revoke all on table cron.job from postgres;
|
||||
grant select on table cron.job to postgres with grant option;
|
||||
end if;
|
||||
end $$;
|
||||
|
||||
CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
JOIN pg_extension AS ext
|
||||
ON ev.objid = ext.oid
|
||||
WHERE ext.extname = 'pg_cron'
|
||||
)
|
||||
THEN
|
||||
grant usage on schema cron to postgres with grant option;
|
||||
|
||||
alter default privileges in schema cron grant all on tables to postgres with grant option;
|
||||
alter default privileges in schema cron grant all on functions to postgres with grant option;
|
||||
alter default privileges in schema cron grant all on sequences to postgres with grant option;
|
||||
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on sequences to postgres with grant option;
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on tables to postgres with grant option;
|
||||
alter default privileges for user supabase_admin in schema cron grant all
|
||||
on functions to postgres with grant option;
|
||||
|
||||
grant all privileges on all tables in schema cron to postgres with grant option;
|
||||
revoke all on table cron.job from postgres;
|
||||
grant select on table cron.job to postgres with grant option;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
|
||||
drop event trigger if exists issue_pg_cron_access;
|
||||
CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end
|
||||
WHEN TAG IN ('CREATE EXTENSION')
|
||||
EXECUTE FUNCTION extensions.grant_pg_cron_access();
|
||||
|
||||
-- migrate:down
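-- Example (editor's sketch, not part of this migration): the event trigger only fires
-- on CREATE EXTENSION, so after enabling pg_cron (which also requires it in
-- shared_preload_libraries) postgres should end up with usage on the cron schema.
-- CREATE EXTENSION pg_cron;
SELECT has_schema_privilege('postgres', 'cron', 'usage') AS postgres_has_cron_usage;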
|
|
@ -0,0 +1,4 @@
|
|||
-- migrate:up
|
||||
ALTER ROLE authenticator set lock_timeout to '8s';
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,6 @@
|
|||
-- migrate:up
|
||||
alter function pg_catalog.lo_export owner to supabase_admin;
|
||||
alter function pg_catalog.lo_import(text) owner to supabase_admin;
|
||||
alter function pg_catalog.lo_import(text, oid) owner to supabase_admin;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,4 @@
|
|||
-- migrate:up
|
||||
grant pg_read_all_data, pg_signal_backend to postgres;
|
||||
|
||||
-- migrate:down
|
|
@ -0,0 +1,11 @@
|
|||
-- migrate:up
|
||||
do $$
|
||||
begin
|
||||
if exists (select 1 from pg_available_extensions where name = 'orioledb') then
|
||||
if not exists (select 1 from pg_extension where extname = 'orioledb') then
|
||||
create extension if not exists orioledb;
|
||||
end if;
|
||||
end if;
|
||||
end $$;
|
||||
|
||||
-- migrate:down
|
168
cmd/main.go
Normal file
|
@ -0,0 +1,168 @@
|
|||
/*
|
||||
Copyright 2024 Peter Kurfer.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
||||
// to ensure that exec-entrypoint and run can make use of them.
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
|
||||
"code.icb4dc0.de/prskr/supabase-operator/internal/controller"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
var (
|
||||
scheme = runtime.NewScheme()
|
||||
setupLog = ctrl.Log.WithName("setup")
|
||||
)
|
||||
|
||||
func init() {
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
|
||||
utilruntime.Must(supabasev1alpha1.AddToScheme(scheme))
|
||||
// +kubebuilder:scaffold:scheme
|
||||
}
|
||||
|
||||
func main() {
|
||||
var metricsAddr string
|
||||
var enableLeaderElection bool
|
||||
var probeAddr string
|
||||
var secureMetrics bool
|
||||
var enableHTTP2 bool
|
||||
var tlsOpts []func(*tls.Config)
|
||||
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
|
||||
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
|
||||
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
|
||||
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
|
||||
"Enable leader election for controller manager. "+
|
||||
"Enabling this will ensure there is only one active controller manager.")
|
||||
flag.BoolVar(&secureMetrics, "metrics-secure", true,
|
||||
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
|
||||
flag.BoolVar(&enableHTTP2, "enable-http2", false,
|
||||
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
|
||||
opts := zap.Options{
|
||||
Development: true,
|
||||
}
|
||||
opts.BindFlags(flag.CommandLine)
|
||||
flag.Parse()
|
||||
|
||||
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
|
||||
|
||||
// if the enable-http2 flag is false (the default), http/2 should be disabled
|
||||
// due to its vulnerabilities. More specifically, disabling http/2 will
|
||||
// prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and
|
||||
// Rapid Reset CVEs. For more information see:
|
||||
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
|
||||
// - https://github.com/advisories/GHSA-4374-p667-p6c8
|
||||
disableHTTP2 := func(c *tls.Config) {
|
||||
setupLog.Info("disabling http/2")
|
||||
c.NextProtos = []string{"http/1.1"}
|
||||
}
|
||||
|
||||
if !enableHTTP2 {
|
||||
tlsOpts = append(tlsOpts, disableHTTP2)
|
||||
}
|
||||
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
TLSOpts: tlsOpts,
|
||||
})
|
||||
|
||||
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
|
||||
// More info:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
|
||||
// - https://book.kubebuilder.io/reference/metrics.html
|
||||
metricsServerOptions := metricsserver.Options{
|
||||
BindAddress: metricsAddr,
|
||||
SecureServing: secureMetrics,
|
||||
TLSOpts: tlsOpts,
|
||||
}
|
||||
|
||||
if secureMetrics {
|
||||
// FilterProvider is used to protect the metrics endpoint with authn/authz.
|
||||
// These configurations ensure that only authorized users and service accounts
|
||||
// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
|
||||
// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
|
||||
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
|
||||
|
||||
// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
|
||||
// generate self-signed certificates for the metrics server. While convenient for development and testing,
|
||||
// this setup is not recommended for production.
|
||||
}
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Metrics: metricsServerOptions,
|
||||
WebhookServer: webhookServer,
|
||||
HealthProbeBindAddress: probeAddr,
|
||||
LeaderElection: enableLeaderElection,
|
||||
LeaderElectionID: "05f9463f.k8s.icb4dc0.de",
|
||||
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
|
||||
// when the Manager ends. This requires the binary to immediately end when the
|
||||
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
|
||||
// speeds up voluntary leader transitions as the new leader doesn't have to wait
|
||||
// LeaseDuration time first.
|
||||
//
|
||||
// In the default scaffold provided, the program ends immediately after
|
||||
// the manager stops, so it would be fine to enable this option. However,
|
||||
// if you are doing, or intend to do, any operation such as performing cleanups
|
||||
// after the manager stops then its usage might be unsafe.
|
||||
// LeaderElectionReleaseOnCancel: true,
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to start manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controller.CoreReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "Core")
|
||||
os.Exit(1)
|
||||
}
|
||||
// +kubebuilder:scaffold:builder
|
||||
|
||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "unable to set up health check")
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "unable to set up ready check")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
setupLog.Info("starting manager")
|
||||
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
||||
setupLog.Error(err, "problem running manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
61
config/crd/bases/supabase.k8s.icb4dc0.de_cores.yaml
Normal file
|
@ -0,0 +1,61 @@
|
|||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: cores.supabase.k8s.icb4dc0.de
|
||||
spec:
|
||||
group: supabase.k8s.icb4dc0.de
|
||||
names:
|
||||
kind: Core
|
||||
listKind: CoreList
|
||||
plural: cores
|
||||
singular: core
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Core is the Schema for the cores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: CoreSpec defines the desired state of Core.
|
||||
properties:
|
||||
database:
|
||||
type: object
|
||||
type: object
|
||||
status:
|
||||
description: CoreStatus defines the observed state of Core.
|
||||
properties:
|
||||
appliedMigrations:
|
||||
additionalProperties:
|
||||
format: int64
|
||||
type: integer
|
||||
description: |-
|
||||
INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||
Important: Run "make" to regenerate code after modifying this file
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
20
config/crd/kustomization.yaml
Normal file
|
@ -0,0 +1,20 @@
|
|||
# This kustomization.yaml is not intended to be run by itself,
|
||||
# since it depends on service name and namespace that are out of this kustomize package.
|
||||
# It should be run by config/default
|
||||
resources:
|
||||
- bases/supabase.k8s.icb4dc0.de_cores.yaml
|
||||
# +kubebuilder:scaffold:crdkustomizeresource
|
||||
|
||||
patches:
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
|
||||
# patches here are for enabling the conversion webhook for each CRD
|
||||
# +kubebuilder:scaffold:crdkustomizewebhookpatch
|
||||
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
|
||||
# patches here are for enabling the CA injection for each CRD
|
||||
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
|
||||
|
||||
# [WEBHOOK] To enable webhook, uncomment the following section
|
||||
# the following config is for teaching kustomize how to do kustomization for CRDs.
|
||||
#configurations:
|
||||
#- kustomizeconfig.yaml
|
19
config/crd/kustomizeconfig.yaml
Normal file
|
@ -0,0 +1,19 @@
|
|||
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
|
||||
nameReference:
|
||||
- kind: Service
|
||||
version: v1
|
||||
fieldSpecs:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/name
|
||||
|
||||
namespace:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/namespace
|
||||
create: false
|
||||
|
||||
varReference:
|
||||
- path: metadata/annotations
|
177
config/default/kustomization.yaml
Normal file
|
@ -0,0 +1,177 @@
|
|||
# Adds namespace to all resources.
|
||||
namespace: supabase-operator-system
|
||||
|
||||
# Value of this field is prepended to the
|
||||
# names of all resources, e.g. a deployment named
|
||||
# "wordpress" becomes "alices-wordpress".
|
||||
# Note that it should also match with the prefix (text before '-') of the namespace
|
||||
# field above.
|
||||
namePrefix: supabase-operator-
|
||||
|
||||
# Labels to add to all resources and selectors.
|
||||
#labels:
|
||||
#- includeSelectors: true
|
||||
# pairs:
|
||||
# someName: someValue
|
||||
|
||||
resources:
|
||||
- ../crd
|
||||
- ../rbac
|
||||
- ../manager
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
|
||||
# crd/kustomization.yaml
|
||||
#- ../webhook
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
|
||||
#- ../certmanager
|
||||
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
||||
#- ../prometheus
|
||||
# [METRICS] Expose the controller manager metrics service.
|
||||
- metrics_service.yaml
|
||||
# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
|
||||
# Only Pod(s) running in a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
|
||||
# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will
|
||||
# be able to communicate with the Webhook Server.
|
||||
#- ../network-policy
|
||||
|
||||
# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager
|
||||
patches:
|
||||
# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
|
||||
# More info: https://book.kubebuilder.io/reference/metrics
|
||||
- path: manager_metrics_patch.yaml
|
||||
target:
|
||||
kind: Deployment
|
||||
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
|
||||
# crd/kustomization.yaml
|
||||
#- path: manager_webhook_patch.yaml
|
||||
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||
# Uncomment the following replacements to add the cert-manager CA injection annotations
|
||||
#replacements:
|
||||
# - source: # Uncomment the following block if you have any webhook
|
||||
# kind: Service
|
||||
# version: v1
|
||||
# name: webhook-service
|
||||
# fieldPath: .metadata.name # Name of the service
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# fieldPaths:
|
||||
# - .spec.dnsNames.0
|
||||
# - .spec.dnsNames.1
|
||||
# options:
|
||||
# delimiter: '.'
|
||||
# index: 0
|
||||
# create: true
|
||||
# - source:
|
||||
# kind: Service
|
||||
# version: v1
|
||||
# name: webhook-service
|
||||
# fieldPath: .metadata.namespace # Namespace of the service
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# fieldPaths:
|
||||
# - .spec.dnsNames.0
|
||||
# - .spec.dnsNames.1
|
||||
# options:
|
||||
# delimiter: '.'
|
||||
# index: 1
|
||||
# create: true
|
||||
#
|
||||
# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation)
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.namespace # Namespace of the certificate CR
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: ValidatingWebhookConfiguration
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 0
|
||||
# create: true
|
||||
# - source:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.name
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: ValidatingWebhookConfiguration
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 1
|
||||
# create: true
|
||||
#
|
||||
# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting )
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.namespace # Namespace of the certificate CR
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: MutatingWebhookConfiguration
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 0
|
||||
# create: true
|
||||
# - source:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.name
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: MutatingWebhookConfiguration
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 1
|
||||
# create: true
|
||||
#
|
||||
# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion)
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.namespace # Namespace of the certificate CR
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: CustomResourceDefinition
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 0
|
||||
# create: true
|
||||
# - source:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # This name should match the one in certificate.yaml
|
||||
# fieldPath: .metadata.name
|
||||
# targets:
|
||||
# - select:
|
||||
# kind: CustomResourceDefinition
|
||||
# fieldPaths:
|
||||
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
|
||||
# options:
|
||||
# delimiter: '/'
|
||||
# index: 1
|
||||
# create: true
|
4
config/default/manager_metrics_patch.yaml
Normal file
|
@ -0,0 +1,4 @@
|
|||
# This patch adds the args to allow exposing the metrics endpoint using HTTPS
|
||||
- op: add
|
||||
path: /spec/template/spec/containers/0/args/0
|
||||
value: --metrics-bind-address=:8443
|
17
config/default/metrics_service.yaml
Normal file
|
@ -0,0 +1,17 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: controller-manager-metrics-service
|
||||
namespace: system
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 8443
|
||||
protocol: TCP
|
||||
targetPort: 8443
|
||||
selector:
|
||||
control-plane: controller-manager
|
2
config/manager/kustomization.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
resources:
|
||||
- manager.yaml
|
95
config/manager/manager.yaml
Normal file
|
@ -0,0 +1,95 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
kubectl.kubernetes.io/default-container: manager
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
spec:
|
||||
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
|
||||
# according to the platforms which are supported by your solution.
|
||||
# It is considered best practice to support multiple architectures. You can
|
||||
# build your manager image using the makefile target docker-buildx.
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: kubernetes.io/arch
|
||||
# operator: In
|
||||
# values:
|
||||
# - amd64
|
||||
# - arm64
|
||||
# - ppc64le
|
||||
# - s390x
|
||||
# - key: kubernetes.io/os
|
||||
# operator: In
|
||||
# values:
|
||||
# - linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
# TODO(user): For common cases that do not require escalating privileges
|
||||
# it is recommended to ensure that all your Pods/Containers are restrictive.
|
||||
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
|
||||
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
|
||||
# versions < 1.19 or on vendor versions which do NOT support this field by default (i.e. OpenShift < 4.11).
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
containers:
|
||||
- command:
|
||||
- /manager
|
||||
args:
|
||||
- --leader-elect
|
||||
- --health-probe-bind-address=:8081
|
||||
image: controller:latest
|
||||
name: manager
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 8081
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
# TODO(user): Configure the resources accordingly based on the project requirements.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
serviceAccountName: controller-manager
|
||||
terminationGracePeriodSeconds: 10
|
26
config/network-policy/allow-metrics-traffic.yaml
Normal file
|
@ -0,0 +1,26 @@
|
|||
# This NetworkPolicy allows ingress traffic
|
||||
# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
|
||||
# namespaces are able to gather data from the metrics endpoint.
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: allow-metrics-traffic
|
||||
namespace: system
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager
|
||||
policyTypes:
|
||||
- Ingress
|
||||
ingress:
|
||||
# This allows ingress traffic from any namespace with the label metrics: enabled
|
||||
- from:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
metrics: enabled # Only from namespaces with this label
|
||||
ports:
|
||||
- port: 8443
|
||||
protocol: TCP
|
2
config/network-policy/kustomization.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
resources:
|
||||
- allow-metrics-traffic.yaml
|
2
config/prometheus/kustomization.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
resources:
|
||||
- monitor.yaml
|
30
config/prometheus/monitor.yaml
Normal file
|
@ -0,0 +1,30 @@
|
|||
# Prometheus Monitor Service (Metrics)
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
app.kubernetes.io/name: supabase-operator
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: controller-manager-metrics-monitor
|
||||
namespace: system
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
port: https # Ensure this is the name of the port that exposes HTTPS metrics
|
||||
scheme: https
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
tlsConfig:
|
||||
# TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
|
||||
# certificate verification. This poses a significant security risk by making the system vulnerable to
|
||||
# man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between
|
||||
# Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data,
|
||||
# compromising the integrity and confidentiality of the information.
|
||||
# Please use the following options for secure configurations:
|
||||
# caFile: /etc/metrics-certs/ca.crt
|
||||
# certFile: /etc/metrics-certs/tls.crt
|
||||
# keyFile: /etc/metrics-certs/tls.key
|
||||
insecureSkipVerify: true
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager
|
27
config/rbac/core_editor_role.yaml
Normal file
@ -0,0 +1,27 @@
# permissions for end users to edit cores.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: core-editor-role
rules:
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores/status
  verbs:
  - get

23
config/rbac/core_viewer_role.yaml
Normal file
@ -0,0 +1,23 @@
# permissions for end users to view cores.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: core-viewer-role
rules:
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores/status
  verbs:
  - get

27
config/rbac/kustomization.yaml
Normal file
@ -0,0 +1,27 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# The following RBAC configurations are used to protect
# the metrics endpoint with authn/authz. These configurations
# ensure that only authorized users and service accounts
# can access the metrics endpoint. Comment the following
# permissions if you want to disable this protection.
# More info: https://book.kubebuilder.io/reference/metrics.html
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# For each CRD, "Editor" and "Viewer" roles are scaffolded by
# default, aiding admins in cluster management. Those roles are
# not used by the Project itself. You can comment the following lines
# if you do not want those helpers to be installed with your Project.
- core_editor_role.yaml
- core_viewer_role.yaml

40
config/rbac/leader_election_role.yaml
Normal file
@ -0,0 +1,40 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch

15
config/rbac/leader_election_role_binding.yaml
Normal file
@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system

17
config/rbac/metrics_auth_role.yaml
Normal file
@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-auth-role
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create

12
config/rbac/metrics_auth_role_binding.yaml
Normal file
@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-auth-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-auth-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system

9
config/rbac/metrics_reader_role.yaml
Normal file
@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get

32
config/rbac/role.yaml
Normal file
@ -0,0 +1,32 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores/finalizers
  verbs:
  - update
- apiGroups:
  - supabase.k8s.icb4dc0.de
  resources:
  - cores/status
  verbs:
  - get
  - patch
  - update

15
config/rbac/role_binding.yaml
Normal file
@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system

8
config/rbac/service_account.yaml
Normal file
@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager
  namespace: system

4
config/samples/kustomization.yaml
Normal file
@ -0,0 +1,4 @@
## Append samples of your project ##
resources:
- supabase_v1alpha1_core.yaml
# +kubebuilder:scaffold:manifestskustomizesamples

12
config/samples/supabase_v1alpha1_core.yaml
Normal file
@ -0,0 +1,12 @@
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Core
metadata:
  labels:
    app.kubernetes.io/name: supabase-operator
    app.kubernetes.io/managed-by: kustomize
  name: core-sample
spec:
  database:
    dsnFrom:
      name: example-cluster-credentials
      key: url

@ -47,7 +49,9 @@
  bootstrap:
    initdb:
      database: app
      owner: supabase_admin
      owner: setup
      postInitSQL:
        - drop publication if exists supabase_realtime;

  postgresql:
    shared_preload_libraries:

@ -66,6 +68,7 @@
      pgsodium.getkey_script: /projected/bin/pgsodium_getkey.sh
      cron.database_name: app
      auto_explain.log_min_duration: 10s

  projectedVolumeTemplate:
    sources:
      - configMap:

100
go.mod
@ -2,4 +2,102 @@ module code.icb4dc0.de/prskr/supabase-operator
go 1.23.4

require github.com/magefile/mage v1.15.0
require (
	github.com/jackc/pgx/v5 v5.7.1
	github.com/magefile/mage v1.15.0
	github.com/onsi/ginkgo/v2 v2.19.0
	github.com/onsi/gomega v1.33.1
	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.31.0
	k8s.io/apimachinery v0.31.0
	k8s.io/client-go v0.31.0
	sigs.k8s.io/controller-runtime v0.19.1
)

require (
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/cel-go v0.20.1 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_golang v1.19.1 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/spf13/cobra v1.8.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stoewer/go-strcase v1.2.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
	go.opentelemetry.io/otel v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
	go.opentelemetry.io/otel/metric v1.28.0 // indirect
	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
	go.opentelemetry.io/otel/trace v1.28.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.26.0 // indirect
	golang.org/x/crypto v0.27.0 // indirect
	golang.org/x/net v0.26.0 // indirect
	golang.org/x/oauth2 v0.21.0 // indirect
	golang.org/x/sync v0.8.0 // indirect
	golang.org/x/sys v0.25.0 // indirect
	golang.org/x/term v0.24.0 // indirect
	golang.org/x/text v0.18.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
	google.golang.org/grpc v1.65.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/apiextensions-apiserver v0.31.0 // indirect
	k8s.io/apiserver v0.31.0 // indirect
	k8s.io/component-base v0.31.0 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)

262
go.sum
262
go.sum
|
@ -1,2 +1,264 @@
|
|||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
|
||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
|
||||
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
|
||||
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
|
||||
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
|
||||
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||
k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
|
||||
k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
|
||||
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
|
||||
k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
|
||||
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||
k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
|
||||
k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk=
|
||||
sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
6
go.work
@ -1,6 +0,0 @@
go 1.23.4

use (
	.
	./tools
)

15
hack/boilerplate.go.txt
Normal file
@ -0,0 +1,15 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

@ -1,3 +0,0 @@
-- drop publication if exists supabase_realtime;
-- reach clean state for supabase-operator
drop publication if exists supabase_realtime;

7
hack/migrate.sh
Executable file
@ -0,0 +1,7 @@
#!/usr/bin/env bash

set -euo pipefail

export DATABASE_URL="postgres://supabase_admin:1n1t-R00t!@localhost:5432/app"

go run mage.go Migrate

51
infrastructure/db/migrator.go
Normal file
@ -0,0 +1,51 @@
package db

import (
	"context"
	"errors"
	"iter"

	"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
	"github.com/jackc/pgx/v5"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)

type Migrator struct {
	Conn *pgx.Conn
}

func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.MigrationStatus, seq iter.Seq2[migrations.Script, error]) (appliedSomething bool, err error) {
	for s, err := range seq {
		if err != nil {
			return false, err
		}

		if status.IsApplied(s.FileName) {
			continue
		}

		if err := m.Apply(ctx, s.Content); err != nil {
			return false, err
		}

		appliedSomething = true
		status.Record(s.FileName)
	}

	return appliedSomething, nil
}

func (m Migrator) Apply(ctx context.Context, script string) error {
	tx, err := m.Conn.BeginTx(ctx, pgx.TxOptions{})
	if err != nil {
		return err
	}

	_, err = tx.Exec(ctx, script)
	if err != nil {
		return errors.Join(err, tx.Rollback(ctx))
	}

	return tx.Commit(ctx)
}

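A minimal usage sketch for the Migrator added above, not part of this commit: it assumes a reachable Postgres instance and uses a placeholder DSN; in the operator itself the DSN is resolved from the Core spec and the connection is closed via the controller's CloseCtx helper.

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"

	"code.icb4dc0.de/prskr/supabase-operator/infrastructure/db"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN for illustration only.
	conn, err := pgx.Connect(ctx, "postgres://supabase_admin:password@localhost:5432/app")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	migrator := db.Migrator{Conn: conn}

	// Apply runs the script in its own transaction and rolls back on failure.
	if err := migrator.Apply(ctx, "create table if not exists example (id serial primary key);"); err != nil {
		log.Fatal(err)
	}
}
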
132
internal/controller/core_controller.go
Normal file
@ -0,0 +1,132 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"errors"
	"io"

	"github.com/jackc/pgx/v5"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
	"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
	"code.icb4dc0.de/prskr/supabase-operator/infrastructure/db"
)

// CoreReconciler reconciles a Core object
type CoreReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=supabase.k8s.icb4dc0.de,resources=cores,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=supabase.k8s.icb4dc0.de,resources=cores/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=supabase.k8s.icb4dc0.de,resources=cores/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Core object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile
func (r *CoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
	logger := log.FromContext(ctx)

	var core supabasev1alpha1.Core

	if err := r.Get(ctx, req.NamespacedName, &core); client.IgnoreNotFound(err) != nil {
		logger.Error(err, "unable to fetch Core")
		return ctrl.Result{}, err
	}

	dsn, err := core.Spec.Database.GetDSN(ctx, client.NewNamespacedClient(r.Client, req.Namespace))
	if err != nil {
		logger.Error(err, "unable to get DSN")
		return ctrl.Result{}, err
	}

	conn, err := pgx.Connect(ctx, dsn)
	if err != nil {
		logger.Error(err, "unable to connect to database")
		return ctrl.Result{}, err
	}

	defer CloseCtx(ctx, conn, &err)

	if err := r.applyMissingMigrations(ctx, conn, &core); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *CoreReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&supabasev1alpha1.Core{}).
		Owns(new(appsv1.Deployment)).
		Named("core").
		Complete(r)
}

func (r *CoreReconciler) applyMissingMigrations(ctx context.Context, conn *pgx.Conn, core *supabasev1alpha1.Core) (err error) {
	logger := log.FromContext(ctx)
	logger.Info("Checking for outstanding migrations")
	migrator := db.Migrator{Conn: conn}

	var appliedSomething bool

	if appliedSomething, err = migrator.ApplyAll(ctx, core.Status.AppliedMigrations, migrations.InitScripts()); err != nil {
		return err
	}

	if appliedSomething {
		logger.Info("Updating status after applying init scripts")
		return r.Client.Status().Update(ctx, core)
	}

	if appliedSomething, err = migrator.ApplyAll(ctx, core.Status.AppliedMigrations, migrations.MigrationScripts()); err != nil {
		return err
	}

	if appliedSomething {
		logger.Info("Updating status after applying migration scripts")
		return r.Client.Status().Update(ctx, core)
	}

	return nil
}

func Close(closer io.Closer, err *error) {
	*err = errors.Join(*err, closer.Close())
}

func CloseCtx(ctx context.Context, closable interface {
	Close(ctx context.Context) error
}, err *error,
) {
	*err = errors.Join(*err, closable.Close(ctx))
}

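For context, a sketch of how this reconciler would typically be wired into the manager; cmd/main.go is not part of this excerpt, so the snippet follows standard controller-runtime scaffolding (scheme, setupLog, and the os.Exit handling are assumed names, not taken from this commit).

// Hypothetical excerpt from cmd/main.go, illustration only.
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
if err != nil {
	setupLog.Error(err, "unable to start manager")
	os.Exit(1)
}

if err := (&controller.CoreReconciler{
	Client: mgr.GetClient(),
	Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
	setupLog.Error(err, "unable to create controller", "controller", "Core")
	os.Exit(1)
}
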
84
internal/controller/core_controller_test.go
Normal file
@ -0,0 +1,84 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)

var _ = Describe("Core Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		typeNamespacedName := types.NamespacedName{
			Name:      resourceName,
			Namespace: "default", // TODO(user): Modify as needed
		}
		core := &supabasev1alpha1.Core{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind Core")
			err := k8sClient.Get(ctx, typeNamespacedName, core)
			if err != nil && errors.IsNotFound(err) {
				resource := &supabasev1alpha1.Core{
					ObjectMeta: metav1.ObjectMeta{
						Name:      resourceName,
						Namespace: "default",
					},
					// TODO(user): Specify other spec details if needed.
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			// TODO(user): Cleanup logic after each test, like removing the resource instance.
			resource := &supabasev1alpha1.Core{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance Core")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
		})
		It("should successfully reconcile the resource", func() {
			By("Reconciling the created resource")
			controllerReconciler := &CoreReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).NotTo(HaveOccurred())
			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
			// Example: If you expect a certain status condition after reconciliation, verify it here.
		})
	})
})

97
internal/controller/suite_test.go
Normal file
@ -0,0 +1,97 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
	// +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var (
	cfg       *rest.Config
	k8sClient client.Client
	testEnv   *envtest.Environment
	ctx       context.Context
	cancel    context.CancelFunc
)

func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly
		// without calling the makefile target test. If not informed, it will look for the
		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries set up under the bin directory to perform
		// the tests directly. When we run make test it will be set up and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
			fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = supabasev1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	cancel()
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

63
internal/supabase/images.go
Normal file
@ -0,0 +1,63 @@
package supabase

type ImageRef struct {
	// The repository of the image
	Repository string
	// The tag of the image
	Tag string
}

var Images = map[string]ImageRef{
	"analytics": {
		Repository: "supabase/logflare",
		Tag:        "1.4.0",
	},
	"auth": {
		Repository: "supabase/gotrue",
		Tag:        "v2.164.0",
	},
	"db": {
		Repository: "supabase/postgres",
		Tag:        "15.6.1.146",
	},
	"functions": {
		Repository: "supabase/edge-runtime",
		Tag:        "v1.65.3",
	},
	"imgproxy": {
		Repository: "darthsim/imgproxy",
		Tag:        "v3.8.0",
	},
	"kong": {
		Repository: "kong",
		Tag:        "2.8.1",
	},
	"meta": {
		Repository: "supabase/postgres-meta",
		Tag:        "v0.84.2",
	},
	"realtime": {
		Repository: "supabase/realtime",
		Tag:        "v2.33.58",
	},
	"rest": {
		Repository: "postgrest/postgrest",
		Tag:        "v12.2.0",
	},
	"storage": {
		Repository: "supabase/storage-api",
		Tag:        "v1.11.13",
	},
	"studio": {
		Repository: "supabase/studio",
		Tag:        "20241202-71e5240",
	},
	"supavisor": {
		Repository: "supabase/supavisor",
		Tag:        "1.1.56",
	},
	"vector": {
		Repository: "timberio/vector",
		Tag:        "0.28.1-alpine",
	},
}

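A hypothetical helper, not part of this commit, showing how an ImageRef from the map above could be rendered into the image string used in a container spec:

package supabase

import "fmt"

// String is an illustrative helper (assumed, not defined in this commit).
func (r ImageRef) String() string {
	return fmt.Sprintf("%s:%s", r.Repository, r.Tag)
}

// Example: Images["auth"].String() yields "supabase/gotrue:v2.164.0".
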
31
magefiles/build.go
Normal file
@ -0,0 +1,31 @@
package main

import (
	"github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps
	"golang.org/x/exp/slog"
)

// Default target to run when none is specified
// If not set, running mage will list available targets
// var Default = Build

// A build step that requires additional params, or platform specific steps for example
func Build() error {
	mg.Deps(InstallDeps, InstallToolDeps)
	slog.Info("Building...")
	return Go("build", "-o", "bin/manager", "cmd/main.go")
}

func Run() error {
	mg.Deps(InstallDeps, InstallToolDeps)

	return Go("run", "./cmd/main.go")
}

func InstallToolDeps() error {
	return Go("mod", "download", "-x", "-modfile=tools/go.mod")
}

func InstallDeps() error {
	return Go("mod", "download", "-x")
}

23
magefiles/cleanup.go
Normal file
@ -0,0 +1,23 @@
package main

import "github.com/magefile/mage/mg"

func Validate() {
	mg.Deps(Fmt, Lint)
}

func Fmt() error {
	return RunTool(tools[Gofumpt], "-l", "-w", ".")
}

func Vet() error {
	return Go("vet", "./...")
}

func Lint() error {
	return RunTool(tools[GolangciLint], "run")
}

func LintFix() error {
	return RunTool(tools[GolangciLint], "run", "--fix")
}
31
magefiles/commands.go
Normal file
@ -0,0 +1,31 @@
package main

import (
	"github.com/magefile/mage/sh"
)

type command string

var (
	ControllerGen = command("controller-gen")
	Gofumpt       = command("gofumpt")
	GolangciLint  = command("golangci-lint")
)

var tools map[command]string = map[command]string{
	ControllerGen: "sigs.k8s.io/controller-tools/cmd/controller-gen",
	Gofumpt:       "mvdan.cc/gofumpt",
	GolangciLint:  "github.com/golangci/golangci-lint/cmd/golangci-lint",
}

var (
	Go      = sh.RunCmd("go")
	Git     = sh.RunCmd("git")
	RunTool = RunVCmd("go", "run", "-modfile=tools/go.mod")
)

func RunVCmd(cmd string, primaryArgs ...string) func(args ...string) error {
	return func(args ...string) error {
		return sh.RunV(cmd, append(primaryArgs, args...)...)
	}
}
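RunVCmd simply curries a base command with fixed leading arguments, which is why RunTool expands to `go run -modfile=tools/go.mod <tool> <args...>`. A minimal sketch of reusing it for another pinned command; the target and the applied path below are hypothetical, not part of the commit.

// Hypothetical helper built on RunVCmd: pins "kubectl apply -f" so targets only pass paths.
var KubectlApply = RunVCmd("kubectl", "apply", "-f")

// ApplyCRDs would run: kubectl apply -f config/crd/bases
func ApplyCRDs() error {
	return KubectlApply("config/crd/bases")
}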
@ -1,13 +1,20 @@
package main

import (
	"embed"
	"log/slog"
	"os"
	"text/template"

	_ "github.com/magefile/mage/sh"
)

-var workingDir string
+var (
+	workingDir string
+	//go:embed templates/*.tmpl
+	templatesFS embed.FS
+	templates   *template.Template
+)

func init() {
	logLevel := new(slog.LevelVar)

@ -27,4 +34,6 @@ func init() {
	} else {
		workingDir = wd
	}
+
+	templates = template.Must(template.ParseFS(templatesFS, "templates/*.tmpl"))
}
@ -1,4 +1,146 @@
package main

-func FetchImageMeta() {
-}

import (
	"context"
	"io/fs"
	"log/slog"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/magefile/mage/mg"
	"github.com/magefile/mage/sh"
	"gopkg.in/yaml.v3"
)

const (
	composeFileUrl = "https://raw.githubusercontent.com/supabase/supabase/refs/heads/master/docker/docker-compose.yml"
)

func GenerateAll(ctx context.Context) {
	mg.CtxDeps(ctx, FetchImageMeta, FetchMigrations, Manifests, Generate)
}

func Manifests() error {
	return RunTool(
		tools[ControllerGen],
		"rbac:roleName=manager-role",
		"crd",
		"webhook",
		`paths="./..."`,
		"output:crd:artifacts:config=config/crd/bases",
	)
}

func Generate() error {
	return RunTool(tools[ControllerGen], `object:headerFile="hack/boilerplate.go.txt"`, `paths="./..."`)
}

func FetchImageMeta(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, composeFileUrl, nil)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}

	defer resp.Body.Close()

	var composeFile struct {
		Services map[string]struct {
			Image string `yaml:"image"`
		}
	}

	if err := yaml.NewDecoder(resp.Body).Decode(&composeFile); err != nil {
		return err
	}

	f, err := os.Create(filepath.Join("internal", "supabase", "images.go"))
	if err != nil {
		return err
	}

	defer f.Close()

	type imageRef struct {
		Repository string
		Tag        string
	}

	templateData := struct {
		Images map[string]imageRef
	}{
		Images: make(map[string]imageRef),
	}

	for name, service := range composeFile.Services {
		splitIdx := strings.LastIndex(service.Image, ":")
		repo := service.Image[:splitIdx]
		tag := service.Image[splitIdx+1:]
		templateData.Images[name] = imageRef{
			Repository: repo,
			Tag:        tag,
		}
	}

	return templates.ExecuteTemplate(f, "images.go.tmpl", templateData)
}

func FetchMigrations(ctx context.Context) error {
	latestRelease, err := latestReleaseVersion(ctx, "supabase", "postgres")
	if err != nil {
		return err
	}

	checkoutDir, err := os.MkdirTemp(os.TempDir(), "supabase-*")
	if err != nil {
		return err
	}

	repoFS := os.DirFS(checkoutDir)

	defer os.RemoveAll(checkoutDir)

	if err := Git("clone", "--filter=blob:none", "--no-checkout", "https://github.com/supabase/postgres", checkoutDir); err != nil {
		return err
	}

	if err := Git("-C", checkoutDir, "sparse-checkout", "set", "--cone", "migrations"); err != nil {
		return err
	}

	if err := Git("-C", checkoutDir, "checkout", latestRelease); err != nil {
		return err
	}

	migrationsDirPath := path.Join(".", "migrations", "db")
	return fs.WalkDir(repoFS, migrationsDirPath, func(filePath string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		if d.IsDir() || filepath.Ext(filePath) != ".sql" {
			return nil
		}

		fileName, err := filepath.Rel(migrationsDirPath, filePath)
		if err != nil {
			return err
		}

		dir, _ := filepath.Split(fileName)

		if err := os.MkdirAll(filepath.Join(workingDir, "assets", "migrations", dir), 0o750); err != nil {
			return err
		}

		slog.Info("Copying migration file", slog.String("file", fileName))
		return sh.Copy(filepath.Join(workingDir, "assets", "migrations", fileName), filepath.Join(checkoutDir, filepath.FromSlash(filePath)))
	})
}
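As a concrete illustration of the repository/tag split in FetchImageMeta above (a minimal sketch, not part of the commit; the sample value mirrors entries already present in the generated images.go):

// For a compose entry such as image: supabase/gotrue:v2.164.0
image := "supabase/gotrue:v2.164.0"
splitIdx := strings.LastIndex(image, ":") // 15
repo := image[:splitIdx]                  // "supabase/gotrue"
tag := image[splitIdx+1:]                 // "v2.164.0"

Using LastIndex rather than the first colon keeps image references that carry a registry port (for example registry.example.com:5000/app:tag) intact.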
36
magefiles/github.go
Normal file
@ -0,0 +1,36 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

func latestReleaseVersion(ctx context.Context, owner, repo string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo), nil)
	if err != nil {
		return "", err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}

	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("failed to retrieve latest release: %s", resp.Status)
	}

	var release struct {
		TagName string `json:"tag_name"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return "", err
	}

	return release.TagName, nil
}
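For context, FetchMigrations calls this helper to resolve the tag to check out; the same pattern works for any GitHub repository. A minimal sketch (the target below is hypothetical, not part of the commit; it assumes the surrounding magefiles package and its existing imports):

// Hypothetical mage target: logs the latest supabase/postgres release tag.
func PrintLatestPostgresRelease(ctx context.Context) error {
	tag, err := latestReleaseVersion(ctx, "supabase", "postgres")
	if err != nil {
		return err
	}
	slog.Info("Latest release", slog.String("tag", tag))
	return nil
}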
@ -1,41 +0,0 @@
package main

import (
	"fmt"
	"os"
	"os/exec"

	"github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps
)

// Default target to run when none is specified
// If not set, running mage will list available targets
// var Default = Build

// A build step that requires additional params, or platform specific steps for example
func Build() error {
	mg.Deps(InstallDeps)
	fmt.Println("Building...")
	cmd := exec.Command("go", "build", "-o", "MyApp", ".")
	return cmd.Run()
}

// A custom install step if you need your bin someplace other than go/bin
func Install() error {
	mg.Deps(Build)
	fmt.Println("Installing...")
	return os.Rename("./MyApp", "/usr/bin/MyApp")
}

// Manage your deps, or running package managers.
func InstallDeps() error {
	fmt.Println("Installing Deps...")
	cmd := exec.Command("go", "get", "github.com/stretchr/piglatin")
	return cmd.Run()
}

// Clean up after yourself
func Clean() {
	fmt.Println("Cleaning...")
	os.RemoveAll("MyApp")
}
54
magefiles/migrate.go
Normal file
@ -0,0 +1,54 @@
package main

import (
	"context"
	"errors"
	"log/slog"
	"os"

	"github.com/jackc/pgx/v5"

	"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
)

func Migrate(ctx context.Context) error {
	dsn := os.Getenv("DATABASE_URL")
	if dsn == "" {
		return errors.New("DATABASE_URL is required")
	}

	conn, err := pgx.Connect(ctx, dsn)
	if err != nil {
		return err
	}

	defer conn.Close(ctx)

	for s, err := range migrations.InitScripts() {
		if err != nil {
			return err
		}

		slog.Info("Running init script", slog.String("file", s.FileName))

		_, err = conn.Exec(ctx, s.Content)
		if err != nil {
			return err
		}
	}

	for s, err := range migrations.MigrationScripts() {
		if err != nil {
			return err
		}

		slog.Info("Running migration script", slog.String("file", s.FileName))

		_, err = conn.Exec(ctx, s.Content)
		if err != nil {
			return err
		}
	}

	return nil
}
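The loops above range over function iterators exposed by the embedded assets/migrations package, which is not included in this excerpt. A minimal sketch of the shape Migrate assumes; the directory names, type, and field names here are assumptions, not the actual implementation.

package migrations

import (
	"embed"
	"io/fs"
	"iter"
)

// Assumed layout: the .sql files copied by the FetchMigrations mage target.
//go:embed init-scripts migrations
var migrationsFS embed.FS

// Script pairs a migration file name with its SQL content.
type Script struct {
	FileName string
	Content  string
}

func InitScripts() iter.Seq2[Script, error]      { return scriptsIn("init-scripts") }
func MigrationScripts() iter.Seq2[Script, error] { return scriptsIn("migrations") }

// scriptsIn yields each embedded file below dir in lexical order.
func scriptsIn(dir string) iter.Seq2[Script, error] {
	return func(yield func(Script, error) bool) {
		_ = fs.WalkDir(migrationsFS, dir, func(path string, d fs.DirEntry, err error) error {
			if err != nil || d.IsDir() {
				return err
			}
			content, readErr := fs.ReadFile(migrationsFS, path)
			if !yield(Script{FileName: path, Content: string(content)}, readErr) {
				return fs.SkipAll
			}
			return nil
		})
	}
}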
17
magefiles/templates/images.go.tmpl
Normal file
@ -0,0 +1,17 @@
package supabase

type ImageRef struct {
	// The repository of the image
	Repository string
	// The tag of the image
	Tag string
}

var Images = map[string]ImageRef{
{{- range $name, $image := .Images }}
	"{{$name}}": {
		Repository: "{{$image.Repository}}",
		Tag: "{{$image.Tag}}",
	},
{{- end }}
}
120
test/e2e/e2e_suite_test.go
Normal file
@ -0,0 +1,120 @@
/*
Copyright 2024 Peter Kurfer.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"os"
	"os/exec"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"code.icb4dc0.de/prskr/supabase-operator/test/utils"
)

var (
	// Optional Environment Variables:
	// - PROMETHEUS_INSTALL_SKIP=true: Skips Prometheus Operator installation during test setup.
	// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
	// These variables are useful if Prometheus or CertManager is already installed, avoiding
	// re-installation and conflicts.
	skipPrometheusInstall  = os.Getenv("PROMETHEUS_INSTALL_SKIP") == "true"
	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
	// isPrometheusOperatorAlreadyInstalled will be set to true when Prometheus CRDs are found on the cluster
	isPrometheusOperatorAlreadyInstalled = false
	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
	isCertManagerAlreadyInstalled = false

	// projectImage is the name of the image which will be built and loaded
	// with the code source changes to be tested.
	projectImage = "example.com/supabase-operator:v0.0.1"
)

// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
// temporary environment to validate project changes and are intended to be used in CI jobs.
// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
// CertManager and Prometheus.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	_, _ = fmt.Fprintf(GinkgoWriter, "Starting supabase-operator integration test suite\n")
	RunSpecs(t, "e2e suite")
}

var _ = BeforeSuite(func() {
	By("Ensure that Prometheus is enabled")
	_ = utils.UncommentCode("config/default/kustomization.yaml", "#- ../prometheus", "#")

	By("generating files")
	cmd := exec.Command("make", "generate")
	_, err := utils.Run(cmd)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make generate")

	By("generating manifests")
	cmd = exec.Command("make", "manifests")
	_, err = utils.Run(cmd)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make manifests")

	By("building the manager(Operator) image")
	cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
	_, err = utils.Run(cmd)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")

	// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
	// built and available before running the tests. Also, remove the following block.
	By("loading the manager(Operator) image on Kind")
	err = utils.LoadImageToKindClusterWithName(projectImage)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")

	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
	// To prevent errors when tests run in environments with Prometheus or CertManager already installed,
	// we check for their presence before execution.
	// Setup Prometheus and CertManager before the suite if not skipped and if not already installed
	if !skipPrometheusInstall {
		By("checking if prometheus is installed already")
		isPrometheusOperatorAlreadyInstalled = utils.IsPrometheusCRDsInstalled()
		if !isPrometheusOperatorAlreadyInstalled {
			_, _ = fmt.Fprintf(GinkgoWriter, "Installing Prometheus Operator...\n")
			Expect(utils.InstallPrometheusOperator()).To(Succeed(), "Failed to install Prometheus Operator")
		} else {
			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: Prometheus Operator is already installed. Skipping installation...\n")
		}
	}
	if !skipCertManagerInstall {
		By("checking if cert manager is installed already")
		isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
		if !isCertManagerAlreadyInstalled {
			_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
			Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
		} else {
			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
		}
	}
})

var _ = AfterSuite(func() {
	// Teardown Prometheus and CertManager after the suite if not skipped and if they were not already installed
	if !skipPrometheusInstall && !isPrometheusOperatorAlreadyInstalled {
		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling Prometheus Operator...\n")
		utils.UninstallPrometheusOperator()
	}
	if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
		utils.UninstallCertManager()
	}
})