feat: custom postgres images
This commit is contained in:
parent
9d02a2d90b
commit
7f56a3db56
30 changed files with 2131 additions and 1874 deletions
.github/workflows
Tiltfileapi/v1alpha1
apigateway_types.gocommon_types.gocore_types.godashboard_types.gostorage_types.gozz_generated.deepcopy.go
assets/migrations/migrations
config
crd/bases
supabase.k8s.icb4dc0.de_apigateways.yamlsupabase.k8s.icb4dc0.de_cores.yamlsupabase.k8s.icb4dc0.de_dashboards.yamlsupabase.k8s.icb4dc0.de_storages.yaml
dev
docs/api
internal
controller
apigateway_controller.gocore_gotrue_controller.gocore_postgrest_controller.godashboard_pg-meta_controller.godashboard_studio_controller.gostorage_api_controller.gostorage_imgproxy_controller.go
db
supabase
magefiles
postgres
64
.github/workflows/postgres.yml
vendored
Normal file
64
.github/workflows/postgres.yml
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
name: Postgres image
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- "v*"
|
||||||
|
|
||||||
|
env:
|
||||||
|
MINOR_VERSIONS: '{"15": "10","17": "2"}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
arch:
|
||||||
|
- arm64
|
||||||
|
- amd64
|
||||||
|
postgres_major:
|
||||||
|
- "15"
|
||||||
|
- "17"
|
||||||
|
runs-on: ubuntu-latest-${{ matrix.arch }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Login to container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: code.icb4dc0.de
|
||||||
|
username: prskr
|
||||||
|
password: ${{ secrets.RELEASE_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
file: postgres/Dockerfile
|
||||||
|
push: true
|
||||||
|
tags: code.icb4dc0.de/prskr/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-${{ matrix.arch }}
|
||||||
|
build-args: |
|
||||||
|
POSTGRES_MAJOR=${{ matrix.postgres_major }}
|
||||||
|
POSTGRES_MINOR=${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}
|
||||||
|
|
||||||
|
manifest:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
postgres_major:
|
||||||
|
- "15"
|
||||||
|
- "17"
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- name: Login to container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: code.icb4dc0.de
|
||||||
|
username: prskr
|
||||||
|
password: ${{ secrets.RELEASE_TOKEN }}
|
||||||
|
|
||||||
|
- name: Create manifest
|
||||||
|
run: |
|
||||||
|
docker buildx imagetools create -t code.icb4dc0.de/prskr/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }} \
|
||||||
|
code.icb4dc0.de/prskr/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-arm64 \
|
||||||
|
code.icb4dc0.de/prskr/supabase-operator/postgres:${{ matrix.postgres_major }}.${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}.${{ github.run_number }}-amd64
|
5
.github/workflows/test-e2e.yml
vendored
5
.github/workflows/test-e2e.yml
vendored
|
@ -43,3 +43,8 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
go mod tidy
|
go mod tidy
|
||||||
make test-e2e
|
make test-e2e
|
||||||
|
|
||||||
|
- name: Cleanup kind cluster
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
kind delete cluster
|
||||||
|
|
12
Tiltfile
12
Tiltfile
|
@ -7,6 +7,13 @@ k8s_yaml(kustomize('config/dev'))
|
||||||
|
|
||||||
compile_cmd = 'CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/supabase-operator ./cmd/'
|
compile_cmd = 'CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/supabase-operator ./cmd/'
|
||||||
|
|
||||||
|
update_settings(suppress_unused_image_warnings=["localhost:5005/cnpg-postgres:17.2"])
|
||||||
|
custom_build(
|
||||||
|
'localhost:5005/cnpg-postgres:17.2',
|
||||||
|
'docker build -t $EXPECTED_REF --push -f postgres/Dockerfile --build-arg POSTGRES_MAJOR=17 --build-arg=POSTGRES_MINOR=2 .',
|
||||||
|
['postgres/Dockerfile']
|
||||||
|
)
|
||||||
|
|
||||||
local_resource(
|
local_resource(
|
||||||
'manager-go-compile',
|
'manager-go-compile',
|
||||||
compile_cmd,
|
compile_cmd,
|
||||||
|
@ -22,6 +29,8 @@ local_resource(
|
||||||
resource_deps=[]
|
resource_deps=[]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
k8s_kind('Cluster', api_version='postgresql.cnpg.io/v1')
|
||||||
|
|
||||||
docker_build_with_restart(
|
docker_build_with_restart(
|
||||||
'supabase-operator',
|
'supabase-operator',
|
||||||
'.',
|
'.',
|
||||||
|
@ -39,10 +48,11 @@ k8s_resource('supabase-controller-manager')
|
||||||
k8s_resource(
|
k8s_resource(
|
||||||
workload='supabase-control-plane',
|
workload='supabase-control-plane',
|
||||||
port_forwards=18000,
|
port_forwards=18000,
|
||||||
|
resource_deps=[]
|
||||||
)
|
)
|
||||||
|
|
||||||
k8s_resource(
|
k8s_resource(
|
||||||
objects=["cluster-example:Cluster:supabase-demo"],
|
workload='cluster-example',
|
||||||
new_name='Postgres cluster',
|
new_name='Postgres cluster',
|
||||||
port_forwards=5432
|
port_forwards=5432
|
||||||
)
|
)
|
||||||
|
|
|
@ -80,7 +80,7 @@ type EnvoySpec struct {
|
||||||
// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
|
// ControlPlane - configure the control plane where Envoy will retrieve its configuration from
|
||||||
ControlPlane *ControlPlaneSpec `json:"controlPlane"`
|
ControlPlane *ControlPlaneSpec `json:"controlPlane"`
|
||||||
// WorkloadTemplate - customize the Envoy deployment
|
// WorkloadTemplate - customize the Envoy deployment
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
// DisableIPv6 - disable IPv6 for the Envoy instance
|
// DisableIPv6 - disable IPv6 for the Envoy instance
|
||||||
// this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
|
// this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint)
|
||||||
DisableIPv6 bool `json:"disableIPv6,omitempty"`
|
DisableIPv6 bool `json:"disableIPv6,omitempty"`
|
||||||
|
|
|
@ -91,16 +91,16 @@ type ContainerTemplate struct {
|
||||||
AdditionalEnv []corev1.EnvVar `json:"additionalEnv,omitempty"`
|
AdditionalEnv []corev1.EnvVar `json:"additionalEnv,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type WorkloadTemplate struct {
|
type WorkloadSpec struct {
|
||||||
Replicas *int32 `json:"replicas,omitempty"`
|
Replicas *int32 `json:"replicas,omitempty"`
|
||||||
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
|
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||||
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
|
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
|
||||||
// Workload - customize the container template of the workload
|
// ContainerSpec - customize the container template of the workload
|
||||||
Workload *ContainerTemplate `json:"workload,omitempty"`
|
ContainerSpec *ContainerTemplate `json:"container,omitempty"`
|
||||||
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
|
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) ReplicaCount() *int32 {
|
func (t *WorkloadSpec) ReplicaCount() *int32 {
|
||||||
if t != nil && t.Replicas != nil {
|
if t != nil && t.Replicas != nil {
|
||||||
return t.Replicas
|
return t.Replicas
|
||||||
}
|
}
|
||||||
|
@ -108,20 +108,20 @@ func (t *WorkloadTemplate) ReplicaCount() *int32 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
func (t *WorkloadSpec) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
||||||
if t == nil || t.Workload == nil || len(t.Workload.AdditionalEnv) == 0 {
|
if t == nil || t.ContainerSpec == nil || len(t.ContainerSpec.AdditionalEnv) == 0 {
|
||||||
return basicEnv
|
return basicEnv
|
||||||
}
|
}
|
||||||
|
|
||||||
existingKeys := make(map[string]bool, len(basicEnv)+len(t.Workload.AdditionalEnv))
|
existingKeys := make(map[string]bool, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv))
|
||||||
|
|
||||||
merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.Workload.AdditionalEnv)), basicEnv...)
|
merged := append(make([]corev1.EnvVar, 0, len(basicEnv)+len(t.ContainerSpec.AdditionalEnv)), basicEnv...)
|
||||||
|
|
||||||
for _, v := range basicEnv {
|
for _, v := range basicEnv {
|
||||||
existingKeys[v.Name] = true
|
existingKeys[v.Name] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, v := range t.Workload.AdditionalEnv {
|
for _, v := range t.ContainerSpec.AdditionalEnv {
|
||||||
if _, alreadyPresent := existingKeys[v.Name]; alreadyPresent {
|
if _, alreadyPresent := existingKeys[v.Name]; alreadyPresent {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -132,7 +132,7 @@ func (t *WorkloadTemplate) MergeEnv(basicEnv []corev1.EnvVar) []corev1.EnvVar {
|
||||||
return merged
|
return merged
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
|
func (t *WorkloadSpec) MergeLabels(initial map[string]string, toAppend ...map[string]string) map[string]string {
|
||||||
result := make(map[string]string)
|
result := make(map[string]string)
|
||||||
|
|
||||||
maps.Copy(result, initial)
|
maps.Copy(result, initial)
|
||||||
|
@ -156,47 +156,47 @@ func (t *WorkloadTemplate) MergeLabels(initial map[string]string, toAppend ...ma
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) Image(defaultImage string) string {
|
func (t *WorkloadSpec) Image(defaultImage string) string {
|
||||||
if t != nil && t.Workload != nil && t.Workload.Image != "" {
|
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.Image != "" {
|
||||||
return t.Workload.Image
|
return t.ContainerSpec.Image
|
||||||
}
|
}
|
||||||
|
|
||||||
return defaultImage
|
return defaultImage
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) ImagePullPolicy() corev1.PullPolicy {
|
func (t *WorkloadSpec) ImagePullPolicy() corev1.PullPolicy {
|
||||||
if t != nil && t.Workload != nil && t.Workload.PullPolicy != "" {
|
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.PullPolicy != "" {
|
||||||
return t.Workload.PullPolicy
|
return t.ContainerSpec.PullPolicy
|
||||||
}
|
}
|
||||||
|
|
||||||
return corev1.PullIfNotPresent
|
return corev1.PullIfNotPresent
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) PullSecrets() []corev1.LocalObjectReference {
|
func (t *WorkloadSpec) PullSecrets() []corev1.LocalObjectReference {
|
||||||
if t != nil && t.Workload != nil && len(t.Workload.ImagePullSecrets) > 0 {
|
if t != nil && t.ContainerSpec != nil && len(t.ContainerSpec.ImagePullSecrets) > 0 {
|
||||||
return t.Workload.ImagePullSecrets
|
return t.ContainerSpec.ImagePullSecrets
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) Resources() corev1.ResourceRequirements {
|
func (t *WorkloadSpec) Resources() corev1.ResourceRequirements {
|
||||||
if t != nil && t.Workload != nil {
|
if t != nil && t.ContainerSpec != nil {
|
||||||
return t.Workload.Resources
|
return t.ContainerSpec.Resources
|
||||||
}
|
}
|
||||||
|
|
||||||
return corev1.ResourceRequirements{}
|
return corev1.ResourceRequirements{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
|
func (t *WorkloadSpec) AdditionalVolumeMounts(defaultMounts ...corev1.VolumeMount) []corev1.VolumeMount {
|
||||||
if t != nil && t.Workload != nil {
|
if t != nil && t.ContainerSpec != nil {
|
||||||
return append(defaultMounts, t.Workload.VolumeMounts...)
|
return append(defaultMounts, t.ContainerSpec.VolumeMounts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return defaultMounts
|
return defaultMounts
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
|
func (t *WorkloadSpec) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
|
||||||
if t == nil {
|
if t == nil {
|
||||||
return defaultVolumes
|
return defaultVolumes
|
||||||
}
|
}
|
||||||
|
@ -204,7 +204,7 @@ func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Vol
|
||||||
return append(defaultVolumes, t.AdditionalVolumes...)
|
return append(defaultVolumes, t.AdditionalVolumes...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
|
func (t *WorkloadSpec) PodSecurityContext() *corev1.PodSecurityContext {
|
||||||
if t != nil && t.SecurityContext != nil {
|
if t != nil && t.SecurityContext != nil {
|
||||||
return t.SecurityContext
|
return t.SecurityContext
|
||||||
}
|
}
|
||||||
|
@ -214,9 +214,9 @@ func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *WorkloadTemplate) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
|
func (t *WorkloadSpec) ContainerSecurityContext(uid, gid int64) *corev1.SecurityContext {
|
||||||
if t != nil && t.Workload != nil && t.Workload.SecurityContext != nil {
|
if t != nil && t.ContainerSpec != nil && t.ContainerSpec.SecurityContext != nil {
|
||||||
return t.Workload.SecurityContext
|
return t.ContainerSpec.SecurityContext
|
||||||
}
|
}
|
||||||
|
|
||||||
return &corev1.SecurityContext{
|
return &corev1.SecurityContext{
|
||||||
|
|
|
@ -167,8 +167,8 @@ type PostgrestSpec struct {
|
||||||
// MaxRows - maximum number of rows PostgREST will load at a time
|
// MaxRows - maximum number of rows PostgREST will load at a time
|
||||||
// +kubebuilder:default=1000
|
// +kubebuilder:default=1000
|
||||||
MaxRows int `json:"maxRows,omitempty"`
|
MaxRows int `json:"maxRows,omitempty"`
|
||||||
// WorkloadTemplate - customize the PostgREST workload
|
// WorkloadSpec - customize the PostgREST workload
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type AuthProviderMeta struct {
|
type AuthProviderMeta struct {
|
||||||
|
@ -365,12 +365,12 @@ func (p *AuthProviders) Vars(apiExternalURL string) []corev1.EnvVar {
|
||||||
}
|
}
|
||||||
|
|
||||||
type AuthSpec struct {
|
type AuthSpec struct {
|
||||||
AdditionalRedirectUrls []string `json:"additionalRedirectUrls,omitempty"`
|
AdditionalRedirectUrls []string `json:"additionalRedirectUrls,omitempty"`
|
||||||
DisableSignup *bool `json:"disableSignup,omitempty"`
|
DisableSignup *bool `json:"disableSignup,omitempty"`
|
||||||
AnonymousUsersEnabled *bool `json:"anonymousUsersEnabled,omitempty"`
|
AnonymousUsersEnabled *bool `json:"anonymousUsersEnabled,omitempty"`
|
||||||
Providers *AuthProviders `json:"providers,omitempty"`
|
Providers *AuthProviders `json:"providers,omitempty"`
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadTemplate *WorkloadSpec `json:"workloadTemplate,omitempty"`
|
||||||
EmailSignupDisabled *bool `json:"emailSignupDisabled,omitempty"`
|
EmailSignupDisabled *bool `json:"emailSignupDisabled,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// CoreSpec defines the desired state of Core.
|
// CoreSpec defines the desired state of Core.
|
||||||
|
|
|
@ -24,7 +24,7 @@ import (
|
||||||
type StudioSpec struct {
|
type StudioSpec struct {
|
||||||
JWT *JwtSpec `json:"jwt,omitempty"`
|
JWT *JwtSpec `json:"jwt,omitempty"`
|
||||||
// WorkloadTemplate - customize the studio deployment
|
// WorkloadTemplate - customize the studio deployment
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
// GatewayServiceSelector - selector to find the service for the API gateway
|
// GatewayServiceSelector - selector to find the service for the API gateway
|
||||||
// Required to configure the API URL in the studio deployment
|
// Required to configure the API URL in the studio deployment
|
||||||
// If you don't run multiple APIGateway instances in the same namespaces, the default will be fine
|
// If you don't run multiple APIGateway instances in the same namespaces, the default will be fine
|
||||||
|
@ -37,7 +37,7 @@ type StudioSpec struct {
|
||||||
|
|
||||||
type PGMetaSpec struct {
|
type PGMetaSpec struct {
|
||||||
// WorkloadTemplate - customize the pg-meta deployment
|
// WorkloadTemplate - customize the pg-meta deployment
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type DbCredentialsReference struct {
|
type DbCredentialsReference struct {
|
||||||
|
|
|
@ -191,7 +191,7 @@ type StorageApiSpec struct {
|
||||||
// UploadTemp - configure the emptyDir for storing intermediate files during uploads
|
// UploadTemp - configure the emptyDir for storing intermediate files during uploads
|
||||||
UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
|
UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
|
||||||
// WorkloadTemplate - customize the Storage API workload
|
// WorkloadTemplate - customize the Storage API workload
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ImageProxySpec struct {
|
type ImageProxySpec struct {
|
||||||
|
@ -199,7 +199,7 @@ type ImageProxySpec struct {
|
||||||
Enable bool `json:"enable,omitempty"`
|
Enable bool `json:"enable,omitempty"`
|
||||||
EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
|
EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
|
||||||
// WorkloadTemplate - customize the image proxy workload
|
// WorkloadTemplate - customize the image proxy workload
|
||||||
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
|
WorkloadSpec *WorkloadSpec `json:"workloadSpec,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// StorageSpec defines the desired state of Storage.
|
// StorageSpec defines the desired state of Storage.
|
||||||
|
|
|
@ -252,7 +252,7 @@ func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadTemplate != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.EmailSignupDisabled != nil {
|
if in.EmailSignupDisabled != nil {
|
||||||
|
@ -921,9 +921,9 @@ func (in *EnvoySpec) DeepCopyInto(out *EnvoySpec) {
|
||||||
*out = new(ControlPlaneSpec)
|
*out = new(ControlPlaneSpec)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.Debugging != nil {
|
if in.Debugging != nil {
|
||||||
|
@ -998,9 +998,9 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
|
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1089,9 +1089,9 @@ func (in *OAuthProvider) DeepCopy() *OAuthProvider {
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
func (in *PGMetaSpec) DeepCopyInto(out *PGMetaSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1135,9 +1135,9 @@ func (in *PostgrestSpec) DeepCopyInto(out *PostgrestSpec) {
|
||||||
*out = make([]string, len(*in))
|
*out = make([]string, len(*in))
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1294,9 +1294,9 @@ func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
|
||||||
*out = new(UploadTempSpec)
|
*out = new(UploadTempSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1387,9 +1387,9 @@ func (in *StudioSpec) DeepCopyInto(out *StudioSpec) {
|
||||||
*out = new(JwtSpec)
|
*out = new(JwtSpec)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
if in.WorkloadTemplate != nil {
|
if in.WorkloadSpec != nil {
|
||||||
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
|
in, out := &in.WorkloadSpec, &out.WorkloadSpec
|
||||||
*out = new(WorkloadTemplate)
|
*out = new(WorkloadSpec)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
if in.GatewayServiceMatchLabels != nil {
|
if in.GatewayServiceMatchLabels != nil {
|
||||||
|
@ -1447,7 +1447,7 @@ func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
func (in *WorkloadSpec) DeepCopyInto(out *WorkloadSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.Replicas != nil {
|
if in.Replicas != nil {
|
||||||
in, out := &in.Replicas, &out.Replicas
|
in, out := &in.Replicas, &out.Replicas
|
||||||
|
@ -1466,8 +1466,8 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||||
(*out)[key] = val
|
(*out)[key] = val
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if in.Workload != nil {
|
if in.ContainerSpec != nil {
|
||||||
in, out := &in.Workload, &out.Workload
|
in, out := &in.ContainerSpec, &out.ContainerSpec
|
||||||
*out = new(ContainerTemplate)
|
*out = new(ContainerTemplate)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
@ -1480,12 +1480,12 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpec.
|
||||||
func (in *WorkloadTemplate) DeepCopy() *WorkloadTemplate {
|
func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {
|
||||||
if in == nil {
|
if in == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := new(WorkloadTemplate)
|
out := new(WorkloadSpec)
|
||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,19 +0,0 @@
|
||||||
-- migrate:up
|
|
||||||
|
|
||||||
-- demote postgres user
|
|
||||||
GRANT ALL ON DATABASE postgres TO postgres;
|
|
||||||
GRANT ALL ON SCHEMA auth TO postgres;
|
|
||||||
GRANT ALL ON SCHEMA extensions TO postgres;
|
|
||||||
GRANT ALL ON SCHEMA storage TO postgres;
|
|
||||||
GRANT ALL ON ALL TABLES IN SCHEMA auth TO postgres;
|
|
||||||
GRANT ALL ON ALL TABLES IN SCHEMA storage TO postgres;
|
|
||||||
GRANT ALL ON ALL TABLES IN SCHEMA extensions TO postgres;
|
|
||||||
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO postgres;
|
|
||||||
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO postgres;
|
|
||||||
GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO postgres;
|
|
||||||
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO postgres;
|
|
||||||
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO postgres;
|
|
||||||
GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO postgres;
|
|
||||||
ALTER ROLE postgres NOSUPERUSER CREATEDB CREATEROLE LOGIN REPLICATION BYPASSRLS;
|
|
||||||
|
|
||||||
-- migrate:down
|
|
|
@ -4,7 +4,5 @@ ALTER ROLE authenticated inherit;
|
||||||
ALTER ROLE anon inherit;
|
ALTER ROLE anon inherit;
|
||||||
ALTER ROLE service_role inherit;
|
ALTER ROLE service_role inherit;
|
||||||
|
|
||||||
GRANT pgsodium_keyholder to service_role;
|
|
||||||
|
|
||||||
-- migrate:down
|
-- migrate:down
|
||||||
|
|
||||||
|
|
|
@ -295,7 +295,7 @@ spec:
|
||||||
if not set, the name of the APIGateway resource will be used
|
if not set, the name of the APIGateway resource will be used
|
||||||
The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit.
|
The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit.
|
||||||
type: string
|
type: string
|
||||||
workloadTemplate:
|
workloadSpec:
|
||||||
description: WorkloadTemplate - customize the Envoy deployment
|
description: WorkloadTemplate - customize the Envoy deployment
|
||||||
properties:
|
properties:
|
||||||
additionalLabels:
|
additionalLabels:
|
||||||
|
@ -2102,248 +2102,9 @@ spec:
|
||||||
- name
|
- name
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
replicas:
|
container:
|
||||||
format: int32
|
description: ContainerSpec - customize the container template
|
||||||
type: integer
|
of the workload
|
||||||
securityContext:
|
|
||||||
description: |-
|
|
||||||
PodSecurityContext holds pod-level security attributes and common container settings.
|
|
||||||
Some fields are also present in container.securityContext. Field values of
|
|
||||||
container.securityContext take precedence over field values of PodSecurityContext.
|
|
||||||
properties:
|
|
||||||
appArmorProfile:
|
|
||||||
description: |-
|
|
||||||
appArmorProfile is the AppArmor options to use by the containers in this pod.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
properties:
|
|
||||||
localhostProfile:
|
|
||||||
description: |-
|
|
||||||
localhostProfile indicates a profile loaded on the node that should be used.
|
|
||||||
The profile must be preconfigured on the node to work.
|
|
||||||
Must match the loaded name of the profile.
|
|
||||||
Must be set if and only if type is "Localhost".
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: |-
|
|
||||||
type indicates which kind of AppArmor profile will be applied.
|
|
||||||
Valid options are:
|
|
||||||
Localhost - a profile pre-loaded on the node.
|
|
||||||
RuntimeDefault - the container runtime's default profile.
|
|
||||||
Unconfined - no AppArmor enforcement.
|
|
||||||
type: string
|
|
||||||
required:
|
|
||||||
- type
|
|
||||||
type: object
|
|
||||||
fsGroup:
|
|
||||||
description: |-
|
|
||||||
A special supplemental group that applies to all containers in a pod.
|
|
||||||
Some volume types allow the Kubelet to change the ownership of that volume
|
|
||||||
to be owned by the pod:
|
|
||||||
|
|
||||||
1. The owning GID will be the FSGroup
|
|
||||||
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
|
|
||||||
3. The permission bits are OR'd with rw-rw----
|
|
||||||
|
|
||||||
If unset, the Kubelet will not modify the ownership and permissions of any volume.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
format: int64
|
|
||||||
type: integer
|
|
||||||
fsGroupChangePolicy:
|
|
||||||
description: |-
|
|
||||||
fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
|
|
||||||
before being exposed inside Pod. This field will only apply to
|
|
||||||
volume types which support fsGroup based ownership(and permissions).
|
|
||||||
It will have no effect on ephemeral volume types such as: secret, configmaps
|
|
||||||
and emptydir.
|
|
||||||
Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
type: string
|
|
||||||
runAsGroup:
|
|
||||||
description: |-
|
|
||||||
The GID to run the entrypoint of the container process.
|
|
||||||
Uses runtime default if unset.
|
|
||||||
May also be set in SecurityContext. If set in both SecurityContext and
|
|
||||||
PodSecurityContext, the value specified in SecurityContext takes precedence
|
|
||||||
for that container.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
format: int64
|
|
||||||
type: integer
|
|
||||||
runAsNonRoot:
|
|
||||||
description: |-
|
|
||||||
Indicates that the container must run as a non-root user.
|
|
||||||
If true, the Kubelet will validate the image at runtime to ensure that it
|
|
||||||
does not run as UID 0 (root) and fail to start the container if it does.
|
|
||||||
If unset or false, no such validation will be performed.
|
|
||||||
May also be set in SecurityContext. If set in both SecurityContext and
|
|
||||||
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
|
||||||
type: boolean
|
|
||||||
runAsUser:
|
|
||||||
description: |-
|
|
||||||
The UID to run the entrypoint of the container process.
|
|
||||||
Defaults to user specified in image metadata if unspecified.
|
|
||||||
May also be set in SecurityContext. If set in both SecurityContext and
|
|
||||||
PodSecurityContext, the value specified in SecurityContext takes precedence
|
|
||||||
for that container.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
format: int64
|
|
||||||
type: integer
|
|
||||||
seLinuxChangePolicy:
|
|
||||||
description: |-
|
|
||||||
seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
|
|
||||||
It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
|
|
||||||
Valid values are "MountOption" and "Recursive".
|
|
||||||
|
|
||||||
"Recursive" means relabeling of all files on all Pod volumes by the container runtime.
|
|
||||||
This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
|
|
||||||
|
|
||||||
"MountOption" mounts all eligible Pod volumes with `-o context` mount option.
|
|
||||||
This requires all Pods that share the same volume to use the same SELinux label.
|
|
||||||
It is not possible to share the same volume among privileged and unprivileged Pods.
|
|
||||||
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
|
|
||||||
whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
|
|
||||||
CSIDriver instance. Other volumes are always re-labelled recursively.
|
|
||||||
"MountOption" value is allowed only when SELinuxMount feature gate is enabled.
|
|
||||||
|
|
||||||
If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
|
|
||||||
If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
|
|
||||||
and "Recursive" for all other volumes.
|
|
||||||
|
|
||||||
This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
|
|
||||||
|
|
||||||
All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
type: string
|
|
||||||
seLinuxOptions:
|
|
||||||
description: |-
|
|
||||||
The SELinux context to be applied to all containers.
|
|
||||||
If unspecified, the container runtime will allocate a random SELinux context for each
|
|
||||||
container. May also be set in SecurityContext. If set in
|
|
||||||
both SecurityContext and PodSecurityContext, the value specified in SecurityContext
|
|
||||||
takes precedence for that container.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
properties:
|
|
||||||
level:
|
|
||||||
description: Level is SELinux level label that applies
|
|
||||||
to the container.
|
|
||||||
type: string
|
|
||||||
role:
|
|
||||||
description: Role is a SELinux role label that applies
|
|
||||||
to the container.
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: Type is a SELinux type label that applies
|
|
||||||
to the container.
|
|
||||||
type: string
|
|
||||||
user:
|
|
||||||
description: User is a SELinux user label that applies
|
|
||||||
to the container.
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
seccompProfile:
|
|
||||||
description: |-
|
|
||||||
The seccomp options to use by the containers in this pod.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
properties:
|
|
||||||
localhostProfile:
|
|
||||||
description: |-
|
|
||||||
localhostProfile indicates a profile defined in a file on the node should be used.
|
|
||||||
The profile must be preconfigured on the node to work.
|
|
||||||
Must be a descending path, relative to the kubelet's configured seccomp profile location.
|
|
||||||
Must be set if type is "Localhost". Must NOT be set for any other type.
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: |-
|
|
||||||
type indicates which kind of seccomp profile will be applied.
|
|
||||||
Valid options are:
|
|
||||||
|
|
||||||
Localhost - a profile defined in a file on the node should be used.
|
|
||||||
RuntimeDefault - the container runtime default profile should be used.
|
|
||||||
Unconfined - no profile should be applied.
|
|
||||||
type: string
|
|
||||||
required:
|
|
||||||
- type
|
|
||||||
type: object
|
|
||||||
supplementalGroups:
|
|
||||||
description: |-
|
|
||||||
A list of groups applied to the first process run in each container, in
|
|
||||||
addition to the container's primary GID and fsGroup (if specified). If
|
|
||||||
the SupplementalGroupsPolicy feature is enabled, the
|
|
||||||
supplementalGroupsPolicy field determines whether these are in addition
|
|
||||||
to or instead of any group memberships defined in the container image.
|
|
||||||
If unspecified, no additional groups are added, though group memberships
|
|
||||||
defined in the container image may still be used, depending on the
|
|
||||||
supplementalGroupsPolicy field.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
items:
|
|
||||||
format: int64
|
|
||||||
type: integer
|
|
||||||
type: array
|
|
||||||
x-kubernetes-list-type: atomic
|
|
||||||
supplementalGroupsPolicy:
|
|
||||||
description: |-
|
|
||||||
Defines how supplemental groups of the first container processes are calculated.
|
|
||||||
Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
|
|
||||||
(Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
|
|
||||||
and the container runtime must implement support for this feature.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
type: string
|
|
||||||
sysctls:
|
|
||||||
description: |-
|
|
||||||
Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
|
|
||||||
sysctls (by the container runtime) might fail to launch.
|
|
||||||
Note that this field cannot be set when spec.os.name is windows.
|
|
||||||
items:
|
|
||||||
description: Sysctl defines a kernel parameter to be
|
|
||||||
set
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
description: Name of a property to set
|
|
||||||
type: string
|
|
||||||
value:
|
|
||||||
description: Value of a property to set
|
|
||||||
type: string
|
|
||||||
required:
|
|
||||||
- name
|
|
||||||
- value
|
|
||||||
type: object
|
|
||||||
type: array
|
|
||||||
x-kubernetes-list-type: atomic
|
|
||||||
windowsOptions:
|
|
||||||
description: |-
|
|
||||||
The Windows specific settings applied to all containers.
|
|
||||||
If unspecified, the options within a container's SecurityContext will be used.
|
|
||||||
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
|
|
||||||
Note that this field cannot be set when spec.os.name is linux.
|
|
||||||
properties:
|
|
||||||
gmsaCredentialSpec:
|
|
||||||
description: |-
|
|
||||||
GMSACredentialSpec is where the GMSA admission webhook
|
|
||||||
(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
|
|
||||||
GMSA credential spec named by the GMSACredentialSpecName field.
|
|
||||||
type: string
|
|
||||||
gmsaCredentialSpecName:
|
|
||||||
description: GMSACredentialSpecName is the name of
|
|
||||||
the GMSA credential spec to use.
|
|
||||||
type: string
|
|
||||||
hostProcess:
|
|
||||||
description: |-
|
|
||||||
HostProcess determines if a container should be run as a 'Host Process' container.
|
|
||||||
All of a Pod's containers must have the same effective HostProcess value
|
|
||||||
(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
|
|
||||||
In addition, if HostProcess is true then HostNetwork must also be set to true.
|
|
||||||
type: boolean
|
|
||||||
runAsUserName:
|
|
||||||
description: |-
|
|
||||||
The UserName in Windows to run the entrypoint of the container process.
|
|
||||||
Defaults to the user specified in image metadata if unspecified.
|
|
||||||
May also be set in PodSecurityContext. If set in both SecurityContext and
|
|
||||||
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
type: object
|
|
||||||
workload:
|
|
||||||
description: Workload - customize the container template of
|
|
||||||
the workload
|
|
||||||
properties:
|
properties:
|
||||||
additionalEnv:
|
additionalEnv:
|
||||||
items:
|
items:
|
||||||
|
@ -2808,6 +2569,245 @@ spec:
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
type: object
|
type: object
|
||||||
|
replicas:
|
||||||
|
format: int32
|
||||||
|
type: integer
|
||||||
|
securityContext:
|
||||||
|
description: |-
|
||||||
|
PodSecurityContext holds pod-level security attributes and common container settings.
|
||||||
|
Some fields are also present in container.securityContext. Field values of
|
||||||
|
container.securityContext take precedence over field values of PodSecurityContext.
|
||||||
|
properties:
|
||||||
|
appArmorProfile:
|
||||||
|
description: |-
|
||||||
|
appArmorProfile is the AppArmor options to use by the containers in this pod.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
properties:
|
||||||
|
localhostProfile:
|
||||||
|
description: |-
|
||||||
|
localhostProfile indicates a profile loaded on the node that should be used.
|
||||||
|
The profile must be preconfigured on the node to work.
|
||||||
|
Must match the loaded name of the profile.
|
||||||
|
Must be set if and only if type is "Localhost".
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
description: |-
|
||||||
|
type indicates which kind of AppArmor profile will be applied.
|
||||||
|
Valid options are:
|
||||||
|
Localhost - a profile pre-loaded on the node.
|
||||||
|
RuntimeDefault - the container runtime's default profile.
|
||||||
|
Unconfined - no AppArmor enforcement.
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- type
|
||||||
|
type: object
|
||||||
|
fsGroup:
|
||||||
|
description: |-
|
||||||
|
A special supplemental group that applies to all containers in a pod.
|
||||||
|
Some volume types allow the Kubelet to change the ownership of that volume
|
||||||
|
to be owned by the pod:
|
||||||
|
|
||||||
|
1. The owning GID will be the FSGroup
|
||||||
|
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
|
||||||
|
3. The permission bits are OR'd with rw-rw----
|
||||||
|
|
||||||
|
If unset, the Kubelet will not modify the ownership and permissions of any volume.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
fsGroupChangePolicy:
|
||||||
|
description: |-
|
||||||
|
fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
|
||||||
|
before being exposed inside Pod. This field will only apply to
|
||||||
|
volume types which support fsGroup based ownership(and permissions).
|
||||||
|
It will have no effect on ephemeral volume types such as: secret, configmaps
|
||||||
|
and emptydir.
|
||||||
|
Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
type: string
|
||||||
|
runAsGroup:
|
||||||
|
description: |-
|
||||||
|
The GID to run the entrypoint of the container process.
|
||||||
|
Uses runtime default if unset.
|
||||||
|
May also be set in SecurityContext. If set in both SecurityContext and
|
||||||
|
PodSecurityContext, the value specified in SecurityContext takes precedence
|
||||||
|
for that container.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
runAsNonRoot:
|
||||||
|
description: |-
|
||||||
|
Indicates that the container must run as a non-root user.
|
||||||
|
If true, the Kubelet will validate the image at runtime to ensure that it
|
||||||
|
does not run as UID 0 (root) and fail to start the container if it does.
|
||||||
|
If unset or false, no such validation will be performed.
|
||||||
|
May also be set in SecurityContext. If set in both SecurityContext and
|
||||||
|
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||||
|
type: boolean
|
||||||
|
runAsUser:
|
||||||
|
description: |-
|
||||||
|
The UID to run the entrypoint of the container process.
|
||||||
|
Defaults to user specified in image metadata if unspecified.
|
||||||
|
May also be set in SecurityContext. If set in both SecurityContext and
|
||||||
|
PodSecurityContext, the value specified in SecurityContext takes precedence
|
||||||
|
for that container.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
seLinuxChangePolicy:
|
||||||
|
description: |-
|
||||||
|
seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
|
||||||
|
It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
|
||||||
|
Valid values are "MountOption" and "Recursive".
|
||||||
|
|
||||||
|
"Recursive" means relabeling of all files on all Pod volumes by the container runtime.
|
||||||
|
This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
|
||||||
|
|
||||||
|
"MountOption" mounts all eligible Pod volumes with `-o context` mount option.
|
||||||
|
This requires all Pods that share the same volume to use the same SELinux label.
|
||||||
|
It is not possible to share the same volume among privileged and unprivileged Pods.
|
||||||
|
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
|
||||||
|
whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
|
||||||
|
CSIDriver instance. Other volumes are always re-labelled recursively.
|
||||||
|
"MountOption" value is allowed only when SELinuxMount feature gate is enabled.
|
||||||
|
|
||||||
|
If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
|
||||||
|
If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
|
||||||
|
and "Recursive" for all other volumes.
|
||||||
|
|
||||||
|
This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
|
||||||
|
|
||||||
|
All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
type: string
|
||||||
|
seLinuxOptions:
|
||||||
|
description: |-
|
||||||
|
The SELinux context to be applied to all containers.
|
||||||
|
If unspecified, the container runtime will allocate a random SELinux context for each
|
||||||
|
container. May also be set in SecurityContext. If set in
|
||||||
|
both SecurityContext and PodSecurityContext, the value specified in SecurityContext
|
||||||
|
takes precedence for that container.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
properties:
|
||||||
|
level:
|
||||||
|
description: Level is SELinux level label that applies
|
||||||
|
to the container.
|
||||||
|
type: string
|
||||||
|
role:
|
||||||
|
description: Role is a SELinux role label that applies
|
||||||
|
to the container.
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
description: Type is a SELinux type label that applies
|
||||||
|
to the container.
|
||||||
|
type: string
|
||||||
|
user:
|
||||||
|
description: User is a SELinux user label that applies
|
||||||
|
to the container.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
seccompProfile:
|
||||||
|
description: |-
|
||||||
|
The seccomp options to use by the containers in this pod.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
properties:
|
||||||
|
localhostProfile:
|
||||||
|
description: |-
|
||||||
|
localhostProfile indicates a profile defined in a file on the node should be used.
|
||||||
|
The profile must be preconfigured on the node to work.
|
||||||
|
Must be a descending path, relative to the kubelet's configured seccomp profile location.
|
||||||
|
Must be set if type is "Localhost". Must NOT be set for any other type.
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
description: |-
|
||||||
|
type indicates which kind of seccomp profile will be applied.
|
||||||
|
Valid options are:
|
||||||
|
|
||||||
|
Localhost - a profile defined in a file on the node should be used.
|
||||||
|
RuntimeDefault - the container runtime default profile should be used.
|
||||||
|
Unconfined - no profile should be applied.
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- type
|
||||||
|
type: object
|
||||||
|
supplementalGroups:
|
||||||
|
description: |-
|
||||||
|
A list of groups applied to the first process run in each container, in
|
||||||
|
addition to the container's primary GID and fsGroup (if specified). If
|
||||||
|
the SupplementalGroupsPolicy feature is enabled, the
|
||||||
|
supplementalGroupsPolicy field determines whether these are in addition
|
||||||
|
to or instead of any group memberships defined in the container image.
|
||||||
|
If unspecified, no additional groups are added, though group memberships
|
||||||
|
defined in the container image may still be used, depending on the
|
||||||
|
supplementalGroupsPolicy field.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
items:
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
type: array
|
||||||
|
x-kubernetes-list-type: atomic
|
||||||
|
supplementalGroupsPolicy:
|
||||||
|
description: |-
|
||||||
|
Defines how supplemental groups of the first container processes are calculated.
|
||||||
|
Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
|
||||||
|
(Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
|
||||||
|
and the container runtime must implement support for this feature.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
type: string
|
||||||
|
sysctls:
|
||||||
|
description: |-
|
||||||
|
Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
|
||||||
|
sysctls (by the container runtime) might fail to launch.
|
||||||
|
Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
items:
|
||||||
|
description: Sysctl defines a kernel parameter to be
|
||||||
|
set
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
description: Name of a property to set
|
||||||
|
type: string
|
||||||
|
value:
|
||||||
|
description: Value of a property to set
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
- value
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
x-kubernetes-list-type: atomic
|
||||||
|
windowsOptions:
|
||||||
|
description: |-
|
||||||
|
The Windows specific settings applied to all containers.
|
||||||
|
If unspecified, the options within a container's SecurityContext will be used.
|
||||||
|
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||||
|
Note that this field cannot be set when spec.os.name is linux.
|
||||||
|
properties:
|
||||||
|
gmsaCredentialSpec:
|
||||||
|
description: |-
|
||||||
|
GMSACredentialSpec is where the GMSA admission webhook
|
||||||
|
(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
|
||||||
|
GMSA credential spec named by the GMSACredentialSpecName field.
|
||||||
|
type: string
|
||||||
|
gmsaCredentialSpecName:
|
||||||
|
description: GMSACredentialSpecName is the name of
|
||||||
|
the GMSA credential spec to use.
|
||||||
|
type: string
|
||||||
|
hostProcess:
|
||||||
|
description: |-
|
||||||
|
HostProcess determines if a container should be run as a 'Host Process' container.
|
||||||
|
All of a Pod's containers must have the same effective HostProcess value
|
||||||
|
(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
|
||||||
|
In addition, if HostProcess is true then HostNetwork must also be set to true.
|
||||||
|
type: boolean
|
||||||
|
runAsUserName:
|
||||||
|
description: |-
|
||||||
|
The UserName in Windows to run the entrypoint of the container process.
|
||||||
|
Defaults to the user specified in image metadata if unspecified.
|
||||||
|
May also be set in PodSecurityContext. If set in both SecurityContext and
|
||||||
|
PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
type: object
|
type: object
|
||||||
required:
|
required:
|
||||||
- controlPlane
|
- controlPlane
|
||||||
|
|
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -10,6 +10,8 @@ metadata:
|
||||||
spec:
|
spec:
|
||||||
envoy:
|
envoy:
|
||||||
disableIPv6: true
|
disableIPv6: true
|
||||||
|
workloadSpec:
|
||||||
|
replicas: 2
|
||||||
apiEndpoint:
|
apiEndpoint:
|
||||||
jwks:
|
jwks:
|
||||||
name: core-sample-jwt
|
name: core-sample-jwt
|
||||||
|
|
|
@ -44,14 +44,15 @@ metadata:
|
||||||
namespace: supabase-demo
|
namespace: supabase-demo
|
||||||
spec:
|
spec:
|
||||||
instances: 1
|
instances: 1
|
||||||
imageName: ghcr.io/supabase/postgres:15.8.1.021
|
imageName: localhost:5005/cnpg-postgres:17.2
|
||||||
postgresUID: 105
|
imagePullPolicy: Always
|
||||||
postgresGID: 106
|
postgresUID: 26
|
||||||
|
postgresGID: 102
|
||||||
|
|
||||||
bootstrap:
|
bootstrap:
|
||||||
initdb:
|
initdb:
|
||||||
database: app
|
database: app
|
||||||
owner: setup
|
owner: supabase_admin
|
||||||
postInitSQL:
|
postInitSQL:
|
||||||
- drop publication if exists supabase_realtime;
|
- drop publication if exists supabase_realtime;
|
||||||
|
|
||||||
|
|
|
@ -25,3 +25,6 @@ patches:
|
||||||
target:
|
target:
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
labelSelector: app.kubernetes.io/name=control-plane
|
labelSelector: app.kubernetes.io/name=control-plane
|
||||||
|
|
||||||
|
configurations:
|
||||||
|
- kustomizeconfig/cnpg-cluster.yaml
|
||||||
|
|
|
@ -151,7 +151,7 @@ _Appears in:_
|
||||||
| `disableSignup` _boolean_ | | | |
|
| `disableSignup` _boolean_ | | | |
|
||||||
| `anonymousUsersEnabled` _boolean_ | | | |
|
| `anonymousUsersEnabled` _boolean_ | | | |
|
||||||
| `providers` _[AuthProviders](#authproviders)_ | | | |
|
| `providers` _[AuthProviders](#authproviders)_ | | | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | | | |
|
| `workloadTemplate` _[WorkloadSpec](#workloadspec)_ | | | |
|
||||||
| `emailSignupDisabled` _boolean_ | | | |
|
| `emailSignupDisabled` _boolean_ | | | |
|
||||||
|
|
||||||
|
|
||||||
|
@ -185,7 +185,7 @@ _Appears in:_
|
||||||
|
|
||||||
|
|
||||||
_Appears in:_
|
_Appears in:_
|
||||||
- [WorkloadTemplate](#workloadtemplate)
|
- [WorkloadSpec](#workloadspec)
|
||||||
|
|
||||||
| Field | Description | Default | Validation |
|
| Field | Description | Default | Validation |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
|
@ -660,7 +660,7 @@ _Appears in:_
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `nodeName` _string_ | NodeName - identifies the Envoy cluster within the current namespace<br />if not set, the name of the APIGateway resource will be used<br />The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit. | | |
|
| `nodeName` _string_ | NodeName - identifies the Envoy cluster within the current namespace<br />if not set, the name of the APIGateway resource will be used<br />The primary use case is to make the assignment of multiple supabase instances in a single namespace explicit. | | |
|
||||||
| `controlPlane` _[ControlPlaneSpec](#controlplanespec)_ | ControlPlane - configure the control plane where Envoy will retrieve its configuration from | | |
|
| `controlPlane` _[ControlPlaneSpec](#controlplanespec)_ | ControlPlane - configure the control plane where Envoy will retrieve its configuration from | | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the Envoy deployment | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the Envoy deployment | | |
|
||||||
| `disableIPv6` _boolean_ | DisableIPv6 - disable IPv6 for the Envoy instance<br />this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint) | | |
|
| `disableIPv6` _boolean_ | DisableIPv6 - disable IPv6 for the Envoy instance<br />this will force Envoy to use IPv4 for upstream hosts (mostly for the OAuth2 token endpoint) | | |
|
||||||
| `debugging` _[EnvoyDebuggingOptions](#envoydebuggingoptions)_ | | | |
|
| `debugging` _[EnvoyDebuggingOptions](#envoydebuggingoptions)_ | | | |
|
||||||
|
|
||||||
|
@ -731,7 +731,7 @@ _Appears in:_
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `enable` _boolean_ | Enable - whether to deploy the image proxy or not | | |
|
| `enable` _boolean_ | Enable - whether to deploy the image proxy or not | | |
|
||||||
| `enableWebPDetection` _boolean_ | | | |
|
| `enableWebPDetection` _boolean_ | | | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the image proxy workload | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the image proxy workload | | |
|
||||||
|
|
||||||
|
|
||||||
#### ImageSpec
|
#### ImageSpec
|
||||||
|
@ -818,7 +818,7 @@ _Appears in:_
|
||||||
|
|
||||||
| Field | Description | Default | Validation |
|
| Field | Description | Default | Validation |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the pg-meta deployment | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the pg-meta deployment | | |
|
||||||
|
|
||||||
|
|
||||||
#### PhoneAuthProvider
|
#### PhoneAuthProvider
|
||||||
|
@ -854,7 +854,7 @@ _Appears in:_
|
||||||
| `extraSearchPath` _string array_ | ExtraSearchPath - Extra schemas to add to the search_path of every request.<br />These schemas tables, views and functions don’t get API endpoints, they can only be referred from the database objects inside your db-schemas. | [public extensions] | |
|
| `extraSearchPath` _string array_ | ExtraSearchPath - Extra schemas to add to the search_path of every request.<br />These schemas tables, views and functions don’t get API endpoints, they can only be referred from the database objects inside your db-schemas. | [public extensions] | |
|
||||||
| `anonRole` _string_ | AnonRole - name of the anon role | anon | |
|
| `anonRole` _string_ | AnonRole - name of the anon role | anon | |
|
||||||
| `maxRows` _integer_ | MaxRows - maximum number of rows PostgREST will load at a time | 1000 | |
|
| `maxRows` _integer_ | MaxRows - maximum number of rows PostgREST will load at a time | 1000 | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the PostgREST workload | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadSpec - customize the PostgREST workload | | |
|
||||||
|
|
||||||
|
|
||||||
#### S3BackendSpec
|
#### S3BackendSpec
|
||||||
|
@ -989,7 +989,7 @@ _Appears in:_
|
||||||
| `db` _[StorageApiDbSpec](#storageapidbspec)_ | DBSpec - Configure access to the Postgres database<br />In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource | | |
|
| `db` _[StorageApiDbSpec](#storageapidbspec)_ | DBSpec - Configure access to the Postgres database<br />In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource | | |
|
||||||
| `s3` _[S3ProtocolSpec](#s3protocolspec)_ | S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client | | |
|
| `s3` _[S3ProtocolSpec](#s3protocolspec)_ | S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client | | |
|
||||||
| `uploadTemp` _[UploadTempSpec](#uploadtempspec)_ | UploadTemp - configure the emptyDir for storing intermediate files during uploads | | |
|
| `uploadTemp` _[UploadTempSpec](#uploadtempspec)_ | UploadTemp - configure the emptyDir for storing intermediate files during uploads | | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the Storage API workload | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the Storage API workload | | |
|
||||||
|
|
||||||
|
|
||||||
#### StorageList
|
#### StorageList
|
||||||
|
@ -1043,7 +1043,7 @@ _Appears in:_
|
||||||
| Field | Description | Default | Validation |
|
| Field | Description | Default | Validation |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `jwt` _[JwtSpec](#jwtspec)_ | | | |
|
| `jwt` _[JwtSpec](#jwtspec)_ | | | |
|
||||||
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the studio deployment | | |
|
| `workloadSpec` _[WorkloadSpec](#workloadspec)_ | WorkloadTemplate - customize the studio deployment | | |
|
||||||
| `gatewayServiceSelector` _object (keys:string, values:string)_ | GatewayServiceSelector - selector to find the service for the API gateway<br />Required to configure the API URL in the studio deployment<br />If you don't run multiple APIGateway instances in the same namespaces, the default will be fine | \{ app.kubernetes.io/component:api-gateway app.kubernetes.io/name:envoy \} | |
|
| `gatewayServiceSelector` _object (keys:string, values:string)_ | GatewayServiceSelector - selector to find the service for the API gateway<br />Required to configure the API URL in the studio deployment<br />If you don't run multiple APIGateway instances in the same namespaces, the default will be fine | \{ app.kubernetes.io/component:api-gateway app.kubernetes.io/name:envoy \} | |
|
||||||
| `externalUrl` _string_ | APIExternalURL is referring to the URL where Supabase API will be available<br />Typically this is the ingress of the API gateway | | |
|
| `externalUrl` _string_ | APIExternalURL is referring to the URL where Supabase API will be available<br />Typically this is the ingress of the API gateway | | |
|
||||||
|
|
||||||
|
@ -1084,7 +1084,7 @@ _Appears in:_
|
||||||
| `sizeLimit` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#quantity-resource-api)_ | | | |
|
| `sizeLimit` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#quantity-resource-api)_ | | | |
|
||||||
|
|
||||||
|
|
||||||
#### WorkloadTemplate
|
#### WorkloadSpec
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -1106,7 +1106,7 @@ _Appears in:_
|
||||||
| `replicas` _integer_ | | | |
|
| `replicas` _integer_ | | | |
|
||||||
| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podsecuritycontext-v1-core)_ | | | |
|
| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podsecuritycontext-v1-core)_ | | | |
|
||||||
| `additionalLabels` _object (keys:string, values:string)_ | | | |
|
| `additionalLabels` _object (keys:string, values:string)_ | | | |
|
||||||
| `workload` _[ContainerTemplate](#containertemplate)_ | Workload - customize the container template of the workload | | |
|
| `container` _[ContainerTemplate](#containertemplate)_ | ContainerSpec - customize the container template of the workload | | |
|
||||||
| `additionalVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volume-v1-core) array_ | | | |
|
| `additionalVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volume-v1-core) array_ | | | |
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -436,7 +436,7 @@ func (r *APIGatewayReconciler) reconileEnvoyDeployment(
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, envoyDeployment, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, envoyDeployment, func() error {
|
||||||
envoyDeployment.Labels = envoySpec.WorkloadTemplate.MergeLabels(
|
envoyDeployment.Labels = envoySpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(gateway, "envoy", "api-gateway", supabase.Images.Envoy.Tag),
|
objectLabels(gateway, "envoy", "api-gateway", supabase.Images.Envoy.Tag),
|
||||||
gateway.Labels,
|
gateway.Labels,
|
||||||
)
|
)
|
||||||
|
@ -447,7 +447,7 @@ func (r *APIGatewayReconciler) reconileEnvoyDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
envoyDeployment.Spec.Replicas = envoySpec.WorkloadTemplate.ReplicaCount()
|
envoyDeployment.Spec.Replicas = envoySpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
envoyArgs := []string{"-c /etc/envoy/config.yaml"}
|
envoyArgs := []string{"-c /etc/envoy/config.yaml"}
|
||||||
|
|
||||||
|
@ -464,13 +464,13 @@ func (r *APIGatewayReconciler) reconileEnvoyDeployment(
|
||||||
Labels: objectLabels(gateway, "envoy", "api-gateway", supabase.Images.Envoy.Tag),
|
Labels: objectLabels(gateway, "envoy", "api-gateway", supabase.Images.Envoy.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: envoySpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: envoySpec.WorkloadSpec.PullSecrets(),
|
||||||
AutomountServiceAccountToken: ptrOf(false),
|
AutomountServiceAccountToken: ptrOf(false),
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
Name: "envoy-proxy",
|
Name: "envoy-proxy",
|
||||||
Image: envoySpec.WorkloadTemplate.Image(supabase.Images.Envoy.String()),
|
Image: envoySpec.WorkloadSpec.Image(supabase.Images.Envoy.String()),
|
||||||
ImagePullPolicy: envoySpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: envoySpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Args: envoyArgs,
|
Args: envoyArgs,
|
||||||
Ports: []corev1.ContainerPort{
|
Ports: []corev1.ContainerPort{
|
||||||
{
|
{
|
||||||
|
@ -512,16 +512,16 @@ func (r *APIGatewayReconciler) reconileEnvoyDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
SecurityContext: envoySpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
SecurityContext: envoySpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
||||||
Resources: envoySpec.WorkloadTemplate.Resources(),
|
Resources: envoySpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: envoySpec.WorkloadTemplate.AdditionalVolumeMounts(corev1.VolumeMount{
|
VolumeMounts: envoySpec.WorkloadSpec.AdditionalVolumeMounts(corev1.VolumeMount{
|
||||||
Name: configVolumeName,
|
Name: configVolumeName,
|
||||||
ReadOnly: true,
|
ReadOnly: true,
|
||||||
MountPath: "/etc/envoy",
|
MountPath: "/etc/envoy",
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
SecurityContext: envoySpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: envoySpec.WorkloadSpec.PodSecurityContext(),
|
||||||
Volumes: []corev1.Volume{
|
Volumes: []corev1.Volume{
|
||||||
{
|
{
|
||||||
Name: configVolumeName,
|
Name: configVolumeName,
|
||||||
|
|
|
@ -232,7 +232,7 @@ func (r *CoreAuthReconciler) reconcileAuthService(
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, authService, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, authService, func() error {
|
||||||
authService.Labels = core.Spec.Postgrest.WorkloadTemplate.MergeLabels(
|
authService.Labels = core.Spec.Postgrest.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(core, "auth", "core", supabase.Images.Gotrue.Tag),
|
objectLabels(core, "auth", "core", supabase.Images.Gotrue.Tag),
|
||||||
core.Labels,
|
core.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -115,7 +115,7 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, postgrestDeployment, func() error {
|
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, postgrestDeployment, func() error {
|
||||||
postgrestDeployment.Labels = postgrestSpec.WorkloadTemplate.MergeLabels(
|
postgrestDeployment.Labels = postgrestSpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
||||||
core.Labels,
|
core.Labels,
|
||||||
)
|
)
|
||||||
|
@ -155,7 +155,7 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
postgrestDeployment.Spec.Replicas = postgrestSpec.WorkloadTemplate.ReplicaCount()
|
postgrestDeployment.Spec.Replicas = postgrestSpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
postgrestDeployment.Spec.Template = corev1.PodTemplateSpec{
|
postgrestDeployment.Spec.Template = corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
@ -165,14 +165,14 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
|
||||||
Labels: objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
Labels: objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: postgrestSpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: postgrestSpec.WorkloadSpec.PullSecrets(),
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
Name: "supabase-rest",
|
Name: "supabase-rest",
|
||||||
Image: postgrestSpec.WorkloadTemplate.Image(supabase.Images.Postgrest.String()),
|
Image: postgrestSpec.WorkloadSpec.Image(supabase.Images.Postgrest.String()),
|
||||||
ImagePullPolicy: postgrestSpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: postgrestSpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Args: []string{"postgrest"},
|
Args: []string{"postgrest"},
|
||||||
Env: postgrestSpec.WorkloadTemplate.MergeEnv(postgrestEnv),
|
Env: postgrestSpec.WorkloadSpec.MergeEnv(postgrestEnv),
|
||||||
Ports: []corev1.ContainerPort{
|
Ports: []corev1.ContainerPort{
|
||||||
{
|
{
|
||||||
Name: serviceCfg.Defaults.ServerPortName,
|
Name: serviceCfg.Defaults.ServerPortName,
|
||||||
|
@ -185,9 +185,9 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
|
||||||
Protocol: corev1.ProtocolTCP,
|
Protocol: corev1.ProtocolTCP,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
SecurityContext: postgrestSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
SecurityContext: postgrestSpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
||||||
Resources: postgrestSpec.WorkloadTemplate.Resources(),
|
Resources: postgrestSpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: postgrestSpec.WorkloadTemplate.AdditionalVolumeMounts(),
|
VolumeMounts: postgrestSpec.WorkloadSpec.AdditionalVolumeMounts(),
|
||||||
ReadinessProbe: &corev1.Probe{
|
ReadinessProbe: &corev1.Probe{
|
||||||
InitialDelaySeconds: 5,
|
InitialDelaySeconds: 5,
|
||||||
PeriodSeconds: 3,
|
PeriodSeconds: 3,
|
||||||
|
@ -213,7 +213,7 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
SecurityContext: postgrestSpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: postgrestSpec.WorkloadSpec.PodSecurityContext(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -239,7 +239,7 @@ func (r *CorePostgrestReconiler) reconcilePostgrestService(
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, postgrestService, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, postgrestService, func() error {
|
||||||
postgrestService.Labels = core.Spec.Postgrest.WorkloadTemplate.MergeLabels(
|
postgrestService.Labels = core.Spec.Postgrest.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
|
||||||
core.Labels,
|
core.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -99,7 +99,7 @@ func (r *DashboardPGMetaReconciler) reconcilePGMetaDeployment(
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, pgMetaDeployment, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, pgMetaDeployment, func() error {
|
||||||
pgMetaDeployment.Labels = pgMetaSpec.WorkloadTemplate.MergeLabels(
|
pgMetaDeployment.Labels = pgMetaSpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
||||||
dashboard.Labels,
|
dashboard.Labels,
|
||||||
)
|
)
|
||||||
|
@ -110,7 +110,7 @@ func (r *DashboardPGMetaReconciler) reconcilePGMetaDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pgMetaDeployment.Spec.Replicas = pgMetaSpec.WorkloadTemplate.ReplicaCount()
|
pgMetaDeployment.Spec.Replicas = pgMetaSpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
pgMetaEnv := []corev1.EnvVar{
|
pgMetaEnv := []corev1.EnvVar{
|
||||||
serviceCfg.EnvKeys.APIPort.Var(serviceCfg.Defaults.APIPort),
|
serviceCfg.EnvKeys.APIPort.Var(serviceCfg.Defaults.APIPort),
|
||||||
|
@ -126,20 +126,20 @@ func (r *DashboardPGMetaReconciler) reconcilePGMetaDeployment(
|
||||||
Labels: objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
Labels: objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: pgMetaSpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: pgMetaSpec.WorkloadSpec.PullSecrets(),
|
||||||
Containers: []corev1.Container{{
|
Containers: []corev1.Container{{
|
||||||
Name: "supabase-meta",
|
Name: "supabase-meta",
|
||||||
Image: pgMetaSpec.WorkloadTemplate.Image(supabase.Images.PostgresMeta.String()),
|
Image: pgMetaSpec.WorkloadSpec.Image(supabase.Images.PostgresMeta.String()),
|
||||||
ImagePullPolicy: pgMetaSpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: pgMetaSpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Env: pgMetaSpec.WorkloadTemplate.MergeEnv(pgMetaEnv),
|
Env: pgMetaSpec.WorkloadSpec.MergeEnv(pgMetaEnv),
|
||||||
Ports: []corev1.ContainerPort{{
|
Ports: []corev1.ContainerPort{{
|
||||||
Name: "api",
|
Name: "api",
|
||||||
ContainerPort: serviceCfg.Defaults.APIPort,
|
ContainerPort: serviceCfg.Defaults.APIPort,
|
||||||
Protocol: corev1.ProtocolTCP,
|
Protocol: corev1.ProtocolTCP,
|
||||||
}},
|
}},
|
||||||
SecurityContext: pgMetaSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.NodeUID, serviceCfg.Defaults.NodeGID),
|
SecurityContext: pgMetaSpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.NodeUID, serviceCfg.Defaults.NodeGID),
|
||||||
Resources: pgMetaSpec.WorkloadTemplate.Resources(),
|
Resources: pgMetaSpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: pgMetaSpec.WorkloadTemplate.AdditionalVolumeMounts(),
|
VolumeMounts: pgMetaSpec.WorkloadSpec.AdditionalVolumeMounts(),
|
||||||
ReadinessProbe: &corev1.Probe{
|
ReadinessProbe: &corev1.Probe{
|
||||||
InitialDelaySeconds: 5,
|
InitialDelaySeconds: 5,
|
||||||
PeriodSeconds: 3,
|
PeriodSeconds: 3,
|
||||||
|
@ -164,7 +164,7 @@ func (r *DashboardPGMetaReconciler) reconcilePGMetaDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
SecurityContext: pgMetaSpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: pgMetaSpec.WorkloadSpec.PodSecurityContext(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -191,7 +191,7 @@ func (r *DashboardPGMetaReconciler) reconcilePGMetaService(
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrPatch(ctx, r.Client, pgMetaService, func() error {
|
_, err := controllerutil.CreateOrPatch(ctx, r.Client, pgMetaService, func() error {
|
||||||
pgMetaService.Labels = dashboard.Spec.PGMeta.WorkloadTemplate.MergeLabels(
|
pgMetaService.Labels = dashboard.Spec.PGMeta.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(dashboard, supabase.ServiceConfig.PGMeta.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
objectLabels(dashboard, supabase.ServiceConfig.PGMeta.Name, "dashboard", supabase.Images.PostgresMeta.Tag),
|
||||||
dashboard.Labels,
|
dashboard.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -107,7 +107,7 @@ func (r *DashboardStudioReconciler) reconcileStudioDeployment(
|
||||||
gatewayService := gatewayServiceList.Items[0]
|
gatewayService := gatewayServiceList.Items[0]
|
||||||
|
|
||||||
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, studioDeployment, func() error {
|
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, studioDeployment, func() error {
|
||||||
studioDeployment.Labels = studioSpec.WorkloadTemplate.MergeLabels(
|
studioDeployment.Labels = studioSpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.Studio.Tag),
|
objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.Studio.Tag),
|
||||||
dashboard.Labels,
|
dashboard.Labels,
|
||||||
)
|
)
|
||||||
|
@ -118,7 +118,7 @@ func (r *DashboardStudioReconciler) reconcileStudioDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
studioDeployment.Spec.Replicas = studioSpec.WorkloadTemplate.ReplicaCount()
|
studioDeployment.Spec.Replicas = studioSpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
studioEnv := []corev1.EnvVar{
|
studioEnv := []corev1.EnvVar{
|
||||||
serviceCfg.EnvKeys.PGMetaURL.Var(fmt.Sprintf("http://%s.%s.svc:%d", supabase.ServiceConfig.PGMeta.ObjectName(dashboard), dashboard.Namespace, supabase.ServiceConfig.PGMeta.Defaults.APIPort)),
|
serviceCfg.EnvKeys.PGMetaURL.Var(fmt.Sprintf("http://%s.%s.svc:%d", supabase.ServiceConfig.PGMeta.ObjectName(dashboard), dashboard.Namespace, supabase.ServiceConfig.PGMeta.Defaults.APIPort)),
|
||||||
|
@ -137,20 +137,20 @@ func (r *DashboardStudioReconciler) reconcileStudioDeployment(
|
||||||
Labels: objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.Studio.Tag),
|
Labels: objectLabels(dashboard, serviceCfg.Name, "dashboard", supabase.Images.Studio.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: studioSpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: studioSpec.WorkloadSpec.PullSecrets(),
|
||||||
Containers: []corev1.Container{{
|
Containers: []corev1.Container{{
|
||||||
Name: "supabase-studio",
|
Name: "supabase-studio",
|
||||||
Image: studioSpec.WorkloadTemplate.Image(supabase.Images.Studio.String()),
|
Image: studioSpec.WorkloadSpec.Image(supabase.Images.Studio.String()),
|
||||||
ImagePullPolicy: studioSpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: studioSpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Env: studioSpec.WorkloadTemplate.MergeEnv(studioEnv),
|
Env: studioSpec.WorkloadSpec.MergeEnv(studioEnv),
|
||||||
Ports: []corev1.ContainerPort{{
|
Ports: []corev1.ContainerPort{{
|
||||||
Name: "studio",
|
Name: "studio",
|
||||||
ContainerPort: serviceCfg.Defaults.APIPort,
|
ContainerPort: serviceCfg.Defaults.APIPort,
|
||||||
Protocol: corev1.ProtocolTCP,
|
Protocol: corev1.ProtocolTCP,
|
||||||
}},
|
}},
|
||||||
SecurityContext: studioSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.NodeUID, serviceCfg.Defaults.NodeGID),
|
SecurityContext: studioSpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.NodeUID, serviceCfg.Defaults.NodeGID),
|
||||||
Resources: studioSpec.WorkloadTemplate.Resources(),
|
Resources: studioSpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: studioSpec.WorkloadTemplate.AdditionalVolumeMounts(corev1.VolumeMount{
|
VolumeMounts: studioSpec.WorkloadSpec.AdditionalVolumeMounts(corev1.VolumeMount{
|
||||||
Name: "next-cache",
|
Name: "next-cache",
|
||||||
MountPath: "/app/apps/studio/.next/cache",
|
MountPath: "/app/apps/studio/.next/cache",
|
||||||
}),
|
}),
|
||||||
|
@ -178,7 +178,7 @@ func (r *DashboardStudioReconciler) reconcileStudioDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
SecurityContext: studioSpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: studioSpec.WorkloadSpec.PodSecurityContext(),
|
||||||
Volumes: []corev1.Volume{{
|
Volumes: []corev1.Volume{{
|
||||||
Name: "next-cache",
|
Name: "next-cache",
|
||||||
VolumeSource: corev1.VolumeSource{
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
@ -214,7 +214,7 @@ func (r *DashboardStudioReconciler) reconcileStudioService(
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrPatch(ctx, r.Client, studioService, func() error {
|
_, err := controllerutil.CreateOrPatch(ctx, r.Client, studioService, func() error {
|
||||||
studioService.Labels = dashboard.Spec.Studio.WorkloadTemplate.MergeLabels(
|
studioService.Labels = dashboard.Spec.Studio.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(dashboard, supabase.ServiceConfig.Studio.Name, "dashboard", supabase.Images.Studio.Tag),
|
objectLabels(dashboard, supabase.ServiceConfig.Studio.Name, "dashboard", supabase.Images.Studio.Tag),
|
||||||
dashboard.Labels,
|
dashboard.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -133,7 +133,7 @@ func (r *StorageApiReconciler) reconcileStorageApiDeployment(
|
||||||
))
|
))
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiDeployment, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiDeployment, func() error {
|
||||||
storageApiDeployment.Labels = apiSpec.WorkloadTemplate.MergeLabels(
|
storageApiDeployment.Labels = apiSpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
||||||
storage.Labels,
|
storage.Labels,
|
||||||
)
|
)
|
||||||
|
@ -188,7 +188,7 @@ func (r *StorageApiReconciler) reconcileStorageApiDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
storageApiDeployment.Spec.Replicas = apiSpec.WorkloadTemplate.ReplicaCount()
|
storageApiDeployment.Spec.Replicas = apiSpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
storageApiDeployment.Spec.Template = corev1.PodTemplateSpec{
|
storageApiDeployment.Spec.Template = corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
@ -199,20 +199,20 @@ func (r *StorageApiReconciler) reconcileStorageApiDeployment(
|
||||||
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: apiSpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: apiSpec.WorkloadSpec.PullSecrets(),
|
||||||
Containers: []corev1.Container{{
|
Containers: []corev1.Container{{
|
||||||
Name: "supabase-storage",
|
Name: "supabase-storage",
|
||||||
Image: apiSpec.WorkloadTemplate.Image(supabase.Images.Storage.String()),
|
Image: apiSpec.WorkloadSpec.Image(supabase.Images.Storage.String()),
|
||||||
ImagePullPolicy: apiSpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: apiSpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Env: apiSpec.WorkloadTemplate.MergeEnv(append(storagApiEnv, slices.Concat(apiSpec.FileBackend.Env(), apiSpec.S3Backend.Env())...)),
|
Env: apiSpec.WorkloadSpec.MergeEnv(append(storagApiEnv, slices.Concat(apiSpec.FileBackend.Env(), apiSpec.S3Backend.Env())...)),
|
||||||
Ports: []corev1.ContainerPort{{
|
Ports: []corev1.ContainerPort{{
|
||||||
Name: serviceCfg.Defaults.ApiPortName,
|
Name: serviceCfg.Defaults.ApiPortName,
|
||||||
ContainerPort: serviceCfg.Defaults.ApiPort,
|
ContainerPort: serviceCfg.Defaults.ApiPort,
|
||||||
Protocol: corev1.ProtocolTCP,
|
Protocol: corev1.ProtocolTCP,
|
||||||
}},
|
}},
|
||||||
SecurityContext: apiSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
SecurityContext: apiSpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
||||||
Resources: apiSpec.WorkloadTemplate.Resources(),
|
Resources: apiSpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: apiSpec.WorkloadTemplate.AdditionalVolumeMounts(
|
VolumeMounts: apiSpec.WorkloadSpec.AdditionalVolumeMounts(
|
||||||
corev1.VolumeMount{
|
corev1.VolumeMount{
|
||||||
Name: "tmp",
|
Name: "tmp",
|
||||||
MountPath: "/tmp",
|
MountPath: "/tmp",
|
||||||
|
@ -242,8 +242,8 @@ func (r *StorageApiReconciler) reconcileStorageApiDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
SecurityContext: apiSpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: apiSpec.WorkloadSpec.PodSecurityContext(),
|
||||||
Volumes: apiSpec.WorkloadTemplate.Volumes(
|
Volumes: apiSpec.WorkloadSpec.Volumes(
|
||||||
corev1.Volume{
|
corev1.Volume{
|
||||||
Name: "tmp",
|
Name: "tmp",
|
||||||
VolumeSource: corev1.VolumeSource{
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
@ -276,7 +276,7 @@ func (r *StorageApiReconciler) reconcileStorageApiService(
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiService, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiService, func() error {
|
||||||
storageApiService.Labels = storage.Spec.Api.WorkloadTemplate.MergeLabels(
|
storageApiService.Labels = storage.Spec.Api.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
|
||||||
storage.Labels,
|
storage.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -98,7 +98,7 @@ func (r *StorageImgProxyReconciler) reconcileImgProxyDeployment(
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, imgProxyDeployment, func() error {
|
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, imgProxyDeployment, func() error {
|
||||||
imgProxyDeployment.Labels = imgProxySpec.WorkloadTemplate.MergeLabels(
|
imgProxyDeployment.Labels = imgProxySpec.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
||||||
storage.Labels,
|
storage.Labels,
|
||||||
)
|
)
|
||||||
|
@ -119,27 +119,27 @@ func (r *StorageImgProxyReconciler) reconcileImgProxyDeployment(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
imgProxyDeployment.Spec.Replicas = imgProxySpec.WorkloadTemplate.ReplicaCount()
|
imgProxyDeployment.Spec.Replicas = imgProxySpec.WorkloadSpec.ReplicaCount()
|
||||||
|
|
||||||
imgProxyDeployment.Spec.Template = corev1.PodTemplateSpec{
|
imgProxyDeployment.Spec.Template = corev1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
ImagePullSecrets: imgProxySpec.WorkloadTemplate.PullSecrets(),
|
ImagePullSecrets: imgProxySpec.WorkloadSpec.PullSecrets(),
|
||||||
Containers: []corev1.Container{{
|
Containers: []corev1.Container{{
|
||||||
Name: "supabase-imgproxy",
|
Name: "supabase-imgproxy",
|
||||||
Image: imgProxySpec.WorkloadTemplate.Image(supabase.Images.ImgProxy.String()),
|
Image: imgProxySpec.WorkloadSpec.Image(supabase.Images.ImgProxy.String()),
|
||||||
ImagePullPolicy: imgProxySpec.WorkloadTemplate.ImagePullPolicy(),
|
ImagePullPolicy: imgProxySpec.WorkloadSpec.ImagePullPolicy(),
|
||||||
Env: imgProxySpec.WorkloadTemplate.MergeEnv(imgProxyEnv),
|
Env: imgProxySpec.WorkloadSpec.MergeEnv(imgProxyEnv),
|
||||||
Ports: []corev1.ContainerPort{{
|
Ports: []corev1.ContainerPort{{
|
||||||
Name: serviceCfg.Defaults.ApiPortName,
|
Name: serviceCfg.Defaults.ApiPortName,
|
||||||
ContainerPort: serviceCfg.Defaults.ApiPort,
|
ContainerPort: serviceCfg.Defaults.ApiPort,
|
||||||
Protocol: corev1.ProtocolTCP,
|
Protocol: corev1.ProtocolTCP,
|
||||||
}},
|
}},
|
||||||
SecurityContext: imgProxySpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
SecurityContext: imgProxySpec.WorkloadSpec.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
|
||||||
Resources: imgProxySpec.WorkloadTemplate.Resources(),
|
Resources: imgProxySpec.WorkloadSpec.Resources(),
|
||||||
VolumeMounts: imgProxySpec.WorkloadTemplate.AdditionalVolumeMounts(),
|
VolumeMounts: imgProxySpec.WorkloadSpec.AdditionalVolumeMounts(),
|
||||||
ReadinessProbe: &corev1.Probe{
|
ReadinessProbe: &corev1.Probe{
|
||||||
InitialDelaySeconds: 5,
|
InitialDelaySeconds: 5,
|
||||||
PeriodSeconds: 3,
|
PeriodSeconds: 3,
|
||||||
|
@ -162,8 +162,8 @@ func (r *StorageImgProxyReconciler) reconcileImgProxyDeployment(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
SecurityContext: imgProxySpec.WorkloadTemplate.PodSecurityContext(),
|
SecurityContext: imgProxySpec.WorkloadSpec.PodSecurityContext(),
|
||||||
Volumes: imgProxySpec.WorkloadTemplate.Volumes(),
|
Volumes: imgProxySpec.WorkloadSpec.Volumes(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -189,7 +189,7 @@ func (r *StorageImgProxyReconciler) reconcileImgProxyService(
|
||||||
)
|
)
|
||||||
|
|
||||||
_, err := controllerutil.CreateOrPatch(ctx, r.Client, imgProxyService, func() error {
|
_, err := controllerutil.CreateOrPatch(ctx, r.Client, imgProxyService, func() error {
|
||||||
imgProxyService.Labels = storage.Spec.Api.WorkloadTemplate.MergeLabels(
|
imgProxyService.Labels = storage.Spec.Api.WorkloadSpec.MergeLabels(
|
||||||
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
|
||||||
storage.Labels,
|
storage.Labels,
|
||||||
)
|
)
|
||||||
|
|
|
@ -22,6 +22,7 @@ import (
|
||||||
"iter"
|
"iter"
|
||||||
|
|
||||||
"github.com/jackc/pgx/v5"
|
"github.com/jackc/pgx/v5"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
|
||||||
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
|
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
|
||||||
"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
|
"code.icb4dc0.de/prskr/supabase-operator/assets/migrations"
|
||||||
|
@ -32,6 +33,8 @@ type Migrator struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.MigrationStatus, seq iter.Seq2[migrations.Script, error]) (appliedSomething bool, err error) {
|
func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.MigrationStatus, seq iter.Seq2[migrations.Script, error]) (appliedSomething bool, err error) {
|
||||||
|
logger := log.FromContext(ctx)
|
||||||
|
|
||||||
for s, err := range seq {
|
for s, err := range seq {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
|
@ -41,6 +44,7 @@ func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.Migratio
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Info("Applying missing migration", "filename", s.FileName)
|
||||||
if err := m.Apply(ctx, s.Content); err != nil {
|
if err := m.Apply(ctx, s.Content); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -42,7 +42,7 @@ var Images = struct {
|
||||||
}{
|
}{
|
||||||
EdgeRuntime: ImageRef{
|
EdgeRuntime: ImageRef{
|
||||||
Repository: "supabase/edge-runtime",
|
Repository: "supabase/edge-runtime",
|
||||||
Tag: "v1.66.5",
|
Tag: "v1.67.0",
|
||||||
},
|
},
|
||||||
Envoy: ImageRef{
|
Envoy: ImageRef{
|
||||||
Repository: "envoyproxy/envoy",
|
Repository: "envoyproxy/envoy",
|
||||||
|
|
|
@ -28,6 +28,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -41,6 +42,10 @@ const (
|
||||||
composeFileUrl = "https://raw.githubusercontent.com/supabase/supabase/refs/heads/master/docker/docker-compose.yml"
|
composeFileUrl = "https://raw.githubusercontent.com/supabase/supabase/refs/heads/master/docker/docker-compose.yml"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var ignoredMigrations = []string{
|
||||||
|
"10000000000000_demote-postgres.sql",
|
||||||
|
}
|
||||||
|
|
||||||
func GenerateAll(ctx context.Context) {
|
func GenerateAll(ctx context.Context) {
|
||||||
mg.CtxDeps(ctx, FetchImageMeta, FetchMigrations, CRDs, CRDDocs)
|
mg.CtxDeps(ctx, FetchImageMeta, FetchMigrations, CRDs, CRDDocs)
|
||||||
}
|
}
|
||||||
|
@ -204,7 +209,13 @@ func FetchMigrations(ctx context.Context) (err error) {
|
||||||
if strings.HasPrefix(fileName, migrationsDirPath) {
|
if strings.HasPrefix(fileName, migrationsDirPath) {
|
||||||
fileName = strings.TrimPrefix(fileName, migrationsDirPath)
|
fileName = strings.TrimPrefix(fileName, migrationsDirPath)
|
||||||
|
|
||||||
dir, _ := path.Split(fileName)
|
dir, migrationFileName := path.Split(fileName)
|
||||||
|
|
||||||
|
if slices.Contains(ignoredMigrations, migrationFileName) {
|
||||||
|
slog.Info("Skipping migration file", slog.String("name", migrationFileName))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
outDir := filepath.Join(workingDir, "assets", "migrations", filepath.FromSlash(dir))
|
outDir := filepath.Join(workingDir, "assets", "migrations", filepath.FromSlash(dir))
|
||||||
if err := os.MkdirAll(outDir, 0o750); err != nil {
|
if err := os.MkdirAll(outDir, 0o750); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
178
postgres/Dockerfile
Normal file
178
postgres/Dockerfile
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
# syntax=docker/dockerfile:1.13
|
||||||
|
|
||||||
|
ARG POSTGRES_MAJOR=17
|
||||||
|
ARG POSTGRES_MINOR=2
|
||||||
|
ARG IMAGE_FLAVOR=standard
|
||||||
|
ARG DISTRO=bookworm
|
||||||
|
ARG IMAGE_BASE=ghcr.io/cloudnative-pg/postgresql:${POSTGRES_MAJOR}.${POSTGRES_MINOR}-${IMAGE_FLAVOR}-${DISTRO}
|
||||||
|
|
||||||
|
FROM ${IMAGE_BASE} AS base
|
||||||
|
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
ARG POSTGRES_MAJOR
|
||||||
|
|
||||||
|
USER root
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install --yes --no-install-recommends \
|
||||||
|
libsodium23 \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-cron \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-http \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-hypopg \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-pgrouting \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-pgtap \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-plpgsql-check \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-postgis-3 \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-wal2json \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-rum \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-repack \
|
||||||
|
postgresql-${POSTGRES_MAJOR}-timescaledb && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
USER 26
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
# Builder stage: compile every extension that has no Debian/PGDG package.
# Artifacts are copied into the final stage; nothing from this stage ships
# directly, so build tooling can be installed freely here.
# ---------------------------------------------------------------------------
FROM ${IMAGE_BASE} AS builder

# Major Postgres version the extensions are built against.
ARG POSTGRES_MAJOR
# Pinned upstream versions (git tags unless noted) for reproducible builds.
ARG PG_SAFEUPDATE_VERSION=1.5
ARG PG_NET_VERSION=v0.9.1
ARG PGSODIUM_VERSION=v3.1.9
ARG PG_HASHIDS_VERSION=v1.2.1
ARG PG_TLE_VERSION=v1.4.0
ARG PG_GRAPHQL_VERSION=v1.5.9
ARG PG_SUPABASE_VAULT_VERSION=v0.3.1
ARG SUPABASE_WRAPPER_VERSION=v0.4.4
ARG INDEX_ADVISOR_VERSION=v0.2.0
ARG PG_JSONSCHEMA_VERSION=v0.3.3
# pg_plan_filter has no release tags; pinned to a specific commit instead.
ARG PG_PLAN_FILTER_REVISION=5081a7b

ENV DEBIAN_FRONTEND=noninteractive

USER root

# Toolchain and headers for C and pgrx-based extension builds. Clean the apt
# lists in the same layer, matching the pattern used by the runtime stage.
RUN apt-get update && \
    apt-get install --yes --no-install-recommends \
    curl \
    pkg-config \
    ca-certificates \
    postgresql-server-dev-${POSTGRES_MAJOR} \
    libcurl4-openssl-dev \
    libsodium-dev \
    git \
    build-essential \
    flex \
    libkrb5-dev && \
    rm -rf /var/lib/apt/lists/*

# Install Rust plus cargo-pgrx (pinned) for the Rust-based extensions.
# NOTE(review): the PG${POSTGRES_MAJOR}_PG_CONFIG export is presumably meant
# to point pgrx at the system Postgres during `cargo pgrx init` — confirm
# pgrx honors this env var; otherwise init downloads its own Postgres builds.
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y && \
    export PATH="/root/.cargo/bin:$PATH" && \
    export "PG${POSTGRES_MAJOR}_PG_CONFIG=/usr/lib/postgresql/${POSTGRES_MAJOR}/bin/pg_config" && \
    cargo install cargo-pgrx --version 0.12.6 --locked && \
    cargo pgrx init

WORKDIR /postgres/extensions

# Make cargo/pgrx available in all subsequent RUN layers.
ENV PATH="/root/.cargo/bin:$PATH"

# Install pgjwt
# NOTE(review): pgjwt is cloned without a tag/commit pin, so this layer is
# not reproducible — consider pinning a revision like pg_plan_filter below.
RUN git clone https://github.com/michelp/pgjwt.git && \
    make -C pgjwt && make -C pgjwt install

# Install pg_hashids
RUN git clone --branch ${PG_HASHIDS_VERSION} --depth 1 https://github.com/iCyberon/pg_hashids.git && \
    make -C pg_hashids && make -C pg_hashids install

# Install pg-safeupdate
RUN git clone --branch ${PG_SAFEUPDATE_VERSION} --depth 1 https://github.com/eradman/pg-safeupdate.git && \
    make -C pg-safeupdate && make -C pg-safeupdate install

# install pg_net
RUN git clone --branch ${PG_NET_VERSION} --depth 1 https://github.com/supabase/pg_net.git && \
    make -C pg_net && make -C pg_net install

# install pg_graphql (Rust / pgrx)
RUN git clone --branch ${PG_GRAPHQL_VERSION} --depth 1 https://github.com/supabase/pg_graphql.git && \
    cd pg_graphql && \
    cargo pgrx install --release

# install supabase vault
RUN git clone --branch ${PG_SUPABASE_VAULT_VERSION} --depth 1 https://github.com/supabase/vault.git && \
    make -C vault && make -C vault install

# install pg_jsonschema (Rust / pgrx)
RUN git clone --branch ${PG_JSONSCHEMA_VERSION} --depth 1 https://github.com/supabase/pg_jsonschema.git && \
    cd pg_jsonschema && \
    cargo pgrx install --release

# install supabase wrappers (Rust / pgrx); the crate lives in a subdirectory
# and needs the version-specific feature plus all foreign data wrappers.
RUN git clone --branch ${SUPABASE_WRAPPER_VERSION} --depth 1 https://github.com/supabase/wrappers.git && \
    cd wrappers/wrappers && \
    cargo pgrx install --release --features "pg${POSTGRES_MAJOR},all_fdws"

# install index_advisor
RUN git clone --branch ${INDEX_ADVISOR_VERSION} --depth 1 https://github.com/supabase/index_advisor.git && \
    make -C index_advisor && make -C index_advisor install

# install pgsodium (requires libsodium-dev installed above)
RUN git clone --branch ${PGSODIUM_VERSION} --depth 1 https://github.com/michelp/pgsodium.git && \
    make -C pgsodium && make -C pgsodium install

# install pg_tle
RUN git clone --branch ${PG_TLE_VERSION} --depth 1 https://github.com/aws/pg_tle.git && \
    make -C pg_tle && make -C pg_tle install

# install pg_plan_filter — no tags upstream, so clone then check out the
# pinned commit (a shallow --branch clone cannot target a bare revision).
RUN git clone https://github.com/pgexperts/pg_plan_filter.git && \
    git -C pg_plan_filter checkout ${PG_PLAN_FILTER_REVISION} && \
    make -C pg_plan_filter && make -C pg_plan_filter install
# ---------------------------------------------------------------------------
# Final stage: start from the runtime image and copy in only the build
# artifacts (shared libraries, extension control/SQL files, bitcode) from
# the builder stage — no compilers or headers end up in the shipped image.
# ---------------------------------------------------------------------------
FROM base AS final

# Re-declare: ARG values do not cross stage boundaries.
ARG POSTGRES_MAJOR

# Copy all bitcode additions (LLVM bitcode for JIT inlining)
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/bitcode /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/bitcode

# pgjwt — SQL-only extension, no shared library
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pgjwt* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg-safeupdate — loadable module only, no extension files
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/safeupdate.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/

# pg_hashids
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pg_hashids.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pg_hashids* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg_net
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pg_net.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pg_net* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg_graphql
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pg_graphql.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pg_graphql* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# supabase vault
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/supabase_vault.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/supabase_vault* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg_jsonschema
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pg_jsonschema.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pg_jsonschema* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# Supabase wrappers — library name embeds the version, hence the glob
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/wrappers-*.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/wrappers* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pgsodium (runtime dependency libsodium23 is installed in the base stage)
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pgsodium.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pgsodium* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg_tle
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/pg_tle.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/pg_tle* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/

# pg_plan_filter — loadable module only, no extension files
COPY --from=builder /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/plan_filter.so /usr/lib/postgresql/${POSTGRES_MAJOR}/lib/

# index_advisor — SQL-only extension, no shared library
COPY --from=builder /usr/share/postgresql/${POSTGRES_MAJOR}/extension/index_advisor* /usr/share/postgresql/${POSTGRES_MAJOR}/extension/
Loading…
Add table
Reference in a new issue