feat(ci): configure image build caching
commit 264b30e8a2 (parent 7f56a3db56)
6 changed files with 151 additions and 34 deletions
.github/workflows/postgres.yml
@@ -1,6 +1,8 @@
 name: Postgres image
 on:
   push:
+    paths:
+      - postgres/**
     branches:
       - main
     tags:
@@ -39,6 +41,8 @@ jobs:
           build-args: |
             POSTGRES_MAJOR=${{ matrix.postgres_major }}
             POSTGRES_MINOR=${{ fromJSON(env.MINOR_VERSIONS)[matrix.postgres_major] }}
+          cache-from: type=registry,ref=code.icb4dc0.de/prskr/supabase-operator/postgres:buildcache
+          cache-to: type=registry,ref=code.icb4dc0.de/prskr/supabase-operator/postgres:buildcache,mode=max
 
   manifest:
     strategy:

api/v1alpha1 (CRD types, package v1alpha1)
@@ -17,6 +17,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -25,6 +26,7 @@ import (
 	"slices"
 	"strconv"
 	"strings"
+	"time"
 
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -387,20 +389,75 @@ type CoreSpec struct {
 	Auth *AuthSpec `json:"auth,omitempty"`
 }
 
-type MigrationStatus map[string]metav1.Time
+type MigrationConditionStatus string
 
-func (s MigrationStatus) IsApplied(name string) bool {
-	_, ok := s[name]
-	return ok
-}
+const (
+	MigrationConditionStatusApplied MigrationConditionStatus = "Applied"
+	MigrationConditionStatusFailed  MigrationConditionStatus = "Failed"
+)
 
-func (s MigrationStatus) Record(name string) {
-	s[name] = metav1.Now()
+type MigrationScriptCondition struct {
+	// Name - file name of the migration script
+	Name string `json:"name"`
+	// Hash - SHA256 hash of the script when it was last successfully applied
+	Hash []byte `json:"hash"`
+	// Status - whether the migration was applied or not
+	// +kubebuilder:validation:Enum=Applied;Failed
+	Status MigrationConditionStatus `json:"status"`
+	// LastProbeTime - last time the operator tried to execute the migration script
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// LastTransitionTime - last time the condition transitioned from one status to another
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	// Reason - one-word, CamelCase reason for the condition's last transition
+	Reason string `json:"reason,omitempty"`
+	// Message - human-readable message indicating details about the last transition
+	Message string `json:"message,omitempty"`
 }
 
 type DatabaseStatus struct {
-	AppliedMigrations MigrationStatus `json:"appliedMigrations,omitempty"`
+	MigrationConditions []MigrationScriptCondition `json:"migrationConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
 	Roles map[string][]byte `json:"roles,omitempty"`
+}
+
+func (s DatabaseStatus) IsMigrationUpToDate(name string, hash []byte) (found bool, upToDate bool) {
+	for _, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			return true, bytes.Equal(cond.Hash, hash)
+		}
+	}
+
+	return false, false
+}
+
+func (s DatabaseStatus) RecordMigrationCondition(name string, hash []byte, err error) {
+	var (
+		now                = time.Now()
+		newStatus          = MigrationConditionStatusApplied
+		lastProbeTime      = metav1.NewTime(now)
+		lastTransitionTime metav1.Time
+		message            string
+	)
+
+	if err != nil {
+		newStatus = MigrationConditionStatusFailed
+		message = err.Error()
+	}
+
+	for idx, cond := range s.MigrationConditions {
+		if cond.Name == name {
+			lastTransitionTime = cond.LastTransitionTime
+			if cond.Status != newStatus {
+				lastTransitionTime = metav1.NewTime(now)
+			}
+
+			cond.Hash = hash
+			cond.Status = newStatus
+			cond.LastProbeTime = lastProbeTime
+			cond.LastTransitionTime = lastTransitionTime
+			cond.Reason = "Outdated"
+			cond.Message = message
+
+			// write the updated condition back; cond is a copy of the slice element
+			s.MigrationConditions[idx] = cond
+		}
+	}
 }
 
 type CoreConditionType string
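For orientation, a minimal, hypothetical usage sketch of the new status helpers. This is not part of the commit: the module import path is an assumption derived from the registry path used in the workflow above, and the script names and hashes are made up.

package main

import (
	"fmt"

	// assumed module path, inferred from code.icb4dc0.de/prskr/supabase-operator
	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)

func main() {
	status := supabasev1alpha1.DatabaseStatus{
		MigrationConditions: []supabasev1alpha1.MigrationScriptCondition{
			// hypothetical script name and hash
			{Name: "0001_init.sql", Hash: []byte{0x01}, Status: supabasev1alpha1.MigrationConditionStatusApplied},
		},
	}

	// Unknown script: found=false, so the caller knows it still has to run it.
	fmt.Println(status.IsMigrationUpToDate("0002_roles.sql", []byte{0x02})) // false false

	// Known script whose content (hash) changed since it was applied: found=true, upToDate=false.
	fmt.Println(status.IsMigrationUpToDate("0001_init.sql", []byte{0xff})) // true false
}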

api/v1alpha1 (autogenerated deepcopy functions)
@@ -752,11 +752,11 @@ func (in *DatabaseRolesSecrets) DeepCopy() *DatabaseRolesSecrets {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
 	*out = *in
-	if in.AppliedMigrations != nil {
-		in, out := &in.AppliedMigrations, &out.AppliedMigrations
-		*out = make(MigrationStatus, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
+	if in.MigrationConditions != nil {
+		in, out := &in.MigrationConditions, &out.MigrationConditions
+		*out = make([]MigrationScriptCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
 	if in.Roles != nil {
@@ -1046,24 +1046,25 @@ func (in *JwtSpec) DeepCopy() *JwtSpec {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in MigrationStatus) DeepCopyInto(out *MigrationStatus) {
-	{
-		in := &in
-		*out = make(MigrationStatus, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
+func (in *MigrationScriptCondition) DeepCopyInto(out *MigrationScriptCondition) {
+	*out = *in
+	if in.Hash != nil {
+		in, out := &in.Hash, &out.Hash
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
 	}
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationStatus.
-func (in MigrationStatus) DeepCopy() MigrationStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationScriptCondition.
+func (in *MigrationScriptCondition) DeepCopy() *MigrationScriptCondition {
 	if in == nil {
 		return nil
 	}
-	out := new(MigrationStatus)
+	out := new(MigrationScriptCondition)
 	in.DeepCopyInto(out)
-	return *out
+	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

assets/migrations (package migrations)
@@ -17,6 +17,7 @@ limitations under the License.
 package migrations
 
 import (
+	"crypto/sha256"
 	"embed"
 	"fmt"
 	"io/fs"
@@ -32,6 +33,7 @@ var migrationsFS embed.FS
 type Script struct {
 	FileName string
 	Content  string
+	Hash     []byte
 }
 
 func InitScripts() iter.Seq2[Script, error] {
@@ -49,10 +51,18 @@ func RoleCreationScript(roleName string) (Script, error) {
 		return Script{}, err
 	}
 
-	return Script{fileName, string(content)}, nil
+	hash := sha256.New()
+	_, _ = hash.Write(content)
+
+	return Script{
+		FileName: fileName,
+		Content:  string(content),
+		Hash:     hash.Sum(nil),
+	}, nil
 }
 
 func readScripts(dir string) iter.Seq2[Script, error] {
+	hash := sha256.New()
 	return func(yield func(Script, error) bool) {
 		files, err := migrationsFS.ReadDir(dir)
 		if err != nil {
@@ -76,11 +86,16 @@ func readScripts(dir string) iter.Seq2[Script, error] {
 				}
 			}
 
+			_, _ = hash.Write(content)
+
 			s := Script{
 				FileName: file.Name(),
 				Content:  string(content),
+				Hash:     hash.Sum(nil),
 			}
 
+			hash.Reset()
+
 			if !yield(s, nil) {
 				return
 			}
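The fingerprinting above is plain SHA-256 over the raw script bytes, reusing one hash instance and resetting it between files. A small, self-contained sketch of the same idea (hypothetical script contents, not tied to the embedded filesystem):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// scriptHash mirrors how the migrations package fingerprints a script:
// hash the raw file content and store the digest next to the script.
func scriptHash(content []byte) []byte {
	h := sha256.New()
	_, _ = h.Write(content)
	return h.Sum(nil)
}

func main() {
	v1 := []byte("create table profiles (id uuid primary key);")
	v2 := []byte("create table profiles (id uuid primary key, name text);")

	fmt.Println(hex.EncodeToString(scriptHash(v1)))
	// A single changed statement yields a different digest, which is what
	// IsMigrationUpToDate later uses to detect outdated migrations.
	fmt.Println(hex.EncodeToString(scriptHash(v2)))
}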

config/crd/bases
@@ -5353,11 +5353,48 @@ spec:
           properties:
             database:
               properties:
-                appliedMigrations:
-                  additionalProperties:
-                    format: date-time
-                    type: string
-                  type: object
+                migrationConditions:
+                  items:
+                    properties:
+                      hash:
+                        description: Hash - SHA256 hash of the script when it was
+                          last successfully applied
+                        format: byte
+                        type: string
+                      lastProbeTime:
+                        description: LastProbeTime - last time the operator tried
+                          to execute the migration script
+                        format: date-time
+                        type: string
+                      lastTransitionTime:
+                        description: LastTransitionTime - last time the condition
+                          transitioned from one status to another
+                        format: date-time
+                        type: string
+                      message:
+                        description: Message - human-readable message indicating
+                          details about the last transition
+                        type: string
+                      name:
+                        description: Name - file name of the migration script
+                        type: string
+                      reason:
+                        description: Reason - one-word, CamelCase reason for the
+                          condition's last transition
+                        type: string
+                      status:
+                        description: Status - whether the migration was applied
+                          or not
+                        enum:
+                        - Applied
+                        - Failed
+                        type: string
+                    required:
+                    - hash
+                    - name
+                    - status
+                    type: object
+                  type: array
                 roles:
                   additionalProperties:
                     format: byte

internal/db
@@ -32,7 +32,7 @@ type Migrator struct {
 	Conn *pgx.Conn
 }
 
-func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.MigrationStatus, seq iter.Seq2[migrations.Script, error]) (appliedSomething bool, err error) {
+func (m Migrator) ApplyAll(ctx context.Context, status *supabasev1alpha1.CoreStatus, seq iter.Seq2[migrations.Script, error], areInitScripts bool) (appliedSomething bool, err error) {
 	logger := log.FromContext(ctx)
 
 	for s, err := range seq {
@@ -40,11 +40,14 @@ func (m Migrator) ApplyAll(ctx context.Context, status supabasev1alpha1.Migratio
 			return false, err
 		}
 
-		if status.IsApplied(s.FileName) {
+		if found, upToDate := status.Database.IsMigrationUpToDate(s.FileName, s.Hash); found && upToDate {
+			continue
+		} else if found && !upToDate && areInitScripts {
+			logger.Info("Change in init script was detected - will not apply because init scripts are not idempotent", "file_name", s.FileName)
 			continue
 		}
 
-		logger.Info("Applying missing migration", "filename", s.FileName)
+		logger.Info("Applying missing or outdated migration", "filename", s.FileName)
 		if err := m.Apply(ctx, s.Content); err != nil {
 			return false, err
 		}
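The skip/apply decision in the loop above boils down to a small truth table: up-to-date scripts are skipped, changed init scripts are skipped because they are not idempotent, and everything else (new or outdated) is applied. A self-contained sketch of that logic, with names local to this example rather than taken from the repository:

package main

import "fmt"

// shouldApply mirrors the decision made in Migrator.ApplyAll.
func shouldApply(found, upToDate, isInitScript bool) bool {
	switch {
	case found && upToDate:
		return false // already applied and unchanged
	case found && !upToDate && isInitScript:
		return false // changed init script, never re-run
	default:
		return true // new script, or changed regular migration
	}
}

func main() {
	fmt.Println(shouldApply(false, false, false)) // true:  new migration script
	fmt.Println(shouldApply(true, true, false))   // false: already applied, unchanged
	fmt.Println(shouldApply(true, false, false))  // true:  changed migration, re-apply
	fmt.Println(shouldApply(true, false, true))   // false: changed init script, skipped
}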