refactor(ignore): make ignore implement fs.FS for easier testing later on

This commit is contained in:
Peter 2023-04-19 18:39:56 +02:00
parent 5d46cb15c2
commit f5c9dfc231
Signed by: prskr
GPG key ID: C1DB5D2E8DB512F9
20 changed files with 617 additions and 117 deletions

View file

@ -1,10 +1,16 @@
task "script" "buf_generate" {
inline = [
"buf generate"
"buf generate --debug"
]
out_dir = buildr.repo.root
input_mapping = {
"api" = ".",
"buf.gen.yaml" = "buf.gen.yaml"
"buf.work.yaml" = "buf.work.yaml"
}
container {
image = "docker.io/bufbuild/buf"
}
@ -38,6 +44,13 @@ task "script" "go_test" {
input_mapping = {
"${buildr.repo.root}" = "."
}
container {
image = "docker.io/golang:1.20-alpine"
capabilities {
add = ["SYS_ADMIN"]
}
}
}
task "script" "golangci_lint" {

View file

@ -52,7 +52,7 @@ message TaskLog {
google.protobuf.Timestamp time = 1;
string message = 2;
uint32 level = 3;
int32 level = 3;
repeated LogAttribute attributes = 4;
}

3
go.mod
View file

@ -3,6 +3,7 @@ module code.icb4dc0.de/buildr/buildr
go 1.20
require (
github.com/bmatcuk/doublestar/v4 v4.6.0
github.com/docker/docker v23.0.4+incompatible
github.com/docker/go-connections v0.4.0
github.com/fsnotify/fsnotify v1.6.0
@ -15,8 +16,8 @@ require (
github.com/hashicorp/hcl/v2 v2.16.2
github.com/jinzhu/copier v0.3.5
github.com/klauspost/compress v1.16.5
github.com/klauspost/pgzip v1.2.5
github.com/opencontainers/image-spec v1.1.0-rc2
github.com/shibumi/go-pathspec v1.3.0
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0

6
go.sum
View file

@ -51,6 +51,8 @@ github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkE
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc=
github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@ -193,6 +195,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -240,8 +244,6 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=

17
internal/archive/gzip.go Normal file
View file

@ -0,0 +1,17 @@
package archive
import (
"errors"
"io"
"github.com/klauspost/pgzip"
)
// WriteTarGzipEncoded writes the given Tar archive gzip-compressed to writer,
// using the parallel pgzip implementation for throughput.
// The deferred Close flushes any buffered compressed data; its error is
// joined with the error (if any) from writing the archive itself.
func WriteTarGzipEncoded(tar Tar, writer io.Writer) (err error) {
	// Renamed from s2Writer: this is a gzip writer, not an s2 writer — the
	// old name was a misleading leftover from an s2-based implementation.
	gzipWriter := pgzip.NewWriter(writer)
	defer func() {
		err = errors.Join(err, gzipWriter.Close())
	}()

	return tar.Write(gzipWriter)
}

View file

@ -5,7 +5,6 @@ import (
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"code.icb4dc0.de/buildr/buildr/internal/ignore"
@ -24,7 +23,7 @@ func (t *Tar) Write(writer io.Writer) error {
}
tarWriter := tar.NewWriter(writer)
if err := t.root.writeToTar(tarWriter, ""); err != nil {
if err := t.root.writeToTar(tarWriter, t.Ignorer, ""); err != nil {
return err
}
@ -44,7 +43,7 @@ func (t *Tar) Add(srcPath, destPath string) error {
t.root = newNode()
}
info, err := os.Stat(srcPath)
info, err := t.Ignorer.Stat(srcPath)
if err != nil {
return err
}
@ -70,28 +69,17 @@ func (t *Tar) addDir(srcPath, destPath string) error {
t.root = newNode()
}
var walkFunc fs.WalkDirFunc = func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
relativeToSource, err := filepath.Rel(srcPath, path)
return t.Ignorer.WalkDir(srcPath, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
t.root.addDir(filepath.Join(destPath, relativeToSource))
t.root.addDir(filepath.Join(destPath, path))
} else {
t.root.addFile(path, filepath.Join(destPath, relativeToSource))
t.root.addFile(path, filepath.Join(destPath, path))
}
return nil
}
if t.Ignorer != nil {
return t.Ignorer.WalkDir(srcPath, walkFunc)
}
return filepath.WalkDir(srcPath, walkFunc)
})
}

View file

@ -4,14 +4,24 @@ import (
"archive/tar"
"errors"
"io"
"os"
"io/fs"
"path"
"path/filepath"
"strings"
"sync"
"time"
)
var defaultCreationTime = time.Unix(0, 0)
const defaultBufferSize = 256 * 1024
var (
defaultCreationTime = time.Unix(0, 0)
bufferPool = sync.Pool{
New: func() any {
return make([]byte, defaultBufferSize)
},
}
)
type fileToTar struct {
sourcePath string
@ -56,7 +66,7 @@ type archiveNode struct {
files *filesToTar
}
func (n *archiveNode) writeToTar(writer *tar.Writer, parent string) error {
func (n *archiveNode) writeToTar(writer *tar.Writer, fileSystem fs.StatFS, parent string) error {
for segment, child := range n.children {
if child.isEmpty() {
continue
@ -76,18 +86,18 @@ func (n *archiveNode) writeToTar(writer *tar.Writer, parent string) error {
return err
}
if err := child.writeToTar(writer, segmentPath); err != nil {
if err := child.writeToTar(writer, fileSystem, segmentPath); err != nil {
return err
}
}
for _, fileSpec := range *n.files {
info, err := os.Stat(fileSpec.sourcePath)
info, err := fileSystem.Stat(fileSpec.sourcePath)
if err != nil {
return err
}
f, err := os.Open(fileSpec.sourcePath)
f, err := fileSystem.Open(fileSpec.sourcePath)
if err != nil {
return err
}
@ -103,10 +113,14 @@ func (n *archiveNode) writeToTar(writer *tar.Writer, parent string) error {
return errors.Join(err, f.Close())
}
if _, err = io.Copy(writer, f); err != nil {
buf := bufferPool.Get().([]byte)
if _, err = io.CopyBuffer(writer, f, buf); err != nil {
returnCopyBuf(buf)
return errors.Join(err, f.Close())
}
returnCopyBuf(buf)
if err = f.Close(); err != nil {
return err
}
@ -187,3 +201,8 @@ func (n *archiveNode) addDir(dirPath string) {
func (n *archiveNode) isEmpty() bool {
return len(n.children) == 0 && n.files.IsEmpty()
}
// returnCopyBuf hands a scratch buffer back to bufferPool after restoring its
// full length, so the next Get always yields a full-size buffer regardless of
// how the borrower re-sliced it.
func returnCopyBuf(buf []byte) {
	if cap(buf) < defaultBufferSize {
		// Not a pool-sized buffer: re-slicing to defaultBufferSize would
		// panic, and pooling an undersized buffer would poison the pool.
		return
	}

	bufferPool.Put(buf[:defaultBufferSize])
}

View file

@ -26,16 +26,6 @@ import (
var _ Shutdowner = (*Orchestrator)(nil)
type BuildRContainerSpec struct {
ID string
ModuleName string
Image string
RepoRoot string
ToolsDir string
User string
Content map[string]string
}
func NewOrchestrator(cli *client.Client, ignorer *ignore.Ignorer) *Orchestrator {
o := Orchestrator{
client: cli,
@ -67,6 +57,8 @@ func (o *Orchestrator) BuildRContainer(ctx context.Context, spec BuildRContainer
conSpec := ContainerSpec{
Image: spec.Image,
User: spec.User,
Privileged: spec.Privileged,
Capabilities: spec.Capabilities,
ExposedPorts: []string{"3000/tcp"},
Env: map[string]string{
"BUILDR_GRPC_SERVE_ADDRESS": "0.0.0.0:3000",
@ -118,7 +110,7 @@ func (o *Orchestrator) BuildRContainer(ctx context.Context, spec BuildRContainer
grpc.WithBlock(),
)
if err != nil {
return nil, nil, err
return nil, nil, fmt.Errorf("failed to connect gRPC connection: %w", err)
}
healthClient := v1health.NewHealthClient(conn)
@ -129,7 +121,7 @@ func (o *Orchestrator) BuildRContainer(ctx context.Context, spec BuildRContainer
)
if err != nil {
return nil, nil, err
return nil, nil, fmt.Errorf("failed to check health status: %w", err)
} else if resp.Status != v1health.HealthCheckResponse_SERVING {
return nil, nil, errors.New("service not healthy in time")
}
@ -231,6 +223,9 @@ func (o *Orchestrator) createContainer(ctx context.Context, spec ContainerSpec,
hostConfig := container.HostConfig{
AutoRemove: spec.AutoRemove,
Privileged: spec.Privileged,
CapAdd: spec.Capabilities.Add,
CapDrop: spec.Capabilities.Drop,
PortBindings: binding,
}

View file

@ -2,6 +2,24 @@ package containers
import specsv1 "github.com/opencontainers/image-spec/specs-go/v1"
// Capabilities lists Linux capabilities to add to and drop from a container;
// they are mapped onto the engine's CapAdd/CapDrop host configuration.
type Capabilities struct {
	Add  []string
	Drop []string
}

// BuildRContainerSpec carries everything needed to start a buildr task
// execution container.
type BuildRContainerSpec struct {
	ID           string            // container identifier
	ModuleName   string            // name of the module/task executed in the container
	RepoRoot     string            // repository root on the host — presumably made available to the container; confirm with the orchestrator
	ToolsDir     string            // buildr tools directory
	Content      map[string]string // input mappings (source path -> in-container destination)
	Image        string            // container image reference
	User         string            // user to run as inside the container
	Privileged   bool              // run the container in privileged mode
	Capabilities Capabilities      // capabilities to add/drop
}
type ContainerPlatform struct {
Architecture string
OS string
@ -28,6 +46,8 @@ type ContainerSpec struct {
ContainerPlatform
Image string
User string
Privileged bool
Capabilities Capabilities
WorkingDir string
ExposedPorts []string
Env map[string]string

View file

@ -59,6 +59,7 @@ func (c *containerTask) doExecute(ctx context.Context, b buildr.Buildr) (err err
ModuleName: c.moduleWithMeta.Name(),
Image: containerSpec.Image,
User: containerSpec.User,
Privileged: containerSpec.Privileged,
RepoRoot: b.Repo.Root,
Content: c.moduleWithMeta.InputMappings(),
ToolsDir: b.Config.ToolsDirectory,
@ -73,25 +74,37 @@ func (c *containerTask) doExecute(ctx context.Context, b buildr.Buildr) (err err
err = errors.Join(err, outputSink.Close())
}()
logger.Debug("Preparing container")
con, grpcConn, err := c.orchestrator.BuildRContainer(ctx, spec)
if err != nil {
return err
return fmt.Errorf("failed to create container for task %s/%s: %w", c.moduleWithMeta.Type(), c.moduleWithMeta.Name(), err)
}
var keepContainer bool
defer func() {
if keepContainer {
err = errors.Join(err, grpcConn.Close())
return
}
err = errors.Join(err, grpcConn.Close(), con.Shutdown(context.Background()))
}()
executorClient := rpcv1.NewExecutorServiceClient(grpcConn)
logger.Debug("Start remote task execution")
streamClient, err := executorClient.ExecuteTask(ctx)
if err != nil {
return err
return fmt.Errorf("failed to start remote task execution: %w", err)
}
defer func() {
err = errors.Join(err, streamClient.CloseSend())
}()
rawModule, err := json.Marshal(c.moduleWithMeta.Unwrap())
if err != nil {
return err
return fmt.Errorf("failed to marshal task spec as JSON: %w", err)
}
startTaskReq := &rpcv1.ExecuteTaskRequest{
@ -114,30 +127,33 @@ func (c *containerTask) doExecute(ctx context.Context, b buildr.Buildr) (err err
err = streamClient.Send(startTaskReq)
if err != nil {
return err
return fmt.Errorf("failed to send start task request: %w", err)
}
for {
ev, err := streamClient.Recv()
if err != nil {
return err
keepContainer = true
return fmt.Errorf("failed to receive remote executor event: %w", err)
}
switch msg := ev.GetEnvelope().(type) {
case *rpcv1.ExecuteTaskResponse_TaskLog:
c.handleTaskLog(ctx, msg.TaskLog, logger)
case *rpcv1.ExecuteTaskResponse_TaskOutput:
logger.Debug("Handle task output")
if err := c.handleTaskOutput(outputSink, msg.TaskOutput); err != nil {
logger.Error("Failed to process task output", slog.String("err", err.Error()))
}
case *rpcv1.ExecuteTaskResponse_TaskResult:
logger.Debug("Received task result")
if errMsg := msg.TaskResult.Error; errMsg != "" {
return fmt.Errorf("failed to execute task: %s", msg.TaskResult.Error)
}
if msg.TaskResult.ModifiedFilesArchivePath != "" {
if err := c.handleModifiedFiles(ctx, con, msg.TaskResult.ModifiedFilesArchivePath, c.moduleWithMeta.OutDir()); err != nil {
return err
return fmt.Errorf("failed to fetch modified files from execution container: %w", err)
}
}
@ -166,7 +182,13 @@ func (c *containerTask) handleTaskOutput(sink logging.TaskOutputSink, req *rpcv1
}
func (c *containerTask) handleTaskLog(ctx context.Context, taskLog *rpcv1.TaskLog, logger *slog.Logger) {
rec := slog.NewRecord(taskLog.Time.AsTime().Local(), slog.Level(taskLog.Level), taskLog.Message, 0)
handler := logger.Handler()
level := slog.Level(taskLog.Level)
if !handler.Enabled(ctx, level) {
return
}
rec := slog.NewRecord(taskLog.Time.AsTime().Local(), level, taskLog.Message, 0)
for i := range taskLog.Attributes {
attr := taskLog.Attributes[i]
rec.AddAttrs(slog.String(attr.GetKey(), attr.GetValue()))
@ -176,8 +198,11 @@ func (c *containerTask) handleTaskLog(ctx context.Context, taskLog *rpcv1.TaskLo
func (c *containerTask) handleModifiedFiles(ctx context.Context, con containers.Container, modifiedFilesPath, outDir string) error {
return con.CopyFileFromContainer(ctx, modifiedFilesPath, func(_ *tar.Header, reader io.Reader) (err error) {
s2Reader := s2.NewReader(reader)
tarReader := tar.NewReader(s2Reader)
deflateReader := s2.NewReader(reader)
if err != nil {
return err
}
tarReader := tar.NewReader(deflateReader)
var header *tar.Header
for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() {

View file

@ -2,18 +2,24 @@ package ignore
import (
"bufio"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/shibumi/go-pathspec"
"github.com/bmatcuk/doublestar/v4"
"code.icb4dc0.de/buildr/buildr/modules/buildr"
)
const IgnoreFileName = ".buildrignore"
const FileName = ".buildrignore"
var (
_ fs.StatFS = (*Ignorer)(nil)
_ fs.ReadDirFS = (*Ignorer)(nil)
)
// NewRootIgnorer returns a new Ignorer with the default ignore patterns included
func NewRootIgnorer(b *buildr.Buildr, additionalPatterns ...string) (*Ignorer, error) {
@ -40,11 +46,15 @@ func NewRootIgnorer(b *buildr.Buildr, additionalPatterns ...string) (*Ignorer, e
func NewIgnorer(dir string, additionalPatterns ...string) (*Ignorer, error) {
ig := &Ignorer{
Root: dir,
Patterns: additionalPatterns,
root: dir,
fs: os.DirFS(dir),
}
ignoreFile, err := os.Open(filepath.Join(dir, IgnoreFileName))
if err := ig.AddPatterns(additionalPatterns...); err != nil {
return nil, err
}
ignoreFile, err := os.Open(filepath.Join(dir, FileName))
if err != nil {
if os.IsNotExist(err) {
return ig, nil
@ -59,58 +69,127 @@ func NewIgnorer(dir string, additionalPatterns ...string) (*Ignorer, error) {
if patterns, err := readGitIgnore(ignoreFile); err != nil {
return nil, err
} else {
ig.Patterns = append(ig.Patterns, patterns...)
} else if err = ig.AddPatterns(patterns...); err != nil {
return nil, err
}
return ig, nil
}
type Ignorer struct {
Root string
Parent *Ignorer
Patterns []string
fs fs.FS
patterns []ignorePattern
root string
parent *Ignorer
}
func (ig Ignorer) Match(path string) (ignore bool, err error) {
if ig.Parent != nil {
ignore, err = ig.Parent.Match(path)
if err != nil || ignore {
return
func (ig *Ignorer) AddPatterns(patterns ...string) error {
for i := range patterns {
if parsed, err := parsePattern(patterns[i]); err != nil {
return err
} else {
ig.patterns = append(ig.patterns, *parsed)
}
}
return pathspec.GitIgnore(ig.Patterns, path)
return nil
}
func (ig Ignorer) WalkDir(root string, walkFunc fs.WalkDirFunc) error {
return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
// Ignore reports whether path is excluded by the ignore patterns.
// The ignorer's own patterns are consulted first; only when none of them
// matches is the decision delegated up the parent chain. This gives deeper
// (child) ignore files precedence over shallower ones — the precedence
// .gitignore uses — so a child pattern such as "!keep.txt" can re-include a
// path that a parent pattern excluded. (Previously the parent's verdict won,
// which made child negations ineffective.)
func (ig *Ignorer) Ignore(path string) (ignore bool) {
	if match, ignore := ig.computeIgnore(path); match {
		return ignore
	}

	if ig.parent != nil {
		return ig.parent.Ignore(path)
	}

	return false
}
// ReadDir lists the entries of name, hiding anything matched by the ignore
// patterns. An ignored directory yields a nil slice without an error. Paths
// outside the ignorer's root are read directly from the OS file system.
func (ig *Ignorer) ReadDir(name string) (result []fs.DirEntry, err error) {
	if ig.Ignore(name) {
		return nil, nil
	}

	name, isPartOfFS, err := ig.normalize(name)
	if err != nil {
		return nil, err
	}

	if !isPartOfFS {
		return os.ReadDir(name)
	}

	entries, err := fs.ReadDir(ig.fs, name)
	if err != nil {
		return nil, err
	}

	// Filter out ignored entries; Ignore expects absolute paths, so the root
	// is joined back in before matching.
	result = make([]fs.DirEntry, 0, len(entries))
	for _, entry := range entries {
		if ig.Ignore(filepath.Join(ig.root, name, entry.Name())) {
			continue
		}
		result = append(result, entry)
	}

	return result, nil
}
// Open opens name unless it is ignored; ignored paths report os.ErrNotExist.
// Paths outside the ignorer's root fall back to the OS file system.
func (ig *Ignorer) Open(name string) (f fs.File, err error) {
	if ig.Ignore(name) {
		return nil, os.ErrNotExist
	}

	name, isPartOfFS, err := ig.normalize(name)
	if err != nil {
		return nil, err
	}

	if isPartOfFS {
		return ig.fs.Open(name)
	}

	return os.Open(name)
}
// Stat stats name unless it is ignored; ignored paths report os.ErrNotExist.
// Paths outside the ignorer's root fall back to the OS file system.
func (ig *Ignorer) Stat(name string) (info fs.FileInfo, err error) {
	if ig.Ignore(name) {
		return nil, os.ErrNotExist
	}

	name, isPartOfFS, err := ig.normalize(name)
	if err != nil {
		return nil, err
	}

	if !isPartOfFS {
		return os.Stat(name)
	}

	if info, err = fs.Stat(ig.fs, name); err != nil {
		return nil, fmt.Errorf("failed to stat %s: %w", name, err)
	}

	return info, nil
}
func (ig *Ignorer) WalkDir(root string, walkFunc fs.WalkDirFunc) (err error) {
var isPartOfFS bool
if root, isPartOfFS, err = ig.normalize(root); err != nil {
return err
}
walkFn := func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
var ignore bool
ignore, err = ig.Match(path)
if !d.IsDir() {
if err != nil {
return err
} else if ignore {
return nil
} else {
return walkFunc(path, d, nil)
}
}
// it's a directory - at first check if it should be ignored
if ignore {
return fs.SkipDir
}
if path != ig.Root {
localIgnoreFile := filepath.Join(path, IgnoreFileName)
if _, err := os.Stat(localIgnoreFile); err == nil {
if path != root {
localIgnoreFile := filepath.Join(path, FileName)
if _, err := ig.Stat(localIgnoreFile); err == nil {
child, err := ig.child(path)
if err != nil {
return err
@ -125,19 +204,54 @@ func (ig Ignorer) WalkDir(root string, walkFunc fs.WalkDirFunc) error {
}
return walkFunc(path, d, nil)
})
}
if !isPartOfFS {
return filepath.WalkDir(root, walkFn)
}
return fs.WalkDir(ig, root, walkFn)
}
func (ig Ignorer) child(path string) (*Ignorer, error) {
func (ig *Ignorer) child(path string) (*Ignorer, error) {
child, err := NewIgnorer(path)
if err != nil {
return nil, err
}
child.Parent = &ig
child.parent = ig
return child, nil
}
// normalize translates name into a path usable with the Ignorer's embedded
// fs.FS. Relative paths are assumed to already be relative to ig.root and are
// returned unchanged with isPartOfFS = true. Absolute paths are matched
// against "ig.root/**" (doublestar) to decide whether they live inside the
// ignorer's tree: paths outside are returned as-is with isPartOfFS = false,
// paths inside are rewritten relative to ig.root.
// NOTE(review): ig.root itself presumably does not match "root/**" — assumes
// callers never pass the root directory itself; confirm against doublestar's
// matching semantics.
func (ig *Ignorer) normalize(name string) (normalized string, isPartOfFS bool, err error) {
	if !filepath.IsAbs(name) {
		return name, true, nil
	}

	if isPartOfFS, err = doublestar.Match(filepath.Join(ig.root, "**"), name); err != nil {
		return "", false, err
	} else if !isPartOfFS {
		return name, false, nil
	}

	normalized, err = filepath.Rel(ig.root, name)
	if err != nil {
		return "", false, fmt.Errorf("failed to normalize path %s: %w", name, err)
	}

	return normalized, true, nil
}
// computeIgnore checks path against this ignorer's own patterns only (parents
// are not consulted). match reports whether any pattern applied at all;
// ignore is the first matching pattern's verdict.
func (ig *Ignorer) computeIgnore(path string) (match, ignore bool) {
	for _, pattern := range ig.patterns {
		match, ignore = pattern.ignore(path)
		if match {
			return match, ignore
		}
	}

	return false, false
}
func readGitIgnore(content io.Reader) (patterns []string, err error) {
scanner := bufio.NewScanner(content)
@ -158,12 +272,11 @@ func defaultIgnorePatterns(b *buildr.Buildr, ig *Ignorer) error {
if err != nil {
return err
}
if strings.HasSuffix(relDir, "/") {
ig.Patterns = append(ig.Patterns, relDir)
} else {
ig.Patterns = append(ig.Patterns, relDir+"/")
if !strings.HasSuffix(relDir, "/") {
relDir += "/"
}
return nil
return ig.AddPatterns(relDir)
}
for _, d := range []string{b.Config.ToolsDirectory, b.Config.OutDirectory, b.Config.Logging.LogsDirectory} {

View file

@ -92,10 +92,10 @@ func TestIgnorer_Match(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ig := Ignorer{
Parent: tt.fields.Parent,
parent: tt.fields.Parent,
Patterns: tt.fields.Patterns,
}
got, err := ig.Match(tt.args.path)
got, err := ig.Ignore(tt.args.path)
if (err != nil) != tt.wantErr {
t.Errorf("Match() error = %v, wantErr %v", err, tt.wantErr)
return

190
internal/ignore/pattern.go Normal file
View file

@ -0,0 +1,190 @@
package ignore
import (
"bytes"
"regexp"
"strings"
)
type ignorePattern struct {
Pattern *regexp.Regexp
Include bool
}
func (p ignorePattern) ignore(path string) (match, ignore bool) {
match = p.Pattern.MatchString(path)
if match {
ignore = !p.Include
}
return match, ignore
}
// parsePattern compiles a single gitignore-style pattern into an
// ignorePattern with an anchored regular expression.
//
// Supported syntax mirrors gitignore:
//   - a leading "!" negates the pattern (re-includes matching paths)
//   - a leading "/" anchors the pattern to the root
//   - a trailing "/" matches all descendants of a directory
//   - "**", "*", "?" and bracket expressions follow glob semantics
func parsePattern(pattern string) (*ignorePattern, error) {
	p := &ignorePattern{}

	// An optional prefix "!" negates the pattern; any matching file excluded
	// by a previous pattern becomes included again.
	if strings.HasPrefix(pattern, "!") {
		pattern = pattern[1:]
		p.Include = true
	}

	// Remove a leading back-slash used to escape a hash ('#') or an
	// exclamation mark ('!').
	pattern = strings.TrimPrefix(pattern, "\\")

	// A blank pattern matches nothing (gitignore treats blank lines the same
	// way). Guard here — the segment logic below would otherwise index into
	// an empty slice and panic.
	if pattern == "" {
		p.Pattern = regexp.MustCompile(`[^\s\S]`) // matches nothing
		return p, nil
	}

	// Split pattern into path segments.
	patternSegs := strings.Split(pattern, "/")

	// A pattern beginning with a slash ('/') only matches paths directly
	// under the root, so drop the empty first segment to anchor the pattern.
	// A pattern without a leading slash matches any descendant path — this is
	// equivalent to "**/{pattern}" — so prepend double-asterisks to make the
	// pattern relative to the root.
	if patternSegs[0] == "" {
		patternSegs = patternSegs[1:]
	} else if patternSegs[0] != "**" {
		patternSegs = append([]string{"**"}, patternSegs...)
	}

	if len(patternSegs) == 0 {
		// Defensive: cannot normally happen (only an empty pattern splits to
		// a single empty segment, and that is guarded above).
		p.Pattern = regexp.MustCompile(`[^\s\S]`)
		return p, nil
	}

	// A pattern ending with a slash ('/') matches all descendant paths of a
	// directory but not a regular file of the same name — equivalent to
	// "{pattern}/**" — so turn the trailing empty segment into "**".
	// NOTE(review): a bare "/" pattern compiles to "^.+$" and matches every
	// path; confirm that is intended.
	if patternSegs[len(patternSegs)-1] == "" {
		patternSegs[len(patternSegs)-1] = "**"
	}

	// Build the anchored regular expression segment by segment.
	var expr strings.Builder
	expr.WriteString("^")
	needSlash := false
	for i, seg := range patternSegs {
		switch seg {
		case "**":
			switch {
			case i == 0 && i == len(patternSegs)-1:
				// A pattern consisting solely of "**" matches every path.
				expr.WriteString(".+")
			case i == 0:
				// A leading "**" matches any leading path segments.
				expr.WriteString("(?:.+/)?")
				needSlash = false
			case i == len(patternSegs)-1:
				// A trailing "**" matches any trailing path segments.
				expr.WriteString("/.+")
			default:
				// An inner "**" matches zero or more inner path segments.
				expr.WriteString("(?:/.+)?")
				needSlash = true
			}
		case "*":
			// Match exactly one path segment.
			if needSlash {
				expr.WriteString("/")
			}
			expr.WriteString("[^/]+")
			needSlash = true
		default:
			// Translate the glob segment into a regex fragment.
			if needSlash {
				expr.WriteString("/")
			}
			expr.WriteString(translateGlob(seg))
			needSlash = true
		}
	}
	expr.WriteString("$")

	compiled, err := regexp.Compile(expr.String())
	if err != nil {
		return nil, err
	}
	p.Pattern = compiled

	return p, nil
}
// translateGlob converts a single glob segment into a regular-expression
// fragment. Derived from `fnmatch.translate()` with semantics similar to the
// POSIX `fnmatch()` function with the `FNM_PATHNAME` flag set: wildcards
// never match path separators.
func translateGlob(glob string) string {
	var out bytes.Buffer
	escaped := false
	for i := 0; i < len(glob); i++ {
		c := glob[i]
		switch {
		case escaped:
			// Previous character was a backslash: emit this one literally.
			escaped = false
			out.WriteString(regexp.QuoteMeta(string(c)))
		case c == '\\':
			// Escape character: take the next character literally.
			escaped = true
		case c == '*':
			// Multi-character wildcard: any run of characters (possibly
			// empty) within a single path segment.
			out.WriteString("[^/]*")
		case c == '?':
			// Single-character wildcard: any one character except a slash.
			out.WriteString("[^/]")
		case c == '[':
			// Bracket expression; may advance i past the expression.
			out.WriteString(translateBracketExpression(&i, glob))
		default:
			// Regular character: escape regex metacharacters.
			out.WriteString(regexp.QuoteMeta(string(c)))
		}
	}

	return out.String()
}
// translateBracketExpression converts the glob bracket expression starting at
// glob[*i] (which must be '[') into a regex character class. Except for the
// leading exclamation mark (negation, translated to '^'), the bracket content
// is usable directly as regex class content:
//   - "[][!]" matches ']', '[' and '!'.
//   - "[]-]" matches ']' and '-'.
//   - "[!]a-]" matches any character except ']', 'a' and '-'.
//
// On success *i is advanced to the closing ']'. If no closing bracket is
// found, the '[' is emitted as a literal and *i is left unchanged so the
// caller continues with the following character.
//
// Fixes over the previous version: the '^' for a negated class was discarded
// by a later overwriting assignment; QuoteMeta on the interior escaped '-'
// and broke ranges like "a-z"; and a trailing '[' at the end of the glob
// caused an out-of-range index.
func translateBracketExpression(i *int, glob string) string {
	start := *i + 1 // first character after '['
	j := start

	// Pass bracket expression negation.
	if j < len(glob) && glob[j] == '!' {
		j++
	}
	// Pass a first closing bracket at the beginning of the expression — it is
	// a literal member of the class.
	if j < len(glob) && glob[j] == ']' {
		j++
	}
	// Find the closing bracket. Stop once we reach the end or find it.
	for j < len(glob) && glob[j] != ']' {
		j++
	}

	if j >= len(glob) {
		// No closing bracket: treat the opening bracket as a literal.
		return "\\["
	}

	content := start
	negate := ""
	if glob[content] == '!' {
		negate = "^"
		content++
	}
	*i = j

	// The interior is passed through verbatim so ranges like "a-z" survive;
	// QuoteMeta would escape '-' and turn the range into three literals.
	return "[" + negate + glob[content:j] + "]"
}

View file

@ -3,8 +3,10 @@ package v1
import (
"context"
"errors"
"fmt"
"io"
"os"
"time"
"golang.org/x/exp/slog"
"google.golang.org/grpc/codes"
@ -30,10 +32,16 @@ type ExecutorServiceServer struct {
registry *modules.TypeRegistry
}
func (e *ExecutorServiceServer) ExecuteTask(server rpcv1.ExecutorService_ExecuteTaskServer) error {
func (e *ExecutorServiceServer) ExecuteTask(server rpcv1.ExecutorService_ExecuteTaskServer) (err error) {
ctx, cancel := context.WithCancelCause(server.Context())
defer cancel(errors.New("stream closed"))
defer func() {
if r := recover(); r != nil {
err = errors.Join(err, fmt.Errorf("module exeuction paniced: %v", r))
}
}()
logger := slog.New(NewGrpcExecutorHandler(server))
for ctx.Err() == nil {
@ -72,6 +80,8 @@ func (e *ExecutorServiceServer) ExecuteTask(server rpcv1.ExecutorService_Execute
result.Error = err.Error()
}
logger.Debug("Waiting for file change events to propagate before canceling watcher")
time.Sleep(250 * time.Millisecond)
cancel(ErrExecutionCompleted)
if err := addModifiedFilesToResult(result, logger, watcher); err != nil {

View file

@ -2,14 +2,17 @@ package v1
import (
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"github.com/fsnotify/fsnotify"
"golang.org/x/exp/slog"
"code.icb4dc0.de/buildr/buildr/internal/archive"
"code.icb4dc0.de/buildr/buildr/internal/ignore"
)
func newFSWatcher(logger *slog.Logger, workingDir string) (*fsWatcher, error) {
@ -18,27 +21,31 @@ func newFSWatcher(logger *slog.Logger, workingDir string) (*fsWatcher, error) {
return nil, err
}
err = filepath.WalkDir(workingDir, func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return watcher.Add(path)
}
return nil
})
ignorer, err := ignore.NewIgnorer(workingDir)
if err != nil {
return nil, err
}
return &fsWatcher{
w := &fsWatcher{
logger: logger,
watcher: watcher,
workingDir: workingDir,
}, nil
ignorer: ignorer,
archive: archive.Tar{
Ignorer: ignorer,
},
}
if err := w.addDir(workingDir); err != nil {
return nil, err
}
return w, nil
}
type fsWatcher struct {
logger *slog.Logger
ignorer *ignore.Ignorer
watcher *fsnotify.Watcher
archive archive.Tar
workingDir string
@ -72,6 +79,15 @@ func (w *fsWatcher) Watch(ctx context.Context) {
switch event.Op {
case fsnotify.Create, fsnotify.Write, fsnotify.Chmod:
if info, err := os.Stat(event.Name); err != nil {
w.logger.Error("Failed to stat file", slog.String("file", event.Name), slog.String("err", err.Error()))
continue
} else if info.IsDir() {
if err := w.addDir(event.Name); err != nil {
w.logger.Error("Failed to add newly created directory to watcher", slog.String("file", event.Name), slog.String("err", err.Error()))
}
}
relativeToWorkingDir, err := filepath.Rel(w.workingDir, event.Name)
if err != nil {
w.logger.Error(
@ -82,6 +98,12 @@ func (w *fsWatcher) Watch(ctx context.Context) {
continue
}
w.logger.Debug(
"Processing file change",
slog.String("orig_path", event.Name),
slog.String("relative_path", relativeToWorkingDir),
)
if err := w.archive.Add(event.Name, relativeToWorkingDir); err != nil {
w.logger.Error(
"Failed to add file to archive",
@ -114,3 +136,24 @@ func (w *fsWatcher) Watch(ctx context.Context) {
}
}
}
// addDir recursively subscribes the watcher to dirToAdd and to every
// non-ignored subdirectory below it. Relative paths reported by the ignorer's
// WalkDir are resolved against the working directory before registration.
func (w *fsWatcher) addDir(dirToAdd string) error {
	return w.ignorer.WalkDir(dirToAdd, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			// Only directories are registered with fsnotify.
			return nil
		}

		absPath := path
		if !filepath.IsAbs(absPath) {
			absPath = filepath.Join(w.workingDir, absPath)
		}

		w.logger.Debug("Subscribing to changes in directory", slog.String("directory", absPath))
		if err := w.watcher.Add(absPath); err != nil {
			return fmt.Errorf("failed to subscribe to changes in directory %s: %w", absPath, err)
		}

		return nil
	})
}

View file

@ -13,7 +13,7 @@ var _ slog.Handler = (*GrpcExecutorHandler)(nil)
func NewGrpcExecutorHandler(sender StreamSender[*rpcv1.ExecuteTaskResponse]) *GrpcExecutorHandler {
return &GrpcExecutorHandler{
Level: slog.LevelInfo,
Level: slog.LevelDebug,
sender: sender,
}
}
@ -33,7 +33,7 @@ func (g GrpcExecutorHandler) Handle(_ context.Context, record slog.Record) error
taskLog := rpcv1.TaskLog{
Time: timestamppb.New(record.Time.UTC()),
Message: record.Message,
Level: uint32(record.Level),
Level: int32(record.Level),
Attributes: make([]*rpcv1.TaskLog_LogAttribute, 0, record.NumAttrs()),
}

View file

@ -13,9 +13,16 @@ var (
_ json.Unmarshaler = (*Metadata[Module])(nil)
)
// ContainerCapabilities configures, via HCL, the Linux capabilities to add to
// or drop from a task container.
type ContainerCapabilities struct {
	Add  []string `hcl:"add,optional"`
	Drop []string `hcl:"drop,optional"`
}
type ContainerSpec struct {
Image string `hcl:"image"`
User string `hcl:"user,optional"`
Image string `hcl:"image"`
User string `hcl:"user,optional"`
Privileged bool `hcl:"privileged,optional"`
Capabilities *ContainerCapabilities `hcl:"capabilities,block"`
}
type Metadata[T Module] struct {

View file

@ -0,0 +1,38 @@
package strings
import (
"strings"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
// Join returns an HCL function that concatenates a list of strings with a
// separator, mirroring Go's strings.Join.
func Join() function.Function {
	spec := &function.Spec{
		Description: "join a list of strings",
		Params: []function.Parameter{
			{
				Name:        "separator",
				Description: "separator to be used to join the strings",
				Type:        cty.String,
			},
			{
				Name:        "list",
				Description: "list of strings to join",
				Type:        cty.List(cty.String),
			},
		},
		Type: function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			sep := args[0].AsString()
			values := args[1].AsValueSlice()

			parts := make([]string, 0, len(values))
			for _, v := range values {
				parts = append(parts, v.AsString())
			}

			return cty.StringVal(strings.Join(parts, sep)), nil
		},
	}

	return function.New(spec)
}

View file

@ -6,4 +6,5 @@ func RegisterInContext(evalCtx *hcl.EvalContext) {
evalCtx.Functions["trim"] = Trim()
evalCtx.Functions["format"] = Format()
evalCtx.Functions["base64encode"] = Base64Encode()
evalCtx.Functions["join"] = Join()
}

View file

@ -9,8 +9,17 @@ import (
"path"
"path/filepath"
"strings"
"sync"
)
const defaultBufferSize = 256 * 1024
var bufferPool = sync.Pool{
New: func() any {
return make([]byte, defaultBufferSize)
},
}
func buildDirFromContent(cwd string, content map[string]string) (*dirNode, error) {
rootNode := newDirNode()
for origSource, target := range content {
@ -169,10 +178,14 @@ func (n *dirNode) writeToTar(writer *tar.Writer, parent string) error {
return errors.Join(err, f.Close())
}
if _, err = io.Copy(writer, f); err != nil {
buf := bufferPool.Get().([]byte)
if _, err = io.CopyBuffer(writer, f, buf); err != nil {
returnCopyBuf(buf)
return errors.Join(err, f.Close())
}
returnCopyBuf(buf)
if err = f.Close(); err != nil {
return err
}
@ -180,3 +193,8 @@ func (n *dirNode) writeToTar(writer *tar.Writer, parent string) error {
return nil
}
// returnCopyBuf hands a scratch buffer back to bufferPool after restoring its
// full length, so the next Get always yields a full-size buffer regardless of
// how the borrower re-sliced it.
func returnCopyBuf(buf []byte) {
	if cap(buf) < defaultBufferSize {
		// Not a pool-sized buffer: re-slicing to defaultBufferSize would
		// panic, and pooling an undersized buffer would poison the pool.
		return
	}

	bufferPool.Put(buf[:defaultBufferSize])
}