2020-12-30 16:03:01 +00:00
|
|
|
package audit
|
|
|
|
|
|
|
|
import (
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/bwmarrin/snowflake"
|
|
|
|
"gitlab.com/inetmock/inetmock/pkg/logging"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
)
|
|
|
|
|
|
|
|
// init pins the snowflake epoch to the Unix epoch instead of the library's
// default (Twitter) epoch, so generated IDs are comparable across processes.
// time.Unix(0, 0).Unix() evaluates to 0.
// NOTE(review): snowflake.Epoch is interpreted in milliseconds by the
// library while Unix() yields seconds; both are 0 here, so the value is
// correct either way — confirm the unit was intended.
func init() {
	snowflake.Epoch = time.Unix(0, 0).Unix()
}
|
|
|
|
|
|
|
|
// eventStream is the concrete EventStream implementation: events handed to
// Emit are pushed into a buffered channel and fanned out to every registered
// sink by one or more distribute worker goroutines.
type eventStream struct {
	logger logging.Logger
	// buffer decouples Emit from sink consumption; Emit drops events
	// (with a warning) when it is full.
	buffer chan *Event
	// sinks maps a sink's name to its registration state.
	// NOTE(review): the map is iterated in distribute and mutated in
	// RegisterSink without holding lock — verify callers register all
	// sinks before the first Emit.
	sinks map[string]*registeredSink
	// lock guards stream-level state.
	lock sync.Locker
	// idGenerator produces the snowflake IDs applied to emitted events.
	idGenerator *snowflake.Node
	// sinkBufferSize is the capacity of each sink's downstream channel.
	sinkBufferSize int
	// sinkConsumptionTimeout bounds how long distribute waits for a single
	// sink to accept an event before dropping it for that sink.
	sinkConsumptionTimeout time.Duration
}
|
|
|
|
|
2021-01-04 16:52:21 +00:00
|
|
|
// registeredSink couples a sink's delivery channel with a mutex that
// serializes writes to it across concurrent distribute workers.
type registeredSink struct {
	// downstream is handed to the sink via OnSubscribe; distribute pushes
	// a copy of each event into it.
	downstream chan Event
	// lock ensures only one distribute worker writes to downstream at a time.
	lock sync.Locker
}
|
|
|
|
|
2020-12-30 16:03:01 +00:00
|
|
|
func NewEventStream(logger logging.Logger, options ...EventStreamOption) (es EventStream, err error) {
|
|
|
|
cfg := newEventStreamCfg()
|
|
|
|
|
|
|
|
for _, opt := range options {
|
|
|
|
opt(&cfg)
|
|
|
|
}
|
|
|
|
|
|
|
|
var node *snowflake.Node
|
|
|
|
node, err = snowflake.NewNode(cfg.generatorIndex)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
generatorIdx++
|
|
|
|
underlying := &eventStream{
|
|
|
|
logger: logger,
|
2021-01-04 16:52:21 +00:00
|
|
|
sinks: make(map[string]*registeredSink),
|
2020-12-30 16:03:01 +00:00
|
|
|
buffer: make(chan *Event, cfg.bufferSize),
|
|
|
|
sinkBufferSize: cfg.sinkBuffersize,
|
|
|
|
sinkConsumptionTimeout: cfg.sinkConsumptionTimeout,
|
|
|
|
idGenerator: node,
|
|
|
|
lock: &sync.Mutex{},
|
|
|
|
}
|
|
|
|
|
2021-01-04 16:52:21 +00:00
|
|
|
// start distribute workers
|
|
|
|
for i := 0; i < cfg.distributeParallelization; i++ {
|
|
|
|
go underlying.distribute()
|
|
|
|
}
|
2020-12-30 16:03:01 +00:00
|
|
|
|
|
|
|
es = underlying
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Emit applies defaults (including a freshly generated snowflake ID) to the
// event and hands it to the distribute workers without blocking: if the
// internal buffer is full the event is dropped and a warning is logged.
func (e *eventStream) Emit(ev Event) {
	ev.ApplyDefaults(e.idGenerator.Generate().Int64())
	select {
	// ev is a value parameter, so &ev points at this call's private copy
	case e.buffer <- &ev:
		e.logger.Debug("pushed event to distribute loop")
	default:
		e.logger.Warn("buffer is full")
	}
}
|
|
|
|
|
|
|
|
func (e *eventStream) RegisterSink(s Sink) error {
|
|
|
|
name := s.Name()
|
|
|
|
|
|
|
|
if _, exists := e.sinks[name]; exists {
|
|
|
|
return ErrSinkAlreadyRegistered
|
|
|
|
}
|
|
|
|
|
2021-01-04 16:52:21 +00:00
|
|
|
rs := ®isteredSink{
|
|
|
|
downstream: make(chan Event, e.sinkBufferSize),
|
|
|
|
lock: new(sync.Mutex),
|
|
|
|
}
|
|
|
|
s.OnSubscribe(rs.downstream)
|
|
|
|
e.sinks[name] = rs
|
2020-12-30 16:03:01 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e eventStream) Sinks() (sinks []string) {
|
|
|
|
for name := range e.sinks {
|
|
|
|
sinks = append(sinks, name)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close shuts the stream down by closing the emit buffer (which terminates
// the distribute workers' range loops) and then closing every sink's
// downstream channel. It always returns nil.
// NOTE(review): downstream channels are closed while distribute workers may
// still be draining the buffer and sending into them — a send on a closed
// channel panics. Confirm callers stop emitting and let workers drain
// before invoking Close.
func (e *eventStream) Close() error {
	close(e.buffer)
	for _, rs := range e.sinks {
		close(rs.downstream)
	}
	return nil
}
|
|
|
|
|
|
|
|
func (e *eventStream) distribute() {
|
|
|
|
for ev := range e.buffer {
|
2021-01-04 16:52:21 +00:00
|
|
|
for name, rs := range e.sinks {
|
|
|
|
rs.lock.Lock()
|
2020-12-30 16:03:01 +00:00
|
|
|
e.logger.Debug("notify sink", zap.String("sink", name))
|
|
|
|
select {
|
2021-01-04 16:52:21 +00:00
|
|
|
case rs.downstream <- *ev:
|
2020-12-30 16:03:01 +00:00
|
|
|
e.logger.Debug("pushed event to sink channel")
|
|
|
|
case <-time.After(e.sinkConsumptionTimeout):
|
|
|
|
e.logger.Warn("sink consummation timed out")
|
|
|
|
}
|
2021-01-04 16:52:21 +00:00
|
|
|
rs.lock.Unlock()
|
2020-12-30 16:03:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|