Manager paradigm changed to an event-loop (actor) concurrency style; began implementing batching of SUBSCRIBE/UNSUBSCRIBE requests in the Binance futures websocket provider (futures_websocket.go).
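For orientation, a minimal caller-side sketch of the new command-loop API introduced below (a sketch only: the idle timeout, buffer sizes, and the identifier value are illustrative placeholders, not taken from the repository, and error handling is elided):

	sid, _ := m.NewSession(30 * time.Second)                // manager posts a newSessionCmd to its loop
	cin, cout, _ := m.AttachClient(sid, 256, 256)           // client channels created by the loop
	_ = cin                                                 // inbound side unused in this sketch
	_ = m.ConfigureSession(sid, []domain.Identifier{ident}) // ident: a domain.Identifier built elsewhere
	msg := <-cout                                           // routed messages for bound identifiers arrive here
	_ = msg
	_ = m.DetachClient(sid)                                 // detaching re-arms the idle timer
	_ = m.CloseSession(sid)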
@@ -17,10 +17,10 @@ import (
 func main() {
 	fmt.Println("Starting Data Service...")
 	// Setup
-	r := router.NewRouter()
+	r := router.NewRouter(2048)
 	m := manager.NewManager(r)
 	binanceFutures := binance.NewFuturesWebsocket()
-	m.AddProvider("binance_futures_websocket", binanceFutures)
+	_ = m.AddProvider("binance_futures_websocket", binanceFutures)
 
 	// gRPC Control Server
 	grpcControlServer := grpc.NewServer()
services/data_service/internal/manager/helper.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package manager

import (
	"fmt"

	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/provider"
)

// Lightweight error helper to define package-level errors inline.
type constErr string

func (e constErr) Error() string { return string(e) }
func errorf(s string) error      { return constErr(s) }

// copySet copies a set of identifiers to a new map.
func copySet(in map[domain.Identifier]struct{}) map[domain.Identifier]struct{} {
	out := make(map[domain.Identifier]struct{}, len(in))
	for k := range in {
		out[k] = struct{}{}
	}
	return out
}

// identifierSetDifferences computes additions and deletions from old -> next.
func identifierSetDifferences(old map[domain.Identifier]struct{}, next []domain.Identifier) (toAdd, toDel []domain.Identifier) {
	newSet := make(map[domain.Identifier]struct{}, len(next))
	for _, id := range next {
		newSet[id] = struct{}{}
		if _, ok := old[id]; !ok {
			toAdd = append(toAdd, id)
		}
	}
	for id := range old {
		if _, ok := newSet[id]; !ok {
			toDel = append(toDel, id)
		}
	}
	return
}

// joinErrors aggregates multiple errors.
type joined struct{ es []error }

func (j joined) Error() string {
	switch n := len(j.es); {
	case n == 0:
		return ""
	case n == 1:
		return j.es[0].Error()
	default:
		s := j.es[0].Error()
		for i := 1; i < n; i++ {
			s += "; " + j.es[i].Error()
		}
		return s
	}
}

func join(es []error) error {
	if len(es) == 0 {
		return nil
	}
	return joined{es}
}

// resolveProvider parses a raw identifier and looks up the provider.
func (m *Manager) resolveProvider(id domain.Identifier) (provider.Provider, string, error) {
	provName, subj, ok := id.ProviderSubject()
	if !ok || provName == "" || subj == "" {
		return nil, "", ErrInvalidIdentifier
	}
	p := m.providers[provName]
	if p == nil {
		return nil, "", fmt.Errorf("%w: %s", ErrUnknownProvider, provName)
	}
	return p, subj, nil
}

// incrementStreamRefCount increments refcount and returns true if transitioning 0->1.
func (m *Manager) incrementStreamRefCount(id domain.Identifier) bool {
	rc := m.streamRef[id] + 1
	m.streamRef[id] = rc
	return rc == 1
}

// decrementStreamRefCount decrements refcount and returns true if transitioning 1->0.
func (m *Manager) decrementStreamRefCount(id domain.Identifier) bool {
	rc, ok := m.streamRef[id]
	if !ok {
		return false
	}
	rc--
	if rc <= 0 {
		delete(m.streamRef, id)
		return true
	}
	m.streamRef[id] = rc
	return false
}
@@ -1,10 +1,7 @@
package manager

import (
	"fmt"
	"time"

	"github.com/google/uuid"
@@ -13,512 +10,376 @@ import (
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/router"
)

// Manager is a single-goroutine actor that owns all state.
type Manager struct {
	// Command channel
	cmdCh chan any

	// State (loop-owned)
	providers map[string]provider.Provider
	sessions  map[uuid.UUID]*session
	streamRef map[domain.Identifier]int

	// Router
	router *router.Router
}

// New creates a manager and starts its run loop.
func New(r *router.Router) *Manager {
	m := &Manager{
		cmdCh:     make(chan any, 256),
		providers: make(map[string]provider.Provider),
		sessions:  make(map[uuid.UUID]*session),
		streamRef: make(map[domain.Identifier]int),
		router:    r,
	}
	go r.Run()
	go m.run()
	return m
}

// Public API (posts commands to loop)

// AddProvider adds and starts a new provider.
func (m *Manager) AddProvider(name string, p provider.Provider) error {
	resp := make(chan error, 1)
	m.cmdCh <- addProviderCmd{name: name, p: p, resp: resp}
	return <-resp
}

// RemoveProvider stops and removes a provider, cleaning up all sessions.
func (m *Manager) RemoveProvider(name string) error {
	resp := make(chan error, 1)
	m.cmdCh <- removeProviderCmd{name: name, resp: resp}
	return <-resp
}

// NewSession creates a new session with the given idle timeout.
func (m *Manager) NewSession(idleAfter time.Duration) (uuid.UUID, error) {
	resp := make(chan struct {
		id  uuid.UUID
		err error
	}, 1)
	m.cmdCh <- newSessionCmd{idleAfter: idleAfter, resp: resp}
	r := <-resp
	return r.id, r.err
}

// AttachClient attaches a client to a session, creates and returns client channels for the session.
func (m *Manager) AttachClient(id uuid.UUID, inBuf, outBuf int) (chan<- domain.Message, <-chan domain.Message, error) {
	resp := make(chan struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}, 1)
	m.cmdCh <- attachCmd{sid: id, inBuf: inBuf, outBuf: outBuf, resp: resp}
	r := <-resp
	return r.cin, r.cout, r.err
}

// DetachClient detaches the client from the session, closes client channels and arms timeout.
func (m *Manager) DetachClient(id uuid.UUID) error {
	resp := make(chan error, 1)
	m.cmdCh <- detachCmd{sid: id, resp: resp}
	return <-resp
}

// ConfigureSession sets the next set of identifiers for the session, starting and stopping streams as needed.
func (m *Manager) ConfigureSession(id uuid.UUID, next []domain.Identifier) error {
	resp := make(chan error, 1)
	m.cmdCh <- configureCmd{sid: id, next: next, resp: resp}
	return <-resp
}

// CloseSession closes and removes the session, cleaning up all bindings.
func (m *Manager) CloseSession(id uuid.UUID) error {
	resp := make(chan error, 1)
	m.cmdCh <- closeSessionCmd{sid: id, resp: resp}
	return <-resp
}

// The main loop of the manager, processing commands serially.
func (m *Manager) run() {
	for {
		msg := <-m.cmdCh
		switch c := msg.(type) {
		case addProviderCmd:
			m.handleAddProvider(c)
		case removeProviderCmd:
			m.handleRemoveProvider(c)
		case newSessionCmd:
			m.handleNewSession(c)
		case attachCmd:
			m.handleAttach(c)
		case detachCmd:
			m.handleDetach(c)
		case configureCmd:
			m.handleConfigure(c)
		case closeSessionCmd:
			m.handleCloseSession(c)
		}
	}
}

// Command handlers, run in loop goroutine. With a single goroutine, no locking is needed.

func (m *Manager) handleAddProvider(cmd addProviderCmd) {
	if _, ok := m.providers[cmd.name]; ok {
		cmd.resp <- fmt.Errorf("provider exists: %s", cmd.name)
		return
	}
	if err := cmd.p.Start(); err != nil {
		cmd.resp <- fmt.Errorf("start provider %s: %w", cmd.name, err)
		return
	}
	m.providers[cmd.name] = cmd.p
	cmd.resp <- nil
}

func (m *Manager) handleRemoveProvider(cmd removeProviderCmd) {
	p, ok := m.providers[cmd.name]
	if !ok {
		cmd.resp <- fmt.Errorf("provider not found: %s", cmd.name)
		return
	}

	// Clean all identifiers belonging to this provider. Iterates through sessions to reduce provider burden.
	for _, s := range m.sessions {
		for ident := range s.bound {
			provName, subj, ok := ident.ProviderSubject()
			if !ok || provName != cmd.name {
				// TODO: add log warning, but basically should never ever happen
				continue
			}
			if s.attached && s.clientOut != nil {
				m.router.DeregisterRoute(ident, s.clientOut)
			}
			delete(s.bound, ident)

			// decrementStreamRefCount returns true if this was the last ref. In which case we want to stop the stream.
			if ident.IsRaw() && m.decrementStreamRefCount(ident) && subj != "" {
				_ = p.StopStream(subj) // best-effort as we will remove the provider anyway
			}
		}
	}

	// first iteration above is sound, but as a precaution we also clean up any dangling streamRef entries here
	for id := range m.streamRef {
		provName, _, ok := id.ProviderSubject()
		if !ok || provName != cmd.name {
			continue
		}
		fmt.Printf("manager: warning — dangling streamRef for %s after removing provider %s\n", id.Key(), cmd.name)
		delete(m.streamRef, id)
	}

	p.Stop()
	delete(m.providers, cmd.name)
	cmd.resp <- nil
}

func (m *Manager) handleNewSession(cmd newSessionCmd) {
	s := &session{
		id:        uuid.New(),
		bound:     make(map[domain.Identifier]struct{}),
		idleAfter: cmd.idleAfter,
	}

	// Arm idle timer to auto-close the session.
	s.idleTimer = time.AfterFunc(cmd.idleAfter, func() {
		m.cmdCh <- closeSessionCmd{sid: s.id, resp: make(chan error, 1)}
	})

	m.sessions[s.id] = s // added after arming in the case of immediate timeout or error in arming timer

	cmd.resp <- struct {
		id  uuid.UUID
		err error
	}{id: s.id, err: nil}
}

func (m *Manager) handleAttach(cmd attachCmd) {
	s, ok := m.sessions[cmd.sid]
	if !ok {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrSessionNotFound}
		return
	}
	if s.closed {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrSessionClosed}
		return
	}
	if s.attached {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrClientAlreadyAttached}
		return
	}

	cin, cout, err := m.attachSession(s, cmd.inBuf, cmd.outBuf)

	cmd.resp <- struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}{cin, cout, err}
}

func (m *Manager) handleDetach(cmd detachCmd) {
	s, ok := m.sessions[cmd.sid]
	if !ok {
		cmd.resp <- ErrSessionNotFound
		return
	}
	if s.closed {
		cmd.resp <- ErrSessionClosed
		return
	}
	if !s.attached {
		cmd.resp <- ErrClientNotAttached
		return
	}

	_ = m.detachSession(cmd.sid, s)

	cmd.resp <- nil
}

func (m *Manager) handleConfigure(c configureCmd) {
	s, ok := m.sessions[c.sid]
	if !ok {
		c.resp <- ErrSessionNotFound
		return
	}
	if s.closed {
		c.resp <- ErrSessionClosed
		return
	}

	old := copySet(s.bound)
	toAdd, toDel := identifierSetDifferences(old, c.next)

	// 1) Handle removals first.
	for _, ident := range toDel {
		if s.attached && s.clientOut != nil {
			m.router.DeregisterRoute(ident, s.clientOut)
		}
		delete(s.bound, ident)
		if ident.IsRaw() {
			if m.decrementStreamRefCount(ident) {
				if p, subj, err := m.resolveProvider(ident); err == nil {
					_ = p.StopStream(subj) // fire-and-forget
				}
			}
		}
	}

	// 2) Handle additions. Collect starts to await.
	type startItem struct {
		id domain.Identifier
		ch <-chan error
	}
	var starts []startItem
	var initErrs []error

	for _, ident := range toAdd {
		// Bind intent now.
		s.bound[ident] = struct{}{}

		if !ident.IsRaw() {
			if s.attached && s.clientOut != nil {
				m.router.RegisterRoute(ident, s.clientOut)
			}
			continue
		}

		p, subj, err := m.resolveProvider(ident)
		if err != nil {
			delete(s.bound, ident)
			initErrs = append(initErrs, err)
			continue
		}
		if !p.IsValidSubject(subj, false) {
			delete(s.bound, ident)
			initErrs = append(initErrs, fmt.Errorf("invalid subject %q for provider", subj))
			continue
		}

		first := m.incrementStreamRefCount(ident)

		if first || !p.IsStreamActive(subj) {
			ch := p.StartStream(subj, m.router.IncomingChannel())
			starts = append(starts, startItem{id: ident, ch: ch})
		} else if s.attached && s.clientOut != nil {
			// Already active, just register for this session.
			m.router.RegisterRoute(ident, s.clientOut)
		}
	}

	// 3) Wait for starts initiated by this call, each with its own timeout.
	if len(starts) == 0 {
		c.resp <- join(initErrs)
		return
	}

	type result struct {
		id  domain.Identifier
		err error
	}
	done := make(chan result, len(starts))

	for _, si := range starts {
		// Per-start waiter.
		go func(id domain.Identifier, ch <-chan error) {
			select {
			case err := <-ch:
				done <- result{id: id, err: err}
			case <-time.After(statusWaitTotal):
				done <- result{id: id, err: fmt.Errorf("timeout")}
			}
		}(si.id, si.ch)
	}

	// Collect results and apply.
	for i := 0; i < len(starts); i++ {
		r := <-done
		if r.err != nil {
			// Roll back this session's bind and drop ref.
			delete(s.bound, r.id)
			_ = m.decrementStreamRefCount(r.id)
			initErrs = append(initErrs, fmt.Errorf("start %v: %w", r.id, r.err))
			continue
		}
		// Success: register for any attached sessions that are bound.
		for _, sess := range m.sessions {
			if !sess.attached || sess.clientOut == nil {
				continue
			}
			if _, bound := sess.bound[r.id]; bound {
				m.router.RegisterRoute(r.id, sess.clientOut)
			}
		}
	}

	c.resp <- join(initErrs)
}

func (m *Manager) handleCloseSession(c closeSessionCmd) {
	s, ok := m.sessions[c.sid]
	if !ok {
		c.resp <- ErrSessionNotFound
		return
	}
	m.closeSession(c.sid, s)
	c.resp <- nil
}
services/data_service/internal/manager/session.go (new file, 114 lines)
@@ -0,0 +1,114 @@
package manager

import (
	"time"

	"github.com/google/uuid"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
)

// attachSession wires channels, stops idle timer, and registers ready routes.
// Precondition: session exists and is not attached/closed. Runs in loop.
func (m *Manager) attachSession(s *session, inBuf, outBuf int) (chan<- domain.Message, <-chan domain.Message, error) {
	if inBuf <= 0 {
		inBuf = defaultClientBuf
	}
	if outBuf <= 0 {
		outBuf = defaultClientBuf
	}

	cin := make(chan domain.Message, inBuf)
	cout := make(chan domain.Message, outBuf)
	s.clientIn, s.clientOut = cin, cout

	if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}

	// Forward clientIn to router.Incoming with drop on backpressure.
	go func(src <-chan domain.Message, dst chan<- domain.Message) {
		for msg := range src {
			select {
			case dst <- msg:
			default:
				// drop
			}
		}
	}(cin, m.router.IncomingChannel())

	// Register all currently bound that are ready.
	for ident := range s.bound {
		if !ident.IsRaw() {
			m.router.RegisterRoute(ident, cout)
			continue
		}
		// Raw: register only if provider stream is active.
		if p, subj, err := m.resolveProvider(ident); err == nil && p.IsStreamActive(subj) {
			m.router.RegisterRoute(ident, cout)
		}
	}

	s.attached = true
	return cin, cout, nil
}

// detachSession deregisters all routes, closes channels, and arms idle timer.
// Precondition: session exists and is attached. Runs in loop.
func (m *Manager) detachSession(sid uuid.UUID, s *session) error {
	if s.clientOut != nil {
		for ident := range s.bound {
			m.router.DeregisterRoute(ident, s.clientOut)
		}
		close(s.clientOut)
	}
	if s.clientIn != nil {
		close(s.clientIn)
	}
	s.clientIn, s.clientOut = nil, nil
	s.attached = false

	// Arm idle timer to auto-close the session.
	s.idleTimer = time.AfterFunc(s.idleAfter, func() {
		m.cmdCh <- closeSessionCmd{sid: sid, resp: make(chan error, 1)}
	})
	return nil
}

// closeSession performs full teardown and refcount drops. Runs in loop.
func (m *Manager) closeSession(sid uuid.UUID, s *session) {
	if s.closed {
		return
	}
	s.closed = true

	// Detach if attached.
	if s.attached {
		if s.clientOut != nil {
			for ident := range s.bound {
				m.router.DeregisterRoute(ident, s.clientOut)
			}
			close(s.clientOut)
		}
		if s.clientIn != nil {
			close(s.clientIn)
		}
	} else if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}

	// Drop refs for raw identifiers and stop streams if last ref. Fire-and-forget.
	for ident := range s.bound {
		if !ident.IsRaw() {
			continue
		}
		if last := m.decrementStreamRefCount(ident); last {
			if p, subj, err := m.resolveProvider(ident); err == nil {
				_ = p.StopStream(subj) // do not wait
			}
		}
	}

	delete(m.sessions, sid)
}
services/data_service/internal/manager/types.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package manager

import (
	"time"

	"github.com/google/uuid"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/provider"
)

// Shared constants.
const (
	defaultClientBuf = 256
	statusWaitTotal  = 8 * time.Second
)

// Manager-level errors.
var (
	ErrSessionNotFound       = errorf("session not found")
	ErrSessionClosed         = errorf("session closed")
	ErrClientAlreadyAttached = errorf("client already attached")
	ErrClientNotAttached     = errorf("client not attached")
	ErrInvalidIdentifier     = errorf("invalid identifier")
	ErrUnknownProvider       = errorf("unknown provider")
)

// Session holds per-session state. Owned by the manager loop.
type session struct {
	id uuid.UUID

	clientIn  chan domain.Message // caller writes
	clientOut chan domain.Message // caller reads

	bound map[domain.Identifier]struct{}

	closed    bool
	attached  bool
	idleAfter time.Duration
	idleTimer *time.Timer
}

// Commands posted into the manager loop. One struct per action.
type addProviderCmd struct {
	name string
	p    provider.Provider
	resp chan error
}

type removeProviderCmd struct {
	name string
	resp chan error
}

type newSessionCmd struct {
	idleAfter time.Duration
	resp      chan struct {
		id  uuid.UUID
		err error
	}
}

type attachCmd struct {
	sid           uuid.UUID
	inBuf, outBuf int
	resp          chan struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}
}

type detachCmd struct {
	sid  uuid.UUID
	resp chan error
}

type configureCmd struct {
	sid  uuid.UUID
	next []domain.Identifier
	resp chan error // returns after starts from this call succeed or timeout
}

type closeSessionCmd struct {
	sid  uuid.UUID
	resp chan error
}
@@ -2,139 +2,411 @@ package binance
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/http"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
|
"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
wsURL = "wss://fstream.binance.com/stream"
|
||||||
|
writeRatePerSecond = 8 // hard cap per second
|
||||||
|
writeBurst = 8 // token bucket burst
|
||||||
|
writeWait = 5 * time.Second // per write deadline
|
||||||
|
|
||||||
|
batchPeriod = 1 * time.Second // batch SUB/UNSUB every second
|
||||||
|
|
||||||
|
reconnectMin = 500 * time.Millisecond
|
||||||
|
reconnectMax = 10 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
// internal stream states (provider stays simple; manager relies on IsStreamActive)
|
||||||
|
type streamState uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
stateUnknown streamState = iota
|
||||||
|
statePendingSub
|
||||||
|
stateActive
|
||||||
|
statePendingUnsub
|
||||||
|
stateInactive
|
||||||
|
stateError
|
||||||
|
)
|
||||||
|
|
||||||
type FuturesWebsocket struct {
|
type FuturesWebsocket struct {
|
||||||
conn *websocket.Conn
|
dial websocket.Dialer
|
||||||
activeStreams map[string]chan domain.Message
|
hdr http.Header
|
||||||
|
|
||||||
|
// desired subscriptions and sinks
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
desired map[string]bool // subject -> want subscribed
|
||||||
|
sinks map[string]chan<- domain.Message // subject -> destination
|
||||||
|
states map[string]streamState // subject -> state
|
||||||
|
|
||||||
|
// waiters per subject
|
||||||
|
startWaiters map[string][]chan error
|
||||||
|
stopWaiters map[string][]chan error
|
||||||
|
|
||||||
|
// batching queues
|
||||||
|
subQ chan string
|
||||||
|
unsubQ chan string
|
||||||
|
|
||||||
|
// websocket
|
||||||
|
writeMu sync.Mutex
|
||||||
|
conn *websocket.Conn
|
||||||
|
|
||||||
|
// rate limit tokens
|
||||||
|
tokensCh chan struct{}
|
||||||
|
stopRate chan struct{}
|
||||||
|
|
||||||
|
// lifecycle
|
||||||
|
stopCh chan struct{}
|
||||||
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
// ack tracking
|
||||||
|
ackMu sync.Mutex
|
||||||
|
idSeq uint64
|
||||||
|
pendingA map[int64]ackBatch
|
||||||
|
}
|
||||||
|
|
||||||
|
type ackBatch struct {
|
||||||
|
method string // "SUBSCRIBE" or "UNSUBSCRIBE"
|
||||||
|
subjects []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFuturesWebsocket() *FuturesWebsocket {
|
func NewFuturesWebsocket() *FuturesWebsocket {
|
||||||
return &FuturesWebsocket{
|
return &FuturesWebsocket{
|
||||||
activeStreams: make(map[string]chan domain.Message),
|
desired: make(map[string]bool),
|
||||||
|
sinks: make(map[string]chan<- domain.Message),
|
||||||
|
states: make(map[string]streamState),
|
||||||
|
startWaiters: make(map[string][]chan error),
|
||||||
|
stopWaiters: make(map[string][]chan error),
|
||||||
|
subQ: make(chan string, 4096),
|
||||||
|
unsubQ: make(chan string, 4096),
|
||||||
|
tokensCh: make(chan struct{}, writeBurst),
|
||||||
|
stopRate: make(chan struct{}),
|
||||||
|
stopCh: make(chan struct{}),
|
||||||
|
pendingA: make(map[int64]ackBatch),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* provider.Provider */
|
||||||
|
|
||||||
func (b *FuturesWebsocket) Start() error {
|
func (b *FuturesWebsocket) Start() error {
|
||||||
c, _, err := websocket.DefaultDialer.Dial("wss://fstream.binance.com/stream", nil)
|
// token bucket
|
||||||
if err != nil {
|
b.wg.Add(1)
|
||||||
return fmt.Errorf("connect failed: %w", err)
|
go func() {
|
||||||
|
defer b.wg.Done()
|
||||||
|
t := time.NewTicker(time.Second / writeRatePerSecond)
|
||||||
|
defer t.Stop()
|
||||||
|
// prime burst
|
||||||
|
for i := 0; i < writeBurst; i++ {
|
||||||
|
select {
|
||||||
|
case b.tokensCh <- struct{}{}:
|
||||||
|
default:
|
||||||
}
|
}
|
||||||
b.conn = c
|
}
|
||||||
go b.readLoop()
|
for {
|
||||||
|
select {
|
||||||
|
case <-b.stopRate:
|
||||||
|
return
|
||||||
|
case <-t.C:
|
||||||
|
select {
|
||||||
|
case b.tokensCh <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// connection manager
|
||||||
|
b.wg.Add(1)
|
||||||
|
go b.run()
|
||||||
|
|
||||||
|
// batcher
|
||||||
|
b.wg.Add(1)
|
||||||
|
go b.batcher()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *FuturesWebsocket) Stop() {
|
func (b *FuturesWebsocket) Stop() {
|
||||||
|
close(b.stopCh)
|
||||||
|
close(b.stopRate)
|
||||||
|
|
||||||
|
b.writeMu.Lock()
|
||||||
if b.conn != nil {
|
if b.conn != nil {
|
||||||
err := b.conn.Close()
|
_ = b.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye"))
|
||||||
if err != nil {
|
_ = b.conn.Close()
|
||||||
panic(fmt.Errorf("failed to close websocket connection: %w", err))
|
b.conn = nil
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
b.writeMu.Unlock()
|
||||||
|
|
||||||
func (b *FuturesWebsocket) RequestStream(subject string, ch chan domain.Message) error {
|
b.wg.Wait()
|
||||||
|
|
||||||
|
// resolve any remaining waiters with an error
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
for subj, ws := range b.startWaiters {
|
||||||
if _, ok := b.activeStreams[subject]; ok {
|
for _, ch := range ws {
|
||||||
return nil
|
select {
|
||||||
|
case ch <- errors.New("provider stopped"):
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
delete(b.startWaiters, subj)
|
||||||
|
}
|
||||||
|
for subj, ws := range b.stopWaiters {
|
||||||
|
for _, ch := range ws {
|
||||||
|
select {
|
||||||
|
case ch <- errors.New("provider stopped"):
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
delete(b.stopWaiters, subj)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := map[string]interface{}{
|
func (b *FuturesWebsocket) StartStream(subject string, dst chan<- domain.Message) <-chan error {
|
||||||
"method": "SUBSCRIBE",
|
fmt.Println("Starting stream for subject:", subject)
|
||||||
"params": []string{subject},
|
ch := make(chan error, 1)
|
||||||
"id": len(b.activeStreams) + 1,
|
|
||||||
}
|
if subject == "" {
|
||||||
if err := b.conn.WriteJSON(msg); err != nil {
|
ch <- fmt.Errorf("empty subject")
|
||||||
return fmt.Errorf("subscribe failed: %w", err)
|
close(ch)
|
||||||
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
b.activeStreams[subject] = ch
|
|
||||||
fmt.Println("Subscribed to stream:", subject)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *FuturesWebsocket) CancelStream(subject string) {
|
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
// mark desired, update sink
|
||||||
|
b.desired[subject] = true
|
||||||
|
b.sinks[subject] = dst
|
||||||
|
|
||||||
if _, ok := b.activeStreams[subject]; !ok {
|
// fast path: already active
|
||||||
return
|
if b.states[subject] == stateActive {
|
||||||
|
b.mu.Unlock()
|
||||||
|
ch <- nil
|
||||||
|
close(ch)
|
||||||
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := map[string]interface{}{
|
// enqueue waiter and transition if needed
|
||||||
"method": "UNSUBSCRIBE",
|
b.startWaiters[subject] = append(b.startWaiters[subject], ch)
|
||||||
"params": []string{subject},
|
if b.states[subject] != statePendingSub {
|
||||||
"id": len(b.activeStreams) + 1000,
|
b.states[subject] = statePendingSub
|
||||||
|
select {
|
||||||
|
case b.subQ <- subject:
|
||||||
|
default:
|
||||||
|
// queue full → fail fast
|
||||||
|
ws := b.startWaiters[subject]
|
||||||
|
delete(b.startWaiters, subject)
|
||||||
|
b.states[subject] = stateError
|
||||||
|
b.mu.Unlock()
|
||||||
|
for _, w := range ws {
|
||||||
|
w <- fmt.Errorf("subscribe queue full")
|
||||||
|
close(w)
|
||||||
}
|
}
|
||||||
_ = b.conn.WriteJSON(msg)
|
return ch
|
||||||
|
}
|
||||||
fmt.Println("Unsubscribed from stream:", subject)
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
delete(b.activeStreams, subject)
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *FuturesWebsocket) StopStream(subject string) <-chan error {
|
||||||
|
fmt.Println("Stopping stream for subject:", subject)
|
||||||
|
ch := make(chan error, 1)
|
||||||
|
|
||||||
|
if subject == "" {
|
||||||
|
ch <- fmt.Errorf("empty subject")
|
||||||
|
close(ch)
|
||||||
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *FuturesWebsocket) GetActiveStreams() []string {
|
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
// mark no longer desired; keep sink until UNSUB ack to avoid drops
|
||||||
|
b.desired[subject] = false
|
||||||
|
|
||||||
var streams []string
|
// already inactive
|
||||||
for k := range b.activeStreams {
|
if b.states[subject] == stateInactive {
|
||||||
streams = append(streams, k)
|
b.mu.Unlock()
|
||||||
|
ch <- nil
|
||||||
|
close(ch)
|
||||||
|
return ch
|
||||||
}
|
}
|
||||||
return streams
|
|
||||||
|
// enqueue waiter and transition if needed
|
||||||
|
b.stopWaiters[subject] = append(b.stopWaiters[subject], ch)
|
||||||
|
if b.states[subject] != statePendingUnsub {
|
||||||
|
b.states[subject] = statePendingUnsub
|
||||||
|
select {
|
||||||
|
case b.unsubQ <- subject:
|
||||||
|
default:
|
||||||
|
// queue full → fail fast
|
||||||
|
ws := b.stopWaiters[subject]
|
||||||
|
delete(b.stopWaiters, subject)
|
||||||
|
b.states[subject] = stateError
|
||||||
|
b.mu.Unlock()
|
||||||
|
for _, w := range ws {
|
||||||
|
w <- fmt.Errorf("unsubscribe queue full")
|
||||||
|
close(w)
|
||||||
|
}
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *FuturesWebsocket) Fetch(_ string) (domain.Message, error) {
|
||||||
|
return domain.Message{}, fmt.Errorf("fetch not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *FuturesWebsocket) IsStreamActive(subject string) bool {
|
func (b *FuturesWebsocket) IsStreamActive(subject string) bool {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
return b.states[subject] == stateActive
|
||||||
_, ok := b.activeStreams[subject]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *FuturesWebsocket) Fetch(_ string) (domain.Message, error) {
|
|
||||||
return domain.Message{}, fmt.Errorf("not supported: websocket provider does not implement fetch")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *FuturesWebsocket) IsValidSubject(subject string, isFetch bool) bool {
|
func (b *FuturesWebsocket) IsValidSubject(subject string, isFetch bool) bool {
|
||||||
if isFetch {
|
return !isFetch && subject != ""
|
||||||
return false
|
|
||||||
}
|
|
||||||
return len(subject) > 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *FuturesWebsocket) readLoop() {
|
/* internals */
|
||||||
for {
|
|
||||||
_, msgBytes, err := b.conn.ReadMessage()
|
func (b *FuturesWebsocket) run() {
|
||||||
|
defer b.wg.Done()
|
||||||
|
|
||||||
|
backoff := reconnectMin
|
||||||
|
|
||||||
|
dial := func() (*websocket.Conn, error) {
|
||||||
|
c, _, err := b.dial.Dial(wsURL, b.hdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
|
return nil, err
|
||||||
return
|
|
||||||
}
|
}
|
||||||
fmt.Printf("read error: %v\n", err)
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-b.stopCh:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := dial()
|
||||||
|
if err != nil {
|
||||||
|
time.Sleep(backoff)
|
||||||
|
backoff = minDur(backoff*2, reconnectMax)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
backoff = reconnectMin
|
||||||
|
|
||||||
|
b.writeMu.Lock()
|
||||||
|
b.conn = c
|
||||||
|
b.writeMu.Unlock()
|
||||||
|
|
||||||
|
// Resubscribe desired subjects in one batched SUB.
|
||||||
|
want := b.snapshotDesired(true) // only desired==true
|
||||||
|
if len(want) > 0 {
|
||||||
|
_ = b.sendSubscribe(want)
|
||||||
|
b.mu.Lock()
|
||||||
|
for _, s := range want {
|
||||||
|
if b.states[s] != stateActive {
|
||||||
|
b.states[s] = statePendingSub
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
err = b.readLoop(c)
|
||||||
|
|
||||||
|
// tear down connection
|
||||||
|
b.writeMu.Lock()
|
||||||
|
if b.conn != nil {
|
||||||
|
_ = b.conn.Close()
|
||||||
|
b.conn = nil
|
||||||
|
}
|
||||||
|
b.writeMu.Unlock()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-b.stopCh:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
time.Sleep(backoff)
|
||||||
|
backoff = minDur(backoff*2, reconnectMax)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *FuturesWebsocket) batcher() {
|
||||||
|
defer b.wg.Done()
|
||||||
|
|
||||||
|
t := time.NewTicker(batchPeriod)
|
||||||
|
defer t.Stop()
|
||||||
|
|
||||||
|
var subs, unsubs []string
|
||||||
|
|
||||||
|
flush := func() {
|
||||||
|
if len(subs) > 0 {
|
||||||
|
_ = b.sendSubscribe(subs)
|
||||||
|
subs = subs[:0]
|
||||||
|
}
|
||||||
|
if len(unsubs) > 0 {
|
||||||
|
_ = b.sendUnsubscribe(unsubs)
|
||||||
|
unsubs = unsubs[:0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-b.stopCh:
|
||||||
|
return
|
||||||
|
case s := <-b.subQ:
|
||||||
|
if s != "" {
|
||||||
|
subs = append(subs, s)
|
||||||
|
}
|
||||||
|
case s := <-b.unsubQ:
|
||||||
|
if s != "" {
|
||||||
|
unsubs = append(unsubs, s)
|
||||||
|
}
|
||||||
|
case <-t.C:
|
||||||
|
flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *FuturesWebsocket) readLoop(c *websocket.Conn) error {
|
||||||
|
for {
|
||||||
|
_, raw, err := c.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("Received message:", string(raw))
|
||||||
|
|
||||||
|
// Stream data or command ack
|
||||||
|
if hasField(raw, `"stream"`) {
|
||||||
var container struct {
|
var container struct {
|
||||||
Stream string `json:"stream"`
|
Stream string `json:"stream"`
|
||||||
Data json.RawMessage `json:"data"`
|
Data json.RawMessage `json:"data"`
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(msgBytes, &container); err != nil {
|
if err := json.Unmarshal(raw, &container); err != nil || container.Stream == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
ch, ok := b.activeStreams[container.Stream]
|
dst, ok := b.sinks[container.Stream]
|
||||||
|
st := b.states[container.Stream]
|
||||||
b.mu.Unlock()
|
b.mu.Unlock()
|
||||||
if !ok {
|
|
||||||
|
if !ok || st == stateInactive || st == statePendingUnsub {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -142,7 +414,6 @@ func (b *FuturesWebsocket) readLoop() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := domain.Message{
|
msg := domain.Message{
|
||||||
Identifier: id,
|
Identifier: id,
|
||||||
Payload: container.Data,
|
Payload: container.Data,
|
||||||
@@ -150,9 +421,223 @@ func (b *FuturesWebsocket) readLoop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case ch <- msg:
|
case dst <- msg:
|
||||||
default:
|
default:
|
||||||
fmt.Printf("channel for %s is full, dropping message\n", container.Stream)
|
// drop on backpressure
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
		// Ack path
		var ack struct {
			Result json.RawMessage `json:"result"`
			ID     int64           `json:"id"`
		}
		if err := json.Unmarshal(raw, &ack); err != nil || ack.ID == 0 {
			continue
		}

		b.ackMu.Lock()
		batch, ok := b.pendingA[ack.ID]
		if ok {
			delete(b.pendingA, ack.ID)
		}
		b.ackMu.Unlock()
		if !ok {
			continue
		}

		ackErr := (len(ack.Result) > 0 && string(ack.Result) != "null")

		switch batch.method {
		case "SUBSCRIBE":
			b.mu.Lock()
			for _, s := range batch.subjects {
				if ackErr {
					b.states[s] = stateError
					// fail all start waiters
					ws := b.startWaiters[s]
					delete(b.startWaiters, s)
					b.mu.Unlock()
					for _, ch := range ws {
						ch <- fmt.Errorf("subscribe failed")
						close(ch)
					}
					b.mu.Lock()
					continue
				}
				// success
				b.states[s] = stateActive
				ws := b.startWaiters[s]
				delete(b.startWaiters, s)
				dst := b.sinks[s]
				b.mu.Unlock()

				for _, ch := range ws {
					ch <- nil
					close(ch)
				}
				_ = dst // messages will flow via readLoop
				b.mu.Lock()
			}
			b.mu.Unlock()

		case "UNSUBSCRIBE":
			b.mu.Lock()
			for _, s := range batch.subjects {
				if ackErr {
					b.states[s] = stateError
					ws := b.stopWaiters[s]
					delete(b.stopWaiters, s)
					b.mu.Unlock()
					for _, ch := range ws {
						ch <- fmt.Errorf("unsubscribe failed")
						close(ch)
					}
					b.mu.Lock()
					continue
				}
				// success
				b.states[s] = stateInactive
				delete(b.sinks, s) // stop delivering
				ws := b.stopWaiters[s]
				delete(b.stopWaiters, s)
				b.mu.Unlock()
				for _, ch := range ws {
					ch <- nil
					close(ch)
				}
				b.mu.Lock()
			}
			b.mu.Unlock()
		}
	}
}
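The ack handler above completes per-subject waiter channels that the control path registers before a SUBSCRIBE batch is flushed; that registration code is not part of this hunk. The sketch below shows one plausible shape of StartStream under the assumption that it records the sink and a waiter, marks the subject pending, and hands the key to the batch loop via subQ. Field names come from this diff; statePendingSub and everything else here is hypothetical, not the commit's code.

// Sketch only: assumed shape of StartStream, not taken from this commit.
func (b *FuturesWebsocket) StartStream(key string, destination chan<- domain.Message) <-chan error {
	errCh := make(chan error, 1) // buffered so the ack handler never blocks

	b.mu.Lock()
	b.sinks[key] = destination
	b.states[key] = statePendingSub // hypothetical "waiting for ack" state
	b.startWaiters[key] = append(b.startWaiters[key], errCh)
	b.mu.Unlock()

	// Hand the subject to the batch loop; it is coalesced into one
	// SUBSCRIBE frame on the next ticker flush.
	select {
	case b.subQ <- key:
	case <-b.stopCh:
		errCh <- fmt.Errorf("provider stopped")
		close(errCh)
	}
	return errCh
}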
func (b *FuturesWebsocket) nextID() int64 {
	return int64(atomic.AddUint64(&b.idSeq, 1))
}

func (b *FuturesWebsocket) sendSubscribe(subjects []string) error {
	if len(subjects) == 0 {
		return nil
	}
	id := b.nextID()
	req := map[string]any{
		"method": "SUBSCRIBE",
		"params": subjects,
		"id":     id,
	}
	if err := b.writeJSON(req); err != nil {
		// mark error and fail waiters
		b.mu.Lock()
		for _, s := range subjects {
			b.states[s] = stateError
			ws := b.startWaiters[s]
			delete(b.startWaiters, s)
			b.mu.Unlock()
			for _, ch := range ws {
				ch <- fmt.Errorf("subscribe send failed")
				close(ch)
			}
			b.mu.Lock()
		}
		b.mu.Unlock()
		return err
	}
	b.ackMu.Lock()
	b.pendingA[id] = ackBatch{method: "SUBSCRIBE", subjects: append([]string(nil), subjects...)}
	b.ackMu.Unlock()
	return nil
}

func (b *FuturesWebsocket) sendUnsubscribe(subjects []string) error {
	if len(subjects) == 0 {
		return nil
	}
	id := b.nextID()
	req := map[string]any{
		"method": "UNSUBSCRIBE",
		"params": subjects,
		"id":     id,
	}
	if err := b.writeJSON(req); err != nil {
		b.mu.Lock()
		for _, s := range subjects {
			b.states[s] = stateError
			ws := b.stopWaiters[s]
			delete(b.stopWaiters, s)
			b.mu.Unlock()
			for _, ch := range ws {
				ch <- fmt.Errorf("unsubscribe send failed")
				close(ch)
			}
			b.mu.Lock()
		}
		b.mu.Unlock()
		return err
	}
	b.ackMu.Lock()
	b.pendingA[id] = ackBatch{method: "UNSUBSCRIBE", subjects: append([]string(nil), subjects...)}
	b.ackMu.Unlock()
	return nil
}

func (b *FuturesWebsocket) writeJSON(v any) error {
	// token bucket
	select {
	case <-b.stopCh:
		return fmt.Errorf("stopped")
	case <-b.tokensCh:
	}

	b.writeMu.Lock()
	c := b.conn
	b.writeMu.Unlock()
	if c == nil {
		return fmt.Errorf("not connected")
	}

	_ = c.SetWriteDeadline(time.Now().Add(writeWait))
	return c.WriteJSON(v)
}
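writeJSON blocks on b.tokensCh before every control frame, which caps how fast SUBSCRIBE/UNSUBSCRIBE requests go out and helps respect Binance's per-connection message limits. The refill side of the bucket is not in this excerpt; below is only a minimal sketch of how it could be replenished, assuming tokensCh is a buffered chan struct{} and that the interval is chosen elsewhere in the file.

// Sketch only: hypothetical token-refill goroutine for the writeJSON rate limit.
func (b *FuturesWebsocket) refillTokens(interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-b.stopCh:
			return
		case <-t.C:
			select {
			case b.tokensCh <- struct{}{}: // add a token if the bucket has room
			default: // bucket full, skip this tick
			}
		}
	}
}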
/* utilities */

func (b *FuturesWebsocket) snapshotDesired(onlyTrue bool) []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	var out []string
	for s, want := range b.desired {
		if !onlyTrue || want {
			out = append(out, s)
		}
	}
	return out
}

func minDur(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func hasField(raw []byte, needle string) bool {
	// cheap check; avoids another allocation if it's obviously an ACK
	return json.Valid(raw) && byteContains(raw, needle)
}

func byteContains(b []byte, sub string) bool {
	n := len(sub)
	if n == 0 || len(b) < n {
		return false
	}
	// naive search; sufficient for small frames
	for i := 0; i <= len(b)-n; i++ {
		if string(b[i:i+n]) == sub {
			return true
		}
	}
	return false
}
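byteContains re-implements a plain substring search that the standard library already provides, so a later cleanup could likely drop the helper. A sketch of the equivalent check using "bytes" and "encoding/json"; note that bytes.Contains reports true for an empty needle, a case hasField never passes in this file.

// Sketch only: the same check via the standard library.
func hasFieldStd(raw []byte, needle string) bool {
	return json.Valid(raw) && bytes.Contains(raw, []byte(needle))
}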
@@ -1,19 +1,16 @@
package provider

-import (
-	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
-)
+import "gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"

type Provider interface {
	Start() error
	Stop()

-	RequestStream(subject string, channel chan domain.Message) error
-	CancelStream(subject string)
-	GetActiveStreams() []string
-	IsStreamActive(subject string) bool
+	StartStream(key string, destination chan<- domain.Message) <-chan error
+	StopStream(key string) <-chan error

-	Fetch(subject string) (domain.Message, error)
+	Fetch(key string) (domain.Message, error)

-	IsValidSubject(subject string, isFetch bool) bool
+	IsStreamActive(key string) bool
+	IsValidSubject(key string, isFetch bool) bool
}
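The interface change makes stream start and stop asynchronous: both calls now return a receive-only error channel that reports the outcome once the provider's batched request has been acknowledged. A minimal sketch of how a caller such as the manager might consume it; the stream key, buffer size, and timeout below are illustrative and not part of the commit.

// Sketch only: waiting on the asynchronous subscribe result.
func startWithTimeout(p provider.Provider) error {
	dest := make(chan domain.Message, 1024)
	select {
	case err := <-p.StartStream("btcusdt@aggTrade", dest):
		return err // nil once the subscribe ack arrives
	case <-time.After(10 * time.Second): // illustrative timeout, not from the commit
		return fmt.Errorf("timed out waiting for subscribe ack")
	}
}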
@@ -13,9 +13,9 @@ type Router struct {
	mu sync.RWMutex
}

-func NewRouter() *Router {
+func NewRouter(buffer int) *Router {
	return &Router{
-		incoming: make(chan domain.Message, 512), // Buffered channel for incoming messages
+		incoming: make(chan domain.Message, buffer), // Buffered channel for incoming messages
		routes:   make(map[domain.Identifier][]chan<- domain.Message),
	}
}
@@ -33,7 +33,7 @@ func (r *Router) Run() {
			select {
			case ch <- msg:
			default:
-				fmt.Println("Dropped message, buffer full!!!") // TODO: Handle full buffer case more gracefully
+				fmt.Println("Router could not push message to a full buffer...") // TODO: Handle full buffer case more gracefully
			}
		}
		r.mu.RUnlock()
@@ -22,7 +22,7 @@ func NewGRPCControlServer(m *manager.Manager) *GRPCControlServer {
}

// StartStream creates a new session. It does NOT attach client channels.
-// Your streaming RPC should later call GetChannels(sessionID, opts).
+// Your streaming RPC should later call AttachClient(sessionID, opts).
func (s *GRPCControlServer) StartStream(_ context.Context, req *pb.StartStreamRequest) (*pb.StartStreamResponse, error) {
	sessionID, err := s.manager.NewSession(time.Duration(1) * time.Minute) // timeout set to 1 minute
	if err != nil {
@@ -51,7 +51,7 @@ func (s *GRPCControlServer) ConfigureStream(_ context.Context, req *pb.Configure
		ids = append(ids, id)
	}

-	if err := s.manager.SetSubscriptions(streamID, ids); err != nil {
+	if err := s.manager.ConfigureSession(streamID, ids); err != nil {
		// Map common manager errors to gRPC codes.
		switch err {
		case manager.ErrSessionNotFound:
@@ -28,15 +28,7 @@ func (s *GRPCStreamingServer) ConnectStream(req *pb.ConnectStreamRequest, stream
		return fmt.Errorf("invalid UUID: %w", err)
	}

-	// Defaults; tune or map from req if your proto carries options.
-	opts := manager.ChannelOpts{
-		InBufSize:    256,
-		OutBufSize:   1024,
-		DropOutbound: true, // do not let slow clients stall producers
-		DropInbound:  true, // irrelevant here (we don't send inbound), safe default
-	}
-
-	_, out, err := s.manager.GetChannels(sessionID, opts)
+	_, out, err := s.manager.AttachClient(sessionID, 256, 1024)
	if err != nil {
		return fmt.Errorf("attach channels: %w", err)
	}
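ConnectStream only attaches to the session in this hunk; the loop that drains out into the gRPC stream lies outside the diff. Below is a rough sketch of what that forwarding loop might look like, assuming out is a receive channel of domain.Message and that the response message carries an identifier and payload; the proto field names and Identifier.String() call are guesses, not taken from this commit.

// Sketch only: hypothetical forwarding loop from the manager's out channel to the client.
for {
	select {
	case <-stream.Context().Done():
		return stream.Context().Err()
	case msg, ok := <-out:
		if !ok {
			return nil // session closed by the manager
		}
		if err := stream.Send(&pb.ConnectStreamResponse{
			Identifier: msg.Identifier.String(), // hypothetical proto fields
			Payload:    msg.Payload,
		}); err != nil {
			return err
		}
	}
}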