Manager paradigm change to an event-loop concurrency style; began implementing batching in binance futures_websocket.go
@@ -17,10 +17,10 @@ import (
func main() {
	fmt.Println("Starting Data Service...")

	// Setup
	r := router.NewRouter()
	r := router.NewRouter(2048)
	m := manager.NewManager(r)
	binanceFutures := binance.NewFuturesWebsocket()
	m.AddProvider("binance_futures_websocket", binanceFutures)
	_ = m.AddProvider("binance_futures_websocket", binanceFutures)

	// gRPC Control Server
	grpcControlServer := grpc.NewServer()

services/data_service/internal/manager/helper.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package manager

import (
	"fmt"

	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/provider"
)

// constErr is a lightweight helper for defining package-level errors inline.
type constErr string

func (e constErr) Error() string { return string(e) }
func errorf(s string) error      { return constErr(s) }

// copySet copies a set of identifiers into a new map.
func copySet(in map[domain.Identifier]struct{}) map[domain.Identifier]struct{} {
	out := make(map[domain.Identifier]struct{}, len(in))
	for k := range in {
		out[k] = struct{}{}
	}
	return out
}

// identifierSetDifferences computes additions and deletions from old -> next.
func identifierSetDifferences(old map[domain.Identifier]struct{}, next []domain.Identifier) (toAdd, toDel []domain.Identifier) {
	newSet := make(map[domain.Identifier]struct{}, len(next))
	for _, id := range next {
		newSet[id] = struct{}{}
		if _, ok := old[id]; !ok {
			toAdd = append(toAdd, id)
		}
	}
	for id := range old {
		if _, ok := newSet[id]; !ok {
			toDel = append(toDel, id)
		}
	}
	return
}

// joined aggregates multiple errors; see join below.
type joined struct{ es []error }

func (j joined) Error() string {
	switch n := len(j.es); {
	case n == 0:
		return ""
	case n == 1:
		return j.es[0].Error()
	default:
		s := j.es[0].Error()
		for i := 1; i < n; i++ {
			s += "; " + j.es[i].Error()
		}
		return s
	}
}

func join(es []error) error {
	if len(es) == 0 {
		return nil
	}
	return joined{es}
}

// resolveProvider parses a raw identifier and looks up the provider.
func (m *Manager) resolveProvider(id domain.Identifier) (provider.Provider, string, error) {
	provName, subj, ok := id.ProviderSubject()
	if !ok || provName == "" || subj == "" {
		return nil, "", ErrInvalidIdentifier
	}
	p := m.providers[provName]
	if p == nil {
		return nil, "", fmt.Errorf("%w: %s", ErrUnknownProvider, provName)
	}
	return p, subj, nil
}

// incrementStreamRefCount increments the refcount and reports whether it transitioned 0 -> 1.
func (m *Manager) incrementStreamRefCount(id domain.Identifier) bool {
	rc := m.streamRef[id] + 1
	m.streamRef[id] = rc
	return rc == 1
}

// decrementStreamRefCount decrements the refcount and reports whether it transitioned 1 -> 0.
func (m *Manager) decrementStreamRefCount(id domain.Identifier) bool {
	rc, ok := m.streamRef[id]
	if !ok {
		return false
	}
	rc--
	if rc <= 0 {
		delete(m.streamRef, id)
		return true
	}
	m.streamRef[id] = rc
	return false
}
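
A quick illustration (not part of the commit) of how these helpers compose; startOne is a hypothetical per-stream start function, and only identifierSetDifferences and join come from this file:

	func reconcileSketch(old map[domain.Identifier]struct{}, next []domain.Identifier) error {
		toAdd, toDel := identifierSetDifferences(old, next)
		_ = toDel // deletions would be handled symmetrically

		var errs []error
		for _, id := range toAdd {
			if err := startOne(id); err != nil { // startOne: hypothetical
				errs = append(errs, err)
			}
		}
		return join(errs) // nil when empty, "e1; e2; ..." otherwise
	}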
@@ -1,10 +1,7 @@
package manager

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/google/uuid"
@@ -13,512 +10,376 @@ import (
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/router"
)

var (
	ErrSessionNotFound    = errors.New("session not found")
	ErrSessionClosed      = errors.New("session closed")
	ErrInvalidIdentifier  = errors.New("invalid identifier")
	ErrUnknownProvider    = errors.New("unknown provider")
	ErrClientAlreadyBound = errors.New("client channels already bound")
)

const (
	defaultInternalBuf = 1024
	defaultClientBuf   = 256
)

type ChannelOpts struct {
	InBufSize    int
	OutBufSize   int
	DropOutbound bool // If true, drop outbound to client when its buffer is full. If false, block.
	DropInbound  bool // If true, drop inbound from client when internalIn is full. If false, block.
}

// Manager owns providers, sessions, and the router fanout.
// Manager is a single-goroutine actor that owns all state.
type Manager struct {
	// Command channel
	cmdCh chan any

	// State (loop-owned)
	providers         map[string]provider.Provider
	providerStreams   map[domain.Identifier]chan domain.Message
	rawReferenceCount map[domain.Identifier]int

	sessions  map[uuid.UUID]*session
	streamRef map[domain.Identifier]int

	// Router
	router *router.Router
	mu     sync.Mutex
}

type session struct {
	id uuid.UUID

	// Stable internal channels.
	internalIn  chan domain.Message // forwarded into router.IncomingChannel()
	internalOut chan domain.Message // registered as router route target, forwarded to clientOut (or dropped if unattached)

	// Client channels (optional). Created on GetChannels and cleared on DetachClient.
	clientIn  chan domain.Message // caller writes
	clientOut chan domain.Message // caller reads

	// Controls the permanent internalIn forwarder.
	cancelInternal context.CancelFunc

	// Permanent outbound drain control.
	egressWG sync.WaitGroup

	// Policy
	dropWhenUnattached bool // always drop when no client attached
	dropWhenSlow       bool // mirror ChannelOpts.DropOutbound
	dropInbound        bool // mirror ChannelOpts.DropInbound

	bound     map[domain.Identifier]struct{} // map for quick existence checks
	closed    bool
	idleAfter time.Duration
	idleTimer *time.Timer
}

func NewManager(r *router.Router) *Manager {
	go r.Run()
	return &Manager{
// New creates a manager and starts its run loop.
func New(r *router.Router) *Manager {
	m := &Manager{
		cmdCh:             make(chan any, 256),
		providers:         make(map[string]provider.Provider),
		providerStreams:   make(map[domain.Identifier]chan domain.Message),
		rawReferenceCount: make(map[domain.Identifier]int),
		sessions:          make(map[uuid.UUID]*session),
		streamRef:         make(map[domain.Identifier]int),
		router:            r,
	}
	go r.Run()
	go m.run()
	return m
}

// NewSession creates a session with stable internal channels and two permanent workers:
// 1) internalIn -> router.Incoming 2) internalOut -> clientOut (or discard if unattached)
// Public API (posts commands to the loop)

// AddProvider adds and starts a new provider.
func (m *Manager) AddProvider(name string, p provider.Provider) error {
	resp := make(chan error, 1)
	m.cmdCh <- addProviderCmd{name: name, p: p, resp: resp}
	return <-resp
}

// RemoveProvider stops and removes a provider, cleaning up all sessions.
func (m *Manager) RemoveProvider(name string) error {
	resp := make(chan error, 1)
	m.cmdCh <- removeProviderCmd{name: name, resp: resp}
	return <-resp
}

// NewSession creates a new session with the given idle timeout.
func (m *Manager) NewSession(idleAfter time.Duration) (uuid.UUID, error) {
	resp := make(chan struct {
		id  uuid.UUID
		err error
	}, 1)
	m.cmdCh <- newSessionCmd{idleAfter: idleAfter, resp: resp}
	r := <-resp
	return r.id, r.err
}

// AttachClient attaches a client to a session and returns freshly created client channels.
func (m *Manager) AttachClient(id uuid.UUID, inBuf, outBuf int) (chan<- domain.Message, <-chan domain.Message, error) {
	resp := make(chan struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}, 1)
	m.cmdCh <- attachCmd{sid: id, inBuf: inBuf, outBuf: outBuf, resp: resp}
	r := <-resp
	return r.cin, r.cout, r.err
}

// DetachClient detaches the client from the session, closes the client channels, and arms the idle timeout.
func (m *Manager) DetachClient(id uuid.UUID) error {
	resp := make(chan error, 1)
	m.cmdCh <- detachCmd{sid: id, resp: resp}
	return <-resp
}

// ConfigureSession sets the next set of identifiers for the session, starting and stopping streams as needed.
func (m *Manager) ConfigureSession(id uuid.UUID, next []domain.Identifier) error {
	resp := make(chan error, 1)
	m.cmdCh <- configureCmd{sid: id, next: next, resp: resp}
	return <-resp
}

// CloseSession closes and removes the session, cleaning up all bindings.
func (m *Manager) CloseSession(id uuid.UUID) error {
	resp := make(chan error, 1)
	m.cmdCh <- closeSessionCmd{sid: id, resp: resp}
	return <-resp
}
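
A minimal end-to-end sketch (not part of the commit) of driving this public API from a caller; the identifier value is a stand-in:

	func useManagerSketch(m *manager.Manager, id domain.Identifier) error {
		sid, err := m.NewSession(time.Minute) // auto-close after 1m idle
		if err != nil {
			return err
		}
		defer func() { _ = m.CloseSession(sid) }()

		if err := m.ConfigureSession(sid, []domain.Identifier{id}); err != nil {
			return err
		}
		_, out, err := m.AttachClient(sid, 256, 1024)
		if err != nil {
			return err
		}
		msg := <-out // routed messages arrive here
		_ = msg
		return m.DetachClient(sid)
	}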

// The main loop of the manager, processing commands serially.
func (m *Manager) run() {
	for {
		msg := <-m.cmdCh
		switch c := msg.(type) {
		case addProviderCmd:
			m.handleAddProvider(c)
		case removeProviderCmd:
			m.handleRemoveProvider(c)
		case newSessionCmd:
			m.handleNewSession(c)
		case attachCmd:
			m.handleAttach(c)
		case detachCmd:
			m.handleDetach(c)
		case configureCmd:
			m.handleConfigure(c)
		case closeSessionCmd:
			m.handleCloseSession(c)
		}
	}
}
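
The command pattern above gives every public method the same shape: allocate a buffered reply channel, post a command struct, block on the reply. A sketch of that round trip for a hypothetical ping command (not in the commit):

	type pingCmd struct{ resp chan error }

	func (m *Manager) Ping() error {
		resp := make(chan error, 1) // buffered so the loop never blocks on reply
		m.cmdCh <- pingCmd{resp: resp}
		return <-resp
	}

	// and in run():  case pingCmd: c.resp <- nil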

// Command handlers, run in the loop goroutine. With a single goroutine, no locking is needed.

func (m *Manager) handleAddProvider(cmd addProviderCmd) {
	if _, ok := m.providers[cmd.name]; ok {
		cmd.resp <- fmt.Errorf("provider exists: %s", cmd.name)
		return
	}
	if err := cmd.p.Start(); err != nil {
		cmd.resp <- fmt.Errorf("start provider %s: %w", cmd.name, err)
		return
	}
	m.providers[cmd.name] = cmd.p
	cmd.resp <- nil
}

func (m *Manager) handleRemoveProvider(cmd removeProviderCmd) {
	p, ok := m.providers[cmd.name]
	if !ok {
		cmd.resp <- fmt.Errorf("provider not found: %s", cmd.name)
		return
	}

	// Clean all identifiers belonging to this provider. Iterates through sessions to reduce provider burden.
	for _, s := range m.sessions {
		for ident := range s.bound {
			provName, subj, ok := ident.ProviderSubject()
			if !ok || provName != cmd.name {
				// TODO: log a warning; this should never happen
				continue
			}
			if s.attached && s.clientOut != nil {
				m.router.DeregisterRoute(ident, s.clientOut)
			}
			delete(s.bound, ident)

			// decrementStreamRefCount returns true if this was the last ref, in which case the stream should stop.
			if ident.IsRaw() && m.decrementStreamRefCount(ident) && subj != "" {
				_ = p.StopStream(subj) // best-effort, as we will remove the provider anyway
			}
		}
	}

	// The loop above should be sufficient, but as a precaution also clean up any dangling streamRef entries.
	for id := range m.streamRef {
		provName, _, ok := id.ProviderSubject()
		if !ok || provName != cmd.name {
			continue
		}
		fmt.Printf("manager: warning — dangling streamRef for %s after removing provider %s\n", id.Key(), cmd.name)
		delete(m.streamRef, id)
	}

	p.Stop()
	delete(m.providers, cmd.name)
	cmd.resp <- nil
}

func (m *Manager) handleNewSession(cmd newSessionCmd) {
	s := &session{
		id:          uuid.New(),
		internalIn:  make(chan domain.Message, defaultInternalBuf),
		internalOut: make(chan domain.Message, defaultInternalBuf),
		bound:       make(map[domain.Identifier]struct{}),
		idleAfter:   idleAfter,
		dropWhenUnattached: true,
		idleAfter: cmd.idleAfter,
	}
	ctx, cancel := context.WithCancel(context.Background())
	s.cancelInternal = cancel

	m.mu.Lock()
	m.sessions[s.id] = s
	incoming := m.router.IncomingChannel()
	m.mu.Unlock()
	// Arm idle timer to auto-close the session.
	s.idleTimer = time.AfterFunc(cmd.idleAfter, func() {
		m.cmdCh <- closeSessionCmd{sid: s.id, resp: make(chan error, 1)}
	})

	// Permanent forwarder: internalIn -> router.Incoming
	go func(ctx context.Context, in <-chan domain.Message) {
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok := <-in:
	m.sessions[s.id] = s // added after arming, in case of an immediate timeout or an error while arming the timer

	cmd.resp <- struct {
		id  uuid.UUID
		err error
	}{id: s.id, err: nil}
}

func (m *Manager) handleAttach(cmd attachCmd) {
	s, ok := m.sessions[cmd.sid]
	if !ok {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrSessionNotFound}
		return
	}
				// Hook: filter/validate/meter/throttle inbound to router here.
				incoming <- msg
	if s.closed {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrSessionClosed}
		return
	}
			}
		}
	}(ctx, s.internalIn)

	// Permanent drain: internalOut -> clientOut (drop if unattached)
	s.egressWG.Add(1)
	go func(sid uuid.UUID) {
		defer s.egressWG.Done()
		for msg := range s.internalOut {
			m.mu.Lock()
			// Session might be gone; re-fetch safely.
			s, ok := m.sessions[sid]
			var cout chan domain.Message
			var dropSlow, attached bool
			if ok {
				cout = s.clientOut
				dropSlow = s.dropWhenSlow
				attached = cout != nil
			}
			m.mu.Unlock()

			switch {
			case !attached:
				// unattached => drop

			case dropSlow: // typical case when attached
				select {
				case cout <- msg:
				default:
					// drop on slow consumer
	if s.attached {
		cmd.resp <- struct {
			cin  chan<- domain.Message
			cout <-chan domain.Message
			err  error
		}{nil, nil, ErrClientAlreadyAttached}
		return
	}

			default:
				cout <- msg // push to client, block if slow
			}
		}
	}(s.id)
	cin, cout, err := m.attachSession(s, cmd.inBuf, cmd.outBuf)

	return s.id, nil
	cmd.resp <- struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}{cin, cout, err}
}

// GetChannels creates a fresh client attachment and hooks inbound (clientIn -> internalIn).
// Outbound delivery is handled by the permanent drain.
// Only one attachment at a time.
func (m *Manager) GetChannels(id uuid.UUID, opts ChannelOpts) (chan<- domain.Message, <-chan domain.Message, error) {
	if opts.InBufSize <= 0 {
		opts.InBufSize = defaultClientBuf
	}
	if opts.OutBufSize <= 0 {
		opts.OutBufSize = defaultClientBuf
	}

	m.mu.Lock()
	s, ok := m.sessions[id]
func (m *Manager) handleDetach(cmd detachCmd) {
	s, ok := m.sessions[cmd.sid]
	if !ok {
		m.mu.Unlock()
		return nil, nil, ErrSessionNotFound
		cmd.resp <- ErrSessionNotFound
		return
	}
	if s.closed {
		m.mu.Unlock()
		return nil, nil, ErrSessionClosed
		cmd.resp <- ErrSessionClosed
		return
	}
	if s.clientIn != nil || s.clientOut != nil {
		m.mu.Unlock()
		return nil, nil, ErrClientAlreadyBound
	if !s.attached {
		cmd.resp <- ErrClientNotAttached
		return
	}

	// Create attachment channels.
	cin := make(chan domain.Message, opts.InBufSize)
	cout := make(chan domain.Message, opts.OutBufSize)
	s.clientIn, s.clientOut = cin, cout
	s.dropWhenSlow = opts.DropOutbound
	s.dropInbound = opts.DropInbound
	_ = m.detachSession(cmd.sid, s)

	// Stop idle timer while attached.
	if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}

	internalIn := s.internalIn
	m.mu.Unlock()

	// Forward clientIn -> internalIn
	go func(src <-chan domain.Message, dst chan<- domain.Message, drop bool) {
		for msg := range src {
			if drop {
				select {
				case dst <- msg:
				default:
					// drop inbound on internal backpressure
				}
			} else {
				dst <- msg
			}
		}
		// client closed input; forwarder exits
	}(cin, internalIn, opts.DropInbound)

	// Return directional views.
	return (chan<- domain.Message)(cin), (<-chan domain.Message)(cout), nil
	cmd.resp <- nil
}

// DetachClient clears the client attachment and starts the idle close timer if configured.
// Does not close clientOut, to avoid send-on-closed races with the permanent drain.
func (m *Manager) DetachClient(id uuid.UUID) error {
	m.mu.Lock()
	s, ok := m.sessions[id]
func (m *Manager) handleConfigure(c configureCmd) {
	s, ok := m.sessions[c.sid]
	if !ok {
		m.mu.Unlock()
		return ErrSessionNotFound
		c.resp <- ErrSessionNotFound
		return
	}
	if s.closed {
		m.mu.Unlock()
		return ErrSessionClosed
	}
	cin := s.clientIn
	// Make unattached; the permanent drain will drop while nil.
	s.clientIn, s.clientOut = nil, nil
	after := s.idleAfter
	m.mu.Unlock()

	if cin != nil {
		// We own the channel. Closing signals writers to stop.
		close(cin)
		c.resp <- ErrSessionClosed
		return
	}

	if after > 0 {
		m.mu.Lock()
		ss, ok := m.sessions[id]
		if ok && !ss.closed && ss.clientOut == nil && ss.idleTimer == nil {
			ss.idleTimer = time.AfterFunc(after, func() { _ = m.CloseSession(id) })
		}
		m.mu.Unlock()
	}
	return nil
}
	old := copySet(s.bound)
	toAdd, toDel := identifierSetDifferences(old, c.next)

func (m *Manager) Subscribe(id uuid.UUID, ids ...domain.Identifier) error {
	m.mu.Lock()
	s, ok := m.sessions[id]
	if !ok {
		m.mu.Unlock()
		return ErrSessionNotFound
	}
	out := s.internalOut
	m.mu.Unlock()

	for _, ident := range ids {
		m.mu.Lock()
		if _, exists := s.bound[ident]; exists {
			m.mu.Unlock()
			continue
		}
		s.bound[ident] = struct{}{}
		m.mu.Unlock()

		if ident.IsRaw() {
			if err := m.provisionRawStream(ident); err != nil {
				return err
			}
		}
		m.router.RegisterRoute(ident, out)
	}
	return nil
}

func (m *Manager) Unsubscribe(id uuid.UUID, ids ...domain.Identifier) error {
	m.mu.Lock()
	s, ok := m.sessions[id]
	if !ok {
		m.mu.Unlock()
		return ErrSessionNotFound
	}
	out := s.internalOut
	m.mu.Unlock()

	for _, ident := range ids {
		m.mu.Lock()
		if _, exists := s.bound[ident]; !exists {
			m.mu.Unlock()
			continue
	// 1) Handle removals first.
	for _, ident := range toDel {
		if s.attached && s.clientOut != nil {
			m.router.DeregisterRoute(ident, s.clientOut)
		}
		delete(s.bound, ident)
		m.mu.Unlock()

		m.router.DeregisterRoute(ident, out)
		if ident.IsRaw() {
			m.releaseRawStreamIfUnused(ident)
			if m.decrementStreamRefCount(ident) {
				if p, subj, err := m.resolveProvider(ident); err == nil {
					_ = p.StopStream(subj) // fire-and-forget
				}
			}
		}
	}
	return nil
}

func (m *Manager) SetSubscriptions(id uuid.UUID, next []domain.Identifier) error {
	m.mu.Lock()
	s, ok := m.sessions[id]
	if !ok {
		m.mu.Unlock()
		return ErrSessionNotFound
	// 2) Handle additions. Collect starts to await.
	type startItem struct {
		id domain.Identifier
		ch <-chan error
	}
	old := make(map[domain.Identifier]struct{}, len(s.bound))
	for k := range s.bound {
		old[k] = struct{}{}
	}
	out := s.internalOut
	m.mu.Unlock()

	toAdd, toDel := m.identifierSetDifferences(old, next)
	var starts []startItem
	var initErrs []error

	for _, ident := range toAdd {
		m.mu.Lock()
		// Bind intent now.
		s.bound[ident] = struct{}{}
		m.mu.Unlock()

		if ident.IsRaw() {
			if err := m.provisionRawStream(ident); err != nil {
				return err
		if !ident.IsRaw() {
			if s.attached && s.clientOut != nil {
				m.router.RegisterRoute(ident, s.clientOut)
			}
		}
			m.router.RegisterRoute(ident, out)
			continue
		}

	for _, ident := range toDel {
		m.mu.Lock()
		_, exists := s.bound[ident]
		p, subj, err := m.resolveProvider(ident)
		if err != nil {
			delete(s.bound, ident)
			m.mu.Unlock()
			initErrs = append(initErrs, err)
			continue
		}
		if !p.IsValidSubject(subj, false) {
			delete(s.bound, ident)
			initErrs = append(initErrs, fmt.Errorf("invalid subject %q for provider", subj))
			continue
		}

		if exists {
			m.router.DeregisterRoute(ident, out)
			if ident.IsRaw() {
				m.releaseRawStreamIfUnused(ident)
			}
		}
	}
	return nil
}
		first := m.incrementStreamRefCount(ident)

func (m *Manager) CloseSession(id uuid.UUID) error {
	m.mu.Lock()
	s, ok := m.sessions[id]
	if !ok {
		m.mu.Unlock()
		return ErrSessionNotFound
	}
	if s.closed {
		m.mu.Unlock()
		return nil
	}
	s.closed = true
	if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}
	out := s.internalOut
	ids := make([]domain.Identifier, 0, len(s.bound))
	for k := range s.bound {
		ids = append(ids, k)
	}
	cancelInternal := s.cancelInternal
	// Snapshot clientIn/Out for shutdown signals after unlock.
	cin := s.clientIn
	cout := s.clientOut
	// Remove from map before unlock to prevent new work.
	delete(m.sessions, id)
	m.mu.Unlock()

	// Deregister all routes and release raw streams.
	for _, ident := range ids {
		m.router.DeregisterRoute(ident, out)
		if ident.IsRaw() {
			m.releaseRawStreamIfUnused(ident)
		if first || !p.IsStreamActive(subj) {
			ch := p.StartStream(subj, m.router.IncomingChannel())
			starts = append(starts, startItem{id: ident, ch: ch})
		} else if s.attached && s.clientOut != nil {
			// Already active, just register for this session.
			m.router.RegisterRoute(ident, s.clientOut)
		}
	}

	// Stop inbound forwarder and close internals.
	if cancelInternal != nil {
		cancelInternal()
	}
	close(s.internalIn)  // end internalIn forwarder
	close(s.internalOut) // signal drain to finish

	// Wait for drain exit, then close clientOut if attached at close time.
	s.egressWG.Wait()
	if cout != nil {
		close(cout)
	}
	// Close clientIn to stop client writers if still attached.
	if cin != nil {
		close(cin)
	}
	return nil
}

func (m *Manager) AddProvider(name string, p provider.Provider) error {
	m.mu.Lock()
	if _, exists := m.providers[name]; exists {
		m.mu.Unlock()
		return fmt.Errorf("provider exists: %s", name)
	}
	m.mu.Unlock()

	if err := p.Start(); err != nil {
		return fmt.Errorf("start provider %s: %w", name, err)
	}

	m.mu.Lock()
	m.providers[name] = p
	m.mu.Unlock()
	return nil
}

func (m *Manager) RemoveProvider(name string) error {
	m.mu.Lock()
	_, ok := m.providers[name]
	m.mu.Unlock()
	if !ok {
		return fmt.Errorf("provider not found: %s", name)
	}
	// TODO: implement full drain and cancel of all streams for this provider if needed.
	return fmt.Errorf("RemoveProvider not implemented")
}

func (m *Manager) provisionRawStream(id domain.Identifier) error {
	providerName, subject, ok := id.ProviderSubject()
	if !ok || providerName == "" || subject == "" {
		return ErrInvalidIdentifier
	}

	m.mu.Lock()
	prov, exists := m.providers[providerName]
	if !exists {
		m.mu.Unlock()
		return ErrUnknownProvider
	}
	if !prov.IsValidSubject(subject, false) {
		m.mu.Unlock()
		return fmt.Errorf("invalid subject %q for provider %s", subject, providerName)
	}

	if ch, ok := m.providerStreams[id]; ok {
		m.rawReferenceCount[id] = m.rawReferenceCount[id] + 1
		m.mu.Unlock()
		_ = ch
		return nil
	}

	ch := make(chan domain.Message, 64)
	if err := prov.RequestStream(subject, ch); err != nil {
		m.mu.Unlock()
		return fmt.Errorf("provision %v: %w", id, err)
	}
	m.providerStreams[id] = ch
	m.rawReferenceCount[id] = 1
	incoming := m.router.IncomingChannel()
	m.mu.Unlock()

	// Provider stream -> router.Incoming
	go func(c chan domain.Message) {
		for msg := range c {
			incoming <- msg
		}
	}(ch)

	return nil
}

func (m *Manager) releaseRawStreamIfUnused(id domain.Identifier) {
	providerName, subject, ok := id.ProviderSubject()
	if !ok {
	// 3) Wait for starts initiated by this call, each with its own timeout.
	if len(starts) == 0 {
		c.resp <- join(initErrs)
		return
	}

	m.mu.Lock()
	rc := m.rawReferenceCount[id] - 1
	if rc <= 0 {
		if ch, ok := m.providerStreams[id]; ok {
			if prov, exists := m.providers[providerName]; exists {
				prov.CancelStream(subject)
	type result struct {
		id  domain.Identifier
		err error
	}
			close(ch)
			delete(m.providerStreams, id)
	done := make(chan result, len(starts))

	for _, si := range starts {
		// Per-start waiter.
		go func(id domain.Identifier, ch <-chan error) {
			select {
			case err := <-ch:
				done <- result{id: id, err: err}
			case <-time.After(statusWaitTotal):
				done <- result{id: id, err: fmt.Errorf("timeout")}
			}
		delete(m.rawReferenceCount, id)
		m.mu.Unlock()
		return
		}(si.id, si.ch)
	}
	m.rawReferenceCount[id] = rc
	m.mu.Unlock()

	// Collect results and apply.
	for i := 0; i < len(starts); i++ {
		r := <-done
		if r.err != nil {
			// Roll back this session's bind and drop the ref.
			delete(s.bound, r.id)
			_ = m.decrementStreamRefCount(r.id)
			initErrs = append(initErrs, fmt.Errorf("start %v: %w", r.id, r.err))
			continue
		}
		// Success: register for any attached sessions that are bound.
		for _, sess := range m.sessions {
			if !sess.attached || sess.clientOut == nil {
				continue
			}
			if _, bound := sess.bound[r.id]; bound {
				m.router.RegisterRoute(r.id, sess.clientOut)
			}
		}
	}

	c.resp <- join(initErrs)
}

func (m *Manager) identifierSetDifferences(old map[domain.Identifier]struct{}, next []domain.Identifier) (toAdd, toDel []domain.Identifier) {
	newSet := make(map[domain.Identifier]struct{}, len(next))
	for _, id := range next {
		newSet[id] = struct{}{}
		if _, ok := old[id]; !ok {
			toAdd = append(toAdd, id)
		}
	}
	for id := range old {
		if _, ok := newSet[id]; !ok {
			toDel = append(toDel, id)
		}
	}
func (m *Manager) handleCloseSession(c closeSessionCmd) {
	s, ok := m.sessions[c.sid]
	if !ok {
		c.resp <- ErrSessionNotFound
		return
	}
	m.closeSession(c.sid, s)
	c.resp <- nil
}
services/data_service/internal/manager/session.go (new file, 114 lines)
@@ -0,0 +1,114 @@
package manager

import (
	"time"

	"github.com/google/uuid"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
)

// attachSession wires channels, stops the idle timer, and registers ready routes.
// Precondition: session exists and is not attached/closed. Runs in loop.
func (m *Manager) attachSession(s *session, inBuf, outBuf int) (chan<- domain.Message, <-chan domain.Message, error) {
	if inBuf <= 0 {
		inBuf = defaultClientBuf
	}
	if outBuf <= 0 {
		outBuf = defaultClientBuf
	}

	cin := make(chan domain.Message, inBuf)
	cout := make(chan domain.Message, outBuf)
	s.clientIn, s.clientOut = cin, cout

	if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}

	// Forward clientIn to router.Incoming with drop on backpressure.
	go func(src <-chan domain.Message, dst chan<- domain.Message) {
		for msg := range src {
			select {
			case dst <- msg:
			default:
				// drop
			}
		}
	}(cin, m.router.IncomingChannel())

	// Register all currently bound identifiers that are ready.
	for ident := range s.bound {
		if !ident.IsRaw() {
			m.router.RegisterRoute(ident, cout)
			continue
		}
		// Raw: register only if the provider stream is active.
		if p, subj, err := m.resolveProvider(ident); err == nil && p.IsStreamActive(subj) {
			m.router.RegisterRoute(ident, cout)
		}
	}

	s.attached = true
	return cin, cout, nil
}

// detachSession deregisters all routes, closes channels, and arms the idle timer.
// Precondition: session exists and is attached. Runs in loop.
func (m *Manager) detachSession(sid uuid.UUID, s *session) error {
	if s.clientOut != nil {
		for ident := range s.bound {
			m.router.DeregisterRoute(ident, s.clientOut)
		}
		close(s.clientOut)
	}
	if s.clientIn != nil {
		close(s.clientIn)
	}
	s.clientIn, s.clientOut = nil, nil
	s.attached = false

	// Arm idle timer to auto-close the session.
	s.idleTimer = time.AfterFunc(s.idleAfter, func() {
		m.cmdCh <- closeSessionCmd{sid: sid, resp: make(chan error, 1)}
	})
	return nil
}

// closeSession performs full teardown and refcount drops. Runs in loop.
func (m *Manager) closeSession(sid uuid.UUID, s *session) {
	if s.closed {
		return
	}
	s.closed = true

	// Detach if attached.
	if s.attached {
		if s.clientOut != nil {
			for ident := range s.bound {
				m.router.DeregisterRoute(ident, s.clientOut)
			}
			close(s.clientOut)
		}
		if s.clientIn != nil {
			close(s.clientIn)
		}
	} else if s.idleTimer != nil {
		s.idleTimer.Stop()
		s.idleTimer = nil
	}

	// Drop refs for raw identifiers and stop streams on last ref. Fire-and-forget.
	for ident := range s.bound {
		if !ident.IsRaw() {
			continue
		}
		if last := m.decrementStreamRefCount(ident); last {
			if p, subj, err := m.resolveProvider(ident); err == nil {
				_ = p.StopStream(subj) // do not wait
			}
		}
	}

	delete(m.sessions, sid)
}

services/data_service/internal/manager/types.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package manager

import (
	"time"

	"github.com/google/uuid"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/provider"
)

// Shared constants.
const (
	defaultClientBuf = 256
	statusWaitTotal  = 8 * time.Second
)

// Manager-level errors.
var (
	ErrSessionNotFound       = errorf("session not found")
	ErrSessionClosed         = errorf("session closed")
	ErrClientAlreadyAttached = errorf("client already attached")
	ErrClientNotAttached     = errorf("client not attached")
	ErrInvalidIdentifier     = errorf("invalid identifier")
	ErrUnknownProvider       = errorf("unknown provider")
)

// session holds per-session state. Owned by the manager loop.
type session struct {
	id uuid.UUID

	clientIn  chan domain.Message // caller writes
	clientOut chan domain.Message // caller reads

	bound map[domain.Identifier]struct{}

	closed    bool
	attached  bool
	idleAfter time.Duration
	idleTimer *time.Timer
}

// Commands posted into the manager loop. One struct per action.
type addProviderCmd struct {
	name string
	p    provider.Provider
	resp chan error
}

type removeProviderCmd struct {
	name string
	resp chan error
}

type newSessionCmd struct {
	idleAfter time.Duration
	resp      chan struct {
		id  uuid.UUID
		err error
	}
}

type attachCmd struct {
	sid           uuid.UUID
	inBuf, outBuf int
	resp          chan struct {
		cin  chan<- domain.Message
		cout <-chan domain.Message
		err  error
	}
}

type detachCmd struct {
	sid  uuid.UUID
	resp chan error
}

type configureCmd struct {
	sid  uuid.UUID
	next []domain.Identifier
	resp chan error // returns after starts from this call succeed or time out
}

type closeSessionCmd struct {
	sid  uuid.UUID
	resp chan error
}
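
One possible follow-up refactor (not in this commit) would collapse the per-command reply boilerplate into a single generic helper; ask is a hypothetical name:

	// ask posts a command carrying a reply channel and blocks for the answer.
	func ask[T any](cmdCh chan<- any, build func(chan T) any) T {
		resp := make(chan T, 1)
		cmdCh <- build(resp)
		return <-resp
	}

	// Usage sketch:
	// err := ask(m.cmdCh, func(r chan error) any { return detachCmd{sid: sid, resp: r} })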
@@ -2,139 +2,411 @@ package binance

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gorilla/websocket"
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
)

const (
	wsURL              = "wss://fstream.binance.com/stream"
	writeRatePerSecond = 8               // hard cap per second
	writeBurst         = 8               // token bucket burst
	writeWait          = 5 * time.Second // per write deadline

	batchPeriod = 1 * time.Second // batch SUB/UNSUB every second

	reconnectMin = 500 * time.Millisecond
	reconnectMax = 10 * time.Second
)

// internal stream states (provider stays simple; manager relies on IsStreamActive)
type streamState uint8

const (
	stateUnknown streamState = iota
	statePendingSub
	stateActive
	statePendingUnsub
	stateInactive
	stateError
)

type FuturesWebsocket struct {
	conn          *websocket.Conn
	activeStreams map[string]chan domain.Message
	dial websocket.Dialer
	hdr  http.Header

	// desired subscriptions and sinks
	mu      sync.Mutex
	desired map[string]bool                  // subject -> want subscribed
	sinks   map[string]chan<- domain.Message // subject -> destination
	states  map[string]streamState           // subject -> state

	// waiters per subject
	startWaiters map[string][]chan error
	stopWaiters  map[string][]chan error

	// batching queues
	subQ   chan string
	unsubQ chan string

	// websocket
	writeMu sync.Mutex
	conn    *websocket.Conn

	// rate limit tokens
	tokensCh chan struct{}
	stopRate chan struct{}

	// lifecycle
	stopCh chan struct{}
	wg     sync.WaitGroup

	// ack tracking
	ackMu    sync.Mutex
	idSeq    uint64
	pendingA map[int64]ackBatch
}

type ackBatch struct {
	method   string // "SUBSCRIBE" or "UNSUBSCRIBE"
	subjects []string
}

func NewFuturesWebsocket() *FuturesWebsocket {
	return &FuturesWebsocket{
		activeStreams: make(map[string]chan domain.Message),
		desired:       make(map[string]bool),
		sinks:         make(map[string]chan<- domain.Message),
		states:        make(map[string]streamState),
		startWaiters:  make(map[string][]chan error),
		stopWaiters:   make(map[string][]chan error),
		subQ:          make(chan string, 4096),
		unsubQ:        make(chan string, 4096),
		tokensCh:      make(chan struct{}, writeBurst),
		stopRate:      make(chan struct{}),
		stopCh:        make(chan struct{}),
		pendingA:      make(map[int64]ackBatch),
	}
}
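
A sketch (not in the commit) of how a caller might await the ack channel returned by StartStream below; the stream name is illustrative and dst is an existing chan<- domain.Message:

	b := NewFuturesWebsocket()
	if err := b.Start(); err != nil {
		// handle connect/setup failure
	}
	select {
	case err := <-b.StartStream("btcusdt@aggTrade", dst):
		_ = err // nil on SUBSCRIBE ack, non-nil on failure
	case <-time.After(8 * time.Second):
		// give up; the manager applies a similar timeout via statusWaitTotal
	}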

/* provider.Provider */

func (b *FuturesWebsocket) Start() error {
	c, _, err := websocket.DefaultDialer.Dial("wss://fstream.binance.com/stream", nil)
	if err != nil {
		return fmt.Errorf("connect failed: %w", err)
	// token bucket
	b.wg.Add(1)
	go func() {
		defer b.wg.Done()
		t := time.NewTicker(time.Second / writeRatePerSecond)
		defer t.Stop()
		// prime burst
		for i := 0; i < writeBurst; i++ {
			select {
			case b.tokensCh <- struct{}{}:
			default:
			}
	b.conn = c
	go b.readLoop()
		}
		for {
			select {
			case <-b.stopRate:
				return
			case <-t.C:
				select {
				case b.tokensCh <- struct{}{}:
				default:
				}
			}
		}
	}()

	// connection manager
	b.wg.Add(1)
	go b.run()

	// batcher
	b.wg.Add(1)
	go b.batcher()

	return nil
}

func (b *FuturesWebsocket) Stop() {
	close(b.stopCh)
	close(b.stopRate)

	b.writeMu.Lock()
	if b.conn != nil {
		err := b.conn.Close()
		if err != nil {
			panic(fmt.Errorf("failed to close websocket connection: %w", err))
		_ = b.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye"))
		_ = b.conn.Close()
		b.conn = nil
	}
	b.writeMu.Unlock()

	b.wg.Wait()

	// resolve any remaining waiters with an error
	b.mu.Lock()
	defer b.mu.Unlock()
	for subj, ws := range b.startWaiters {
		for _, ch := range ws {
			select {
			case ch <- errors.New("provider stopped"):
			default:
			}
			close(ch)
		}
		delete(b.startWaiters, subj)
	}
	for subj, ws := range b.stopWaiters {
		for _, ch := range ws {
			select {
			case ch <- errors.New("provider stopped"):
			default:
			}
			close(ch)
		}
		delete(b.stopWaiters, subj)
	}
}

func (b *FuturesWebsocket) RequestStream(subject string, ch chan domain.Message) error {
func (b *FuturesWebsocket) StartStream(subject string, dst chan<- domain.Message) <-chan error {
	fmt.Println("Starting stream for subject:", subject)
	ch := make(chan error, 1)

	if subject == "" {
		ch <- fmt.Errorf("empty subject")
		close(ch)
		return ch
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	// mark desired, update sink
	b.desired[subject] = true
	b.sinks[subject] = dst

	if _, ok := b.activeStreams[subject]; ok {
		return nil
	// fast path: already active
	if b.states[subject] == stateActive {
		b.mu.Unlock()
		ch <- nil
		close(ch)
		return ch
	}

	msg := map[string]interface{}{
		"method": "SUBSCRIBE",
		"params": []string{subject},
		"id":     len(b.activeStreams) + 1,
	// enqueue waiter and transition if needed
	b.startWaiters[subject] = append(b.startWaiters[subject], ch)
	if b.states[subject] != statePendingSub {
		b.states[subject] = statePendingSub
		select {
		case b.subQ <- subject:
		default:
			// queue full → fail fast
			ws := b.startWaiters[subject]
			delete(b.startWaiters, subject)
			b.states[subject] = stateError
			b.mu.Unlock()
			for _, w := range ws {
				w <- fmt.Errorf("subscribe queue full")
				close(w)
			}
	if err := b.conn.WriteJSON(msg); err != nil {
		return fmt.Errorf("subscribe failed: %w", err)
			return ch
		}
	}

	b.activeStreams[subject] = ch
	fmt.Println("Subscribed to stream:", subject)
	return nil
}
	b.mu.Unlock()
	return ch
}

func (b *FuturesWebsocket) CancelStream(subject string) {
func (b *FuturesWebsocket) StopStream(subject string) <-chan error {
	fmt.Println("Stopping stream for subject:", subject)
	ch := make(chan error, 1)

	if subject == "" {
		ch <- fmt.Errorf("empty subject")
		close(ch)
		return ch
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	// mark no longer desired; keep sink until UNSUB ack to avoid drops
	b.desired[subject] = false

	if _, ok := b.activeStreams[subject]; !ok {
		return
	// already inactive
	if b.states[subject] == stateInactive {
		b.mu.Unlock()
		ch <- nil
		close(ch)
		return ch
	}

	msg := map[string]interface{}{
		"method": "UNSUBSCRIBE",
		"params": []string{subject},
		"id":     len(b.activeStreams) + 1000,
	// enqueue waiter and transition if needed
	b.stopWaiters[subject] = append(b.stopWaiters[subject], ch)
	if b.states[subject] != statePendingUnsub {
		b.states[subject] = statePendingUnsub
		select {
		case b.unsubQ <- subject:
		default:
			// queue full → fail fast
			ws := b.stopWaiters[subject]
			delete(b.stopWaiters, subject)
			b.states[subject] = stateError
			b.mu.Unlock()
			for _, w := range ws {
				w <- fmt.Errorf("unsubscribe queue full")
				close(w)
			}
	_ = b.conn.WriteJSON(msg)

	fmt.Println("Unsubscribed from stream:", subject)

	delete(b.activeStreams, subject)
			return ch
		}
	}
	b.mu.Unlock()
	return ch
}

func (b *FuturesWebsocket) GetActiveStreams() []string {
	b.mu.Lock()
	defer b.mu.Unlock()

	var streams []string
	for k := range b.activeStreams {
		streams = append(streams, k)
	}
	return streams
func (b *FuturesWebsocket) Fetch(_ string) (domain.Message, error) {
	return domain.Message{}, fmt.Errorf("fetch not supported")
}

func (b *FuturesWebsocket) IsStreamActive(subject string) bool {
	b.mu.Lock()
	defer b.mu.Unlock()

	_, ok := b.activeStreams[subject]
	return ok
}

func (b *FuturesWebsocket) Fetch(_ string) (domain.Message, error) {
	return domain.Message{}, fmt.Errorf("not supported: websocket provider does not implement fetch")
	return b.states[subject] == stateActive
}

func (b *FuturesWebsocket) IsValidSubject(subject string, isFetch bool) bool {
	if isFetch {
		return false
	}
	return len(subject) > 0
	return !isFetch && subject != ""
}

func (b *FuturesWebsocket) readLoop() {
	for {
		_, msgBytes, err := b.conn.ReadMessage()
/* internals */

func (b *FuturesWebsocket) run() {
	defer b.wg.Done()

	backoff := reconnectMin

	dial := func() (*websocket.Conn, error) {
		c, _, err := b.dial.Dial(wsURL, b.hdr)
		if err != nil {
			if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
				return
			return nil, err
		}
		fmt.Printf("read error: %v\n", err)
		continue
		return c, nil
	}

	for {
		select {
		case <-b.stopCh:
			return
		default:
		}

		c, err := dial()
		if err != nil {
			time.Sleep(backoff)
			backoff = minDur(backoff*2, reconnectMax)
			continue
		}
		backoff = reconnectMin

		b.writeMu.Lock()
		b.conn = c
		b.writeMu.Unlock()

		// Resubscribe desired subjects in one batched SUB.
		want := b.snapshotDesired(true) // only desired==true
		if len(want) > 0 {
			_ = b.sendSubscribe(want)
			b.mu.Lock()
			for _, s := range want {
				if b.states[s] != stateActive {
					b.states[s] = statePendingSub
				}
			}
			b.mu.Unlock()
		}

		err = b.readLoop(c)

		// tear down connection
		b.writeMu.Lock()
		if b.conn != nil {
			_ = b.conn.Close()
			b.conn = nil
		}
		b.writeMu.Unlock()

		select {
		case <-b.stopCh:
			return
		default:
			time.Sleep(backoff)
			backoff = minDur(backoff*2, reconnectMax)
		}
	}
}

func (b *FuturesWebsocket) batcher() {
	defer b.wg.Done()

	t := time.NewTicker(batchPeriod)
	defer t.Stop()

	var subs, unsubs []string

	flush := func() {
		if len(subs) > 0 {
			_ = b.sendSubscribe(subs)
			subs = subs[:0]
		}
		if len(unsubs) > 0 {
			_ = b.sendUnsubscribe(unsubs)
			unsubs = unsubs[:0]
		}
	}

	for {
		select {
		case <-b.stopCh:
			return
		case s := <-b.subQ:
			if s != "" {
				subs = append(subs, s)
			}
		case s := <-b.unsubQ:
			if s != "" {
				unsubs = append(unsubs, s)
			}
		case <-t.C:
			flush()
		}
	}
}
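
For context, the batcher coalesces queued subjects into single request frames of the shape this provider sends (see sendSubscribe/sendUnsubscribe below), one request ID per batch; the stream names and IDs here are illustrative:

	{"method":"SUBSCRIBE","params":["btcusdt@aggTrade","ethusdt@aggTrade"],"id":7}
	{"method":"UNSUBSCRIBE","params":["btcusdt@depth"],"id":8}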

func (b *FuturesWebsocket) readLoop(c *websocket.Conn) error {
	for {
		_, raw, err := c.ReadMessage()
		if err != nil {
			return err
		}

		fmt.Println("Received message:", string(raw))

		// Stream data or command ack
		if hasField(raw, `"stream"`) {
			var container struct {
				Stream string          `json:"stream"`
				Data   json.RawMessage `json:"data"`
			}
			if err := json.Unmarshal(msgBytes, &container); err != nil {
			if err := json.Unmarshal(raw, &container); err != nil || container.Stream == "" {
				continue
			}

			b.mu.Lock()
			ch, ok := b.activeStreams[container.Stream]
			dst, ok := b.sinks[container.Stream]
			st := b.states[container.Stream]
			b.mu.Unlock()
			if !ok {

			if !ok || st == stateInactive || st == statePendingUnsub {
				continue
			}

@@ -142,7 +414,6 @@ func (b *FuturesWebsocket) readLoop() {
			if err != nil {
				continue
			}

			msg := domain.Message{
				Identifier: id,
				Payload:    container.Data,
@@ -150,9 +421,223 @@ func (b *FuturesWebsocket) readLoop() {
			}

			select {
			case ch <- msg:
			case dst <- msg:
			default:
				fmt.Printf("channel for %s is full, dropping message\n", container.Stream)
				// drop on backpressure
			}
			continue
		}

		// Ack path
		var ack struct {
			Result json.RawMessage `json:"result"`
			ID     int64           `json:"id"`
		}
		if err := json.Unmarshal(raw, &ack); err != nil || ack.ID == 0 {
			continue
		}

		b.ackMu.Lock()
		batch, ok := b.pendingA[ack.ID]
		if ok {
			delete(b.pendingA, ack.ID)
		}
		b.ackMu.Unlock()
		if !ok {
			continue
		}

		ackErr := (len(ack.Result) > 0 && string(ack.Result) != "null")

		switch batch.method {
		case "SUBSCRIBE":
			b.mu.Lock()
			for _, s := range batch.subjects {
				if ackErr {
					b.states[s] = stateError
					// fail all start waiters
					ws := b.startWaiters[s]
					delete(b.startWaiters, s)
					b.mu.Unlock()
					for _, ch := range ws {
						ch <- fmt.Errorf("subscribe failed")
						close(ch)
					}
					b.mu.Lock()
					continue
				}
				// success
				b.states[s] = stateActive
				ws := b.startWaiters[s]
				delete(b.startWaiters, s)
				dst := b.sinks[s]
				b.mu.Unlock()

				for _, ch := range ws {
					ch <- nil
					close(ch)
				}
				_ = dst // messages will flow via readLoop
				b.mu.Lock()
			}
			b.mu.Unlock()

		case "UNSUBSCRIBE":
			b.mu.Lock()
			for _, s := range batch.subjects {
				if ackErr {
					b.states[s] = stateError
					ws := b.stopWaiters[s]
					delete(b.stopWaiters, s)
					b.mu.Unlock()
					for _, ch := range ws {
						ch <- fmt.Errorf("unsubscribe failed")
						close(ch)
					}
					b.mu.Lock()
					continue
				}
				// success
				b.states[s] = stateInactive
				delete(b.sinks, s) // stop delivering
				ws := b.stopWaiters[s]
				delete(b.stopWaiters, s)
				b.mu.Unlock()
				for _, ch := range ws {
					ch <- nil
					close(ch)
				}
				b.mu.Lock()
			}
			b.mu.Unlock()
		}
	}
}

func (b *FuturesWebsocket) nextID() int64 {
	return int64(atomic.AddUint64(&b.idSeq, 1))
}

func (b *FuturesWebsocket) sendSubscribe(subjects []string) error {
	if len(subjects) == 0 {
		return nil
	}
	id := b.nextID()
	req := map[string]any{
		"method": "SUBSCRIBE",
		"params": subjects,
		"id":     id,
	}
	if err := b.writeJSON(req); err != nil {
		// mark error and fail waiters
		b.mu.Lock()
		for _, s := range subjects {
			b.states[s] = stateError
			ws := b.startWaiters[s]
			delete(b.startWaiters, s)
			b.mu.Unlock()
			for _, ch := range ws {
				ch <- fmt.Errorf("subscribe send failed")
				close(ch)
			}
			b.mu.Lock()
		}
		b.mu.Unlock()
		return err
	}
	b.ackMu.Lock()
	b.pendingA[id] = ackBatch{method: "SUBSCRIBE", subjects: append([]string(nil), subjects...)}
	b.ackMu.Unlock()
	return nil
}

func (b *FuturesWebsocket) sendUnsubscribe(subjects []string) error {
	if len(subjects) == 0 {
		return nil
	}
	id := b.nextID()
	req := map[string]any{
		"method": "UNSUBSCRIBE",
		"params": subjects,
		"id":     id,
	}
	if err := b.writeJSON(req); err != nil {
		b.mu.Lock()
		for _, s := range subjects {
			b.states[s] = stateError
			ws := b.stopWaiters[s]
			delete(b.stopWaiters, s)
			b.mu.Unlock()
			for _, ch := range ws {
				ch <- fmt.Errorf("unsubscribe send failed")
				close(ch)
			}
			b.mu.Lock()
		}
		b.mu.Unlock()
		return err
	}
	b.ackMu.Lock()
	b.pendingA[id] = ackBatch{method: "UNSUBSCRIBE", subjects: append([]string(nil), subjects...)}
	b.ackMu.Unlock()
	return nil
}

func (b *FuturesWebsocket) writeJSON(v any) error {
	// token bucket
	select {
	case <-b.stopCh:
		return fmt.Errorf("stopped")
	case <-b.tokensCh:
	}

	b.writeMu.Lock()
	c := b.conn
	b.writeMu.Unlock()
	if c == nil {
		return fmt.Errorf("not connected")
	}

	_ = c.SetWriteDeadline(time.Now().Add(writeWait))
	return c.WriteJSON(v)
}

/* utilities */

func (b *FuturesWebsocket) snapshotDesired(onlyTrue bool) []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	var out []string
	for s, want := range b.desired {
		if !onlyTrue || want {
			out = append(out, s)
		}
	}
	return out
}

func minDur(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func hasField(raw []byte, needle string) bool {
	// cheap check; avoids another allocation if it's obviously an ACK
	return json.Valid(raw) && byteContains(raw, needle)
}

func byteContains(b []byte, sub string) bool {
	n := len(sub)
	if n == 0 || len(b) < n {
		return false
	}
	// naive search; sufficient for small frames
	for i := 0; i <= len(b)-n; i++ {
		if string(b[i:i+n]) == sub {
			return true
		}
	}
	return false
}
@@ -1,19 +1,16 @@
package provider

import (
	"gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"
)
import "gitlab.michelsen.id/phillmichelsen/tessera/services/data_service/internal/domain"

type Provider interface {
	Start() error
	Stop()

	RequestStream(subject string, channel chan domain.Message) error
	CancelStream(subject string)
	GetActiveStreams() []string
	IsStreamActive(subject string) bool
	StartStream(key string, destination chan<- domain.Message) <-chan error
	StopStream(key string) <-chan error

	Fetch(subject string) (domain.Message, error)
	Fetch(key string) (domain.Message, error)

	IsValidSubject(subject string, isFetch bool) bool
	IsStreamActive(key string) bool
	IsValidSubject(key string, isFetch bool) bool
}
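
A minimal sketch (not from the commit) of a stub satisfying the new interface; nullProvider is a hypothetical name and the immediate-ack behavior is illustrative, whereas a real provider acks only after the upstream subscribe succeeds:

	type nullProvider struct{}

	func (nullProvider) Start() error { return nil }
	func (nullProvider) Stop()        {}

	func (nullProvider) StartStream(key string, dst chan<- domain.Message) <-chan error {
		ch := make(chan error, 1)
		ch <- nil // ack immediately for the stub
		close(ch)
		return ch
	}

	func (nullProvider) StopStream(key string) <-chan error {
		ch := make(chan error, 1)
		ch <- nil
		close(ch)
		return ch
	}

	func (nullProvider) Fetch(key string) (domain.Message, error) {
		return domain.Message{}, fmt.Errorf("fetch not supported")
	}

	func (nullProvider) IsStreamActive(key string) bool { return false }

	func (nullProvider) IsValidSubject(key string, isFetch bool) bool {
		return !isFetch && key != ""
	}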

@@ -13,9 +13,9 @@ type Router struct {
	mu sync.RWMutex
}

func NewRouter() *Router {
func NewRouter(buffer int) *Router {
	return &Router{
		incoming: make(chan domain.Message, 512), // Buffered channel for incoming messages
		incoming: make(chan domain.Message, buffer), // Buffered channel for incoming messages
		routes:   make(map[domain.Identifier][]chan<- domain.Message),
	}
}
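
Call-site sketch mirroring the main.go change above; 2048 is the buffer this commit picks there:

	r := router.NewRouter(2048) // incoming channel capacity now chosen by the caller
	go r.Run()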

@@ -33,7 +33,7 @@ func (r *Router) Run() {
		select {
		case ch <- msg:
		default:
			fmt.Println("Dropped message, buffer full!!!") // TODO: Handle full buffer case more gracefully
			fmt.Println("Router could not push message to a full buffer...") // TODO: Handle full buffer case more gracefully
		}
	}
	r.mu.RUnlock()

@@ -22,7 +22,7 @@ func NewGRPCControlServer(m *manager.Manager) *GRPCControlServer {
}

// StartStream creates a new session. It does NOT attach client channels.
// Your streaming RPC should later call GetChannels(sessionID, opts).
// Your streaming RPC should later call AttachClient(sessionID, opts).
func (s *GRPCControlServer) StartStream(_ context.Context, req *pb.StartStreamRequest) (*pb.StartStreamResponse, error) {
	sessionID, err := s.manager.NewSession(time.Duration(1) * time.Minute) // timeout set to 1 minute
	if err != nil {

@@ -51,7 +51,7 @@ func (s *GRPCControlServer) ConfigureStream(_ context.Context, req *pb.Configure
		ids = append(ids, id)
	}

	if err := s.manager.SetSubscriptions(streamID, ids); err != nil {
	if err := s.manager.ConfigureSession(streamID, ids); err != nil {
		// Map common manager errors to gRPC codes.
		switch err {
		case manager.ErrSessionNotFound:

@@ -28,15 +28,7 @@ func (s *GRPCStreamingServer) ConnectStream(req *pb.ConnectStreamRequest, stream
		return fmt.Errorf("invalid UUID: %w", err)
	}

	// Defaults; tune or map from req if your proto carries options.
	opts := manager.ChannelOpts{
		InBufSize:    256,
		OutBufSize:   1024,
		DropOutbound: true, // do not let slow clients stall producers
		DropInbound:  true, // irrelevant here (we don't send inbound), safe default
	}

	_, out, err := s.manager.GetChannels(sessionID, opts)
	_, out, err := s.manager.AttachClient(sessionID, 256, 1024)
	if err != nil {
		return fmt.Errorf("attach channels: %w", err)
	}