Added a performant in-process message broker to the data package's routing subpackage
This commit is contained in:
@@ -3,171 +3,312 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"gitlab.michelsen.id/phillmichelsen/tessera/pkg/data"
|
||||
"gitlab.michelsen.id/phillmichelsen/tessera/pkg/data/routing"
|
||||
)
|
||||
|
||||
type SeqPayload struct {
|
||||
Seq uint64
|
||||
/*
|
||||
Realistic-ish market-data style test.
|
||||
|
||||
Model:
|
||||
- 1 publisher per topic (instrument / feed partition)
|
||||
- Each message carries a strictly increasing sequence number (per topic)
|
||||
- Subscribers validate in-order, gap-free delivery
|
||||
- Publishers send with bursty pacing to approximate L3-ish behavior:
|
||||
send BurstSize messages back-to-back, then sleep to maintain AvgRate.
|
||||
|
||||
Defaults are intentionally moderate. Increase topics/rates to stress.
|
||||
*/
|
||||
|
||||
const (
|
||||
NumTopics = 8 // topics/instruments/partitions
|
||||
SubsPerTopic = 6 // fan-out per topic
|
||||
RingCapacity = 1 << 14
|
||||
TestDuration = 60 * time.Second
|
||||
AvgRatePerTopic = 500_000 // msgs/sec per topic (average)
|
||||
BurstSize = 512 // burst messages then sleep to preserve avg
|
||||
|
||||
// If true, subscribers spin-poll (TryReceive). If false, blocking Receive.
|
||||
UseTryReceive = false
|
||||
)
|
||||
|
||||
type topicStats struct {
|
||||
published atomic.Uint64
|
||||
}
|
||||
|
||||
type streamStats struct {
|
||||
sent uint64
|
||||
observed uint64
|
||||
missed uint64
|
||||
lastSeen uint64
|
||||
lastReport time.Time
|
||||
type subStats struct {
|
||||
received atomic.Uint64
|
||||
errors atomic.Uint64
|
||||
}
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
fmt.Printf("Market-Data Routing Test\n")
|
||||
fmt.Printf("Topics: %d | Subs/Topic: %d | Duration: %v\n", NumTopics, SubsPerTopic, TestDuration)
|
||||
fmt.Printf("AvgRate/Topic: %d msg/s | BurstSize: %d | Mode: %s\n\n",
|
||||
AvgRatePerTopic, BurstSize, modeName())
|
||||
|
||||
// ---- Knobs ----
|
||||
N := 10
|
||||
duration := 5 * time.Second
|
||||
totalTargetPerSec := 5_000 // total across all streams
|
||||
// ----------------
|
||||
broker := routing.NewBroker()
|
||||
|
||||
rt := routing.NewInprocRouter()
|
||||
topics := make([]string, NumTopics)
|
||||
for i := 0; i < NumTopics; i++ {
|
||||
topics[i] = fmt.Sprintf("FUT_L3_%02d", i)
|
||||
}
|
||||
|
||||
senders := make([]data.Sender, N)
|
||||
receivers := make([]data.Receiver, N)
|
||||
// Create publishers first to size the rings.
|
||||
pubs := make([]routing.Publisher, NumTopics)
|
||||
for i := 0; i < NumTopics; i++ {
|
||||
pubs[i] = broker.RegisterPublisher(topics[i], RingCapacity)
|
||||
}
|
||||
|
||||
for i := range N {
|
||||
st, err := rt.OpenStream(data.StreamID(uuid.New()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
// Per-topic published counters (ground truth).
|
||||
tStats := make([]*topicStats, NumTopics)
|
||||
for i := range tStats {
|
||||
tStats[i] = &topicStats{}
|
||||
}
|
||||
|
||||
// Subscribers: attach evenly, validate ordering.
|
||||
var subsWG sync.WaitGroup
|
||||
sStats := make([][]*subStats, NumTopics) // [topic][sub]
|
||||
for ti := 0; ti < NumTopics; ti++ {
|
||||
sStats[ti] = make([]*subStats, SubsPerTopic)
|
||||
for si := 0; si < SubsPerTopic; si++ {
|
||||
sStats[ti][si] = &subStats{}
|
||||
}
|
||||
senders[i] = st.Sender()
|
||||
receivers[i] = st.Receiver()
|
||||
}
|
||||
|
||||
perStreamTarget := totalTargetPerSec / N
|
||||
if perStreamTarget == 0 {
|
||||
perStreamTarget = 1
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), TestDuration)
|
||||
defer cancel()
|
||||
|
||||
fmt.Printf("N=%d duration=%s totalTarget=%d/s perStreamTarget=%d/s\n",
|
||||
N, duration, totalTargetPerSec, perStreamTarget)
|
||||
start := time.Now()
|
||||
|
||||
stopAt := time.Now().Add(duration)
|
||||
for ti := 0; ti < NumTopics; ti++ {
|
||||
topic := topics[ti]
|
||||
for si := 0; si < SubsPerTopic; si++ {
|
||||
sub := broker.RegisterSubscriber(topic)
|
||||
stats := sStats[ti][si]
|
||||
subsWG.Add(1)
|
||||
|
||||
stats := make([]streamStats, N)
|
||||
go func(topicIndex int, subIndex int, subscriber routing.Subscriber, st *subStats) {
|
||||
defer subsWG.Done()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(N + 1)
|
||||
var expected uint64 = 0
|
||||
|
||||
// Publisher: per-stream sender sequence in envelope payload.
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
tick := time.NewTicker(1 * time.Millisecond)
|
||||
defer tick.Stop()
|
||||
|
||||
perTick := perStreamTarget / 1000
|
||||
rem := perStreamTarget % 1000
|
||||
remAcc := make([]int, N)
|
||||
|
||||
seq := make([]uint64, N)
|
||||
|
||||
for time.Now().Before(stopAt) {
|
||||
<-tick.C
|
||||
|
||||
for i := range N {
|
||||
n := int(perTick)
|
||||
remAcc[i] += rem
|
||||
if remAcc[i] >= 1000 {
|
||||
n++
|
||||
remAcc[i] -= 1000
|
||||
}
|
||||
|
||||
for j := 0; j < n; j++ {
|
||||
seq[i]++
|
||||
|
||||
env := data.Envelope{
|
||||
Payload: SeqPayload{Seq: seq[i]},
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
_ = senders[i].Send(ctx, env)
|
||||
stats[i].sent++
|
||||
|
||||
var (
|
||||
env data.Envelope
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
|
||||
if UseTryReceive {
|
||||
env, ok, err = subscriber.TryReceive()
|
||||
if err != nil {
|
||||
st.errors.Add(1)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
runtime.Gosched()
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
env, err = subscriber.Receive(ctx)
|
||||
if err != nil {
|
||||
// Context cancellation is normal at end of test.
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
st.errors.Add(1)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
seq, parseOK := parseSeq(env)
|
||||
if !parseOK {
|
||||
st.errors.Add(1)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if seq != expected {
|
||||
// Out-of-order or gap detected.
|
||||
st.errors.Add(1)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
expected++
|
||||
st.received.Add(1)
|
||||
}
|
||||
}
|
||||
}(ti, si, sub, stats)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Consumers: detect missed sender sequence numbers.
|
||||
for i := range N {
|
||||
idx := i
|
||||
rx := receivers[i]
|
||||
// Publishers: bursty pacing to approximate “average rate with bursts”.
|
||||
var pubsWG sync.WaitGroup
|
||||
for ti := 0; ti < NumTopics; ti++ {
|
||||
pub := pubs[ti]
|
||||
stats := tStats[ti]
|
||||
pubsWG.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
go func(topicIndex int, p routing.Publisher, st *topicStats) {
|
||||
defer pubsWG.Done()
|
||||
|
||||
for time.Now().Before(stopAt) {
|
||||
env, ok, err := rx.TryReceive()
|
||||
if err != nil {
|
||||
// Maintain AvgRatePerTopic as an average by sleeping after bursts.
|
||||
// burstDuration = BurstSize / AvgRatePerTopic seconds
|
||||
burstNs := int64(0)
|
||||
if AvgRatePerTopic > 0 {
|
||||
burstNs = int64(time.Second) * int64(BurstSize) / int64(AvgRatePerTopic)
|
||||
}
|
||||
if burstNs <= 0 {
|
||||
burstNs = 1
|
||||
}
|
||||
|
||||
var seq uint64 = 0
|
||||
|
||||
// Optional small jitter to avoid perfect lockstep across topics.
|
||||
jitter := time.Duration(rand.Intn(200)) * time.Microsecond
|
||||
|
||||
nextBurstAt := time.Now().Add(jitter)
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
|
||||
now := time.Now()
|
||||
if now.Before(nextBurstAt) {
|
||||
time.Sleep(nextBurstAt.Sub(now))
|
||||
}
|
||||
|
||||
p, ok := env.Payload.(SeqPayload)
|
||||
if !ok {
|
||||
// If your Payload is pointer/interface-heavy, adjust accordingly.
|
||||
continue
|
||||
}
|
||||
// Send BurstSize messages back-to-back.
|
||||
sendTime := time.Now()
|
||||
for i := 0; i < BurstSize; i++ {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stats[idx].observed++
|
||||
env := data.Envelope{
|
||||
SendTime: sendTime,
|
||||
Descriptor: data.Descriptor{Key: "SEQ"}, // keep your existing descriptor usage
|
||||
Payload: formatSeq(seq),
|
||||
// Any other fields you use can be set here.
|
||||
}
|
||||
|
||||
if stats[idx].lastSeen == 0 {
|
||||
stats[idx].lastSeen = p.Seq
|
||||
continue
|
||||
p.Publish(env)
|
||||
seq++
|
||||
}
|
||||
st.published.Add(uint64(BurstSize))
|
||||
|
||||
if p.Seq > stats[idx].lastSeen+1 {
|
||||
stats[idx].missed += (p.Seq - stats[idx].lastSeen - 1)
|
||||
}
|
||||
stats[idx].lastSeen = p.Seq
|
||||
// Schedule next burst to maintain average rate.
|
||||
nextBurstAt = nextBurstAt.Add(time.Duration(burstNs))
|
||||
}
|
||||
}()
|
||||
}(ti, pub, stats)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
// Wait for timeout, then stop and drain.
|
||||
<-ctx.Done()
|
||||
|
||||
var totalSent, totalObs, totalMiss uint64
|
||||
minDrop, maxDrop := 100.0, 0.0
|
||||
// Ensure publishers exit.
|
||||
pubsWG.Wait()
|
||||
|
||||
for i := range N {
|
||||
totalSent += stats[i].sent
|
||||
totalObs += stats[i].observed
|
||||
totalMiss += stats[i].missed
|
||||
// Subscribers may still be blocked; cancel again and wait.
|
||||
cancel()
|
||||
subsWG.Wait()
|
||||
|
||||
den := stats[i].observed + stats[i].missed
|
||||
dropPct := 0.0
|
||||
if den > 0 {
|
||||
dropPct = 100.0 * float64(stats[i].missed) / float64(den)
|
||||
totalTime := time.Since(start)
|
||||
|
||||
// Report.
|
||||
var totalPublished uint64
|
||||
var totalReceived uint64
|
||||
var totalErrors uint64
|
||||
|
||||
for ti := 0; ti < NumTopics; ti++ {
|
||||
pub := tStats[ti].published.Load()
|
||||
totalPublished += pub
|
||||
|
||||
var topicRecv uint64
|
||||
var topicErr uint64
|
||||
for si := 0; si < SubsPerTopic; si++ {
|
||||
topicRecv += sStats[ti][si].received.Load()
|
||||
topicErr += sStats[ti][si].errors.Load()
|
||||
}
|
||||
if dropPct < minDrop {
|
||||
minDrop = dropPct
|
||||
}
|
||||
if dropPct > maxDrop {
|
||||
maxDrop = dropPct
|
||||
totalReceived += topicRecv
|
||||
totalErrors += topicErr
|
||||
|
||||
// Each subscriber should have received ~published for that topic.
|
||||
avgPerSub := uint64(0)
|
||||
if SubsPerTopic > 0 {
|
||||
avgPerSub = topicRecv / uint64(SubsPerTopic)
|
||||
}
|
||||
|
||||
fmt.Printf("stream[%02d] sent=%6d observed=%6d missed=%6d lastSeen=%6d drop=%5.2f%%\n",
|
||||
i, stats[i].sent, stats[i].observed, stats[i].missed, stats[i].lastSeen, dropPct)
|
||||
fmt.Printf("Topic %s: published=%d | avg_received_per_sub=%d | sub_errors=%d\n",
|
||||
topics[ti], pub, avgPerSub, topicErr)
|
||||
}
|
||||
|
||||
totalDen := totalObs + totalMiss
|
||||
totalDrop := 0.0
|
||||
if totalDen > 0 {
|
||||
totalDrop = 100.0 * float64(totalMiss) / float64(totalDen)
|
||||
}
|
||||
pubRate := float64(totalPublished) / totalTime.Seconds()
|
||||
deliveriesRate := float64(totalReceived) / totalTime.Seconds()
|
||||
|
||||
fmt.Printf("\nTOTAL sent=%d observed=%d missed=%d drop=%.2f%% (min=%.2f%% max=%.2f%%)\n",
|
||||
totalSent, totalObs, totalMiss, totalDrop, minDrop, maxDrop)
|
||||
fmt.Printf("\nTotal Time: %v\n", totalTime)
|
||||
fmt.Printf("Total Published: %d msgs\n", totalPublished)
|
||||
fmt.Printf("Total Deliveries: %d (published * subs/topic, minus cancellations)\n", totalReceived)
|
||||
fmt.Printf("Publish Rate: %.2f msgs/sec (aggregate)\n", pubRate)
|
||||
fmt.Printf("Delivery Rate: %.2f deliveries/sec (aggregate)\n", deliveriesRate)
|
||||
fmt.Printf("Validation Errors: %d\n", totalErrors)
|
||||
|
||||
if totalErrors == 0 {
|
||||
fmt.Printf("Result: PASS (in-order, gap-free until cancellation)\n")
|
||||
} else {
|
||||
fmt.Printf("Result: FAIL (see errors above; test cancels on first detected issue)\n")
|
||||
}
|
||||
}
|
||||
|
||||
func modeName() string {
|
||||
if UseTryReceive {
|
||||
return "TryReceive (spin)"
|
||||
}
|
||||
return "Receive (blocking)"
|
||||
}
|
||||
|
||||
// formatSeq encodes the per-topic sequence into a string payload.
|
||||
// This compiles whether Envelope.Payload is string or interface{} accepting string.
|
||||
func formatSeq(seq uint64) string {
|
||||
// Keep it cheap to parse: decimal only.
|
||||
return strconv.FormatUint(seq, 10)
|
||||
}
|
||||
|
||||
func parseSeq(env data.Envelope) (uint64, bool) {
|
||||
// If you later switch Payload to a structured type, change this accordingly.
|
||||
s, ok := env.Payload.(string)
|
||||
if !ok {
|
||||
// If Payload is defined as string (not interface{}), remove this type assert and just use env.Payload.
|
||||
// This branch is for interface{} payloads where non-string could appear.
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Fast path: no extra fields.
|
||||
// If you later include pubID:seq, you can parse with strings.Cut.
|
||||
if strings.IndexByte(s, ':') >= 0 {
|
||||
_, right, ok := strings.Cut(s, ":")
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
s = right
|
||||
}
|
||||
|
||||
v, err := strconv.ParseUint(s, 10, 64)
|
||||
return v, err == nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user