diff --git a/cmd/fmrtx/main.go b/cmd/fmrtx/main.go index 573070b..7c2a37b 100644 --- a/cmd/fmrtx/main.go +++ b/cmd/fmrtx/main.go @@ -243,18 +243,19 @@ func (b *txBridge) StopTX() error { return b.engine.Stop(context.Background()) func (b *txBridge) TXStats() map[string]any { s := b.engine.Stats() return map[string]any{ - "state": s.State, - "chunksProduced": s.ChunksProduced, - "totalSamples": s.TotalSamples, - "underruns": s.Underruns, - "lateBuffers": s.LateBuffers, - "lastError": s.LastError, - "uptimeSeconds": s.UptimeSeconds, - "maxCycleMs": s.MaxCycleMs, - "maxGenerateMs": s.MaxGenerateMs, - "maxUpsampleMs": s.MaxUpsampleMs, - "maxWriteMs": s.MaxWriteMs, - "queue": s.Queue, + "state": s.State, + "chunksProduced": s.ChunksProduced, + "totalSamples": s.TotalSamples, + "underruns": s.Underruns, + "lateBuffers": s.LateBuffers, + "lastError": s.LastError, + "uptimeSeconds": s.UptimeSeconds, + "maxCycleMs": s.MaxCycleMs, + "maxGenerateMs": s.MaxGenerateMs, + "maxUpsampleMs": s.MaxUpsampleMs, + "maxWriteMs": s.MaxWriteMs, + "queue": s.Queue, + "runtimeIndicator": s.RuntimeIndicator, } } func (b *txBridge) UpdateConfig(lp ctrlpkg.LivePatch) error { diff --git a/cmd/fmrtx/main_test.go b/cmd/fmrtx/main_test.go index b2bd2dd..43bc67f 100644 --- a/cmd/fmrtx/main_test.go +++ b/cmd/fmrtx/main_test.go @@ -33,4 +33,16 @@ func TestTxBridgeExportsQueueStats(t *testing.T) { if queue.Health != output.QueueHealthCritical { t.Fatalf("queue health should be critical with empty queue, got %s", queue.Health) } + + indicatorRaw, ok := stats["runtimeIndicator"] + if !ok { + t.Fatalf("expected runtimeIndicator in tx stats") + } + indicator, ok := indicatorRaw.(apppkg.RuntimeIndicator) + if !ok { + t.Fatalf("runtimeIndicator type mismatch: %T", indicatorRaw) + } + if indicator != apppkg.RuntimeIndicatorQueueCritical { + t.Fatalf("runtime indicator should be queueCritical, got %s", indicator) + } } diff --git a/docs/pro-runtime-hardening-workboard.md 
b/docs/pro-runtime-hardening-workboard.md index f1eb60e..f1618a3 100644 --- a/docs/pro-runtime-hardening-workboard.md +++ b/docs/pro-runtime-hardening-workboard.md @@ -248,12 +248,14 @@ Generator/Upsampler und Hardwarewriter werden als getrennte Stufen mit kleinem, |---|---|---| | 2026-04-05 | FrameQueue mit Engine-Integration | Queue lebt nach dem Upsampler auf DeviceFrame-Ebene, Kapazität via `runtime.frameQueueCapacity`, `EngineStats` zeigt `QueueStats`, Tests decken Timeouts und Counters ab. | | 2026-04-05 | Queue-Health-Indikator | `QueueStats.Health` gibt `critical`/`low`/`normal` zurück und `txBridge` leitet `EngineStats.Queue` ins `/runtime`-JSON. | +| 2026-04-05 | Runtime-Indikator | `EngineStats.RuntimeIndicator` kombiniert `queue.health` + `lateBuffers`, `/runtime` zeigt `engine.runtimeIndicator`. | ## WS-01 Verifikation | Datum | Fokus | Ergebnis | |---|---|---| | 2026-04-05 | FrameQueue + Engine integration | ✅ `go test ./...` (im `internal`-Modul incl. `frame_queue_test.go`) | | 2026-04-05 | Queue-Health-Indikator | go test ./... deckt `TestFrameQueueHealthIndicator` und `queue.health` ab. | +| 2026-04-05 | Runtime-Indikator | ✅ `go test ./...` deckt `runtimeIndicator` sowie `/runtime`-Exposition von `engine.runtimeIndicator`. | | 2026-04-05 | Runtime API queue health | ✅ `/runtime` liefert jetzt `engine.queue.health` dank `txBridge.TXStats`. 
| --- diff --git a/internal/app/engine.go b/internal/app/engine.go index f1920d1..4395b46 100644 --- a/internal/app/engine.go +++ b/internal/app/engine.go @@ -56,20 +56,29 @@ func durationMs(ns uint64) float64 { } type EngineStats struct { - State string `json:"state"` - ChunksProduced uint64 `json:"chunksProduced"` - TotalSamples uint64 `json:"totalSamples"` - Underruns uint64 `json:"underruns"` - LateBuffers uint64 `json:"lateBuffers,omitempty"` - LastError string `json:"lastError,omitempty"` - UptimeSeconds float64 `json:"uptimeSeconds"` - MaxCycleMs float64 `json:"maxCycleMs,omitempty"` - MaxGenerateMs float64 `json:"maxGenerateMs,omitempty"` - MaxUpsampleMs float64 `json:"maxUpsampleMs,omitempty"` - MaxWriteMs float64 `json:"maxWriteMs,omitempty"` - Queue output.QueueStats `json:"queue"` + State string `json:"state"` + ChunksProduced uint64 `json:"chunksProduced"` + TotalSamples uint64 `json:"totalSamples"` + Underruns uint64 `json:"underruns"` + LateBuffers uint64 `json:"lateBuffers,omitempty"` + LastError string `json:"lastError,omitempty"` + UptimeSeconds float64 `json:"uptimeSeconds"` + MaxCycleMs float64 `json:"maxCycleMs,omitempty"` + MaxGenerateMs float64 `json:"maxGenerateMs,omitempty"` + MaxUpsampleMs float64 `json:"maxUpsampleMs,omitempty"` + MaxWriteMs float64 `json:"maxWriteMs,omitempty"` + Queue output.QueueStats `json:"queue"` + RuntimeIndicator RuntimeIndicator `json:"runtimeIndicator"` } +type RuntimeIndicator string + +const ( + RuntimeIndicatorNormal RuntimeIndicator = "normal" + RuntimeIndicatorDegraded RuntimeIndicator = "degraded" + RuntimeIndicatorQueueCritical RuntimeIndicator = "queueCritical" +) + // Engine is the continuous TX loop. It generates composite IQ in chunks, // resamples to device rate, and pushes to hardware in a tight loop. 
// The hardware buffer_push call is blocking — it returns when the hardware @@ -339,19 +348,33 @@ func (e *Engine) Stats() EngineStats { } errVal, _ := e.lastError.Load().(string) + queue := e.frameQueue.Stats() + lateBuffers := e.lateBuffers.Load() return EngineStats{ - State: state.String(), - ChunksProduced: e.chunksProduced.Load(), - TotalSamples: e.totalSamples.Load(), - Underruns: e.underruns.Load(), - LateBuffers: e.lateBuffers.Load(), - LastError: errVal, - UptimeSeconds: uptime, - MaxCycleMs: durationMs(e.maxCycleNs.Load()), - MaxGenerateMs: durationMs(e.maxGenerateNs.Load()), - MaxUpsampleMs: durationMs(e.maxUpsampleNs.Load()), - MaxWriteMs: durationMs(e.maxWriteNs.Load()), - Queue: e.frameQueue.Stats(), + State: state.String(), + ChunksProduced: e.chunksProduced.Load(), + TotalSamples: e.totalSamples.Load(), + Underruns: e.underruns.Load(), + LateBuffers: lateBuffers, + LastError: errVal, + UptimeSeconds: uptime, + MaxCycleMs: durationMs(e.maxCycleNs.Load()), + MaxGenerateMs: durationMs(e.maxGenerateNs.Load()), + MaxUpsampleMs: durationMs(e.maxUpsampleNs.Load()), + MaxWriteMs: durationMs(e.maxWriteNs.Load()), + Queue: queue, + RuntimeIndicator: runtimeIndicator(queue.Health, lateBuffers), + } +} + +func runtimeIndicator(queueHealth output.QueueHealth, lateBuffers uint64) RuntimeIndicator { + switch { + case queueHealth == output.QueueHealthCritical: + return RuntimeIndicatorQueueCritical + case queueHealth == output.QueueHealthLow || lateBuffers > 0: + return RuntimeIndicatorDegraded + default: + return RuntimeIndicatorNormal } } diff --git a/internal/app/runtime_indicator_test.go b/internal/app/runtime_indicator_test.go new file mode 100644 index 0000000..b90aca2 --- /dev/null +++ b/internal/app/runtime_indicator_test.go @@ -0,0 +1,57 @@ +package app + +import ( + "testing" + + "github.com/jan/fm-rds-tx/internal/output" +) + +func TestRuntimeIndicator(t *testing.T) { + cases := []struct { + name string + queueHealth output.QueueHealth + lateBuffers 
uint64 + want RuntimeIndicator + }{ + { + name: "normal", + queueHealth: output.QueueHealthNormal, + lateBuffers: 0, + want: RuntimeIndicatorNormal, + }, + { + name: "degradedLateBuffers", + queueHealth: output.QueueHealthNormal, + lateBuffers: 1, + want: RuntimeIndicatorDegraded, + }, + { + name: "degradedQueueLow", + queueHealth: output.QueueHealthLow, + lateBuffers: 0, + want: RuntimeIndicatorDegraded, + }, + { + name: "queueCritical", + queueHealth: output.QueueHealthCritical, + lateBuffers: 0, + want: RuntimeIndicatorQueueCritical, + }, + { + name: "criticalLateBuffers", + queueHealth: output.QueueHealthCritical, + lateBuffers: 3, + want: RuntimeIndicatorQueueCritical, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + if got := runtimeIndicator(tc.queueHealth, tc.lateBuffers); got != tc.want { + t.Fatalf("runtime indicator mismatch: queue=%s late=%d want=%s got=%s", + tc.queueHealth, tc.lateBuffers, tc.want, got) + } + }) + } +}