Bladeren bron

Add runtime indicator based on queue signals

tags/v0.9.0
Jan Svabenik 1 maand geleden
bovenliggende
commit
58364659e9
5 gewijzigde bestanden met toevoegingen van 131 en 36 verwijderingen
  1. +13
    -12
      cmd/fmrtx/main.go
  2. +12
    -0
      cmd/fmrtx/main_test.go
  3. +2
    -0
      docs/pro-runtime-hardening-workboard.md
  4. +47
    -24
      internal/app/engine.go
  5. +57
    -0
      internal/app/runtime_indicator_test.go

+ 13
- 12
cmd/fmrtx/main.go Bestand weergeven

@@ -243,18 +243,19 @@ func (b *txBridge) StopTX() error { return b.engine.Stop(context.Background())
// TXStats exposes the current engine statistics as a flat map for the
// /runtime JSON endpoint. Keys mirror the EngineStats JSON field names.
//
// NOTE(review): the diff-rendered original listed every key twice (pre- and
// post-image of the commit); duplicate constant keys in a Go map literal do
// not compile, so only the post-image key set is kept here.
func (b *txBridge) TXStats() map[string]any {
	s := b.engine.Stats()
	return map[string]any{
		"state":            s.State,
		"chunksProduced":   s.ChunksProduced,
		"totalSamples":     s.TotalSamples,
		"underruns":        s.Underruns,
		"lateBuffers":      s.LateBuffers,
		"lastError":        s.LastError,
		"uptimeSeconds":    s.UptimeSeconds,
		"maxCycleMs":       s.MaxCycleMs,
		"maxGenerateMs":    s.MaxGenerateMs,
		"maxUpsampleMs":    s.MaxUpsampleMs,
		"maxWriteMs":       s.MaxWriteMs,
		"queue":            s.Queue,
		"runtimeIndicator": s.RuntimeIndicator,
	}
}
func (b *txBridge) UpdateConfig(lp ctrlpkg.LivePatch) error {


+ 12
- 0
cmd/fmrtx/main_test.go Bestand weergeven

@@ -33,4 +33,16 @@ func TestTxBridgeExportsQueueStats(t *testing.T) {
if queue.Health != output.QueueHealthCritical {
t.Fatalf("queue health should be critical with empty queue, got %s", queue.Health)
}

indicatorRaw, ok := stats["runtimeIndicator"]
if !ok {
t.Fatalf("expected runtimeIndicator in tx stats")
}
indicator, ok := indicatorRaw.(apppkg.RuntimeIndicator)
if !ok {
t.Fatalf("runtimeIndicator type mismatch: %T", indicatorRaw)
}
if indicator != apppkg.RuntimeIndicatorQueueCritical {
t.Fatalf("runtime indicator should be queueCritical, got %s", indicator)
}
}

+ 2
- 0
docs/pro-runtime-hardening-workboard.md Bestand weergeven

@@ -248,12 +248,14 @@ Generator/Upsampler und Hardwarewriter werden als getrennte Stufen mit kleinem,
|---|---|---|
| 2026-04-05 | FrameQueue mit Engine-Integration | Queue lebt nach dem Upsampler auf DeviceFrame-Ebene, Kapazität via `runtime.frameQueueCapacity`, `EngineStats` zeigt `QueueStats`, Tests decken Timeouts und Counters ab. |
| 2026-04-05 | Queue-Health-Indikator | `QueueStats.Health` gibt `critical`/`low`/`normal` zurück und `txBridge` leitet `EngineStats.Queue` ins `/runtime`-JSON. |
| 2026-04-05 | Runtime-Indikator | `EngineStats.RuntimeIndicator` kombiniert `queue.health` + `lateBuffers`, `/runtime` zeigt `engine.runtimeIndicator`. |

## WS-01 Verifikation
| Datum | Fokus | Ergebnis |
|---|---|---|
| 2026-04-05 | FrameQueue + Engine integration | ✅ `go test ./...` (im `internal`-Modul incl. `frame_queue_test.go`) |
| 2026-04-05 | Queue-Health-Indikator | ✅ `go test ./...` deckt `TestFrameQueueHealthIndicator` und `queue.health` ab. |
| 2026-04-05 | Runtime-Indikator | ✅ `go test ./...` deckt `runtimeIndicator` sowie `/runtime`-Exposition von `engine.runtimeIndicator`. |
| 2026-04-05 | Runtime API queue health | ✅ `/runtime` liefert jetzt `engine.queue.health` dank `txBridge.TXStats`. |

---


+ 47
- 24
internal/app/engine.go Bestand weergeven

@@ -56,20 +56,29 @@ func durationMs(ns uint64) float64 {
}

// EngineStats is a point-in-time snapshot of the TX engine's counters,
// per-stage timing maxima, queue state and derived runtime indicator,
// serialized into the /runtime JSON payload.
//
// NOTE(review): the diff-rendered original listed every field twice (pre- and
// post-image of the commit); duplicate struct fields do not compile, so only
// the post-image field set is kept here.
type EngineStats struct {
	State            string            `json:"state"`
	ChunksProduced   uint64            `json:"chunksProduced"`
	TotalSamples     uint64            `json:"totalSamples"`
	Underruns        uint64            `json:"underruns"`
	LateBuffers      uint64            `json:"lateBuffers,omitempty"`
	LastError        string            `json:"lastError,omitempty"`
	UptimeSeconds    float64           `json:"uptimeSeconds"`
	MaxCycleMs       float64           `json:"maxCycleMs,omitempty"`
	MaxGenerateMs    float64           `json:"maxGenerateMs,omitempty"`
	MaxUpsampleMs    float64           `json:"maxUpsampleMs,omitempty"`
	MaxWriteMs       float64           `json:"maxWriteMs,omitempty"`
	Queue            output.QueueStats `json:"queue"`
	RuntimeIndicator RuntimeIndicator  `json:"runtimeIndicator"`
}

// RuntimeIndicator summarizes the engine's overall runtime health for the
// /runtime endpoint. It is derived from the frame-queue health combined with
// the late-buffer counter (see runtimeIndicator).
type RuntimeIndicator string

const (
// RuntimeIndicatorNormal: queue health is normal and no late buffers occurred.
RuntimeIndicatorNormal RuntimeIndicator = "normal"
// RuntimeIndicatorDegraded: queue health is low, or at least one late buffer occurred.
RuntimeIndicatorDegraded RuntimeIndicator = "degraded"
// RuntimeIndicatorQueueCritical: queue health is critical; overrides degraded.
RuntimeIndicatorQueueCritical RuntimeIndicator = "queueCritical"
)

// Engine is the continuous TX loop. It generates composite IQ in chunks,
// resamples to device rate, and pushes to hardware in a tight loop.
// The hardware buffer_push call is blocking — it returns when the hardware
@@ -339,19 +348,33 @@ func (e *Engine) Stats() EngineStats {
}
errVal, _ := e.lastError.Load().(string)

queue := e.frameQueue.Stats()
lateBuffers := e.lateBuffers.Load()
return EngineStats{
State: state.String(),
ChunksProduced: e.chunksProduced.Load(),
TotalSamples: e.totalSamples.Load(),
Underruns: e.underruns.Load(),
LateBuffers: e.lateBuffers.Load(),
LastError: errVal,
UptimeSeconds: uptime,
MaxCycleMs: durationMs(e.maxCycleNs.Load()),
MaxGenerateMs: durationMs(e.maxGenerateNs.Load()),
MaxUpsampleMs: durationMs(e.maxUpsampleNs.Load()),
MaxWriteMs: durationMs(e.maxWriteNs.Load()),
Queue: e.frameQueue.Stats(),
State: state.String(),
ChunksProduced: e.chunksProduced.Load(),
TotalSamples: e.totalSamples.Load(),
Underruns: e.underruns.Load(),
LateBuffers: lateBuffers,
LastError: errVal,
UptimeSeconds: uptime,
MaxCycleMs: durationMs(e.maxCycleNs.Load()),
MaxGenerateMs: durationMs(e.maxGenerateNs.Load()),
MaxUpsampleMs: durationMs(e.maxUpsampleNs.Load()),
MaxWriteMs: durationMs(e.maxWriteNs.Load()),
Queue: queue,
RuntimeIndicator: runtimeIndicator(queue.Health, lateBuffers),
}
}

// runtimeIndicator derives the overall runtime health from the frame-queue
// health and the number of late buffers observed so far. A critical queue
// dominates; a low queue or any late buffer downgrades to degraded.
func runtimeIndicator(queueHealth output.QueueHealth, lateBuffers uint64) RuntimeIndicator {
	if queueHealth == output.QueueHealthCritical {
		return RuntimeIndicatorQueueCritical
	}
	if queueHealth == output.QueueHealthLow || lateBuffers > 0 {
		return RuntimeIndicatorDegraded
	}
	return RuntimeIndicatorNormal
}



+ 57
- 0
internal/app/runtime_indicator_test.go Bestand weergeven

@@ -0,0 +1,57 @@
package app

import (
"testing"

"github.com/jan/fm-rds-tx/internal/output"
)

// TestRuntimeIndicator verifies the mapping from queue health and late-buffer
// count to the derived runtime indicator value.
func TestRuntimeIndicator(t *testing.T) {
	tests := []struct {
		name        string
		queueHealth output.QueueHealth
		lateBuffers uint64
		want        RuntimeIndicator
	}{
		{name: "normal", queueHealth: output.QueueHealthNormal, lateBuffers: 0, want: RuntimeIndicatorNormal},
		{name: "degradedLateBuffers", queueHealth: output.QueueHealthNormal, lateBuffers: 1, want: RuntimeIndicatorDegraded},
		{name: "degradedQueueLow", queueHealth: output.QueueHealthLow, lateBuffers: 0, want: RuntimeIndicatorDegraded},
		{name: "queueCritical", queueHealth: output.QueueHealthCritical, lateBuffers: 0, want: RuntimeIndicatorQueueCritical},
		{name: "criticalLateBuffers", queueHealth: output.QueueHealthCritical, lateBuffers: 3, want: RuntimeIndicatorQueueCritical},
	}

	for _, tt := range tests {
		tt := tt // shadow for pre-1.22 per-iteration capture semantics
		t.Run(tt.name, func(t *testing.T) {
			got := runtimeIndicator(tt.queueHealth, tt.lateBuffers)
			if got != tt.want {
				t.Fatalf("runtime indicator mismatch: queue=%s late=%d want=%s got=%s",
					tt.queueHealth, tt.lateBuffers, tt.want, got)
			}
		})
	}
}

Laden…
Annuleren
Opslaan