Address a set of production-facing edge cases discovered during bug hunting. Included fixes:
- make FrameQueue close handling race-safe by replacing the TOCTOU close check with a dedicated close signal channel
- relax tone frequency validation when tone amplitude is zero, and default tone amplitude to 0 to avoid unintended test-tone output
- validate PI codes consistently whenever provided, and require a PI when RDS is enabled
- harden Icecast reconnect backoff against duration overflow
- prevent duplicate hard-reload goroutines from rapid repeated ingest-save requests
- clamp BS.412 power accumulation against negative float drift before sqrt to avoid NaN gain propagation

These changes focus on shutdown safety, config correctness, reconnect robustness, and long-running DSP stability.
| @@ -140,7 +140,8 @@ type IngestAES67DiscoveryConfig struct { | |||||
| func Default() Config { | func Default() Config { | ||||
| return Config{ | return Config{ | ||||
| Audio: AudioConfig{Gain: 1.0, ToneLeftHz: 1000, ToneRightHz: 1600, ToneAmplitude: 0.4}, | |||||
| // BUG-C fix: tones off by default (was 0.4 — caused unintended audio output). | |||||
| Audio: AudioConfig{Gain: 1.0, ToneLeftHz: 1000, ToneRightHz: 1600, ToneAmplitude: 0}, | |||||
| RDS: RDSConfig{Enabled: true, PI: "1234", PS: "FMRTX", RadioText: "fm-rds-tx", PTY: 0}, | RDS: RDSConfig{Enabled: true, PI: "1234", PS: "FMRTX", RadioText: "fm-rds-tx", PTY: 0}, | ||||
| FM: FMConfig{ | FM: FMConfig{ | ||||
| FrequencyMHz: 100.0, | FrequencyMHz: 100.0, | ||||
| @@ -256,8 +257,9 @@ func (c Config) Validate() error { | |||||
| if c.Audio.Gain < 0 || c.Audio.Gain > 4 { | if c.Audio.Gain < 0 || c.Audio.Gain > 4 { | ||||
| return fmt.Errorf("audio.gain out of range") | return fmt.Errorf("audio.gain out of range") | ||||
| } | } | ||||
| if c.Audio.ToneLeftHz <= 0 || c.Audio.ToneRightHz <= 0 { | |||||
| return fmt.Errorf("audio tone frequencies must be positive") | |||||
| // BUG-B fix: only enforce positive freq when amplitude is non-zero. | |||||
| if c.Audio.ToneAmplitude > 0 && (c.Audio.ToneLeftHz <= 0 || c.Audio.ToneRightHz <= 0) { | |||||
| return fmt.Errorf("audio tone frequencies must be positive when toneAmplitude > 0") | |||||
| } | } | ||||
| if c.Audio.ToneAmplitude < 0 || c.Audio.ToneAmplitude > 1 { | if c.Audio.ToneAmplitude < 0 || c.Audio.ToneAmplitude > 1 { | ||||
| return fmt.Errorf("audio.toneAmplitude out of range") | return fmt.Errorf("audio.toneAmplitude out of range") | ||||
| @@ -411,11 +413,15 @@ func (c Config) Validate() error { | |||||
| if c.Ingest.Icecast.RadioText.MaxLen < 0 || c.Ingest.Icecast.RadioText.MaxLen > 64 { | if c.Ingest.Icecast.RadioText.MaxLen < 0 || c.Ingest.Icecast.RadioText.MaxLen > 64 { | ||||
| return fmt.Errorf("ingest.icecast.radioText.maxLen out of range (0-64)") | return fmt.Errorf("ingest.icecast.radioText.maxLen out of range (0-64)") | ||||
| } | } | ||||
| // Fail-loud PI validation | |||||
| if c.RDS.Enabled { | |||||
| // BUG-D fix: validate PI whenever non-empty, not only when RDS is enabled. | |||||
| // An invalid PI stored in config causes a silent failure when RDS is later | |||||
| // enabled via live-patch. | |||||
| if strings.TrimSpace(c.RDS.PI) != "" { | |||||
| if _, err := ParsePI(c.RDS.PI); err != nil { | if _, err := ParsePI(c.RDS.PI); err != nil { | ||||
| return fmt.Errorf("rds config: %w", err) | return fmt.Errorf("rds config: %w", err) | ||||
| } | } | ||||
| } else if c.RDS.Enabled { | |||||
| return fmt.Errorf("rds.pi is required when rds.enabled is true") | |||||
| } | } | ||||
| if c.RDS.PTY < 0 || c.RDS.PTY > 31 { | if c.RDS.PTY < 0 || c.RDS.PTY > 31 { | ||||
| return fmt.Errorf("rds.pty out of range (0-31)") | return fmt.Errorf("rds.pty out of range (0-31)") | ||||
| @@ -61,6 +61,9 @@ type Server struct { | |||||
| ingestRt IngestRuntime // optional, for /runtime ingest stats | ingestRt IngestRuntime // optional, for /runtime ingest stats | ||||
| saveConfig func(config.Config) error | saveConfig func(config.Config) error | ||||
| hardReload func() | hardReload func() | ||||
| // BUG-F fix: reloadPending prevents multiple concurrent goroutines from | |||||
| // calling hardReload when handleIngestSave is hit multiple times quickly. | |||||
| reloadPending atomic.Bool | |||||
| audit auditCounters | audit auditCounters | ||||
| } | } | ||||
| @@ -693,9 +696,10 @@ func (s *Server) handleIngestSave(w http.ResponseWriter, r *http.Request) { | |||||
| "saved": true, | "saved": true, | ||||
| "reloadScheduled": reloadScheduled, | "reloadScheduled": reloadScheduled, | ||||
| }) | }) | ||||
| if reloadScheduled { | |||||
| if reloadScheduled && s.reloadPending.CompareAndSwap(false, true) { | |||||
| go func(fn func()) { | go func(fn func()) { | ||||
| time.Sleep(250 * time.Millisecond) | time.Sleep(250 * time.Millisecond) | ||||
| s.reloadPending.Store(false) | |||||
| fn() | fn() | ||||
| }(reload) | }(reload) | ||||
| } | } | ||||
| @@ -115,6 +115,13 @@ func (l *BS412Limiter) ProcessChunk(audioPower float64) float64 { | |||||
| old := l.powerBuf[l.bufIdx] | old := l.powerBuf[l.bufIdx] | ||||
| l.powerBuf[l.bufIdx] = audioPower | l.powerBuf[l.bufIdx] = audioPower | ||||
| l.powerSum += audioPower - old | l.powerSum += audioPower - old | ||||
| // BUG-G fix: float64 accumulation over 1200+ chunks can drift slightly | |||||
| // negative due to rounding. A negative powerSum → negative avgPower → | |||||
| // math.Sqrt of negative → NaN → gain becomes NaN, silently disabling | |||||
| // the limiter. Clamp to zero to keep the invariant powerSum >= 0. | |||||
| if l.powerSum < 0 { | |||||
| l.powerSum = 0 | |||||
| } | |||||
| l.bufIdx++ | l.bufIdx++ | ||||
| if l.bufIdx >= len(l.powerBuf) { | if l.bufIdx >= len(l.powerBuf) { | ||||
| l.bufIdx = 0 | l.bufIdx = 0 | ||||
| @@ -20,11 +20,15 @@ func (c ReconnectConfig) nextBackoff(attempt int) time.Duration { | |||||
| if max <= 0 { | if max <= 0 { | ||||
| max = 15000 | max = 15000 | ||||
| } | } | ||||
| maxD := time.Duration(max) * time.Millisecond | |||||
| d := time.Duration(initial) * time.Millisecond | d := time.Duration(initial) * time.Millisecond | ||||
| // BUG-E fix: check d <= 0 (overflow) as well as d >= max. | |||||
| // int64 overflow after ~63 doublings caused d to go negative, | |||||
| // producing spurious short backoffs before recovering. | |||||
| for i := 1; i < attempt; i++ { | for i := 1; i < attempt; i++ { | ||||
| d *= 2 | d *= 2 | ||||
| if d >= time.Duration(max)*time.Millisecond { | |||||
| return time.Duration(max) * time.Millisecond | |||||
| if d <= 0 || d >= maxD { | |||||
| return maxD | |||||
| } | } | ||||
| } | } | ||||
| return d | return d | ||||
| @@ -45,6 +45,9 @@ const ( | |||||
| type FrameQueue struct { | type FrameQueue struct { | ||||
| capacity int | capacity int | ||||
| ch chan *CompositeFrame | ch chan *CompositeFrame | ||||
| // closeCh is closed by Close() and used in Push/Pop selects so that | |||||
| // a concurrent Close() can never race with a channel send. | |||||
| closeCh chan struct{} | |||||
| mu sync.Mutex | mu sync.Mutex | ||||
| depth int | depth int | ||||
| @@ -68,6 +71,7 @@ func NewFrameQueue(capacity int) *FrameQueue { | |||||
| fq := &FrameQueue{ | fq := &FrameQueue{ | ||||
| capacity: capacity, | capacity: capacity, | ||||
| ch: make(chan *CompositeFrame, capacity), | ch: make(chan *CompositeFrame, capacity), | ||||
| closeCh: make(chan struct{}), | |||||
| lowWaterMark: capacity, | lowWaterMark: capacity, | ||||
| } | } | ||||
| fq.trackDepth(0) | fq.trackDepth(0) | ||||
| @@ -121,17 +125,18 @@ func (q *FrameQueue) Push(ctx context.Context, frame *CompositeFrame) error { | |||||
| if frame == nil { | if frame == nil { | ||||
| return errors.New("frame required") | return errors.New("frame required") | ||||
| } | } | ||||
| if q.isClosed() { | |||||
| return ErrFrameQueueClosed | |||||
| } | |||||
| // BUG-05 fix: increment depth BEFORE the channel send so that Stats() | |||||
| // never reports fill=0 while a frame is in the channel awaiting receive. | |||||
| // On context cancellation, undo the increment. | |||||
| // BUG-A fix: use closeCh in the select so that a concurrent Close() can | |||||
| // never race with the send. The old isClosed() pre-check + separate | |||||
| // ch<- send had a TOCTOU gap that could panic with "send on closed channel". | |||||
| // BUG-05 fix: increment depth before the send; undo on cancel/close. | |||||
| q.updateDepth(+1) | q.updateDepth(+1) | ||||
| select { | select { | ||||
| case q.ch <- frame: | case q.ch <- frame: | ||||
| return nil | return nil | ||||
| case <-q.closeCh: | |||||
| q.updateDepth(-1) | |||||
| return ErrFrameQueueClosed | |||||
| case <-ctx.Done(): | case <-ctx.Done(): | ||||
| q.updateDepth(-1) | q.updateDepth(-1) | ||||
| q.recordPushTimeout() | q.recordPushTimeout() | ||||
| @@ -160,6 +165,9 @@ func (q *FrameQueue) Close() { | |||||
| q.mu.Lock() | q.mu.Lock() | ||||
| q.closed = true | q.closed = true | ||||
| q.mu.Unlock() | q.mu.Unlock() | ||||
| // Close closeCh first so blocked Push() calls unblock safely | |||||
| // before close(ch) removes the destination. | |||||
| close(q.closeCh) | |||||
| close(q.ch) | close(q.ch) | ||||
| }) | }) | ||||
| } | } | ||||