Selaa lähdekoodia

test: cover llm runtime provider errors

master
Jan Svabenik 1 kuukausi sitten
vanhempi
commit
18ef01f213
1 muutettua tiedostoa jossa 61 lisäystä ja 0 poistoa
  1. +61
    -0
      internal/llmruntime/runtime_test.go

+ 61
- 0
internal/llmruntime/runtime_test.go Näytä tiedosto

@@ -0,0 +1,61 @@
package llmruntime

import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)

// TestOpenAICompatibleClient_ForwardsTemperatureAndMaxTokens verifies that the
// sampling knobs set on a Request (temperature, max tokens) survive the trip
// into the JSON body the openai-compatible client POSTs to the provider.
func TestOpenAICompatibleClient_ForwardsTemperatureAndMaxTokens(t *testing.T) {
	t.Parallel()

	// Capture only the two fields under test from the outgoing request body.
	var captured struct {
		Temperature float64 `json:"temperature"`
		MaxTokens   int     `json:"max_tokens"`
	}
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Decode error intentionally ignored: a failed decode leaves the
		// zero value, which the assertions below will reject anyway.
		_ = json.NewDecoder(r.Body).Decode(&captured)
		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":"ok"}}]}`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client, err := NewFactory(2 * time.Second).ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	// Request takes pointers so "unset" is distinguishable from zero.
	temp := 0.77
	tokens := 777
	req := Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-5.4",
		APIKey:       "key",
		Temperature:  &temp,
		MaxTokens:    &tokens,
		SystemPrompt: "system",
		UserPrompt:   "user",
	}
	if _, err := client.Generate(context.Background(), req); err != nil {
		t.Fatalf("generate failed: %v", err)
	}

	if captured.Temperature != 0.77 {
		t.Fatalf("unexpected temperature: %v", captured.Temperature)
	}
	if captured.MaxTokens != 777 {
		t.Fatalf("unexpected max tokens: %v", captured.MaxTokens)
	}
}

// TestExtractProviderErrorMessage checks that a provider error payload in the
// common {"error":{"message":...}} shape yields the embedded message text.
func TestExtractProviderErrorMessage(t *testing.T) {
	t.Parallel()

	const payload = `{"error":{"message":"invalid key"}}`
	got := extractProviderErrorMessage([]byte(payload))
	if !strings.Contains(got, "invalid key") {
		t.Fatalf("unexpected message: %q", got)
	}
}

Loading…
Peruuta
Tallenna