// Package llmruntime tests: request-forwarding and provider-error parsing.
package llmruntime

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

// TestOpenAICompatibleClient_ForwardsTemperatureAndMaxTokens verifies that the
// sampling parameters set on a Request appear verbatim in the JSON body the
// OpenAI-compatible client sends to the provider endpoint.
func TestOpenAICompatibleClient_ForwardsTemperatureAndMaxTokens(t *testing.T) {
	t.Parallel()

	// Capture the relevant fields of the outbound request body as the
	// stub provider receives them.
	var captured struct {
		Temperature float64 `json:"temperature"`
		MaxTokens   int     `json:"max_tokens"`
	}
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_ = json.NewDecoder(r.Body).Decode(&captured)
		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":"ok"}}]}`))
	}))
	defer srv.Close()

	client, err := NewFactory(2 * time.Second).ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	wantTemp, wantMax := 0.77, 777
	req := Request{
		Provider:     "openai",
		BaseURL:      srv.URL,
		Model:        "gpt-5.4",
		APIKey:       "key",
		Temperature:  &wantTemp,
		MaxTokens:    &wantMax,
		SystemPrompt: "system",
		UserPrompt:   "user",
	}
	if _, err = client.Generate(context.Background(), req); err != nil {
		t.Fatalf("generate failed: %v", err)
	}

	// By the time Generate returns, the handler has completed, so reading
	// captured here is race-free.
	if captured.Temperature != 0.77 {
		t.Fatalf("unexpected temperature: %v", captured.Temperature)
	}
	if captured.MaxTokens != 777 {
		t.Fatalf("unexpected max tokens: %v", captured.MaxTokens)
	}
}

// TestExtractProviderErrorMessage verifies that the nested error.message field
// is surfaced from a raw provider error payload.
func TestExtractProviderErrorMessage(t *testing.T) {
	t.Parallel()

	raw := []byte(`{"error":{"message":"invalid key"}}`)
	if msg := extractProviderErrorMessage(raw); !strings.Contains(msg, "invalid key") {
		t.Fatalf("unexpected message: %q", msg)
	}
}