package llmruntime

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

func TestOpenAICompatibleClient_ForwardsTemperatureAndMaxTokens(t *testing.T) {
	t.Parallel()

	// Capture the request body the client sends to the provider.
	var got map[string]any
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_ = json.NewDecoder(r.Body).Decode(&got)
		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":"ok"}}]}`))
	}))
	defer server.Close()

	factory := NewFactory(2 * time.Second)
	client, err := factory.ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	temperature := 0.77
	maxTokens := 777
	_, err = client.Generate(context.Background(), Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-5.4",
		APIKey:       "key",
		Temperature:  &temperature,
		MaxTokens:    &maxTokens,
		SystemPrompt: "system",
		UserPrompt:   "user",
	})
	if err != nil {
		t.Fatalf("generate failed: %v", err)
	}

	gotTemperature, _ := got["temperature"].(float64)
	if gotTemperature != 0.77 {
		t.Fatalf("unexpected temperature: %v", gotTemperature)
	}
	// gpt-5 family models must send max_completion_tokens, not max_tokens.
	if _, exists := got["max_tokens"]; exists {
		t.Fatalf("did not expect max_tokens for openai gpt-5 models")
	}
	gotMaxCompletionTokens, _ := got["max_completion_tokens"].(float64)
	if gotMaxCompletionTokens != 777 {
		t.Fatalf("unexpected max_completion_tokens: %v", gotMaxCompletionTokens)
	}
}

func TestOpenAICompatibleClient_UsesMaxTokensForOlderOpenAIModels(t *testing.T) {
	t.Parallel()

	var got map[string]any
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_ = json.NewDecoder(r.Body).Decode(&got)
		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":"ok"}}]}`))
	}))
	defer server.Close()

	factory := NewFactory(2 * time.Second)
	client, err := factory.ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	maxTokens := 512
	_, err = client.Generate(context.Background(), Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-4.1",
		APIKey:       "key",
		MaxTokens:    &maxTokens,
		SystemPrompt: "system",
		UserPrompt:   "user",
	})
	if err != nil {
		t.Fatalf("generate failed: %v", err)
	}

	// Pre-gpt-5 models keep the legacy max_tokens parameter.
	if _, exists := got["max_completion_tokens"]; exists {
		t.Fatalf("did not expect max_completion_tokens for non-gpt-5 model")
	}
	gotMaxTokens, _ := got["max_tokens"].(float64)
	if gotMaxTokens != 512 {
		t.Fatalf("unexpected max_tokens: %v", gotMaxTokens)
	}
}

func TestOpenAICompatibleClient_ExtractsMessageContentParts(t *testing.T) {
	t.Parallel()

	// message.content may be an array of typed parts; the client should
	// concatenate the text of every part.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":[{"type":"text","text":"{\"suggestions\":["},{"type":"output_text","text":"{\"fieldPath\":\"hero.title\",\"value\":\"Hello\"}"}]}}]}`))
	}))
	defer server.Close()

	factory := NewFactory(2 * time.Second)
	client, err := factory.ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	got, err := client.Generate(context.Background(), Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-5.4-mini",
		APIKey:       "key",
		SystemPrompt: "system",
		UserPrompt:   "user",
	})
	if err != nil {
		t.Fatalf("generate failed: %v", err)
	}
	if !strings.Contains(got, "hero.title") {
		t.Fatalf("unexpected extracted content: %q", got)
	}
}

func TestOpenAICompatibleClient_ExtractsResponsesOutputShape(t *testing.T) {
	t.Parallel()

	// Responses API payloads carry content under output[].content[] rather
	// than choices[].message.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte(`{"id":"resp_123","object":"response","output":[{"type":"message","content":[{"type":"output_text","text":"{\"suggestions\":[{\"fieldPath\":\"hero.subtitle\",\"value\":\"World\"}]}"}]}]}`))
	}))
	defer server.Close()

	factory := NewFactory(2 * time.Second)
	client, err := factory.ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	got, err := client.Generate(context.Background(), Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-5.4-mini",
		APIKey:       "key",
		SystemPrompt: "system",
		UserPrompt:   "user",
	})
	if err != nil {
		t.Fatalf("generate failed: %v", err)
	}
	if !strings.Contains(got, "hero.subtitle") {
		t.Fatalf("unexpected extracted content: %q", got)
	}
}

func TestOpenAICompatibleClient_EmptyContentIncludesShapeDiagnostics(t *testing.T) {
	t.Parallel()

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte(`{"id":"chatcmpl_x","choices":[{"index":0,"finish_reason":"stop","message":{"role":"assistant","content":[]}}]}`))
	}))
	defer server.Close()

	factory := NewFactory(2 * time.Second)
	client, err := factory.ClientFor("openai")
	if err != nil {
		t.Fatalf("client creation failed: %v", err)
	}

	_, err = client.Generate(context.Background(), Request{
		Provider:     "openai",
		BaseURL:      server.URL,
		Model:        "gpt-5.4-mini",
		APIKey:       "key",
		SystemPrompt: "system",
		UserPrompt:   "user",
	})
	if err == nil {
		t.Fatalf("expected generate error")
	}
	// The error should describe the response shape so empty payloads are
	// debuggable from logs alone.
	if !strings.Contains(err.Error(), "empty openai-compatible response content") {
		t.Fatalf("unexpected error: %v", err)
	}
	if !strings.Contains(err.Error(), "message_content_type=array") {
		t.Fatalf("expected shape diagnostics in error: %v", err)
	}
	if !strings.Contains(err.Error(), "message_content_len=0") {
		t.Fatalf("expected message content length diagnostics in error: %v", err)
	}
	if !strings.Contains(err.Error(), "choices0_finish_reason=stop") {
		t.Fatalf("expected finish reason diagnostics in error: %v", err)
	}
}

func TestExtractProviderErrorMessage(t *testing.T) {
	t.Parallel()

	msg := extractProviderErrorMessage([]byte(`{"error":{"message":"invalid key"}}`))
	if !strings.Contains(msg, "invalid key") {
		t.Fatalf("unexpected message: %q", msg)
	}
}