diff --git a/backend/.env.example b/backend/.env.example
index 6d74639..8fa7162 100644
--- a/backend/.env.example
+++ b/backend/.env.example
@@ -14,5 +14,5 @@ PORT=8080
 ALLOWED_ORIGINS=http://localhost:3000
 
 # External APIs
-GEMINI_API_KEY=your-gemini-key
+OPENAI_API_KEY=your-openai-key
 PEXELS_API_KEY=your-pexels-key
diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go
index 032bd16..ca9c297 100644
--- a/backend/cmd/server/main.go
+++ b/backend/cmd/server/main.go
@@ -93,7 +93,7 @@ func run() error {
 	authMW := middleware.Auth(&jwtAdapter{jm: jwtManager})
 
 	// External API clients
-	geminiClient := gemini.NewClient(cfg.GeminiAPIKey)
+	geminiClient := gemini.NewClient(cfg.OpenAIAPIKey)
 	pexelsClient := pexels.NewClient(cfg.PexelsAPIKey)
 
 	// Ingredient domain
diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go
index 2b5f530..b7ef54b 100644
--- a/backend/internal/config/config.go
+++ b/backend/internal/config/config.go
@@ -22,7 +22,7 @@ type Config struct {
 	AllowedOrigins []string `envconfig:"ALLOWED_ORIGINS" default:"http://localhost:3000"`
 
 	// External APIs
-	GeminiAPIKey string `envconfig:"GEMINI_API_KEY" required:"true"`
+	OpenAIAPIKey string `envconfig:"OPENAI_API_KEY" required:"true"`
 	PexelsAPIKey string `envconfig:"PEXELS_API_KEY" required:"true"`
 }
 
diff --git a/backend/internal/gemini/client.go b/backend/internal/gemini/client.go
index 96788cf..f89d5db 100644
--- a/backend/internal/gemini/client.go
+++ b/backend/internal/gemini/client.go
@@ -11,19 +11,19 @@ import (
 )
 
 const (
-	// groqAPIURL is the Groq OpenAI-compatible endpoint (free tier, no billing required).
-	groqAPIURL = "https://api.groq.com/openai/v1/chat/completions"
+	// openaiAPIURL is the OpenAI chat completions endpoint.
+	openaiAPIURL = "https://api.openai.com/v1/chat/completions"
 
-	// groqModel is the default text generation model.
-	groqModel = "llama-3.3-70b-versatile"
+	// openaiModel is the default text generation model.
+	openaiModel = "gpt-4o-mini"
 
-	// groqVisionModel supports image inputs in OpenAI vision format.
-	groqVisionModel = "meta-llama/llama-4-scout-17b-16e-instruct"
+	// openaiVisionModel supports image inputs.
+	openaiVisionModel = "gpt-4o"
 
 	maxRetries = 3
 )
 
-// Client is an HTTP client for the Groq LLM API (OpenAI-compatible).
+// Client is an HTTP client for the OpenAI API.
 type Client struct {
 	apiKey     string
 	httpClient *http.Client
@@ -39,9 +39,9 @@ func NewClient(apiKey string) *Client {
 	}
 }
 
-// generateContent sends text messages to the text-only model.
+// generateContent sends text messages to the text model.
 func (c *Client) generateContent(ctx context.Context, messages []map[string]string) (string, error) {
-	return c.callGroq(ctx, groqModel, 0.7, messages)
+	return c.callOpenAI(ctx, openaiModel, 0.7, messages)
 }
 
 // generateVisionContent sends an image + text prompt to the vision model.
@@ -68,12 +68,12 @@ func (c *Client) generateVisionContent(ctx context.Context, prompt, imageBase64,
 			},
 		},
 	}
-	return c.callGroq(ctx, groqVisionModel, 0.1, messages)
+	return c.callOpenAI(ctx, openaiVisionModel, 0.1, messages)
 }
 
-// callGroq is the shared HTTP transport for all Groq requests.
+// callOpenAI is the shared HTTP transport for all OpenAI requests.
 // messages can be []map[string]string (text) or []any (vision with image content).
-func (c *Client) callGroq(ctx context.Context, model string, temperature float64, messages any) (string, error) {
+func (c *Client) callOpenAI(ctx context.Context, model string, temperature float64, messages any) (string, error) {
 	body := map[string]any{
 		"model":       model,
 		"temperature": temperature,
@@ -85,7 +85,7 @@ func (c *Client) callGroq(ctx context.Context, model string, temperature float64
 		return "", fmt.Errorf("marshal request: %w", err)
 	}
 
-	req, err := http.NewRequestWithContext(ctx, http.MethodPost, groqAPIURL, bytes.NewReader(bodyBytes))
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, openaiAPIURL, bytes.NewReader(bodyBytes))
 	if err != nil {
 		return "", fmt.Errorf("create request: %w", err)
 	}
@@ -100,7 +100,7 @@ func (c *Client) callGroq(ctx context.Context, model string, temperature float64
 
 	if resp.StatusCode != http.StatusOK {
 		raw, _ := io.ReadAll(resp.Body)
-		return "", fmt.Errorf("groq API error %d: %s", resp.StatusCode, string(raw))
+		return "", fmt.Errorf("openai API error %d: %s", resp.StatusCode, string(raw))
 	}
 
 	var result struct {
@@ -114,7 +114,7 @@ func (c *Client) callGroq(ctx context.Context, model string, temperature float64
 		return "", fmt.Errorf("decode response: %w", err)
 	}
 	if len(result.Choices) == 0 {
-		return "", fmt.Errorf("empty response from Groq")
+		return "", fmt.Errorf("empty response from OpenAI")
 	}
 	return result.Choices[0].Message.Content, nil
 }
diff --git a/backend/internal/gemini/recipe.go b/backend/internal/gemini/recipe.go
index ac642e8..ef6be73 100644
--- a/backend/internal/gemini/recipe.go
+++ b/backend/internal/gemini/recipe.go
@@ -68,7 +68,7 @@ type NutritionInfo struct {
 func (c *Client) GenerateRecipes(ctx context.Context, req RecipeRequest) ([]Recipe, error) {
 	prompt := buildRecipePrompt(req)
 
-	// OpenAI-compatible messages format used by Groq.
+	// OpenAI messages format.
 	messages := []map[string]string{
 		{"role": "user", "content": prompt},
 	}
diff --git a/backend/internal/menu/handler.go b/backend/internal/menu/handler.go
index 3a9416a..1c9bea0 100644
--- a/backend/internal/menu/handler.go
+++ b/backend/internal/menu/handler.go
@@ -135,7 +135,7 @@ func (h *Handler) GenerateMenu(w http.ResponseWriter, r *http.Request) {
 		menuReq.AvailableProducts = products
 	}
 
-	// Generate 7-day plan via Groq.
+	// Generate 7-day plan via OpenAI.
 	days, err := h.gemini.GenerateMenu(r.Context(), menuReq)
 	if err != nil {
 		slog.Error("generate menu", "user_id", userID, "err", err)