feat: async dish recognition (Kafka/Watermill/SSE) + remove Wire + consolidate migrations
Async recognition pipeline:
- POST /ai/recognize-dish → 202 {job_id, queue_position, estimated_seconds}
- GET /ai/jobs/{id}/stream — SSE stream: queued → processing → done/failed
- Kafka topics: ai.recognize.paid (3 partitions) + ai.recognize.free (1 partition)
- 5-worker WorkerPool with priority loop (paid consumers first)
- SSEBroker via PostgreSQL LISTEN/NOTIFY
- Kafka adapter migrated from franz-go to Watermill (watermill-kafka/v2)
- Docker Compose: added Kafka + Zookeeper + kafka-init service
- Flutter: recognition_service.dart uses SSE; home_screen shows live job status
Remove google/wire (archived):
- Deleted wire.go (wireinject spec) and wire_gen.go
- Added cmd/server/init.go — plain Go manual DI, same initApp() logic
- Removed github.com/google/wire from go.mod
Consolidate migrations:
- Merged 001_initial_schema + 002_seed_data + 003_recognition_jobs into single 001_initial_schema.sql
- Deleted 002_seed_data.sql and 003_recognition_jobs.sql
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -8,6 +8,8 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"github.com/food-ai/backend/internal/adapters/ai"
|
||||
"github.com/food-ai/backend/internal/domain/dish"
|
||||
"github.com/food-ai/backend/internal/domain/ingredient"
|
||||
@@ -15,7 +17,7 @@ import (
|
||||
"github.com/food-ai/backend/internal/infra/middleware"
|
||||
)
|
||||
|
||||
// DishRepository is the subset of dish.Repository used by this handler.
|
||||
// DishRepository is the subset of dish.Repository used by workers and the handler.
|
||||
type DishRepository interface {
|
||||
FindOrCreate(ctx context.Context, name string) (string, bool, error)
|
||||
FindOrCreateRecipe(ctx context.Context, dishID string, calories, proteinG, fatG, carbsG float64) (string, bool, error)
|
||||
@@ -41,16 +43,35 @@ type Recognizer interface {
|
||||
TranslateDishName(ctx context.Context, name string) (map[string]string, error)
|
||||
}
|
||||
|
||||
// KafkaPublisher publishes job IDs to a Kafka topic.
|
||||
type KafkaPublisher interface {
|
||||
Publish(ctx context.Context, topic, message string) error
|
||||
}
|
||||
|
||||
// Handler handles POST /ai/* recognition endpoints.
|
||||
type Handler struct {
|
||||
recognizer Recognizer
|
||||
ingredientRepo IngredientRepository
|
||||
dishRepo DishRepository
|
||||
jobRepo JobRepository
|
||||
kafkaProducer KafkaPublisher
|
||||
sseBroker *SSEBroker
|
||||
}
|
||||
|
||||
// NewHandler creates a new Handler.
|
||||
func NewHandler(recognizer Recognizer, repo IngredientRepository, dishRepo DishRepository) *Handler {
|
||||
return &Handler{recognizer: recognizer, ingredientRepo: repo, dishRepo: dishRepo}
|
||||
// NewHandler creates a new Handler with async dish recognition support.
|
||||
func NewHandler(
|
||||
recognizer Recognizer,
|
||||
ingredientRepo IngredientRepository,
|
||||
jobRepo JobRepository,
|
||||
kafkaProducer KafkaPublisher,
|
||||
sseBroker *SSEBroker,
|
||||
) *Handler {
|
||||
return &Handler{
|
||||
recognizer: recognizer,
|
||||
ingredientRepo: ingredientRepo,
|
||||
jobRepo: jobRepo,
|
||||
kafkaProducer: kafkaProducer,
|
||||
sseBroker: sseBroker,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -81,39 +102,36 @@ type EnrichedItem struct {
|
||||
|
||||
// ReceiptResponse is the response for POST /ai/recognize-receipt.
|
||||
type ReceiptResponse struct {
|
||||
Items []EnrichedItem `json:"items"`
|
||||
Items []EnrichedItem `json:"items"`
|
||||
Unrecognized []ai.UnrecognizedItem `json:"unrecognized"`
|
||||
}
|
||||
|
||||
// DishResponse is the response for POST /ai/recognize-dish.
|
||||
type DishResponse = ai.DishResult
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Handlers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// RecognizeReceipt handles POST /ai/recognize-receipt.
|
||||
// Body: {"image_base64": "...", "mime_type": "image/jpeg"}
|
||||
func (h *Handler) RecognizeReceipt(w http.ResponseWriter, r *http.Request) {
|
||||
userID := middleware.UserIDFromCtx(r.Context())
|
||||
func (handler *Handler) RecognizeReceipt(responseWriter http.ResponseWriter, request *http.Request) {
|
||||
userID := middleware.UserIDFromCtx(request.Context())
|
||||
_ = userID // logged for tracing
|
||||
|
||||
var req imageRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.ImageBase64 == "" {
|
||||
writeErrorJSON(w, http.StatusBadRequest, "image_base64 is required")
|
||||
if decodeError := json.NewDecoder(request.Body).Decode(&req); decodeError != nil || req.ImageBase64 == "" {
|
||||
writeErrorJSON(responseWriter, http.StatusBadRequest, "image_base64 is required")
|
||||
return
|
||||
}
|
||||
|
||||
lang := locale.FromContext(r.Context())
|
||||
result, err := h.recognizer.RecognizeReceipt(r.Context(), req.ImageBase64, req.MimeType, lang)
|
||||
if err != nil {
|
||||
slog.Error("recognize receipt", "err", err)
|
||||
writeErrorJSON(w, http.StatusServiceUnavailable, "recognition failed, please try again")
|
||||
lang := locale.FromContext(request.Context())
|
||||
result, recognizeError := handler.recognizer.RecognizeReceipt(request.Context(), req.ImageBase64, req.MimeType, lang)
|
||||
if recognizeError != nil {
|
||||
slog.Error("recognize receipt", "err", recognizeError)
|
||||
writeErrorJSON(responseWriter, http.StatusServiceUnavailable, "recognition failed, please try again")
|
||||
return
|
||||
}
|
||||
|
||||
enriched := h.enrichItems(r.Context(), result.Items)
|
||||
writeJSON(w, http.StatusOK, ReceiptResponse{
|
||||
enriched := handler.enrichItems(request.Context(), result.Items)
|
||||
writeJSON(responseWriter, http.StatusOK, ReceiptResponse{
|
||||
Items: enriched,
|
||||
Unrecognized: result.Unrecognized,
|
||||
})
|
||||
@@ -121,92 +139,108 @@ func (h *Handler) RecognizeReceipt(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// RecognizeProducts handles POST /ai/recognize-products.
|
||||
// Body: {"images": [{"image_base64": "...", "mime_type": "image/jpeg"}, ...]}
|
||||
func (h *Handler) RecognizeProducts(w http.ResponseWriter, r *http.Request) {
|
||||
func (handler *Handler) RecognizeProducts(responseWriter http.ResponseWriter, request *http.Request) {
|
||||
var req imagesRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil || len(req.Images) == 0 {
|
||||
writeErrorJSON(w, http.StatusBadRequest, "at least one image is required")
|
||||
if decodeError := json.NewDecoder(request.Body).Decode(&req); decodeError != nil || len(req.Images) == 0 {
|
||||
writeErrorJSON(responseWriter, http.StatusBadRequest, "at least one image is required")
|
||||
return
|
||||
}
|
||||
if len(req.Images) > 3 {
|
||||
req.Images = req.Images[:3] // cap at 3 photos as per spec
|
||||
}
|
||||
|
||||
// Process each image in parallel.
|
||||
lang := locale.FromContext(r.Context())
|
||||
lang := locale.FromContext(request.Context())
|
||||
allItems := make([][]ai.RecognizedItem, len(req.Images))
|
||||
var wg sync.WaitGroup
|
||||
for i, img := range req.Images {
|
||||
wg.Add(1)
|
||||
go func(i int, img imageRequest) {
|
||||
go func(index int, imageReq imageRequest) {
|
||||
defer wg.Done()
|
||||
items, err := h.recognizer.RecognizeProducts(r.Context(), img.ImageBase64, img.MimeType, lang)
|
||||
if err != nil {
|
||||
slog.Warn("recognize products from image", "index", i, "err", err)
|
||||
items, recognizeError := handler.recognizer.RecognizeProducts(request.Context(), imageReq.ImageBase64, imageReq.MimeType, lang)
|
||||
if recognizeError != nil {
|
||||
slog.Warn("recognize products from image", "index", index, "err", recognizeError)
|
||||
return
|
||||
}
|
||||
allItems[i] = items
|
||||
allItems[index] = items
|
||||
}(i, img)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
merged := MergeAndDeduplicate(allItems)
|
||||
enriched := h.enrichItems(r.Context(), merged)
|
||||
writeJSON(w, http.StatusOK, map[string]any{"items": enriched})
|
||||
enriched := handler.enrichItems(request.Context(), merged)
|
||||
writeJSON(responseWriter, http.StatusOK, map[string]any{"items": enriched})
|
||||
}
|
||||
|
||||
// RecognizeDish handles POST /ai/recognize-dish.
|
||||
// RecognizeDish handles POST /ai/recognize-dish (async).
|
||||
// Enqueues the image for AI processing and returns 202 Accepted with a job_id.
|
||||
// Body: {"image_base64": "...", "mime_type": "image/jpeg"}
|
||||
func (h *Handler) RecognizeDish(w http.ResponseWriter, r *http.Request) {
|
||||
func (handler *Handler) RecognizeDish(responseWriter http.ResponseWriter, request *http.Request) {
|
||||
var req imageRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.ImageBase64 == "" {
|
||||
writeErrorJSON(w, http.StatusBadRequest, "image_base64 is required")
|
||||
if decodeError := json.NewDecoder(request.Body).Decode(&req); decodeError != nil || req.ImageBase64 == "" {
|
||||
writeErrorJSON(responseWriter, http.StatusBadRequest, "image_base64 is required")
|
||||
return
|
||||
}
|
||||
|
||||
lang := locale.FromContext(r.Context())
|
||||
result, err := h.recognizer.RecognizeDish(r.Context(), req.ImageBase64, req.MimeType, lang)
|
||||
if err != nil {
|
||||
slog.Error("recognize dish", "err", err)
|
||||
writeErrorJSON(w, http.StatusServiceUnavailable, "recognition failed, please try again")
|
||||
userID := middleware.UserIDFromCtx(request.Context())
|
||||
userPlan := middleware.UserPlanFromCtx(request.Context())
|
||||
lang := locale.FromContext(request.Context())
|
||||
|
||||
job := &Job{
|
||||
UserID: userID,
|
||||
UserPlan: userPlan,
|
||||
ImageBase64: req.ImageBase64,
|
||||
MimeType: req.MimeType,
|
||||
Lang: lang,
|
||||
}
|
||||
if insertError := handler.jobRepo.InsertJob(request.Context(), job); insertError != nil {
|
||||
slog.Error("insert recognition job", "err", insertError)
|
||||
writeErrorJSON(responseWriter, http.StatusInternalServerError, "failed to create job")
|
||||
return
|
||||
}
|
||||
|
||||
// Resolve dish_id and recipe_id for each candidate in parallel.
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
for i := range result.Candidates {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
candidate := result.Candidates[i]
|
||||
dishID, created, findError := h.dishRepo.FindOrCreate(r.Context(), candidate.DishName)
|
||||
if findError != nil {
|
||||
slog.Warn("find or create dish", "name", candidate.DishName, "err", findError)
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
result.Candidates[i].DishID = &dishID
|
||||
mu.Unlock()
|
||||
if created {
|
||||
go h.enrichDishInBackground(dishID, candidate.DishName)
|
||||
}
|
||||
|
||||
recipeID, _, recipeError := h.dishRepo.FindOrCreateRecipe(
|
||||
r.Context(), dishID,
|
||||
candidate.Calories, candidate.ProteinG, candidate.FatG, candidate.CarbsG,
|
||||
)
|
||||
if recipeError != nil {
|
||||
slog.Warn("find or create recipe", "dish_id", dishID, "err", recipeError)
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
result.Candidates[i].RecipeID = &recipeID
|
||||
mu.Unlock()
|
||||
}(i)
|
||||
position, positionError := handler.jobRepo.QueuePosition(request.Context(), userPlan, job.CreatedAt)
|
||||
if positionError != nil {
|
||||
position = 0
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
writeJSON(w, http.StatusOK, result)
|
||||
topic := TopicFree
|
||||
if userPlan == "paid" {
|
||||
topic = TopicPaid
|
||||
}
|
||||
if publishError := handler.kafkaProducer.Publish(request.Context(), topic, job.ID); publishError != nil {
|
||||
slog.Error("publish recognition job", "job_id", job.ID, "err", publishError)
|
||||
writeErrorJSON(responseWriter, http.StatusInternalServerError, "failed to enqueue job")
|
||||
return
|
||||
}
|
||||
|
||||
estimatedSeconds := (position + 1) * 6
|
||||
writeJSON(responseWriter, http.StatusAccepted, map[string]any{
|
||||
"job_id": job.ID,
|
||||
"queue_position": position,
|
||||
"estimated_seconds": estimatedSeconds,
|
||||
})
|
||||
}
|
||||
|
||||
// GetJobStream handles GET /ai/jobs/{id}/stream — SSE endpoint for job updates.
// All streaming work (subscribing to the broker, writing events) happens in
// SSEBroker.ServeSSE; this method only delegates.
func (handler *Handler) GetJobStream(responseWriter http.ResponseWriter, request *http.Request) {
	handler.sseBroker.ServeSSE(responseWriter, request)
}
|
||||
|
||||
// GetJob handles GET /ai/jobs/{id} — fetches a job result (for app re-open after backgrounding).
|
||||
func (handler *Handler) GetJob(responseWriter http.ResponseWriter, request *http.Request) {
|
||||
jobID := chi.URLParam(request, "id")
|
||||
userID := middleware.UserIDFromCtx(request.Context())
|
||||
|
||||
job, fetchError := handler.jobRepo.GetJobByID(request.Context(), jobID)
|
||||
if fetchError != nil {
|
||||
writeErrorJSON(responseWriter, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
if job.UserID != userID {
|
||||
writeErrorJSON(responseWriter, http.StatusForbidden, "forbidden")
|
||||
return
|
||||
}
|
||||
writeJSON(responseWriter, http.StatusOK, job)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -214,8 +248,8 @@ func (h *Handler) RecognizeDish(w http.ResponseWriter, r *http.Request) {
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// enrichItems matches each recognized item against ingredient_mappings.
|
||||
// Items without a match trigger a Gemini classification call and upsert into the DB.
|
||||
func (h *Handler) enrichItems(ctx context.Context, items []ai.RecognizedItem) []EnrichedItem {
|
||||
// Items without a match trigger a classification call and upsert into the DB.
|
||||
func (handler *Handler) enrichItems(ctx context.Context, items []ai.RecognizedItem) []EnrichedItem {
|
||||
result := make([]EnrichedItem, 0, len(items))
|
||||
for _, item := range items {
|
||||
enriched := EnrichedItem{
|
||||
@@ -227,13 +261,12 @@ func (h *Handler) enrichItems(ctx context.Context, items []ai.RecognizedItem) []
|
||||
StorageDays: 7, // sensible default
|
||||
}
|
||||
|
||||
mapping, err := h.ingredientRepo.FuzzyMatch(ctx, item.Name)
|
||||
if err != nil {
|
||||
slog.Warn("fuzzy match ingredient", "name", item.Name, "err", err)
|
||||
mapping, matchError := handler.ingredientRepo.FuzzyMatch(ctx, item.Name)
|
||||
if matchError != nil {
|
||||
slog.Warn("fuzzy match ingredient", "name", item.Name, "err", matchError)
|
||||
}
|
||||
|
||||
if mapping != nil {
|
||||
// Found existing mapping — use its canonical data.
|
||||
id := mapping.ID
|
||||
enriched.MappingID = &id
|
||||
if mapping.DefaultUnit != nil {
|
||||
@@ -246,12 +279,11 @@ func (h *Handler) enrichItems(ctx context.Context, items []ai.RecognizedItem) []
|
||||
enriched.Category = *mapping.Category
|
||||
}
|
||||
} else {
|
||||
// No mapping — ask AI to classify and save for future reuse.
|
||||
classification, err := h.recognizer.ClassifyIngredient(ctx, item.Name)
|
||||
if err != nil {
|
||||
slog.Warn("classify unknown ingredient", "name", item.Name, "err", err)
|
||||
classification, classifyError := handler.recognizer.ClassifyIngredient(ctx, item.Name)
|
||||
if classifyError != nil {
|
||||
slog.Warn("classify unknown ingredient", "name", item.Name, "err", classifyError)
|
||||
} else {
|
||||
saved := h.saveClassification(ctx, classification)
|
||||
saved := handler.saveClassification(ctx, classification)
|
||||
if saved != nil {
|
||||
id := saved.ID
|
||||
enriched.MappingID = &id
|
||||
@@ -267,41 +299,41 @@ func (h *Handler) enrichItems(ctx context.Context, items []ai.RecognizedItem) []
|
||||
}
|
||||
|
||||
// saveClassification upserts an AI-produced ingredient classification into the DB.
|
||||
func (h *Handler) saveClassification(ctx context.Context, c *ai.IngredientClassification) *ingredient.IngredientMapping {
|
||||
if c == nil || c.CanonicalName == "" {
|
||||
func (handler *Handler) saveClassification(ctx context.Context, classification *ai.IngredientClassification) *ingredient.IngredientMapping {
|
||||
if classification == nil || classification.CanonicalName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
m := &ingredient.IngredientMapping{
|
||||
CanonicalName: c.CanonicalName,
|
||||
Category: strPtr(c.Category),
|
||||
DefaultUnit: strPtr(c.DefaultUnit),
|
||||
CaloriesPer100g: c.CaloriesPer100g,
|
||||
ProteinPer100g: c.ProteinPer100g,
|
||||
FatPer100g: c.FatPer100g,
|
||||
CarbsPer100g: c.CarbsPer100g,
|
||||
StorageDays: intPtr(c.StorageDays),
|
||||
mapping := &ingredient.IngredientMapping{
|
||||
CanonicalName: classification.CanonicalName,
|
||||
Category: strPtr(classification.Category),
|
||||
DefaultUnit: strPtr(classification.DefaultUnit),
|
||||
CaloriesPer100g: classification.CaloriesPer100g,
|
||||
ProteinPer100g: classification.ProteinPer100g,
|
||||
FatPer100g: classification.FatPer100g,
|
||||
CarbsPer100g: classification.CarbsPer100g,
|
||||
StorageDays: intPtr(classification.StorageDays),
|
||||
}
|
||||
|
||||
saved, err := h.ingredientRepo.Upsert(ctx, m)
|
||||
if err != nil {
|
||||
slog.Warn("upsert classified ingredient", "name", c.CanonicalName, "err", err)
|
||||
saved, upsertError := handler.ingredientRepo.Upsert(ctx, mapping)
|
||||
if upsertError != nil {
|
||||
slog.Warn("upsert classified ingredient", "name", classification.CanonicalName, "err", upsertError)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(c.Aliases) > 0 {
|
||||
if err := h.ingredientRepo.UpsertAliases(ctx, saved.ID, "en", c.Aliases); err != nil {
|
||||
slog.Warn("upsert ingredient aliases", "id", saved.ID, "err", err)
|
||||
if len(classification.Aliases) > 0 {
|
||||
if aliasError := handler.ingredientRepo.UpsertAliases(ctx, saved.ID, "en", classification.Aliases); aliasError != nil {
|
||||
slog.Warn("upsert ingredient aliases", "id", saved.ID, "err", aliasError)
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range c.Translations {
|
||||
if err := h.ingredientRepo.UpsertTranslation(ctx, saved.ID, t.Lang, t.Name); err != nil {
|
||||
slog.Warn("upsert ingredient translation", "id", saved.ID, "lang", t.Lang, "err", err)
|
||||
for _, translation := range classification.Translations {
|
||||
if translationError := handler.ingredientRepo.UpsertTranslation(ctx, saved.ID, translation.Lang, translation.Name); translationError != nil {
|
||||
slog.Warn("upsert ingredient translation", "id", saved.ID, "lang", translation.Lang, "err", translationError)
|
||||
}
|
||||
if len(t.Aliases) > 0 {
|
||||
if err := h.ingredientRepo.UpsertAliases(ctx, saved.ID, t.Lang, t.Aliases); err != nil {
|
||||
slog.Warn("upsert ingredient translation aliases", "id", saved.ID, "lang", t.Lang, "err", err)
|
||||
if len(translation.Aliases) > 0 {
|
||||
if aliasError := handler.ingredientRepo.UpsertAliases(ctx, saved.ID, translation.Lang, translation.Aliases); aliasError != nil {
|
||||
slog.Warn("upsert ingredient translation aliases", "id", saved.ID, "lang", translation.Lang, "err", aliasError)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -309,58 +341,6 @@ func (h *Handler) saveClassification(ctx context.Context, c *ai.IngredientClassi
|
||||
return saved
|
||||
}
|
||||
|
||||
// enrichDishInBackground generates name translations for a newly created dish stub.
|
||||
// Recipe creation is handled synchronously in RecognizeDish.
|
||||
// Runs as a fire-and-forget goroutine so it never blocks the HTTP response.
|
||||
func (h *Handler) enrichDishInBackground(dishID, dishName string) {
|
||||
enrichContext := context.Background()
|
||||
|
||||
translations, translateError := h.recognizer.TranslateDishName(enrichContext, dishName)
|
||||
if translateError != nil {
|
||||
slog.Warn("translate dish name", "name", dishName, "err", translateError)
|
||||
return
|
||||
}
|
||||
for lang, translatedName := range translations {
|
||||
if upsertError := h.dishRepo.UpsertTranslation(enrichContext, dishID, lang, translatedName); upsertError != nil {
|
||||
slog.Warn("upsert dish translation", "dish_id", dishID, "lang", lang, "err", upsertError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// aiRecipeToCreateRequest converts an AI-generated recipe into a dish.CreateRequest.
|
||||
func aiRecipeToCreateRequest(recipe *ai.Recipe) dish.CreateRequest {
|
||||
ingredients := make([]dish.IngredientInput, len(recipe.Ingredients))
|
||||
for i, ingredient := range recipe.Ingredients {
|
||||
ingredients[i] = dish.IngredientInput{
|
||||
Name: ingredient.Name, Amount: ingredient.Amount, Unit: ingredient.Unit,
|
||||
}
|
||||
}
|
||||
steps := make([]dish.StepInput, len(recipe.Steps))
|
||||
for i, step := range recipe.Steps {
|
||||
steps[i] = dish.StepInput{
|
||||
Number: step.Number, Description: step.Description, TimerSeconds: step.TimerSeconds,
|
||||
}
|
||||
}
|
||||
return dish.CreateRequest{
|
||||
Name: recipe.Title,
|
||||
Description: recipe.Description,
|
||||
CuisineSlug: recipe.Cuisine,
|
||||
ImageURL: recipe.ImageURL,
|
||||
Tags: recipe.Tags,
|
||||
Source: "ai",
|
||||
Difficulty: recipe.Difficulty,
|
||||
PrepTimeMin: recipe.PrepTimeMin,
|
||||
CookTimeMin: recipe.CookTimeMin,
|
||||
Servings: recipe.Servings,
|
||||
Calories: recipe.Nutrition.Calories,
|
||||
Protein: recipe.Nutrition.ProteinG,
|
||||
Fat: recipe.Nutrition.FatG,
|
||||
Carbs: recipe.Nutrition.CarbsG,
|
||||
Ingredients: ingredients,
|
||||
Steps: steps,
|
||||
}
|
||||
}
|
||||
|
||||
// MergeAndDeduplicate combines results from multiple images.
|
||||
// Items sharing the same name (case-insensitive) have their quantities summed.
|
||||
func MergeAndDeduplicate(batches [][]ai.RecognizedItem) []ai.RecognizedItem {
|
||||
@@ -373,7 +353,6 @@ func MergeAndDeduplicate(batches [][]ai.RecognizedItem) []ai.RecognizedItem {
|
||||
key := normalizeName(item.Name)
|
||||
if existing, ok := seen[key]; ok {
|
||||
existing.Quantity += item.Quantity
|
||||
// Keep the higher confidence estimate.
|
||||
if item.Confidence > existing.Confidence {
|
||||
existing.Confidence = item.Confidence
|
||||
}
|
||||
@@ -414,14 +393,14 @@ type errorResponse struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
func writeErrorJSON(w http.ResponseWriter, status int, msg string) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
_ = json.NewEncoder(w).Encode(errorResponse{Error: msg})
|
||||
func writeErrorJSON(responseWriter http.ResponseWriter, status int, msg string) {
|
||||
responseWriter.Header().Set("Content-Type", "application/json")
|
||||
responseWriter.WriteHeader(status)
|
||||
_ = json.NewEncoder(responseWriter).Encode(errorResponse{Error: msg})
|
||||
}
|
||||
|
||||
func writeJSON(w http.ResponseWriter, status int, v any) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
func writeJSON(responseWriter http.ResponseWriter, status int, value any) {
|
||||
responseWriter.Header().Set("Content-Type", "application/json")
|
||||
responseWriter.WriteHeader(status)
|
||||
_ = json.NewEncoder(responseWriter).Encode(value)
|
||||
}
|
||||
|
||||
37
backend/internal/domain/recognition/job.go
Normal file
37
backend/internal/domain/recognition/job.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package recognition
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/food-ai/backend/internal/adapters/ai"
|
||||
)
|
||||
|
||||
// Job status constants. Lifecycle: pending → processing → done | failed.
const (
	JobStatusPending    = "pending"
	JobStatusProcessing = "processing"
	JobStatusDone       = "done"
	JobStatusFailed     = "failed"
)

// Kafka topic names. Paid-plan jobs are published to their own topic so
// consumers can service them ahead of the free queue.
const (
	TopicPaid = "ai.recognize.paid"
	TopicFree = "ai.recognize.free"
)
|
||||
|
||||
// Job represents an async dish recognition task stored in recognition_jobs.
type Job struct {
	ID          string // primary key, assigned by the DB on insert
	UserID      string // owner; read endpoints reject other users
	UserPlan    string // "paid" selects TopicPaid; anything else TopicFree
	ImageBase64 string // raw image payload handed to the recognizer
	MimeType    string
	Lang        string
	Status      string         // one of the JobStatus* constants
	Result      *ai.DishResult // set when Status == JobStatusDone
	Error       *string        // set when Status == JobStatusFailed
	CreatedAt   time.Time      // set by the DB on insert
	StartedAt   *time.Time     // stamped when the job enters "processing"
	CompletedAt *time.Time     // stamped on any terminal transition
}
|
||||
125
backend/internal/domain/recognition/job_repository.go
Normal file
125
backend/internal/domain/recognition/job_repository.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package recognition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/food-ai/backend/internal/adapters/ai"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// JobRepository provides all DB operations on recognition_jobs.
type JobRepository interface {
	// InsertJob persists a new job and fills in job.ID and job.CreatedAt.
	InsertJob(ctx context.Context, job *Job) error
	// GetJobByID fetches a job by primary key.
	GetJobByID(ctx context.Context, jobID string) (*Job, error)
	// UpdateJobStatus transitions a job and records its result or error message.
	UpdateJobStatus(ctx context.Context, jobID, status string, result *ai.DishResult, errMsg *string) error
	// QueuePosition reports how many unfinished jobs of the same plan were
	// created before createdAt.
	QueuePosition(ctx context.Context, userPlan string, createdAt time.Time) (int, error)
	// NotifyJobUpdate emits a PostgreSQL NOTIFY on the job_update channel.
	NotifyJobUpdate(ctx context.Context, jobID string) error
}
|
||||
|
||||
// PostgresJobRepository implements JobRepository using a pgxpool.
|
||||
type PostgresJobRepository struct {
|
||||
pool *pgxpool.Pool
|
||||
}
|
||||
|
||||
// NewJobRepository creates a new PostgresJobRepository.
|
||||
func NewJobRepository(pool *pgxpool.Pool) *PostgresJobRepository {
|
||||
return &PostgresJobRepository{pool: pool}
|
||||
}
|
||||
|
||||
// InsertJob inserts a new recognition job and populates the ID and CreatedAt
// fields on the passed-in job. The status column is not set here — presumably
// it defaults to 'pending' in the schema; confirm against the migration.
func (repository *PostgresJobRepository) InsertJob(queryContext context.Context, job *Job) error {
	// RETURNING fills the DB-generated fields in a single round trip.
	return repository.pool.QueryRow(queryContext,
		`INSERT INTO recognition_jobs (user_id, user_plan, image_base64, mime_type, lang)
		 VALUES ($1, $2, $3, $4, $5)
		 RETURNING id, created_at`,
		job.UserID, job.UserPlan, job.ImageBase64, job.MimeType, job.Lang,
	).Scan(&job.ID, &job.CreatedAt)
}
|
||||
|
||||
// GetJobByID fetches a single job by primary key. The row error from pgx
// (e.g. no-rows) is returned unwrapped when the job does not exist.
func (repository *PostgresJobRepository) GetJobByID(queryContext context.Context, jobID string) (*Job, error) {
	var job Job
	var resultJSON []byte

	queryError := repository.pool.QueryRow(queryContext,
		`SELECT id, user_id, user_plan, image_base64, mime_type, lang, status,
		        result, error, created_at, started_at, completed_at
		 FROM recognition_jobs WHERE id = $1`,
		jobID,
	).Scan(
		&job.ID, &job.UserID, &job.UserPlan,
		&job.ImageBase64, &job.MimeType, &job.Lang, &job.Status,
		&resultJSON, &job.Error, &job.CreatedAt, &job.StartedAt, &job.CompletedAt,
	)
	if queryError != nil {
		return nil, queryError
	}

	// The result column is stored as JSON (see UpdateJobStatus) and decoded
	// back into ai.DishResult here.
	if resultJSON != nil {
		var dishResult ai.DishResult
		// Best-effort decode: a corrupt blob leaves job.Result nil instead of
		// failing the fetch. NOTE(review): the unmarshal error is silently
		// dropped — consider at least logging it.
		if unmarshalError := json.Unmarshal(resultJSON, &dishResult); unmarshalError == nil {
			job.Result = &dishResult
		}
	}

	return &job, nil
}
|
||||
|
||||
// UpdateJobStatus transitions a job to a new status and records the result or error.
|
||||
func (repository *PostgresJobRepository) UpdateJobStatus(
|
||||
queryContext context.Context,
|
||||
jobID, status string,
|
||||
result *ai.DishResult,
|
||||
errMsg *string,
|
||||
) error {
|
||||
var resultJSON []byte
|
||||
if result != nil {
|
||||
marshalledBytes, marshalError := json.Marshal(result)
|
||||
if marshalError != nil {
|
||||
return marshalError
|
||||
}
|
||||
resultJSON = marshalledBytes
|
||||
}
|
||||
|
||||
switch status {
|
||||
case JobStatusProcessing:
|
||||
_, updateError := repository.pool.Exec(queryContext,
|
||||
`UPDATE recognition_jobs SET status = $1, started_at = now() WHERE id = $2`,
|
||||
status, jobID,
|
||||
)
|
||||
return updateError
|
||||
default:
|
||||
_, updateError := repository.pool.Exec(queryContext,
|
||||
`UPDATE recognition_jobs
|
||||
SET status = $1, result = $2, error = $3, completed_at = now()
|
||||
WHERE id = $4`,
|
||||
status, resultJSON, errMsg, jobID,
|
||||
)
|
||||
return updateError
|
||||
}
|
||||
}
|
||||
|
||||
// QueuePosition counts jobs ahead of createdAt in the same plan's queue.
|
||||
func (repository *PostgresJobRepository) QueuePosition(
|
||||
queryContext context.Context,
|
||||
userPlan string,
|
||||
createdAt time.Time,
|
||||
) (int, error) {
|
||||
var position int
|
||||
queryError := repository.pool.QueryRow(queryContext,
|
||||
`SELECT COUNT(*) FROM recognition_jobs
|
||||
WHERE status IN ('pending', 'processing')
|
||||
AND user_plan = $1
|
||||
AND created_at < $2`,
|
||||
userPlan, createdAt,
|
||||
).Scan(&position)
|
||||
return position, queryError
|
||||
}
|
||||
|
||||
// NotifyJobUpdate sends a PostgreSQL NOTIFY on the job_update channel.
|
||||
func (repository *PostgresJobRepository) NotifyJobUpdate(queryContext context.Context, jobID string) error {
|
||||
_, notifyError := repository.pool.Exec(queryContext, `SELECT pg_notify('job_update', $1)`, jobID)
|
||||
return notifyError
|
||||
}
|
||||
206
backend/internal/domain/recognition/sse.go
Normal file
206
backend/internal/domain/recognition/sse.go
Normal file
@@ -0,0 +1,206 @@
|
||||
package recognition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
|
||||
"github.com/food-ai/backend/internal/infra/middleware"
|
||||
)
|
||||
|
||||
// sseEvent is a single Server-Sent Event: an event name plus its payload
// already serialized to its JSON wire form.
type sseEvent struct {
	name string
	data string
}
|
||||
|
||||
// SSEBroker manages Server-Sent Events for job status updates.
// It listens on the PostgreSQL "job_update" NOTIFY channel and fans out
// events to all HTTP clients currently streaming a given job.
type SSEBroker struct {
	pool    *pgxpool.Pool // source of the dedicated LISTEN connection
	jobRepo JobRepository // used to load job state on each notification

	mu      sync.RWMutex
	clients map[string][]chan sseEvent // job ID → subscriber channels, guarded by mu
}
|
||||
|
||||
// NewSSEBroker creates a new SSEBroker. Call Start before serving SSE
// requests so the LISTEN loop is running.
func NewSSEBroker(pool *pgxpool.Pool, jobRepo JobRepository) *SSEBroker {
	return &SSEBroker{
		pool:    pool,
		jobRepo: jobRepo,
		clients: make(map[string][]chan sseEvent),
	}
}
|
||||
|
||||
// Start launches the PostgreSQL LISTEN loop in a background goroutine.
// The loop exits when brokerContext is cancelled.
func (broker *SSEBroker) Start(brokerContext context.Context) {
	go broker.listenLoop(brokerContext)
}
|
||||
|
||||
// listenLoop holds one pooled connection for the broker's lifetime, issues
// LISTEN job_update on it, and fans out every notification payload (a job ID)
// to subscribed clients. It runs until the context is cancelled or an error
// occurs.
func (broker *SSEBroker) listenLoop(brokerContext context.Context) {
	conn, acquireError := broker.pool.Acquire(brokerContext)
	if acquireError != nil {
		slog.Error("SSEBroker: acquire PG connection", "err", acquireError)
		return
	}
	defer conn.Release()

	if _, listenError := conn.Exec(brokerContext, "LISTEN job_update"); listenError != nil {
		slog.Error("SSEBroker: LISTEN job_update", "err", listenError)
		return
	}

	for {
		notification, waitError := conn.Conn().WaitForNotification(brokerContext)
		// Context cancellation is a normal shutdown, not an error.
		if brokerContext.Err() != nil {
			return
		}
		if waitError != nil {
			// NOTE(review): a transient connection error permanently stops this
			// loop, so subscribed clients silently stop receiving updates.
			// Consider reconnect-with-backoff instead of returning.
			slog.Error("SSEBroker: wait for notification", "err", waitError)
			return
		}
		// Payload is the job ID sent by NotifyJobUpdate.
		broker.fanOut(brokerContext, notification.Payload)
	}
}
|
||||
|
||||
func (broker *SSEBroker) subscribe(jobID string) chan sseEvent {
|
||||
channel := make(chan sseEvent, 10)
|
||||
broker.mu.Lock()
|
||||
broker.clients[jobID] = append(broker.clients[jobID], channel)
|
||||
broker.mu.Unlock()
|
||||
return channel
|
||||
}
|
||||
|
||||
func (broker *SSEBroker) unsubscribe(jobID string, channel chan sseEvent) {
|
||||
broker.mu.Lock()
|
||||
defer broker.mu.Unlock()
|
||||
existing := broker.clients[jobID]
|
||||
for index, existing := range existing {
|
||||
if existing == channel {
|
||||
broker.clients[jobID] = append(broker.clients[jobID][:index], broker.clients[jobID][index+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(broker.clients[jobID]) == 0 {
|
||||
delete(broker.clients, jobID)
|
||||
}
|
||||
}
|
||||
|
||||
func (broker *SSEBroker) fanOut(fanContext context.Context, jobID string) {
|
||||
job, fetchError := broker.jobRepo.GetJobByID(fanContext, jobID)
|
||||
if fetchError != nil {
|
||||
slog.Warn("SSEBroker: get job for fan-out", "job_id", jobID, "err", fetchError)
|
||||
return
|
||||
}
|
||||
|
||||
event, ok := jobToSSEEvent(job)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
broker.mu.RLock()
|
||||
channels := make([]chan sseEvent, len(broker.clients[jobID]))
|
||||
copy(channels, broker.clients[jobID])
|
||||
broker.mu.RUnlock()
|
||||
|
||||
for _, channel := range channels {
|
||||
select {
|
||||
case channel <- event:
|
||||
default:
|
||||
// channel full; skip this delivery
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func jobToSSEEvent(job *Job) (sseEvent, bool) {
|
||||
switch job.Status {
|
||||
case JobStatusProcessing:
|
||||
return sseEvent{name: "processing", data: "{}"}, true
|
||||
case JobStatusDone:
|
||||
resultJSON, marshalError := json.Marshal(job.Result)
|
||||
if marshalError != nil {
|
||||
return sseEvent{}, false
|
||||
}
|
||||
return sseEvent{name: "done", data: string(resultJSON)}, true
|
||||
case JobStatusFailed:
|
||||
errMsg := "recognition failed, please try again"
|
||||
if job.Error != nil {
|
||||
errMsg = *job.Error
|
||||
}
|
||||
errorData, _ := json.Marshal(map[string]string{"error": errMsg})
|
||||
return sseEvent{name: "failed", data: string(errorData)}, true
|
||||
default:
|
||||
return sseEvent{}, false
|
||||
}
|
||||
}
|
||||
|
||||
// ServeSSE handles GET /ai/jobs/{id}/stream — streams SSE events until the job completes.
|
||||
func (broker *SSEBroker) ServeSSE(responseWriter http.ResponseWriter, request *http.Request) {
|
||||
jobID := chi.URLParam(request, "id")
|
||||
userID := middleware.UserIDFromCtx(request.Context())
|
||||
|
||||
job, fetchError := broker.jobRepo.GetJobByID(request.Context(), jobID)
|
||||
if fetchError != nil {
|
||||
writeErrorJSON(responseWriter, http.StatusNotFound, "job not found")
|
||||
return
|
||||
}
|
||||
if job.UserID != userID {
|
||||
writeErrorJSON(responseWriter, http.StatusForbidden, "forbidden")
|
||||
return
|
||||
}
|
||||
|
||||
flusher, supported := responseWriter.(http.Flusher)
|
||||
if !supported {
|
||||
writeErrorJSON(responseWriter, http.StatusInternalServerError, "streaming not supported")
|
||||
return
|
||||
}
|
||||
|
||||
responseWriter.Header().Set("Content-Type", "text/event-stream")
|
||||
responseWriter.Header().Set("Cache-Control", "no-cache")
|
||||
responseWriter.Header().Set("Connection", "keep-alive")
|
||||
responseWriter.Header().Set("X-Accel-Buffering", "no")
|
||||
|
||||
// If the job is already in a terminal state, send the event immediately.
|
||||
if job.Status == JobStatusDone || job.Status == JobStatusFailed {
|
||||
if event, ok := jobToSSEEvent(job); ok {
|
||||
fmt.Fprintf(responseWriter, "event: %s\ndata: %s\n\n", event.name, event.data)
|
||||
flusher.Flush()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Subscribe to future notifications before sending the queued event to
|
||||
// avoid a race where the job completes between reading the current state
|
||||
// and registering the subscriber.
|
||||
eventChannel := broker.subscribe(jobID)
|
||||
defer broker.unsubscribe(jobID, eventChannel)
|
||||
|
||||
// Send initial queued event with estimated wait.
|
||||
position, _ := broker.jobRepo.QueuePosition(request.Context(), job.UserPlan, job.CreatedAt)
|
||||
estimatedSeconds := (position + 1) * 6
|
||||
queuedData, _ := json.Marshal(map[string]any{
|
||||
"position": position,
|
||||
"estimated_seconds": estimatedSeconds,
|
||||
})
|
||||
fmt.Fprintf(responseWriter, "event: queued\ndata: %s\n\n", queuedData)
|
||||
flusher.Flush()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-eventChannel:
|
||||
fmt.Fprintf(responseWriter, "event: %s\ndata: %s\n\n", event.name, event.data)
|
||||
flusher.Flush()
|
||||
if event.name == "done" || event.name == "failed" {
|
||||
return
|
||||
}
|
||||
case <-request.Context().Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
165
backend/internal/domain/recognition/worker.go
Normal file
165
backend/internal/domain/recognition/worker.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package recognition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/food-ai/backend/internal/adapters/kafka"
|
||||
)
|
||||
|
||||
// defaultWorkerCount is the number of concurrent worker goroutines a pool runs.
const defaultWorkerCount = 5
|
||||
|
||||
// WorkerPool processes dish recognition jobs from Kafka with priority queuing.
// Paid jobs are processed before free jobs.
type WorkerPool struct {
	// jobRepo persists job state transitions and emits NOTIFY for SSE.
	jobRepo JobRepository
	// recognizer performs AI recognition of the uploaded image.
	recognizer Recognizer
	// dishRepo resolves/creates dishes and recipes for candidates.
	dishRepo DishRepository
	// paidConsumer feeds paidJobs; freeConsumer feeds freeJobs
	// (presumably one Kafka topic each — confirm against Consumer.Run).
	paidConsumer *kafka.Consumer
	freeConsumer *kafka.Consumer
	// workerCount is the number of runWorker goroutines Start launches.
	workerCount int
	// paidJobs and freeJobs carry job IDs from the consumers to workers;
	// paidJobs is drained with strict priority in runWorker.
	paidJobs chan string
	freeJobs chan string
}
|
||||
|
||||
// NewWorkerPool creates a WorkerPool with five workers.
|
||||
func NewWorkerPool(
|
||||
jobRepo JobRepository,
|
||||
recognizer Recognizer,
|
||||
dishRepo DishRepository,
|
||||
paidConsumer *kafka.Consumer,
|
||||
freeConsumer *kafka.Consumer,
|
||||
) *WorkerPool {
|
||||
return &WorkerPool{
|
||||
jobRepo: jobRepo,
|
||||
recognizer: recognizer,
|
||||
dishRepo: dishRepo,
|
||||
paidConsumer: paidConsumer,
|
||||
freeConsumer: freeConsumer,
|
||||
workerCount: defaultWorkerCount,
|
||||
paidJobs: make(chan string, 100),
|
||||
freeJobs: make(chan string, 100),
|
||||
}
|
||||
}
|
||||
|
||||
// Start launches the Kafka feeder goroutines and all worker goroutines.
|
||||
func (pool *WorkerPool) Start(workerContext context.Context) {
|
||||
go pool.paidConsumer.Run(workerContext, pool.paidJobs)
|
||||
go pool.freeConsumer.Run(workerContext, pool.freeJobs)
|
||||
for i := 0; i < pool.workerCount; i++ {
|
||||
go pool.runWorker(workerContext)
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *WorkerPool) runWorker(workerContext context.Context) {
|
||||
for {
|
||||
// Priority step: drain paid queue without blocking.
|
||||
select {
|
||||
case jobID := <-pool.paidJobs:
|
||||
pool.processJob(workerContext, jobID)
|
||||
continue
|
||||
case <-workerContext.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Fall back to either queue with a 100ms timeout.
|
||||
select {
|
||||
case jobID := <-pool.paidJobs:
|
||||
pool.processJob(workerContext, jobID)
|
||||
case jobID := <-pool.freeJobs:
|
||||
pool.processJob(workerContext, jobID)
|
||||
case <-workerContext.Done():
|
||||
return
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// nothing available; loop again
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *WorkerPool) processJob(workerContext context.Context, jobID string) {
|
||||
job, fetchError := pool.jobRepo.GetJobByID(workerContext, jobID)
|
||||
if fetchError != nil {
|
||||
slog.Error("worker: fetch job", "job_id", jobID, "err", fetchError)
|
||||
return
|
||||
}
|
||||
|
||||
// Transition to processing.
|
||||
if updateError := pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusProcessing, nil, nil); updateError != nil {
|
||||
slog.Error("worker: set processing status", "job_id", jobID, "err", updateError)
|
||||
}
|
||||
if notifyError := pool.jobRepo.NotifyJobUpdate(workerContext, jobID); notifyError != nil {
|
||||
slog.Warn("worker: notify processing", "job_id", jobID, "err", notifyError)
|
||||
}
|
||||
|
||||
// Run AI recognition.
|
||||
result, recognizeError := pool.recognizer.RecognizeDish(workerContext, job.ImageBase64, job.MimeType, job.Lang)
|
||||
if recognizeError != nil {
|
||||
slog.Error("worker: recognize dish", "job_id", jobID, "err", recognizeError)
|
||||
errMsg := "recognition failed, please try again"
|
||||
_ = pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusFailed, nil, &errMsg)
|
||||
_ = pool.jobRepo.NotifyJobUpdate(workerContext, jobID)
|
||||
return
|
||||
}
|
||||
|
||||
// Resolve dish_id and recipe_id for each candidate in parallel.
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
for index := range result.Candidates {
|
||||
wg.Add(1)
|
||||
go func(candidateIndex int) {
|
||||
defer wg.Done()
|
||||
candidate := result.Candidates[candidateIndex]
|
||||
dishID, created, findError := pool.dishRepo.FindOrCreate(workerContext, candidate.DishName)
|
||||
if findError != nil {
|
||||
slog.Warn("worker: find or create dish", "name", candidate.DishName, "err", findError)
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
result.Candidates[candidateIndex].DishID = &dishID
|
||||
mu.Unlock()
|
||||
if created {
|
||||
go enrichDishInBackground(pool.recognizer, pool.dishRepo, dishID, candidate.DishName)
|
||||
}
|
||||
|
||||
recipeID, _, recipeError := pool.dishRepo.FindOrCreateRecipe(
|
||||
workerContext, dishID,
|
||||
candidate.Calories, candidate.ProteinG, candidate.FatG, candidate.CarbsG,
|
||||
)
|
||||
if recipeError != nil {
|
||||
slog.Warn("worker: find or create recipe", "dish_id", dishID, "err", recipeError)
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
result.Candidates[candidateIndex].RecipeID = &recipeID
|
||||
mu.Unlock()
|
||||
}(index)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Transition to done.
|
||||
if updateError := pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusDone, result, nil); updateError != nil {
|
||||
slog.Error("worker: set done status", "job_id", jobID, "err", updateError)
|
||||
}
|
||||
if notifyError := pool.jobRepo.NotifyJobUpdate(workerContext, jobID); notifyError != nil {
|
||||
slog.Warn("worker: notify done", "job_id", jobID, "err", notifyError)
|
||||
}
|
||||
}
|
||||
|
||||
// enrichDishInBackground translates a newly created dish name into all supported languages.
|
||||
// Runs as a fire-and-forget goroutine so it never blocks recognition.
|
||||
func enrichDishInBackground(recognizer Recognizer, dishRepo DishRepository, dishID, dishName string) {
|
||||
enrichContext := context.Background()
|
||||
translations, translateError := recognizer.TranslateDishName(enrichContext, dishName)
|
||||
if translateError != nil {
|
||||
slog.Warn("translate dish name", "name", dishName, "err", translateError)
|
||||
return
|
||||
}
|
||||
for lang, translatedName := range translations {
|
||||
if upsertError := dishRepo.UpsertTranslation(enrichContext, dishID, lang, translatedName); upsertError != nil {
|
||||
slog.Warn("upsert dish translation", "dish_id", dishID, "lang", lang, "err", upsertError)
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user