Files
food-ai/backend/internal/domain/recognition/worker.go
dbastrikin 39193ec13c feat: async dish recognition (Kafka/Watermill/SSE) + remove Wire + consolidate migrations
Async recognition pipeline:
- POST /ai/recognize-dish → 202 {job_id, queue_position, estimated_seconds}
- GET /ai/jobs/{id}/stream — SSE stream: queued → processing → done/failed
- Kafka topics: ai.recognize.paid (3 partitions) + ai.recognize.free (1 partition)
- 5-worker WorkerPool with priority loop (paid consumers first)
- SSEBroker via PostgreSQL LISTEN/NOTIFY
- Kafka adapter migrated from franz-go to Watermill (watermill-kafka/v2)
- Docker Compose: added Kafka + Zookeeper + kafka-init service
- Flutter: recognition_service.dart uses SSE; home_screen shows live job status

Remove google/wire (archived):
- Deleted wire.go (wireinject spec) and wire_gen.go
- Added cmd/server/init.go — plain Go manual DI, same initApp() logic
- Removed github.com/google/wire from go.mod

Consolidate migrations:
- Merged 001_initial_schema + 002_seed_data + 003_recognition_jobs into single 001_initial_schema.sql
- Deleted 002_seed_data.sql and 003_recognition_jobs.sql

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-18 16:32:06 +02:00

166 lines
5.3 KiB
Go

package recognition
import (
"context"
"log/slog"
"sync"
"time"
"github.com/food-ai/backend/internal/adapters/kafka"
)
// defaultWorkerCount is the number of concurrent worker goroutines the pool runs.
const defaultWorkerCount = 5

// WorkerPool processes dish recognition jobs from Kafka with priority queuing.
// Paid jobs are processed before free jobs.
type WorkerPool struct {
	// jobRepo persists job rows and emits LISTEN/NOTIFY updates for the SSE broker.
	jobRepo JobRepository
	// recognizer runs the AI dish-recognition and translation calls.
	recognizer Recognizer
	// dishRepo resolves dish/recipe rows for recognized candidates.
	dishRepo DishRepository
	// paidConsumer feeds job IDs from the paid Kafka topic into paidJobs.
	paidConsumer *kafka.Consumer
	// freeConsumer feeds job IDs from the free Kafka topic into freeJobs.
	freeConsumer *kafka.Consumer
	// workerCount is how many runWorker goroutines Start launches.
	workerCount int
	// paidJobs and freeJobs are buffered job-ID channels; workers drain
	// paidJobs first to give paying users priority.
	paidJobs chan string
	freeJobs chan string
}
// NewWorkerPool creates a WorkerPool with five workers.
// Both job channels are buffered (100 entries each) so the Kafka feeder
// goroutines can stay ahead of the workers during short bursts.
func NewWorkerPool(
	jobRepo JobRepository,
	recognizer Recognizer,
	dishRepo DishRepository,
	paidConsumer *kafka.Consumer,
	freeConsumer *kafka.Consumer,
) *WorkerPool {
	p := new(WorkerPool)
	p.jobRepo = jobRepo
	p.recognizer = recognizer
	p.dishRepo = dishRepo
	p.paidConsumer = paidConsumer
	p.freeConsumer = freeConsumer
	p.workerCount = defaultWorkerCount
	p.paidJobs = make(chan string, 100)
	p.freeJobs = make(chan string, 100)
	return p
}
// Start launches the Kafka feeder goroutines and all worker goroutines.
// All goroutines exit when workerContext is cancelled.
func (pool *WorkerPool) Start(workerContext context.Context) {
	// Spin up the workers first so jobs are consumed as soon as the
	// feeders begin publishing onto the channels.
	for worker := 0; worker < pool.workerCount; worker++ {
		go pool.runWorker(workerContext)
	}
	// Feeders pump job IDs from their Kafka topics into the two queues.
	go pool.paidConsumer.Run(workerContext, pool.paidJobs)
	go pool.freeConsumer.Run(workerContext, pool.freeJobs)
}
// runWorker is the loop for one worker goroutine. Each pass first drains
// the paid queue without blocking (so paid jobs always take priority),
// then waits up to 100ms for a job from either queue before re-checking.
// It returns when workerContext is cancelled.
func (pool *WorkerPool) runWorker(workerContext context.Context) {
	// Reuse a single timer across iterations. The original used
	// time.After inside the select, which allocates a fresh timer on
	// every loop pass — wasteful in a loop that wakes at least every
	// 100ms per worker.
	idleTimer := time.NewTimer(100 * time.Millisecond)
	defer idleTimer.Stop()
	for {
		// Priority step: drain paid queue without blocking.
		select {
		case jobID := <-pool.paidJobs:
			pool.processJob(workerContext, jobID)
			continue
		case <-workerContext.Done():
			return
		default:
		}
		// Fall back to either queue with a 100ms timeout.
		// Stop+drain before Reset per the time.Timer contract (required
		// for correctness before Go 1.23).
		if !idleTimer.Stop() {
			select {
			case <-idleTimer.C:
			default:
			}
		}
		idleTimer.Reset(100 * time.Millisecond)
		select {
		case jobID := <-pool.paidJobs:
			pool.processJob(workerContext, jobID)
		case jobID := <-pool.freeJobs:
			pool.processJob(workerContext, jobID)
		case <-workerContext.Done():
			return
		case <-idleTimer.C:
			// nothing available; loop again
		}
	}
}
// processJob runs one recognition job end to end: it loads the job row,
// moves it to "processing", calls the recognizer, resolves dish/recipe IDs
// for every candidate in parallel, then stores the final status. After each
// status change it notifies via the repo (LISTEN/NOTIFY) so SSE clients see
// live updates. Errors on individual candidates are logged and skipped;
// only a recognizer failure fails the whole job.
func (pool *WorkerPool) processJob(workerContext context.Context, jobID string) {
	job, fetchError := pool.jobRepo.GetJobByID(workerContext, jobID)
	if fetchError != nil {
		// Without the job row we have nothing to update; drop the message.
		slog.Error("worker: fetch job", "job_id", jobID, "err", fetchError)
		return
	}
	// Transition to processing. A failed status write is logged but does not
	// abort the job — recognition can still complete and set done/failed.
	if updateError := pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusProcessing, nil, nil); updateError != nil {
		slog.Error("worker: set processing status", "job_id", jobID, "err", updateError)
	}
	// Notify failures are non-fatal: clients just miss one intermediate event.
	if notifyError := pool.jobRepo.NotifyJobUpdate(workerContext, jobID); notifyError != nil {
		slog.Warn("worker: notify processing", "job_id", jobID, "err", notifyError)
	}
	// Run AI recognition.
	result, recognizeError := pool.recognizer.RecognizeDish(workerContext, job.ImageBase64, job.MimeType, job.Lang)
	if recognizeError != nil {
		slog.Error("worker: recognize dish", "job_id", jobID, "err", recognizeError)
		// Store a generic user-facing message, not the raw error.
		errMsg := "recognition failed, please try again"
		// Best-effort: the job already failed, so just log nothing further here.
		_ = pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusFailed, nil, &errMsg)
		_ = pool.jobRepo.NotifyJobUpdate(workerContext, jobID)
		return
	}
	// Resolve dish_id and recipe_id for each candidate in parallel.
	// Candidate counts are small, so one goroutine per candidate is fine.
	var mu sync.Mutex
	var wg sync.WaitGroup
	for index := range result.Candidates {
		wg.Add(1)
		// Pass the index explicitly so each goroutine works on its own element.
		go func(candidateIndex int) {
			defer wg.Done()
			candidate := result.Candidates[candidateIndex]
			dishID, created, findError := pool.dishRepo.FindOrCreate(workerContext, candidate.DishName)
			if findError != nil {
				// Skip this candidate; the rest of the result is still useful.
				slog.Warn("worker: find or create dish", "name", candidate.DishName, "err", findError)
				return
			}
			mu.Lock()
			result.Candidates[candidateIndex].DishID = &dishID
			mu.Unlock()
			if created {
				// Fire-and-forget: translation enrichment must not delay the job.
				go enrichDishInBackground(pool.recognizer, pool.dishRepo, dishID, candidate.DishName)
			}
			recipeID, _, recipeError := pool.dishRepo.FindOrCreateRecipe(
				workerContext, dishID,
				candidate.Calories, candidate.ProteinG, candidate.FatG, candidate.CarbsG,
			)
			if recipeError != nil {
				// DishID is already set; candidate just lacks a recipe link.
				slog.Warn("worker: find or create recipe", "dish_id", dishID, "err", recipeError)
				return
			}
			mu.Lock()
			result.Candidates[candidateIndex].RecipeID = &recipeID
			mu.Unlock()
		}(index)
	}
	wg.Wait()
	// Transition to done with the (possibly partially enriched) result.
	if updateError := pool.jobRepo.UpdateJobStatus(workerContext, jobID, JobStatusDone, result, nil); updateError != nil {
		slog.Error("worker: set done status", "job_id", jobID, "err", updateError)
	}
	if notifyError := pool.jobRepo.NotifyJobUpdate(workerContext, jobID); notifyError != nil {
		slog.Warn("worker: notify done", "job_id", jobID, "err", notifyError)
	}
}
// enrichDishInBackground translates a newly created dish name into all supported languages.
// Runs as a fire-and-forget goroutine so it never blocks recognition.
// It uses a fresh background context because the originating request may be
// long gone by the time translation finishes.
func enrichDishInBackground(recognizer Recognizer, dishRepo DishRepository, dishID, dishName string) {
	ctx := context.Background()
	localized, err := recognizer.TranslateDishName(ctx, dishName)
	if err != nil {
		slog.Warn("translate dish name", "name", dishName, "err", err)
		return
	}
	// Persist every translation; a single failed upsert is logged and skipped.
	for lang, name := range localized {
		if err := dishRepo.UpsertTranslation(ctx, dishID, lang, name); err != nil {
			slog.Warn("upsert dish translation", "dish_id", dishID, "lang", lang, "err", err)
		}
	}
}