refactor: split worker into paid/free via WORKER_PLAN env var
Replace dual-consumer priority WorkerPool with a single consumer per
worker process. WORKER_PLAN=paid|free selects the Kafka topic and
consumer group ID (dish-recognition-paid / dish-recognition-free).
docker-compose now runs worker-paid and worker-free as separate
services for independent scaling. Makefile dev target launches both
workers locally.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
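Note on the producer side, which this commit does not touch: whatever enqueues recognition jobs must already route each job to recognition.TopicPaid or recognition.TopicFree for the matching worker to see it. A minimal sketch of that routing, assuming a hypothetical Publisher interface and placeholder topic values (the real producer adapter and topic constants live elsewhere in the repo):

package recognition

import "context"

// Placeholder values; the real TopicPaid/TopicFree constants are defined
// in this package but their values are not visible in this diff.
const (
	TopicPaid = "dish-recognition-paid-topic"
	TopicFree = "dish-recognition-free-topic"
)

// Publisher is a hypothetical stand-in for the real Kafka producer adapter.
type Publisher interface {
	Publish(ctx context.Context, topic string, jobID string) error
}

// EnqueueJob routes a job to the paid or free topic, mirroring the
// worker-side split introduced by this commit.
func EnqueueJob(ctx context.Context, pub Publisher, jobID string, paidPlan bool) error {
	topic := TopicFree
	if paidPlan {
		topic = TopicPaid
	}
	return pub.Publish(ctx, topic, jobID)
}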
Makefile

@@ -1,4 +1,4 @@
-.PHONY: run run-worker dev dev-infra-up dev-infra-down test test-integration lint migrate-up migrate-down migrate-create migrate-status docker-up docker-down docker-logs docker-logs-worker
+.PHONY: run run-worker-paid run-worker-free dev dev-infra-up dev-infra-down test test-integration lint migrate-up migrate-down migrate-create migrate-status docker-up docker-down docker-logs docker-logs-worker
 
 ifneq (,$(wildcard .env))
 include .env
@@ -9,14 +9,18 @@ endif
 run:
 	go run ./cmd/server
 
-run-worker:
-	go run ./cmd/worker
+run-worker-paid:
+	WORKER_PLAN=paid go run ./cmd/worker
 
-# Start only infra (postgres, kafka) in Docker, run server + worker locally
+run-worker-free:
+	WORKER_PLAN=free go run ./cmd/worker
+
+# Start only infra (postgres, kafka) in Docker, run server + both workers locally
 dev: dev-infra-up
 	@trap 'kill 0' INT; \
 	go run ./cmd/server & \
-	go run ./cmd/worker & \
+	WORKER_PLAN=paid go run ./cmd/worker & \
+	WORKER_PLAN=free go run ./cmd/worker & \
 	wait
 
 dev-infra-up:
@@ -61,4 +65,4 @@ docker-logs:
 	docker compose logs -f app
 
 docker-logs-worker:
-	docker compose logs -f worker
+	docker compose logs -f worker-paid worker-free
cmd/worker

@@ -15,6 +15,7 @@ type workerConfig struct {
 	DatabaseURL string `envconfig:"DATABASE_URL" required:"true"`
 	OpenAIAPIKey string `envconfig:"OPENAI_API_KEY" required:"true"`
 	KafkaBrokers []string `envconfig:"KAFKA_BROKERS" default:"kafka:9092"`
+	WorkerPlan string `envconfig:"WORKER_PLAN" default:"free"` // "paid" | "free"
 }
 
 func loadConfig() (*workerConfig, error) {
@@ -40,19 +41,18 @@ func initWorker(workerCfg *workerConfig, pool *pgxpool.Pool) (*WorkerApp, error)
 	dishRepository := dish.NewRepository(pool)
 	jobRepository := recognition.NewJobRepository(pool)
 
-	paidConsumer, paidConsumerError := kafka.NewConsumer(
-		workerCfg.KafkaBrokers, "dish-recognition-workers", recognition.TopicPaid,
-	)
-	if paidConsumerError != nil {
-		return nil, paidConsumerError
-	}
-	freeConsumer, freeConsumerError := kafka.NewConsumer(
-		workerCfg.KafkaBrokers, "dish-recognition-workers", recognition.TopicFree,
-	)
-	if freeConsumerError != nil {
-		return nil, freeConsumerError
+	topic := recognition.TopicFree
+	groupID := "dish-recognition-free"
+	if workerCfg.WorkerPlan == "paid" {
+		topic = recognition.TopicPaid
+		groupID = "dish-recognition-paid"
 	}
 
-	workerPool := recognition.NewWorkerPool(jobRepository, openaiClient, dishRepository, paidConsumer, freeConsumer)
+	consumer, consumerError := kafka.NewConsumer(workerCfg.KafkaBrokers, groupID, topic)
+	if consumerError != nil {
+		return nil, consumerError
+	}
+
+	workerPool := recognition.NewWorkerPool(jobRepository, openaiClient, dishRepository, consumer)
 	return &WorkerApp{workerPool: workerPool}, nil
 }
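One sharp edge in the hunk above: any WORKER_PLAN value other than "paid" (including a typo like "payed") silently falls through to the free topic. A possible hardening, not part of this commit, would be to reject unknown values at config-load time; a sketch with a hypothetical helper that loadConfig could call after envconfig.Process:

package main

import "fmt"

// validatePlan rejects unknown WORKER_PLAN values instead of letting
// initWorker silently fall back to the free topic. Hypothetical helper,
// not part of this commit.
func validatePlan(plan string) error {
	switch plan {
	case "paid", "free":
		return nil
	default:
		return fmt.Errorf(`invalid WORKER_PLAN %q: want "paid" or "free"`, plan)
	}
}

func main() {
	fmt.Println(validatePlan("paid"))  // <nil>
	fmt.Println(validatePlan("payed")) // invalid WORKER_PLAN "payed": want "paid" or "free"
}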
docker-compose

@@ -69,7 +69,7 @@ services:
     volumes:
       - ./firebase-credentials.json:/app/firebase-credentials.json:ro
 
-  worker:
+  worker-paid:
     build:
       context: .
       dockerfile: Dockerfile
@@ -78,10 +78,25 @@ services:
       DATABASE_URL: postgres://food_ai:food_ai_local@postgres:5432/food_ai?sslmode=disable
       OPENAI_API_KEY: ${OPENAI_API_KEY}
       KAFKA_BROKERS: kafka:9092
+      WORKER_PLAN: paid
     depends_on:
       postgres:
         condition: service_healthy
-      kafka:
+      kafka-init:
+        condition: service_completed_successfully
+
+  worker-free:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    command: ["./worker"]
+    environment:
+      DATABASE_URL: postgres://food_ai:food_ai_local@postgres:5432/food_ai?sslmode=disable
+      OPENAI_API_KEY: ${OPENAI_API_KEY}
+      KAFKA_BROKERS: kafka:9092
+      WORKER_PLAN: free
+    depends_on:
+      postgres:
         condition: service_healthy
       kafka-init:
         condition: service_completed_successfully
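Since each plan is now its own compose service with its own consumer group, the tiers can be scaled independently, e.g. `docker compose up -d --scale worker-paid=2 --scale worker-free=1`. Replicas of one service share a group ID, so Kafka rebalances the topic's partitions across them; full parallelism needs at least as many partitions as replicas. Both services gate on `kafka-init` completing, which presumably creates the topics before any consumer joins.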
recognition worker pool

@@ -4,50 +4,42 @@ import (
 	"context"
 	"log/slog"
 	"sync"
-	"time"
 
 	"github.com/food-ai/backend/internal/adapters/kafka"
 )
 
 const defaultWorkerCount = 5
 
-// WorkerPool processes dish recognition jobs from Kafka with priority queuing.
-// Paid jobs are processed before free jobs.
+// WorkerPool processes dish recognition jobs from a single Kafka topic.
 type WorkerPool struct {
 	jobRepo JobRepository
 	recognizer Recognizer
 	dishRepo DishRepository
-	paidConsumer *kafka.Consumer
-	freeConsumer *kafka.Consumer
+	consumer *kafka.Consumer
 	workerCount int
-	paidJobs chan string
-	freeJobs chan string
+	jobs chan string
 }
 
-// NewWorkerPool creates a WorkerPool with five workers.
+// NewWorkerPool creates a WorkerPool with five workers consuming from a single consumer.
 func NewWorkerPool(
 	jobRepo JobRepository,
 	recognizer Recognizer,
 	dishRepo DishRepository,
-	paidConsumer *kafka.Consumer,
-	freeConsumer *kafka.Consumer,
+	consumer *kafka.Consumer,
 ) *WorkerPool {
 	return &WorkerPool{
 		jobRepo: jobRepo,
 		recognizer: recognizer,
 		dishRepo: dishRepo,
-		paidConsumer: paidConsumer,
-		freeConsumer: freeConsumer,
+		consumer: consumer,
 		workerCount: defaultWorkerCount,
-		paidJobs: make(chan string, 100),
-		freeJobs: make(chan string, 100),
+		jobs: make(chan string, 100),
 	}
 }
 
-// Start launches the Kafka feeder goroutines and all worker goroutines.
+// Start launches the Kafka feeder goroutine and all worker goroutines.
 func (pool *WorkerPool) Start(workerContext context.Context) {
-	go pool.paidConsumer.Run(workerContext, pool.paidJobs)
-	go pool.freeConsumer.Run(workerContext, pool.freeJobs)
+	go pool.consumer.Run(workerContext, pool.jobs)
 	for i := 0; i < pool.workerCount; i++ {
 		go pool.runWorker(workerContext)
 	}
@@ -55,26 +47,11 @@ func (pool *WorkerPool) Start(workerContext context.Context) {
 
 func (pool *WorkerPool) runWorker(workerContext context.Context) {
 	for {
-		// Priority step: drain paid queue without blocking.
 		select {
-		case jobID := <-pool.paidJobs:
-			pool.processJob(workerContext, jobID)
-			continue
-		case <-workerContext.Done():
-			return
-		default:
-		}
-
-		// Fall back to either queue with a 100ms timeout.
-		select {
-		case jobID := <-pool.paidJobs:
-			pool.processJob(workerContext, jobID)
-		case jobID := <-pool.freeJobs:
+		case jobID := <-pool.jobs:
 			pool.processJob(workerContext, jobID)
 		case <-workerContext.Done():
 			return
-		case <-time.After(100 * time.Millisecond):
-			// nothing available; loop again
 		}
 	}
 }
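With the in-process paid-over-free bias (and its 100ms polling fallback) removed, prioritization now comes entirely from running the two plans as separate processes with separate capacity. Shutdown is cooperative: when workerContext is cancelled, the feeder goroutine and every worker return via the Done case. A sketch of hypothetical wiring in cmd/worker (the actual main is not shown in this commit); signal.NotifyContext is standard library, the run helper is illustrative:

package main

import (
	"context"
	"os/signal"
	"syscall"
)

// run shows hypothetical wiring, not part of this commit: a signal-aware
// context makes the feeder goroutine and all five workers observe
// workerContext.Done() on SIGINT/SIGTERM and return cleanly.
func run(start func(context.Context)) {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	start(ctx)   // e.g. workerPool.Start(ctx): launches feeder + workers
	<-ctx.Done() // block until a shutdown signal arrives
}

func main() {
	run(func(ctx context.Context) { /* workerPool.Start(ctx) would go here */ })
}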