feat: split worker into separate binary (cmd/worker)

Move the Kafka consumers and the WorkerPool out of the server process
into a dedicated worker binary. The server now handles HTTP + SSE only;
the worker handles Kafka consumption and AI processing.

- cmd/worker/main.go + init.go: new binary with minimal config
  (DATABASE_URL, OPENAI_API_KEY, KAFKA_BROKERS)
- cmd/server: remove WorkerPool, paidConsumer, freeConsumer
- Dockerfile: builds both server and worker binaries
- docker-compose.yml: add worker service
- Makefile: add run-worker and docker-logs-worker targets
- README.md: document worker startup and env vars

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
dbastrikin
2026-03-18 20:09:33 +02:00
parent 0f533ccaeb
commit 48fd2baa8c
9 changed files with 186 additions and 28 deletions

View File

@@ -9,9 +9,8 @@ import (
// App bundles the HTTP handler with background services that need lifecycle management.
type App struct {
handler http.Handler
workerPool *recognition.WorkerPool
sseBroker *recognition.SSEBroker
handler http.Handler
sseBroker *recognition.SSEBroker
}
// ServeHTTP implements http.Handler.
@@ -19,9 +18,8 @@ func (application *App) ServeHTTP(responseWriter http.ResponseWriter, request *h
application.handler.ServeHTTP(responseWriter, request)
}
// Start launches the SSE broker's LISTEN loop and the worker pool goroutines.
// Start launches the SSE broker's LISTEN loop.
// Call this once before the HTTP server begins accepting connections.
func (application *App) Start(applicationContext context.Context) {
application.sseBroker.Start(applicationContext)
application.workerPool.Start(applicationContext)
}

View File

@@ -46,24 +46,15 @@ func initApp(appConfig *config.Config, pool *pgxpool.Pool) (*App, error) {
ingredientHandler := ingredient.NewHandler(ingredientRepository)
productHandler := product.NewHandler(productRepository)
// Kafka producer and consumers
// Kafka producer
kafkaProducer, kafkaProducerError := newKafkaProducer(appConfig)
if kafkaProducerError != nil {
return nil, kafkaProducerError
}
paidConsumer, paidConsumerError := newPaidKafkaConsumer(appConfig)
if paidConsumerError != nil {
return nil, paidConsumerError
}
freeConsumer, freeConsumerError := newFreeKafkaConsumer(appConfig)
if freeConsumerError != nil {
return nil, freeConsumerError
}
// Recognition pipeline
jobRepository := recognition.NewJobRepository(pool)
sseBroker := recognition.NewSSEBroker(pool, jobRepository)
workerPool := recognition.NewWorkerPool(jobRepository, openaiClient, dishRepository, paidConsumer, freeConsumer)
recognitionHandler := recognition.NewHandler(openaiClient, ingredientRepository, jobRepository, kafkaProducer, sseBroker)
menuRepository := menu.NewRepository(pool)
@@ -101,8 +92,7 @@ func initApp(appConfig *config.Config, pool *pgxpool.Pool) (*App, error) {
mainTagListHandler,
)
return &App{
handler: httpHandler,
workerPool: workerPool,
sseBroker: sseBroker,
handler: httpHandler,
sseBroker: sseBroker,
}, nil
}

View File

@@ -197,14 +197,6 @@ func newKafkaProducer(appConfig *config.Config) (*kafka.Producer, error) {
return kafka.NewProducer(appConfig.KafkaBrokers)
}
func newPaidKafkaConsumer(appConfig *config.Config) (*kafka.Consumer, error) {
return kafka.NewConsumer(appConfig.KafkaBrokers, "dish-recognition-workers", recognition.TopicPaid)
}
func newFreeKafkaConsumer(appConfig *config.Config) (*kafka.Consumer, error) {
return kafka.NewConsumer(appConfig.KafkaBrokers, "dish-recognition-workers", recognition.TopicFree)
}
// ---------------------------------------------------------------------------
// Interface assertions (compile-time checks)
// ---------------------------------------------------------------------------