feat: async dish recognition (Kafka/Watermill/SSE) + remove Wire + consolidate migrations
Async recognition pipeline:
- POST /ai/recognize-dish → 202 {job_id, queue_position, estimated_seconds}
- GET /ai/jobs/{id}/stream — SSE stream: queued → processing → done/failed
- Kafka topics: ai.recognize.paid (3 partitions) + ai.recognize.free (1 partition)
- 5-worker WorkerPool with priority loop (paid consumers first)
- SSEBroker via PostgreSQL LISTEN/NOTIFY
- Kafka adapter migrated from franz-go to Watermill (watermill-kafka/v2)
- Docker Compose: added Kafka + Zookeeper + kafka-init service
- Flutter: recognition_service.dart uses SSE; home_screen shows live job status
Remove google/wire (archived):
- Deleted wire.go (wireinject spec) and wire_gen.go
- Added cmd/server/init.go — plain Go manual DI, same initApp() logic
- Removed github.com/google/wire from go.mod
Consolidate migrations:
- Merged 001_initial_schema + 002_seed_data + 003_recognition_jobs into single 001_initial_schema.sql
- Deleted 002_seed_data.sql and 003_recognition_jobs.sql
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
125
backend/internal/domain/recognition/job_repository.go
Normal file
125
backend/internal/domain/recognition/job_repository.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package recognition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/food-ai/backend/internal/adapters/ai"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// JobRepository provides all DB operations on recognition_jobs.
type JobRepository interface {
	// InsertJob persists a new job and populates its DB-generated
	// ID and CreatedAt fields on success.
	InsertJob(ctx context.Context, job *Job) error
	// GetJobByID returns the job with the given primary key.
	GetJobByID(ctx context.Context, jobID string) (*Job, error)
	// UpdateJobStatus transitions a job to status, recording either the
	// recognition result or an error message alongside timestamps.
	UpdateJobStatus(ctx context.Context, jobID, status string, result *ai.DishResult, errMsg *string) error
	// QueuePosition reports how many unfinished jobs of the same plan
	// were created before createdAt (i.e. are ahead in the queue).
	QueuePosition(ctx context.Context, userPlan string, createdAt time.Time) (int, error)
	// NotifyJobUpdate emits a PostgreSQL NOTIFY on the job_update channel
	// carrying jobID as the payload.
	NotifyJobUpdate(ctx context.Context, jobID string) error
}
|
||||
|
||||
// PostgresJobRepository implements JobRepository using a pgxpool.
type PostgresJobRepository struct {
	// pool is the shared pgx connection pool used for all queries.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewJobRepository creates a new PostgresJobRepository.
|
||||
func NewJobRepository(pool *pgxpool.Pool) *PostgresJobRepository {
|
||||
return &PostgresJobRepository{pool: pool}
|
||||
}
|
||||
|
||||
// InsertJob inserts a new recognition job and populates the ID and CreatedAt fields.
|
||||
func (repository *PostgresJobRepository) InsertJob(queryContext context.Context, job *Job) error {
|
||||
return repository.pool.QueryRow(queryContext,
|
||||
`INSERT INTO recognition_jobs (user_id, user_plan, image_base64, mime_type, lang)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
RETURNING id, created_at`,
|
||||
job.UserID, job.UserPlan, job.ImageBase64, job.MimeType, job.Lang,
|
||||
).Scan(&job.ID, &job.CreatedAt)
|
||||
}
|
||||
|
||||
// GetJobByID fetches a single job by primary key.
|
||||
func (repository *PostgresJobRepository) GetJobByID(queryContext context.Context, jobID string) (*Job, error) {
|
||||
var job Job
|
||||
var resultJSON []byte
|
||||
|
||||
queryError := repository.pool.QueryRow(queryContext,
|
||||
`SELECT id, user_id, user_plan, image_base64, mime_type, lang, status,
|
||||
result, error, created_at, started_at, completed_at
|
||||
FROM recognition_jobs WHERE id = $1`,
|
||||
jobID,
|
||||
).Scan(
|
||||
&job.ID, &job.UserID, &job.UserPlan,
|
||||
&job.ImageBase64, &job.MimeType, &job.Lang, &job.Status,
|
||||
&resultJSON, &job.Error, &job.CreatedAt, &job.StartedAt, &job.CompletedAt,
|
||||
)
|
||||
if queryError != nil {
|
||||
return nil, queryError
|
||||
}
|
||||
|
||||
if resultJSON != nil {
|
||||
var dishResult ai.DishResult
|
||||
if unmarshalError := json.Unmarshal(resultJSON, &dishResult); unmarshalError == nil {
|
||||
job.Result = &dishResult
|
||||
}
|
||||
}
|
||||
|
||||
return &job, nil
|
||||
}
|
||||
|
||||
// UpdateJobStatus transitions a job to a new status and records the result or error.
|
||||
func (repository *PostgresJobRepository) UpdateJobStatus(
|
||||
queryContext context.Context,
|
||||
jobID, status string,
|
||||
result *ai.DishResult,
|
||||
errMsg *string,
|
||||
) error {
|
||||
var resultJSON []byte
|
||||
if result != nil {
|
||||
marshalledBytes, marshalError := json.Marshal(result)
|
||||
if marshalError != nil {
|
||||
return marshalError
|
||||
}
|
||||
resultJSON = marshalledBytes
|
||||
}
|
||||
|
||||
switch status {
|
||||
case JobStatusProcessing:
|
||||
_, updateError := repository.pool.Exec(queryContext,
|
||||
`UPDATE recognition_jobs SET status = $1, started_at = now() WHERE id = $2`,
|
||||
status, jobID,
|
||||
)
|
||||
return updateError
|
||||
default:
|
||||
_, updateError := repository.pool.Exec(queryContext,
|
||||
`UPDATE recognition_jobs
|
||||
SET status = $1, result = $2, error = $3, completed_at = now()
|
||||
WHERE id = $4`,
|
||||
status, resultJSON, errMsg, jobID,
|
||||
)
|
||||
return updateError
|
||||
}
|
||||
}
|
||||
|
||||
// QueuePosition counts jobs ahead of createdAt in the same plan's queue.
|
||||
func (repository *PostgresJobRepository) QueuePosition(
|
||||
queryContext context.Context,
|
||||
userPlan string,
|
||||
createdAt time.Time,
|
||||
) (int, error) {
|
||||
var position int
|
||||
queryError := repository.pool.QueryRow(queryContext,
|
||||
`SELECT COUNT(*) FROM recognition_jobs
|
||||
WHERE status IN ('pending', 'processing')
|
||||
AND user_plan = $1
|
||||
AND created_at < $2`,
|
||||
userPlan, createdAt,
|
||||
).Scan(&position)
|
||||
return position, queryError
|
||||
}
|
||||
|
||||
// NotifyJobUpdate sends a PostgreSQL NOTIFY on the job_update channel.
|
||||
func (repository *PostgresJobRepository) NotifyJobUpdate(queryContext context.Context, jobID string) error {
|
||||
_, notifyError := repository.pool.Exec(queryContext, `SELECT pg_notify('job_update', $1)`, jobID)
|
||||
return notifyError
|
||||
}
|
||||
Reference in New Issue
Block a user