Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions .beads/backup/backup_state.json
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
{
"last_dolt_commit": "74hjbh5dsambsmc06pb0v5ag2bv2cllq",
"last_dolt_commit": "bkp9f7illjujnoj0ts8e04otd1tr7od7",
"last_event_id": 0,
"timestamp": "2026-03-24T14:59:16.853732Z",
"timestamp": "2026-03-24T15:21:47.129544Z",
"counts": {
"issues": 0,
"events": 0,
"issues": 8,
"events": 42,
"comments": 0,
"dependencies": 0,
"labels": 0,
"dependencies": 7,
"labels": 24,
"config": 11
}
}
7 changes: 7 additions & 0 deletions .beads/backup/dependencies.jsonl
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.1","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.2","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.3","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.4","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.5","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.6","type":"parent-child"}
{"created_at":"2026-03-24T11:05:47Z","created_by":"Marius van Niekerk","depends_on_id":"bd-pui","issue_id":"bd-pui.7","type":"parent-child"}
42 changes: 42 additions & 0 deletions .beads/backup/events.jsonl

Large diffs are not rendered by default.

8 changes: 8 additions & 0 deletions .beads/backup/issues.jsonl

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions .beads/backup/labels.jsonl
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
{"issue_id":"bd-pui","label":"deduplication"}
{"issue_id":"bd-pui","label":"refactor"}
{"issue_id":"bd-pui","label":"simplification"}
{"issue_id":"bd-pui.1","label":"deduplication"}
{"issue_id":"bd-pui.1","label":"refactor"}
{"issue_id":"bd-pui.1","label":"simplification"}
{"issue_id":"bd-pui.2","label":"deduplication"}
{"issue_id":"bd-pui.2","label":"refactor"}
{"issue_id":"bd-pui.2","label":"simplification"}
{"issue_id":"bd-pui.3","label":"deduplication"}
{"issue_id":"bd-pui.3","label":"refactor"}
{"issue_id":"bd-pui.3","label":"simplification"}
{"issue_id":"bd-pui.4","label":"deduplication"}
{"issue_id":"bd-pui.4","label":"refactor"}
{"issue_id":"bd-pui.4","label":"simplification"}
{"issue_id":"bd-pui.5","label":"deduplication"}
{"issue_id":"bd-pui.5","label":"refactor"}
{"issue_id":"bd-pui.5","label":"simplification"}
{"issue_id":"bd-pui.6","label":"deduplication"}
{"issue_id":"bd-pui.6","label":"refactor"}
{"issue_id":"bd-pui.6","label":"simplification"}
{"issue_id":"bd-pui.7","label":"deduplication"}
{"issue_id":"bd-pui.7","label":"refactor"}
{"issue_id":"bd-pui.7","label":"simplification"}
56 changes: 3 additions & 53 deletions cmd/roborev/analyze.go
Original file line number Diff line number Diff line change
Expand Up @@ -667,8 +667,7 @@ func runAnalyzeAndFix(cmd *cobra.Command, ep daemon.DaemonEndpoint, repoRoot str
// waitForAnalysisJob polls until the job completes and returns the review.
// The context controls the maximum wait time.
func waitForAnalysisJob(ctx context.Context, ep daemon.DaemonEndpoint, jobID int64) (*storage.Review, error) {
client := ep.HTTPClient(30 * time.Second)
baseURL := ep.BaseURL()
api := newDaemonReviewAPI(ep.BaseURL(), ep.HTTPClient(30*time.Second))
pollInterval := 1 * time.Second
maxInterval := 5 * time.Second

Expand All @@ -680,64 +679,15 @@ func waitForAnalysisJob(ctx context.Context, ep daemon.DaemonEndpoint, jobID int
default:
}

req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/jobs?id=%d", baseURL, jobID), nil)
if err != nil {
return nil, fmt.Errorf("create request: %w", err)
}

resp, err := client.Do(req)
job, err := api.getJob(ctx, jobID)
if err != nil {
return nil, fmt.Errorf("check job status: %w", err)
}

if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
return nil, fmt.Errorf("server error (%d): %s", resp.StatusCode, body)
}

var jobsResp struct {
Jobs []storage.ReviewJob `json:"jobs"`
}
if err := json.NewDecoder(resp.Body).Decode(&jobsResp); err != nil {
resp.Body.Close()
return nil, fmt.Errorf("parse job status: %w", err)
}
resp.Body.Close()

if len(jobsResp.Jobs) == 0 {
return nil, fmt.Errorf("job %d not found", jobID)
}

job := jobsResp.Jobs[0]
switch job.Status {
case storage.JobStatusDone:
// Fetch the review
reviewReq, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/review?job_id=%d", baseURL, jobID), nil)
if err != nil {
return nil, fmt.Errorf("create review request: %w", err)
}

reviewResp, err := client.Do(reviewReq)
if err != nil {
return nil, fmt.Errorf("fetch review: %w", err)
}
defer reviewResp.Body.Close()

if reviewResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(reviewResp.Body)
return nil, fmt.Errorf("fetch review (%d): %s", reviewResp.StatusCode, body)
}

var review storage.Review
if err := json.NewDecoder(reviewResp.Body).Decode(&review); err != nil {
return nil, fmt.Errorf("parse review: %w", err)
}
return &review, nil

return api.getReview(ctx, jobID, "review")
case storage.JobStatusFailed:
return nil, fmt.Errorf("job failed: %s", job.Error)

case storage.JobStatusCanceled:
return nil, fmt.Errorf("job was canceled")
}
Expand Down
79 changes: 79 additions & 0 deletions cmd/roborev/daemon_api.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
package main

import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"

"github.com/roborev-dev/roborev/internal/storage"
)

// errReviewNotFound signals that the daemon has no stored review for the
// requested job. Callers match it with errors.Is.
var errReviewNotFound = errors.New("review not found")

// daemonReviewAPI is a small client for the daemon's review-related HTTP
// endpoints (/api/jobs and /api/review).
type daemonReviewAPI struct {
	baseURL string       // daemon base URL, no trailing slash
	client  *http.Client // HTTP client; carries the caller's timeout
}

// newDaemonReviewAPI builds a daemonReviewAPI that talks to the daemon at
// base using httpClient.
func newDaemonReviewAPI(base string, httpClient *http.Client) daemonReviewAPI {
	return daemonReviewAPI{
		baseURL: base,
		client:  httpClient,
	}
}

// getJob looks up a single review job by ID via the daemon's /api/jobs
// endpoint. It returns a wrapped ErrJobNotFound when the daemon reports
// no job with that ID; any other non-200 response becomes an error that
// includes the status code and response body.
func (a daemonReviewAPI) getJob(ctx context.Context, jobID int64) (*storage.ReviewJob, error) {
	url := fmt.Sprintf("%s/api/jobs?id=%d", a.baseURL, jobID)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("create request: %w", err)
	}

	resp, err := a.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("fetch job: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Include the daemon's response text to aid debugging; a failed
		// read just yields an empty message.
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("fetch job (%d): %s", resp.StatusCode, msg)
	}

	var payload struct {
		Jobs []storage.ReviewJob `json:"jobs"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, fmt.Errorf("parse job: %w", err)
	}
	if len(payload.Jobs) == 0 {
		return nil, fmt.Errorf("%w: %d", ErrJobNotFound, jobID)
	}
	return &payload.Jobs[0], nil
}

// getReview fetches the stored review for jobID from the daemon's
// /api/review endpoint. label names the artifact in error messages
// (e.g. "review"). A 404 from the daemon is translated into
// errReviewNotFound so callers can match it with errors.Is; any other
// non-200 response becomes an error carrying the status and body.
func (a daemonReviewAPI) getReview(ctx context.Context, jobID int64, label string) (*storage.Review, error) {
	url := fmt.Sprintf("%s/api/review?job_id=%d", a.baseURL, jobID)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("create %s request: %w", label, err)
	}

	resp, err := a.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("fetch %s: %w", label, err)
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode == http.StatusNotFound:
		return nil, fmt.Errorf("%w: job %d", errReviewNotFound, jobID)
	case resp.StatusCode != http.StatusOK:
		// Surface the daemon's response text; a failed read just yields
		// an empty message.
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("fetch %s (%d): %s", label, resp.StatusCode, msg)
	}

	review := new(storage.Review)
	if err := json.NewDecoder(resp.Body).Decode(review); err != nil {
		return nil, fmt.Errorf("parse %s: %w", label, err)
	}
	return review, nil
}
113 changes: 21 additions & 92 deletions cmd/roborev/daemon_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@ package main

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
Expand All @@ -19,8 +21,11 @@ import (
// waitForJob polls until a job completes and displays the review
// Uses the provided serverAddr to ensure we poll the same daemon that received the job.
func waitForJob(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet bool) error {
serverAddr := ep.BaseURL()
client := ep.HTTPClient(5 * time.Second)
ctx := cmd.Context()
if ctx == nil {
ctx = context.Background()
}
api := newDaemonReviewAPI(ep.BaseURL(), ep.HTTPClient(5*time.Second))

if !quiet {
cmd.Printf("Waiting for review to complete...")
Expand All @@ -33,33 +38,11 @@ func waitForJob(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet
const maxUnknownRetries = 10 // Give up after 10 consecutive unknown statuses

for {
resp, err := client.Get(fmt.Sprintf("%s/api/jobs?id=%d", serverAddr, jobID))
job, err := api.getJob(ctx, jobID)
if err != nil {
return fmt.Errorf("failed to check job status: %w", err)
}

// Handle non-200 responses
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
return fmt.Errorf("server error checking job status (%d): %s", resp.StatusCode, body)
}

var jobsResp struct {
Jobs []storage.ReviewJob `json:"jobs"`
}
if err := json.NewDecoder(resp.Body).Decode(&jobsResp); err != nil {
resp.Body.Close()
return fmt.Errorf("failed to parse job status: %w", err)
}
resp.Body.Close()

if len(jobsResp.Jobs) == 0 {
return fmt.Errorf("%w: %d", ErrJobNotFound, jobID)
}

job := jobsResp.Jobs[0]

switch job.Status {
case storage.JobStatusDone:
if !quiet {
Expand Down Expand Up @@ -111,24 +94,17 @@ func waitForJob(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet
// showReview fetches and displays a review by job ID
// When quiet is true, suppresses output but still returns exit code based on verdict.
func showReview(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet bool) error {
client := ep.HTTPClient(5 * time.Second)
resp, err := client.Get(fmt.Sprintf("%s/api/review?job_id=%d", ep.BaseURL(), jobID))
if err != nil {
return fmt.Errorf("failed to fetch review: %w", err)
ctx := cmd.Context()
if ctx == nil {
ctx = context.Background()
}
defer resp.Body.Close()

if resp.StatusCode == http.StatusNotFound {
api := newDaemonReviewAPI(ep.BaseURL(), ep.HTTPClient(5*time.Second))
review, err := api.getReview(ctx, jobID, "review")
if errors.Is(err, errReviewNotFound) {
return fmt.Errorf("no review found for job %d", jobID)
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("server error fetching review (%d): %s", resp.StatusCode, body)
}

var review storage.Review
if err := json.NewDecoder(resp.Body).Decode(&review); err != nil {
return fmt.Errorf("failed to parse review: %w", err)
if err != nil {
return err
}

if !quiet {
Expand Down Expand Up @@ -234,59 +210,12 @@ func waitForReview(jobID int64) (*storage.Review, error) {
}

func waitForReviewWithInterval(jobID int64, pollInterval time.Duration) (*storage.Review, error) {
ep := getDaemonEndpoint()
addr := ep.BaseURL()
client := ep.HTTPClient(10 * time.Second)

for {
resp, err := client.Get(fmt.Sprintf("%s/api/jobs?id=%d", addr, jobID))
if err != nil {
return nil, fmt.Errorf("polling job %d: %w", jobID, err)
}

if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return nil, fmt.Errorf("polling job %d: server returned %s", jobID, resp.Status)
}

var result struct {
Jobs []storage.ReviewJob `json:"jobs"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
resp.Body.Close()
return nil, fmt.Errorf("polling job %d: decode error: %w", jobID, err)
}
resp.Body.Close()

if len(result.Jobs) == 0 {
return nil, fmt.Errorf("job %d not found", jobID)
}

job := result.Jobs[0]
switch job.Status {
case storage.JobStatusDone:
// Get the review
reviewResp, err := client.Get(fmt.Sprintf("%s/api/review?job_id=%d", addr, jobID))
if err != nil {
return nil, err
}
defer reviewResp.Body.Close()

var review storage.Review
if err := json.NewDecoder(reviewResp.Body).Decode(&review); err != nil {
return nil, err
}
return &review, nil

case storage.JobStatusFailed:
return nil, fmt.Errorf("job failed: %s", job.Error)

case storage.JobStatusCanceled:
return nil, fmt.Errorf("job was canceled")
}

time.Sleep(pollInterval)
client, err := daemon.NewHTTPClientFromRuntime()
if err != nil {
return nil, err
}
client.SetPollInterval(pollInterval)
return client.WaitForReview(jobID)
}

// enqueueReview enqueues a review job and returns the job ID
Expand Down
Loading
Loading