Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 16 additions & 10 deletions pkg/api/jobs.go
Original file line number Diff line number Diff line change
Expand Up @@ -252,27 +252,33 @@ func (jobs jobDetailAPIResult) limit(req *http.Request) jobDetailAPIResult {
return ret
}

// PrintJobDetailsReportFromDB renders the detailed list of runs for matching jobs.
func PrintJobDetailsReportFromDB(w http.ResponseWriter, req *http.Request, dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) error {
var start, end int

// List all ProwJobRuns for the given release in the last two weeks.
// TODO: 14 days matches orig API behavior, may want to add query params in future to control.
// JobDetailsReport runs the job details query without HTTP response handling.
func JobDetailsReport(dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) ([]*models.ProwJobRun, error) {
since := reportEnd.Add(-14 * 24 * time.Hour)

prowJobRuns := make([]*models.ProwJobRun, 0)
res := dbc.DB.Joins("ProwJob").
Where("name LIKE ?", "%"+jobSearchStr+"%").
Where("timestamp > ?", since).
Where("release = ?", release).
Preload("Tests", "status = ?", 12). // Only pre-load test results with failure status.
Preload("Tests", "status = ?", 12).
Preload("Tests.Test").
Find(&prowJobRuns)
if res.Error != nil {
log.Errorf("error querying %s ProwJobRuns from db: %v", jobSearchStr, res.Error)
return res.Error
return nil, res.Error
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major | ⚡ Quick win

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Verify raw error returns in the modified file.
rg -n -C2 'return nil, res\.Error|return err' pkg/api/jobs.go

Repository: openshift/sippy

Length of output: 396


🏁 Script executed:

# Check the file content around the error return lines and verify imports
head -n 30 pkg/api/jobs.go && echo -e "\n---MIDDLE SECTION---\n" && sed -n '260,285p' pkg/api/jobs.go

Repository: openshift/sippy

Length of output: 1765


🏁 Script executed:

# Get the full function signatures to verify return types
sed -n '240,285p' pkg/api/jobs.go

Repository: openshift/sippy

Length of output: 1650


Wrap propagated errors with context before returning.

Both error returns pass raw errors upstream. Add fmt.Errorf with context and %w wrapping to make failures diagnosable.

Suggested fix
 	if res.Error != nil {
-		return nil, res.Error
+		return nil, fmt.Errorf("querying prow job runs for release %q and job pattern %q: %w", release, jobSearchStr, res.Error)
 	}
@@
 	if err != nil {
 		log.Errorf("error querying %s ProwJobRuns from db: %v", jobSearchStr, err)
-		return err
+		return fmt.Errorf("building job details report for %q: %w", jobSearchStr, err)
 	}

Applies to lines 267 and 280 per coding guidelines: "Errors should be wrapped with context using fmt.Errorf with %w."

🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@pkg/api/jobs.go` at line 267, The two places that return raw errors (the
statements returning "nil, res.Error") should wrap the propagated error with
context using fmt.Errorf and %w; replace each raw return with something like
fmt.Errorf("failed to <describe operation> while calling res: %w", res.Error) so
callers get actionable context. Locate the returns that use the local variable
res (the "return nil, res.Error" occurrences) and wrap res.Error with fmt.Errorf
including a short description of the operation (e.g., "fetching job", "creating
job" or similar) and %w to preserve the original error.

}
log.WithFields(log.Fields{"prowJobRuns": len(prowJobRuns), "since": since}).Info("loaded ProwJobRuns from db")
return prowJobRuns, nil
}

// PrintJobDetailsReportFromDB renders the detailed list of runs for matching jobs.
func PrintJobDetailsReportFromDB(w http.ResponseWriter, req *http.Request, dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) error {
var start, end int

prowJobRuns, err := JobDetailsReport(dbc, release, jobSearchStr, reportEnd)
if err != nil {
log.Errorf("error querying %s ProwJobRuns from db: %v", jobSearchStr, err)
return err
}

jobDetails := map[string]*jobDetail{}
for _, pjr := range prowJobRuns {
Expand Down
256 changes: 256 additions & 0 deletions pkg/flags/postgres_benchmarking_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,256 @@
package flags

import (
"fmt"
"os"
"testing"
"time"

"github.com/openshift/sippy/pkg/api"
"github.com/openshift/sippy/pkg/db"
"github.com/openshift/sippy/pkg/db/query"
"github.com/openshift/sippy/pkg/filter"
log "github.com/sirupsen/logrus"
)

// Fixed parameters shared by every benchmark case: the release under test
// plus a representative test and job name expected to exist in the
// benchmarking database.
const (
	benchmarkRelease  = "4.22"
	benchmarkTestName = "[Monitor:legacy-test-framework-invariants-pathological][sig-arch] events should not repeat pathologically for ns/kube-system"
	benchmarkJobName  = "periodic-ci-openshift-release-main-ci-4.22-e2e-aws-ovn"
)

// benchmarkCase pairs a human-readable label with the database operation to
// be timed against the benchmarking database.
type benchmarkCase struct {
	name string                  // label used in test output and subtest names
	fn   func(dbc *db.DB) error // operation under measurement; returns any query error
}

// getIndividualBenchmarkCases returns ad-hoc benchmark cases that are run
// individually by name (see Test_BenchmarkFindTestsByRelease) rather than as
// part of the standard group returned by getBenchmarkCases.
func getIndividualBenchmarkCases() map[string]benchmarkCase {
	return map[string]benchmarkCase{
		"FindTestsByRelease": {
			name: "FindTestsByRelease",
			fn: func(dbc *db.DB) error {
				// Minimal projection of the tests table; only used for logging.
				type testResult struct {
					ID   uint
					Name string
				}
				var results []testResult
				res := dbc.DB.Raw(`
SELECT DISTINCT t.id, t.name
FROM tests t
JOIN prow_job_run_tests pjrt ON pjrt.test_id = t.id
JOIN prow_job_runs pjr ON pjr.id = pjrt.prow_job_run_id
JOIN prow_jobs pj ON pj.id = pjr.prow_job_id
WHERE pj.release = ?
AND t.name LIKE ?
AND pjrt.created_at > NOW() - INTERVAL '14 days'
ORDER BY t.name
LIMIT 20`, benchmarkRelease, "%events should not repeat%").Scan(&results)
				if res.Error != nil {
					// Wrap with context per coding guidelines so callers can
					// diagnose which query failed.
					return fmt.Errorf("finding tests for release %q: %w", benchmarkRelease, res.Error)
				}
				log.Printf("Found %d tests matching pattern for release %s", len(results), benchmarkRelease)
				for _, r := range results {
					log.Printf("  [%d] %s", r.ID, r.Name)
				}
				return nil
			},
		},
	}
}

// getBenchmarkCases returns the standard set of query benchmarks that the
// group and individual benchmark tests execute against the benchmarking
// database.
func getBenchmarkCases() []benchmarkCase {
	return []benchmarkCase{
		{
			name: "TestDurations",
			fn: func(dbc *db.DB) error {
				durations, err := query.TestDurations(dbc, benchmarkRelease,
					benchmarkTestName, nil, nil)
				if err != nil {
					return err
				}
				log.Printf("Found %d test durations", len(durations))
				return nil
			},
		},
		{
			name: "TestOutputs",
			fn: func(dbc *db.DB) error {
				outputs, err := query.TestOutputs(dbc, benchmarkRelease,
					benchmarkTestName, nil, nil, 10)
				if err != nil {
					return err
				}
				log.Printf("Found %d test outputs", len(outputs))
				return nil
			},
		},
		{
			name: "JobDetails",
			fn: func(dbc *db.DB) error {
				runs, err := api.JobDetailsReport(dbc, benchmarkRelease,
					benchmarkJobName, time.Now())
				if err != nil {
					return err
				}
				log.Printf("Found %d job runs", len(runs))
				return nil
			},
		},
		{
			name: "TestAnalysisOverall",
			fn: func(dbc *db.DB) error {
				byGroup, err := api.GetTestAnalysisOverallFromDB(dbc, nil,
					benchmarkRelease, benchmarkTestName, time.Now())
				if err != nil {
					return err
				}
				for group, rows := range byGroup {
					log.Printf("TestAnalysisOverall group %s: %d rows", group, len(rows))
				}
				return nil
			},
		},
		{
			name: "TestAnalysisByJob",
			fn: func(dbc *db.DB) error {
				byJob, err := api.GetTestAnalysisByJobFromDB(dbc, nil,
					benchmarkRelease, benchmarkTestName, time.Now())
				if err != nil {
					return err
				}
				log.Printf("TestAnalysisByJob: %d groups", len(byJob))
				return nil
			},
		},
		{
			name: "TestAnalysisByJobWithVariantFilter",
			fn: func(dbc *db.DB) error {
				// Same query as TestAnalysisByJob but constrained to aws variants.
				variantFilter := &filter.Filter{
					Items: []filter.FilterItem{
						{Field: "variants", Value: "aws", Not: false},
					},
				}
				byJob, err := api.GetTestAnalysisByJobFromDB(dbc, variantFilter,
					benchmarkRelease, benchmarkTestName, time.Now())
				if err != nil {
					return err
				}
				log.Printf("TestAnalysisByJobWithVariantFilter: %d groups", len(byJob))
				return nil
			},
		},
		{
			name: "TestAnalysisPassRate",
			fn: func(dbc *db.DB) error {
				// Shape of the single row produced by QueryTestAnalysis.
				type passRate struct {
					CurrentSuccesses   int
					CurrentRuns        int
					CurrentPassPercent float64
				}
				var rate passRate
				queryRes := dbc.DB.Raw(query.QueryTestAnalysis,
					time.Now().Add(-24*14*time.Hour),
					benchmarkTestName,
					[]string{benchmarkJobName}).Scan(&rate)
				if queryRes.Error != nil {
					return queryRes.Error
				}
				log.Printf("TestAnalysisPassRate: %d/%d runs (%.1f%%)",
					rate.CurrentSuccesses, rate.CurrentRuns, rate.CurrentPassPercent)
				return nil
			},
		},
	}
}

// getBenchmarkDBClient returns a DB client connected using the
// db_benchmarking_dsn environment variable, skipping the test when the DSN
// is not set. The underlying connection pool is closed automatically when
// the calling test finishes.
func getBenchmarkDBClient(t *testing.T) *db.DB {
	t.Helper()
	dsn := os.Getenv("db_benchmarking_dsn")
	if dsn == "" {
		t.Skip("skipping: set db_benchmarking_dsn to run")
	}

	dbFlags := &PostgresFlags{
		LogLevel: 4,
		DSN:      dsn,
	}

	dbc, err := dbFlags.GetDBClient()
	if err != nil {
		t.Fatalf("couldn't get DB client: %v", err)
	}

	// Close the pooled connection when the test completes so repeated
	// benchmark runs do not leak database connections.
	sqlDB, err := dbc.DB.DB()
	if err != nil {
		t.Fatalf("couldn't get sql DB handle: %v", err)
	}
	t.Cleanup(func() {
		if closeErr := sqlDB.Close(); closeErr != nil {
			t.Logf("failed to close DB client: %v", closeErr)
		}
	})

	return dbc
}
Comment on lines +170 to +187
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor | ⚡ Quick win

Close the benchmark DB client with t.Cleanup to avoid leaked connections.

The helper opens a DB client but never closes it. Register cleanup when the client is created.

Proposed fix
 func getBenchmarkDBClient(t *testing.T) *db.DB {
 	t.Helper()
 	dsn := os.Getenv("db_benchmarking_dsn")
 	if dsn == "" {
 		t.Skip("skipping: set db_benchmarking_dsn to run")
 	}
@@
 	dbc, err := dbFlags.GetDBClient()
 	if err != nil {
 		t.Fatalf("couldn't get DB client: %v", err)
 	}
+	sqlDB, err := dbc.DB.DB()
+	if err != nil {
+		t.Fatalf("couldn't get sql DB handle: %v", err)
+	}
+	t.Cleanup(func() {
+		if err := sqlDB.Close(); err != nil {
+			t.Logf("failed to close DB client: %v", err)
+		}
+	})
 	return dbc
 }
🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@pkg/flags/postgres_benchmarking_test.go` around lines 170 - 187, The helper
getBenchmarkDBClient currently returns a *db.DB without closing it; after
successfully creating dbc in getBenchmarkDBClient register a t.Cleanup that
closes the client (e.g. t.Cleanup(func(){ _ = dbc.Close() } ) or log any close
error) so the connection is closed when the test finishes; place the t.Cleanup
call immediately after the dbc, err check and before returning dbc.


// Test_BenchmarkIndividual times each benchmark case in its own subtest,
// running it several times and reporting per-iteration, total, and average
// durations.
func Test_BenchmarkIndividual(t *testing.T) {
	dbc := getBenchmarkDBClient(t)
	const runs = 3

	for _, c := range getBenchmarkCases() {
		c := c
		t.Run(c.name, func(t *testing.T) {
			var total time.Duration
			for n := 1; n <= runs; n++ {
				began := time.Now()
				err := c.fn(dbc)
				took := time.Since(began)
				if err != nil {
					t.Fatalf("iteration %d failed: %v", n, err)
				}
				total += took
				fmt.Printf("  %s iteration %d: %s\n", c.name, n, took)
			}
			fmt.Printf("  %s total: %s, avg: %s (%d iterations)\n",
				c.name, total, total/time.Duration(runs), runs)
		})
	}
}

// Test_BenchmarkFindTestsByRelease times only the FindTestsByRelease query
// from the individual benchmark cases.
func Test_BenchmarkFindTestsByRelease(t *testing.T) {
	dbc := getBenchmarkDBClient(t)
	iterations := 1
	// Guard the map lookup: a missing key would yield a zero-value case whose
	// nil fn panics below instead of failing with a clear message.
	bc, ok := getIndividualBenchmarkCases()["FindTestsByRelease"]
	if !ok {
		t.Fatal("benchmark case FindTestsByRelease is not defined")
	}

	var totalDuration time.Duration
	for i := 0; i < iterations; i++ {
		start := time.Now()
		err := bc.fn(dbc)
		elapsed := time.Since(start)
		if err != nil {
			t.Fatalf("iteration %d failed: %v", i+1, err)
		}
		totalDuration += elapsed
		fmt.Printf("  %s iteration %d: %s\n", bc.name, i+1, elapsed)
	}
	avg := totalDuration / time.Duration(iterations)
	fmt.Printf("  %s total: %s, avg: %s (%d iterations)\n",
		bc.name, totalDuration, avg, iterations)
}

// Test_BenchmarkGroup times full passes over every benchmark case in
// sequence, reporting the duration of each pass plus the total and average.
func Test_BenchmarkGroup(t *testing.T) {
	dbc := getBenchmarkDBClient(t)
	const iterations = 1
	cases := getBenchmarkCases()

	var total time.Duration
	for pass := 1; pass <= iterations; pass++ {
		start := time.Now()
		for _, c := range cases {
			if err := c.fn(dbc); err != nil {
				t.Fatalf("group iteration %d, case %s failed: %v", pass, c.name, err)
			}
		}
		took := time.Since(start)
		total += took
		fmt.Printf("  group iteration %d: %s\n", pass, took)
	}
	fmt.Printf("  group total: %s, avg: %s (%d iterations)\n",
		total, total/time.Duration(iterations), iterations)
}