diff --git a/pkg/api/jobs.go b/pkg/api/jobs.go
index 3a4e19efb..c693fc73c 100644
--- a/pkg/api/jobs.go
+++ b/pkg/api/jobs.go
@@ -252,26 +252,36 @@ func (jobs jobDetailAPIResult) limit(req *http.Request) jobDetailAPIResult {
 	return ret
 }
 
-// PrintJobDetailsReportFromDB renders the detailed list of runs for matching jobs.
-func PrintJobDetailsReportFromDB(w http.ResponseWriter, req *http.Request, dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) error {
-	var start, end int
-
-	// List all ProwJobRuns for the given release in the last two weeks.
-	// TODO: 14 days matches orig API behavior, may want to add query params in future to control.
+// JobDetailsReport lists all ProwJobRuns for the given release whose ProwJob
+// name matches jobSearchStr, over the 14 days prior to reportEnd. It contains
+// no HTTP handling so it can be reused outside request handlers.
+// TODO: 14 days matches orig API behavior, may want to add query params in future to control.
+func JobDetailsReport(dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) ([]*models.ProwJobRun, error) {
 	since := reportEnd.Add(-14 * 24 * time.Hour)
 	prowJobRuns := make([]*models.ProwJobRun, 0)
 	res := dbc.DB.Joins("ProwJob").
 		Where("name LIKE ?", "%"+jobSearchStr+"%").
 		Where("timestamp > ?", since).
 		Where("release = ?", release).
 		Preload("Tests", "status = ?", 12). // Only pre-load test results with failure status.
 		Preload("Tests.Test").
 		Find(&prowJobRuns)
 	if res.Error != nil {
-		log.Errorf("error querying %s ProwJobRuns from db: %v", jobSearchStr, res.Error)
-		return res.Error
+		return nil, res.Error
 	}
 	log.WithFields(log.Fields{"prowJobRuns": len(prowJobRuns), "since": since}).Info("loaded ProwJobRuns from db")
+	return prowJobRuns, nil
+}
+
+// PrintJobDetailsReportFromDB renders the detailed list of runs for matching jobs.
+func PrintJobDetailsReportFromDB(w http.ResponseWriter, req *http.Request, dbc *db.DB, release, jobSearchStr string, reportEnd time.Time) error {
+	var start, end int
+
+	prowJobRuns, err := JobDetailsReport(dbc, release, jobSearchStr, reportEnd)
+	if err != nil {
+		log.Errorf("error querying %s ProwJobRuns from db: %v", jobSearchStr, err)
+		return err
+	}
 
 	jobDetails := map[string]*jobDetail{}
 	for _, pjr := range prowJobRuns {
diff --git a/pkg/flags/postgres_benchmarking_test.go b/pkg/flags/postgres_benchmarking_test.go
new file mode 100644
index 000000000..c07299e6a
--- /dev/null
+++ b/pkg/flags/postgres_benchmarking_test.go
@@ -0,0 +1,256 @@
+package flags
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/openshift/sippy/pkg/api"
+	"github.com/openshift/sippy/pkg/db"
+	"github.com/openshift/sippy/pkg/db/query"
+	"github.com/openshift/sippy/pkg/filter"
+	log "github.com/sirupsen/logrus"
+)
+
+const benchmarkRelease = "4.22"
+const benchmarkTestName = "[Monitor:legacy-test-framework-invariants-pathological][sig-arch] events should not repeat pathologically for ns/kube-system"
+const benchmarkJobName = "periodic-ci-openshift-release-main-ci-4.22-e2e-aws-ovn"
+
+type benchmarkCase struct {
+	name string
+	fn   func(dbc *db.DB) error
+}
+
+func getIndividualBenchmarkCases() map[string]benchmarkCase {
+	return map[string]benchmarkCase{
+		"FindTestsByRelease": {
+			name: "FindTestsByRelease",
+			fn: func(dbc *db.DB) error {
+				type testResult struct {
+					ID   uint
+					Name string
+				}
+				var results []testResult
+				res := dbc.DB.Raw(`
+					SELECT DISTINCT t.id, t.name
+					FROM tests t
+					JOIN prow_job_run_tests pjrt ON pjrt.test_id = t.id
+					JOIN prow_job_runs pjr ON pjr.id = pjrt.prow_job_run_id
+					JOIN prow_jobs pj ON pj.id = pjr.prow_job_id
+					WHERE pj.release = ?
+					AND t.name LIKE ?
+					AND pjrt.created_at > NOW() - INTERVAL '14 days'
+					ORDER BY t.name
+					LIMIT 20`, benchmarkRelease, "%events should not repeat%").Scan(&results)
+				if res.Error != nil {
+					return res.Error
+				}
+				log.Printf("Found %d tests matching pattern for release %s", len(results), benchmarkRelease)
+				for _, r := range results {
+					log.Printf(" [%d] %s", r.ID, r.Name)
+				}
+				return nil
+			},
+		},
+	}
+}
+
+func getBenchmarkCases() []benchmarkCase {
+	return []benchmarkCase{
+		{
+			name: "TestDurations",
+			fn: func(dbc *db.DB) error {
+				durations, err := query.TestDurations(dbc, benchmarkRelease,
+					benchmarkTestName, nil, nil)
+
+				if err == nil {
+					log.Printf("Found %d test durations", len(durations))
+				}
+
+				return err
+			},
+		},
+		{
+			name: "TestOutputs",
+			fn: func(dbc *db.DB) error {
+				testOutputs, err := query.TestOutputs(dbc, benchmarkRelease,
+					benchmarkTestName, nil, nil, 10)
+
+				if err == nil {
+					log.Printf("Found %d test outputs", len(testOutputs))
+				}
+
+				return err
+			},
+		},
+		{
+			name: "JobDetails",
+			fn: func(dbc *db.DB) error {
+				jobRuns, err := api.JobDetailsReport(dbc, benchmarkRelease,
+					benchmarkJobName, time.Now())
+
+				if err == nil {
+					log.Printf("Found %d job runs", len(jobRuns))
+				}
+
+				return err
+			},
+		},
+		{
+			name: "TestAnalysisOverall",
+			fn: func(dbc *db.DB) error {
+				results, err := api.GetTestAnalysisOverallFromDB(dbc, nil,
+					benchmarkRelease, benchmarkTestName, time.Now())
+
+				if err == nil {
+					for group, rows := range results {
+						log.Printf("TestAnalysisOverall group %s: %d rows", group, len(rows))
+					}
+				}
+
+				return err
+			},
+		},
+		{
+			name: "TestAnalysisByJob",
+			fn: func(dbc *db.DB) error {
+				results, err := api.GetTestAnalysisByJobFromDB(dbc, nil,
+					benchmarkRelease, benchmarkTestName, time.Now())
+
+				if err == nil {
+					log.Printf("TestAnalysisByJob: %d groups", len(results))
+				}
+
+				return err
+			},
+		},
+		{
+			name: "TestAnalysisByJobWithVariantFilter",
+			fn: func(dbc *db.DB) error {
+				f := &filter.Filter{
+					Items: []filter.FilterItem{
+						{Field: "variants", Value: "aws", Not: false},
+					},
+				}
+				results, err := api.GetTestAnalysisByJobFromDB(dbc, f,
+					benchmarkRelease, benchmarkTestName, time.Now())
+
+				if err == nil {
+					log.Printf("TestAnalysisByJobWithVariantFilter: %d groups", len(results))
+				}
+
+				return err
+			},
+		},
+		{
+			name: "TestAnalysisPassRate",
+			fn: func(dbc *db.DB) error {
+				type passRate struct {
+					CurrentSuccesses   int
+					CurrentRuns        int
+					CurrentPassPercent float64
+				}
+				var result passRate
+				res := dbc.DB.Raw(query.QueryTestAnalysis,
+					time.Now().Add(-24*14*time.Hour),
+					benchmarkTestName,
+					[]string{benchmarkJobName}).Scan(&result)
+				if res.Error != nil {
+					return res.Error
+				}
+				log.Printf("TestAnalysisPassRate: %d/%d runs (%.1f%%)",
+					result.CurrentSuccesses, result.CurrentRuns, result.CurrentPassPercent)
+				return nil
+			},
+		},
+	}
+}
+
+func getBenchmarkDBClient(t *testing.T) *db.DB {
+	t.Helper()
+	dsn := os.Getenv("db_benchmarking_dsn")
+	if dsn == "" {
+		t.Skip("skipping: set db_benchmarking_dsn to run")
+	}
+
+	dbFlags := &PostgresFlags{
+		LogLevel: 4,
+		DSN:      dsn,
+	}
+
+	dbc, err := dbFlags.GetDBClient()
+	if err != nil {
+		t.Fatalf("couldn't get DB client: %v", err)
+	}
+	return dbc
+}
+
+func Test_BenchmarkIndividual(t *testing.T) {
+	dbc := getBenchmarkDBClient(t)
+	iterations := 3
+	cases := getBenchmarkCases()
+
+	for _, bc := range cases {
+		t.Run(bc.name, func(t *testing.T) {
+			var totalDuration time.Duration
+			for i := 0; i < iterations; i++ {
+				start := time.Now()
+				err := bc.fn(dbc)
+				elapsed := time.Since(start)
+				if err != nil {
+					t.Fatalf("iteration %d failed: %v", i+1, err)
+				}
+				totalDuration += elapsed
+				fmt.Printf(" %s iteration %d: %s\n", bc.name, i+1, elapsed)
+			}
+			avg := totalDuration / time.Duration(iterations)
+			fmt.Printf(" %s total: %s, avg: %s (%d iterations)\n",
+				bc.name, totalDuration, avg, iterations)
+		})
+	}
+}
+
+func Test_BenchmarkFindTestsByRelease(t *testing.T) {
+	dbc := getBenchmarkDBClient(t)
+	iterations := 1
+	bc := getIndividualBenchmarkCases()["FindTestsByRelease"]
+
+	var totalDuration time.Duration
+	for i := 0; i < iterations; i++ {
+		start := time.Now()
+		err := bc.fn(dbc)
+		elapsed := time.Since(start)
+		if err != nil {
+			t.Fatalf("iteration %d failed: %v", i+1, err)
+		}
+		totalDuration += elapsed
+		fmt.Printf(" %s iteration %d: %s\n", bc.name, i+1, elapsed)
+	}
+	avg := totalDuration / time.Duration(iterations)
+	fmt.Printf(" %s total: %s, avg: %s (%d iterations)\n",
+		bc.name, totalDuration, avg, iterations)
+}
+
+func Test_BenchmarkGroup(t *testing.T) {
+	dbc := getBenchmarkDBClient(t)
+	iterations := 1
+	cases := getBenchmarkCases()
+
+	var totalDuration time.Duration
+	for i := 0; i < iterations; i++ {
+		start := time.Now()
+		for _, bc := range cases {
+			err := bc.fn(dbc)
+			if err != nil {
+				t.Fatalf("group iteration %d, case %s failed: %v", i+1, bc.name, err)
+			}
+		}
+		elapsed := time.Since(start)
+		totalDuration += elapsed
+		fmt.Printf(" group iteration %d: %s\n", i+1, elapsed)
+	}
+	avg := totalDuration / time.Duration(iterations)
+	fmt.Printf(" group total: %s, avg: %s (%d iterations)\n",
+		totalDuration, avg, iterations)
+}