From 7316dc099d16ad1fb6fb0d17e0b7962edcbe67d9 Mon Sep 17 00:00:00 2001 From: Paul Reinlein Date: Wed, 11 Feb 2026 10:18:21 -0500 Subject: [PATCH 1/2] Optimize benchmarks --- lading_payload/benches/apache_common.rs | 32 +++++---- lading_payload/benches/ascii.rs | 32 +++++---- lading_payload/benches/block.rs | 4 +- lading_payload/benches/datadog_logs.rs | 32 +++++---- lading_payload/benches/dogstatsd.rs | 36 +++++----- lading_payload/benches/fluent.rs | 34 +++++----- lading_payload/benches/json.rs | 34 ++++++---- lading_payload/benches/opentelemetry_log.rs | 37 ++++++----- .../benches/opentelemetry_metric.rs | 66 ++++++++++--------- .../benches/opentelemetry_traces.rs | 34 +++++----- lading_payload/benches/splunk_hec.rs | 32 +++++---- lading_payload/benches/syslog.rs | 34 +++++----- lading_payload/benches/trace_agent.rs | 36 +++++----- 13 files changed, 247 insertions(+), 196 deletions(-) diff --git a/lading_payload/benches/apache_common.rs b/lading_payload/benches/apache_common.rs index 2e4f7bc45..48efbecb7 100644 --- a/lading_payload/benches/apache_common.rs +++ b/lading_payload/benches/apache_common.rs @@ -1,6 +1,6 @@ //! Benchmarks for Apache Common log payload generation. -use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, apache_common}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -12,23 +12,27 @@ fn apache_common_setup(c: &mut Criterion) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(19_690_716); let _ac = apache_common::ApacheCommon::new(&mut rng); - }) + }); }); } -fn apache_common_all(c: &mut Criterion) { - let mut group = c.benchmark_group("apache_common_all"); +fn apache_common_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("apache_common_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut ac = apache_common::ApacheCommon::new(&mut rng); - let mut writer = Vec::with_capacity(size); - - ac.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let ac = apache_common::ApacheCommon::new(&mut rng); + (rng, ac, Vec::with_capacity(size)) + }, + |(rng, mut ac, mut writer)| { + ac.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -37,7 +41,7 @@ fn apache_common_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = apache_common_setup, ); @@ -47,7 +51,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = apache_common_all, + targets = apache_common_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/ascii.rs b/lading_payload/benches/ascii.rs index 95e15d712..05838897c 100644 --- a/lading_payload/benches/ascii.rs +++ b/lading_payload/benches/ascii.rs @@ -1,6 +1,6 @@ //! Benchmarks for ASCII payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, ascii}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -12,23 +12,27 @@ fn ascii_setup(c: &mut Criterion) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(19_690_716); let _dd = ascii::Ascii::new(&mut rng); - }) + }); }); } -fn ascii_all(c: &mut Criterion) { - let mut group = c.benchmark_group("ascii_all"); +fn ascii_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("ascii_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut asc = ascii::Ascii::new(&mut rng); - let mut writer = Vec::with_capacity(size); - - asc.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let asc = ascii::Ascii::new(&mut rng); + (rng, asc, Vec::with_capacity(size)) + }, + |(rng, mut asc, mut writer)| { + asc.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -37,7 +41,7 @@ fn ascii_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = ascii_setup, ); @@ -47,7 +51,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = ascii_all, + targets = ascii_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/block.rs b/lading_payload/benches/block.rs index 89dc2672c..53c880dd6 100644 --- a/lading_payload/benches/block.rs +++ b/lading_payload/benches/block.rs @@ -157,7 +157,7 @@ fn cache_handle_creation(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(30)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = cache_setup, ); @@ -165,7 +165,7 @@ criterion_group!( criterion_group!( name = operations_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = cache_advance, cache_peek, cache_total_size, cache_read_at, cache_handle_creation, diff --git a/lading_payload/benches/datadog_logs.rs b/lading_payload/benches/datadog_logs.rs index eacf4b1d3..848176bc5 100644 --- a/lading_payload/benches/datadog_logs.rs +++ b/lading_payload/benches/datadog_logs.rs @@ -1,6 +1,6 @@ //! Benchmarks for Datadog Logs payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{DatadogLog, Serialize}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -12,23 +12,27 @@ fn datadog_logs_setup(c: &mut Criterion) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(19_690_716); let _dd = DatadogLog::new(&mut rng); - }) + }); }); } -fn datadog_logs_all(c: &mut Criterion) { - let mut group = c.benchmark_group("datadog_logs_all"); +fn datadog_logs_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("datadog_logs_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut dd = DatadogLog::new(&mut rng); - let mut writer = Vec::with_capacity(size); - - dd.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let dd = DatadogLog::new(&mut rng); + (rng, dd, Vec::with_capacity(size)) + }, + |(rng, mut dd, mut writer)| { + dd.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -37,7 +41,7 @@ fn datadog_logs_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = datadog_logs_setup, ); @@ -47,7 +51,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = datadog_logs_all, + targets = datadog_logs_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/dogstatsd.rs b/lading_payload/benches/dogstatsd.rs index 77ea76ab2..cdbf6ab7f 100644 --- a/lading_payload/benches/dogstatsd.rs +++ b/lading_payload/benches/dogstatsd.rs @@ -1,6 +1,6 @@ -//! Benchmarks for DogStatsD payload generation. +//! Benchmarks for `DogStatsD` payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, dogstatsd}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -12,24 +12,28 @@ fn dogstatsd_setup(c: &mut Criterion) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(19_690_716); let _dd = dogstatsd::DogStatsD::default(&mut rng); - }) + }); }); } -fn dogstatsd_all(c: &mut Criterion) { - let mut group = c.benchmark_group("dogstatsd_all"); +fn dogstatsd_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("dogstatsd_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut dd = - dogstatsd::DogStatsD::default(&mut rng).expect("failed to create DogStatsD"); - let mut writer = Vec::with_capacity(size); - - dd.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let dd = dogstatsd::DogStatsD::default(&mut rng) + .expect("failed to create DogStatsD"); + (rng, dd, Vec::with_capacity(size)) + }, + |(rng, mut dd, mut writer)| { + dd.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -38,7 +42,7 @@ fn dogstatsd_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = dogstatsd_setup, ); @@ -48,7 +52,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = dogstatsd_all, + targets = dogstatsd_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/fluent.rs b/lading_payload/benches/fluent.rs index 8fe8a4f38..e94a313c2 100644 --- a/lading_payload/benches/fluent.rs +++ b/lading_payload/benches/fluent.rs @@ -1,6 +1,6 @@ //! Benchmarks for Fluent payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Fluent, Serialize}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -12,23 +12,27 @@ fn fluent_setup(c: &mut Criterion) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(19_690_716); let _f = Fluent::new(&mut rng); - }) + }); }); } -fn fluent_all(c: &mut Criterion) { - let mut group = c.benchmark_group("fluent_all"); - for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { +fn fluent_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("fluent_throughput"); + for size in &[MIB, 10 * MIB, 100 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut ta = Fluent::new(&mut rng); - let mut writer = Vec::with_capacity(size); - - ta.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let ta = Fluent::new(&mut rng); + (rng, ta, Vec::with_capacity(size)) + }, + |(rng, mut ta, mut writer)| { + ta.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -37,7 +41,7 @@ fn fluent_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = fluent_setup, ); @@ -47,7 +51,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = fluent_all, + targets = fluent_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/json.rs b/lading_payload/benches/json.rs index 3edd5fee7..f50002399 100644 --- a/lading_payload/benches/json.rs +++ b/lading_payload/benches/json.rs @@ -1,33 +1,39 @@ //! Benchmarks for JSON payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Json, Serialize}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; const MIB: usize = 1_048_576; +#[allow(clippy::default_constructed_unit_structs)] fn json_setup(c: &mut Criterion) { c.bench_function("json_setup", |b| { b.iter(|| { let _json = Json::default(); - }) + }); }); } -fn json_all(c: &mut Criterion) { - let mut group = c.benchmark_group("json_all"); +#[allow(clippy::default_constructed_unit_structs)] +fn json_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("json_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let rng = SmallRng::seed_from_u64(19_690_716); - let mut json = Json::default(); - let mut writer = Vec::with_capacity(size); - - json.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let rng = SmallRng::seed_from_u64(19_690_716); + let json = Json::default(); + (rng, json, Vec::with_capacity(size)) + }, + |(rng, mut json, mut writer)| { + json.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -36,7 +42,7 @@ fn json_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = json_setup, ); @@ -46,7 +52,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = json_all, + targets = json_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/opentelemetry_log.rs b/lading_payload/benches/opentelemetry_log.rs index 058d9b819..5091a9beb 100644 --- a/lading_payload/benches/opentelemetry_log.rs +++ b/lading_payload/benches/opentelemetry_log.rs @@ -1,6 +1,6 @@ //! Benchmarks for OpenTelemetry log payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{OpentelemetryLogs, Serialize, opentelemetry::log::Config}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -13,24 +13,29 @@ fn opentelemetry_log_setup(c: &mut Criterion) { let mut rng = SmallRng::seed_from_u64(19_690_716); let _ot = OpentelemetryLogs::new(Config::default(), MIB, &mut rng) .expect("failed to create log generator"); - }) + }); }); } -fn opentelemetry_log_all(c: &mut Criterion) { - let mut group = c.benchmark_group("opentelemetry_log_all"); - for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { +fn opentelemetry_log_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("opentelemetry_log_throughput"); + // Benching 100+ MiB pushes the benchmark runtime to >60 minutes + for size in &[MIB, 10 * MIB, 100 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut ot = OpentelemetryLogs::new(Config::default(), size, &mut rng) - .expect("failed to create log generator"); - let mut writer = Vec::with_capacity(size); - - ot.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let ot = OpentelemetryLogs::new(Config::default(), size, &mut rng) + .expect("failed to create log generator"); + (rng, ot, Vec::with_capacity(size)) + }, + |(rng, mut ot, mut writer)| { + ot.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -39,7 +44,7 @@ fn opentelemetry_log_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = opentelemetry_log_setup, ); @@ -49,7 +54,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = opentelemetry_log_all, + targets = opentelemetry_log_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/opentelemetry_metric.rs b/lading_payload/benches/opentelemetry_metric.rs index 95a73984f..587ebeb3a 100644 --- a/lading_payload/benches/opentelemetry_metric.rs +++ b/lading_payload/benches/opentelemetry_metric.rs @@ -1,6 +1,6 @@ //! Benchmarks for OpenTelemetry metric payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::common::config::ConfRange; use lading_payload::{ OpentelemetryMetrics, Serialize, @@ -32,39 +32,43 @@ fn opentelemetry_metric_setup(c: &mut Criterion) { }; let _ot = OpentelemetryMetrics::new(config, MIB, &mut rng) .expect("failed to create metrics generator"); - }) + }); }); } -fn opentelemetry_metric_all(c: &mut Criterion) { - let mut group = c.benchmark_group("opentelemetry_metric_all"); - for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { +fn opentelemetry_metric_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("opentelemetry_metric_throughput"); + for size in &[MIB, 10 * MIB, 100 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let config = Config { - metric_weights: MetricWeights { - gauge: 50, - sum_delta: 25, - sum_cumulative: 25, - }, - contexts: Contexts { - total_contexts: ConfRange::Constant(100), - attributes_per_resource: ConfRange::Inclusive { min: 1, max: 64 }, - scopes_per_resource: ConfRange::Inclusive { min: 1, max: 32 }, - attributes_per_scope: ConfRange::Inclusive { min: 0, max: 4 }, - metrics_per_scope: ConfRange::Inclusive { min: 1, max: 128 }, - attributes_per_metric: ConfRange::Inclusive { min: 0, max: 255 }, - }, - }; - let mut ot = OpentelemetryMetrics::new(config, size, &mut rng) - .expect("failed to create metrics generator"); - let mut writer = Vec::with_capacity(size); - - ot.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let config = Config { + metric_weights: MetricWeights { + gauge: 50, + sum_delta: 25, + sum_cumulative: 25, + }, + contexts: Contexts { + total_contexts: ConfRange::Constant(100), + attributes_per_resource: ConfRange::Inclusive { min: 1, max: 64 }, + scopes_per_resource: ConfRange::Inclusive { min: 1, max: 32 }, + attributes_per_scope: ConfRange::Inclusive { min: 0, max: 4 }, + metrics_per_scope: ConfRange::Inclusive { min: 1, max: 128 }, + attributes_per_metric: ConfRange::Inclusive { min: 0, max: 255 }, + }, + }; + let ot = OpentelemetryMetrics::new(config, size, &mut rng) + .expect("failed to create metrics generator"); + (rng, ot, Vec::with_capacity(size)) + }, + |(rng, mut ot, mut writer)| { + ot.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -73,7 +77,7 @@ fn opentelemetry_metric_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = opentelemetry_metric_setup, ); @@ -83,7 +87,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = opentelemetry_metric_all, + targets = opentelemetry_metric_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/opentelemetry_traces.rs b/lading_payload/benches/opentelemetry_traces.rs index 9a5857e88..818055638 100644 --- a/lading_payload/benches/opentelemetry_traces.rs +++ 
b/lading_payload/benches/opentelemetry_traces.rs @@ -1,6 +1,6 @@ //! Benchmarks for OpenTelemetry trace payload generation. -use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::opentelemetry::trace::{ AttributeConfig, AttributeValueConfig, Config, DatabaseServiceConfig, GrpcServiceConfig, OperationConfig, ServiceConfig, ServiceType, SuboperationConfig, @@ -105,25 +105,29 @@ fn opentelemetry_traces_setup(c: &mut Criterion) { let mut rng = SmallRng::seed_from_u64(19_690_716); let _ot = OpentelemetryTraces::with_config(&config, &mut rng) .expect("failed to create trace generator"); - }) + }); }); } -fn opentelemetry_traces_all(c: &mut Criterion) { +fn opentelemetry_traces_throughput(c: &mut Criterion) { let config = bench_config(); - let mut group = c.benchmark_group("opentelemetry_traces_all"); + let mut group = c.benchmark_group("opentelemetry_traces_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut ot = OpentelemetryTraces::with_config(&config, &mut rng) - .expect("failed to create trace generator"); - let mut writer = Vec::with_capacity(size); - - ot.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let ot = OpentelemetryTraces::with_config(&config, &mut rng) + .expect("failed to create trace generator"); + (rng, ot, Vec::with_capacity(size)) + }, + |(rng, mut ot, mut writer)| { + ot.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -132,7 +136,7 @@ fn opentelemetry_traces_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = opentelemetry_traces_setup, ); @@ -142,7 +146,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = opentelemetry_traces_all, + targets = opentelemetry_traces_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/splunk_hec.rs b/lading_payload/benches/splunk_hec.rs index 95f6eaa40..f6c6f332c 100644 --- a/lading_payload/benches/splunk_hec.rs +++ b/lading_payload/benches/splunk_hec.rs @@ -1,6 +1,6 @@ //! Benchmarks for Splunk HEC payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, SplunkHec}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -11,23 +11,27 @@ fn splunk_hec_setup(c: &mut Criterion) { c.bench_function("splunk_hec_setup", |b| { b.iter(|| { let _hec = SplunkHec::default(); - }) + }); }); } -fn splunk_hec_all(c: &mut Criterion) { - let mut group = c.benchmark_group("splunk_hec_all"); +fn splunk_hec_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("splunk_hec_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let rng = SmallRng::seed_from_u64(19_690_716); - let mut hec = SplunkHec::default(); - let mut writer = Vec::with_capacity(size); - - hec.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let rng = SmallRng::seed_from_u64(19_690_716); + let hec = SplunkHec::default(); + (rng, hec, Vec::with_capacity(size)) + }, + |(rng, mut hec, mut writer)| { + hec.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -36,7 +40,7 @@ fn splunk_hec_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = splunk_hec_setup, ); @@ -46,7 +50,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = splunk_hec_all, + targets = splunk_hec_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/syslog.rs b/lading_payload/benches/syslog.rs index 9d7af350d..10b51d9e9 100644 --- a/lading_payload/benches/syslog.rs +++ b/lading_payload/benches/syslog.rs @@ -1,6 +1,6 @@ //! Benchmarks for Syslog 5424 payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, Syslog5424}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -11,24 +11,28 @@ fn syslog_setup(c: &mut Criterion) { c.bench_function("syslog_setup", |b| { b.iter(|| { let _syslog = Syslog5424::default(); - }) + }); }); } -fn syslog_all(c: &mut Criterion) { - let mut group = c.benchmark_group("syslog_all"); +fn syslog_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("syslog_throughput"); for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let rng = SmallRng::seed_from_u64(19_690_716); - let mut syslog = Syslog5424::default(); - let mut writer = Vec::with_capacity(size); - - syslog - .to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let rng = SmallRng::seed_from_u64(19_690_716); + let syslog = Syslog5424::default(); + (rng, syslog, Vec::with_capacity(size)) + }, + |(rng, mut syslog, mut writer)| { + syslog + .to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -37,7 +41,7 @@ fn syslog_all(c: &mut Criterion) { criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = syslog_setup, ); @@ -47,7 +51,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = syslog_all, + targets = syslog_throughput, ); criterion_main!(setup_benches, throughput_benches); diff --git a/lading_payload/benches/trace_agent.rs b/lading_payload/benches/trace_agent.rs index 8a90044e7..3915977da 100644 --- a/lading_payload/benches/trace_agent.rs +++ b/lading_payload/benches/trace_agent.rs @@ -1,6 +1,6 @@ //! Benchmarks for Datadog trace agent payload generation. 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use lading_payload::{Serialize, trace_agent::v04}; use rand::{SeedableRng, rngs::SmallRng}; use std::time::Duration; @@ -13,24 +13,28 @@ fn trace_agent_setup(c: &mut Criterion) { let mut rng = SmallRng::seed_from_u64(19_690_716); let _ta = v04::V04::with_config(v04::Config::default(), &mut rng) .expect("failed to create trace agent"); - }) + }); }); } -fn trace_agent_all(c: &mut Criterion) { - let mut group = c.benchmark_group("trace_agent_all"); - for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] { +fn trace_agent_throughput(c: &mut Criterion) { + let mut group = c.benchmark_group("trace_agent_throughput"); + for size in &[MIB, 10 * MIB, 100 * MIB] { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { - b.iter(|| { - let mut rng = SmallRng::seed_from_u64(19_690_716); - let mut ta = v04::V04::with_config(v04::Config::default(), &mut rng) - .expect("failed to create trace agent"); - let mut writer = Vec::with_capacity(size); - - ta.to_bytes(rng, size, &mut writer) - .expect("failed to convert to bytes"); - }); + b.iter_batched( + || { + let mut rng = SmallRng::seed_from_u64(19_690_716); + let ta = v04::V04::with_config(v04::Config::default(), &mut rng) + .expect("failed to create trace agent"); + (rng, ta, Vec::with_capacity(size)) + }, + |(rng, mut ta, mut writer)| { + ta.to_bytes(rng, size, &mut writer) + .expect("failed to convert to bytes"); + }, + BatchSize::PerIteration, + ); }); } group.finish(); @@ -39,7 +43,7 @@ criterion_group!( name = setup_benches; config = Criterion::default() - .measurement_time(Duration::from_secs(10)) + .measurement_time(Duration::from_secs(5)) .warm_up_time(Duration::from_secs(1)); targets = trace_agent_setup, ); @@ -49,7 +53,7 @@ criterion_group!( config = Criterion::default() .measurement_time(Duration::from_secs(30)) .warm_up_time(Duration::from_secs(1)); - targets = trace_agent_all, + targets = trace_agent_throughput, ); criterion_main!(setup_benches, throughput_benches); From c2d5460a4539f78d3730c13c871d3d7428b4e4b5 Mon Sep 17 00:00:00 2001 From: Paul Reinlein Date: Wed, 11 Feb 2026 11:42:14 -0500 Subject: [PATCH 2/2] Update hunt to use targeted benchmarks --- .claude/skills/lading-optimize-hunt/SKILL.md | 45 +++++++++++++++----- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/.claude/skills/lading-optimize-hunt/SKILL.md b/.claude/skills/lading-optimize-hunt/SKILL.md index 5f829a090..b7ab3251a 100644 --- a/.claude/skills/lading-optimize-hunt/SKILL.md +++ b/.claude/skills/lading-optimize-hunt/SKILL.md @@ -62,6 +62,8 @@ Otherwise, check pending hunt issues or pick from hot subsystems: | `lading_throttle` | Capacity calculation, rate limiting | | `lading` | Generators, blackholes, target management | +**CRITICAL: The target must have an associated criterion benchmark.** + --- ## Phase 2: Analyze Target @@ -97,23 +99,48 @@ **CRITICAL: Capture baseline metrics BEFORE making any code changes.** +### Identify the Benchmark Target + +Each `lading_payload` source module has a matching benchmark target. Use `--bench <name>` to run **only** the relevant benchmark instead of the full suite. 
+ +| Source module | `--bench` target | Fingerprint config dir | +|---|---|---| +| `apache_common.rs` | `apache_common` | `ci/fingerprints/apache_common/` | +| `ascii.rs` | `ascii` | `ci/fingerprints/ascii/` | +| `block.rs` | `block` | *(none — use json)* | +| `datadog_logs.rs` | `datadog_logs` | `ci/fingerprints/datadog_logs/` | +| `dogstatsd.rs` | `dogstatsd` | `ci/fingerprints/dogstatsd/` | +| `fluent.rs` | `fluent` | `ci/fingerprints/fluent/` | +| `json.rs` | `json` | `ci/fingerprints/json/` | +| `opentelemetry_log.rs` | `opentelemetry_log` | `ci/fingerprints/otel_logs/` | +| `opentelemetry_metric.rs` | `opentelemetry_metric` | `ci/fingerprints/otel_metrics/` | +| `opentelemetry_traces.rs` | `opentelemetry_traces` | `ci/fingerprints/otel_traces/` | +| `splunk_hec.rs` | `splunk_hec` | `ci/fingerprints/splunk_hec/` | +| `syslog.rs` | `syslog` | `ci/fingerprints/syslog/` | +| `trace_agent.rs` | `trace_agent` | `ci/fingerprints/trace_agent_v04/` | + +Set these once and use them throughout: + +```bash +BENCH= # e.g. json, syslog, dogstatsd +CONFIG=ci/fingerprints//lading.yaml +``` + ### Stage 1: Micro-benchmark Baseline -Use `cargo criterion` for micro-benchmarks. +Run **only** the benchmark for your target: ```bash -cargo criterion 2>&1 | tee /tmp/criterion-baseline.log +cargo criterion --bench "$BENCH" 2>&1 | tee /tmp/criterion-baseline.log ``` **Note:** Criterion stores baseline data automatically for later comparison. ### Stage 2: Macro-benchmark Baseline -Choose a config file that exercises your target code path: +Use the matching fingerprint config: ```bash -# Common configs: ci/fingerprints/{json,syslog,dogstatsd}/lading.yaml -CONFIG=ci/fingerprints/json/lading.yaml cargo build --release --bin payloadtool hyperfine --warmup 3 --runs 10 --export-json /tmp/baseline.json \ "./target/release/payloadtool $CONFIG" @@ -145,10 +172,10 @@ ci/validate ### Stage 1: Micro-benchmarks (inner loops) -Re-run the same criterion benchmarks with your changes: +Re-run the **same** benchmark target with your changes: ```bash -cargo criterion 2>&1 | tee /tmp/criterion-optimized.log +cargo criterion --bench "$BENCH" 2>&1 | tee /tmp/criterion-optimized.log ``` Note: Criterion automatically compares against the last run and reports percentage changes. @@ -166,11 +193,9 @@ Example output looks like: `time: [1.2345 ms 1.2456 ms 1.2567 ms] change: [-5.12 ### Stage 2: Macro-benchmarks (end-to-end payloadtool) -Only run this if Stage 1 showed improvement. Use the SAME config as baseline: +Only run this if Stage 1 showed improvement. Use the SAME `$CONFIG` as Phase 3: ```bash -# Use same CONFIG as Phase 3 -CONFIG=ci/fingerprints/json/lading.yaml cargo build --release --bin payloadtool hyperfine --warmup 3 --export-json /tmp/optimized.json \ "./target/release/payloadtool $CONFIG"