45 changes: 35 additions & 10 deletions .claude/skills/lading-optimize-hunt/SKILL.md
@@ -62,6 +62,8 @@ Otherwise, check pending hunt issues or pick from hot subsystems:
| `lading_throttle` | Capacity calculation, rate limiting |
| `lading` | Generators, blackholes, target management |

**CRITICAL: The target must have an associated criterion benchmark.**

---

## Phase 2: Analyze Target
@@ -97,23 +99,48 @@ Otherwise, check pending hunt issues or pick from hot subsystems:

**CRITICAL: Capture baseline metrics BEFORE making any code changes.**

### Identify the Benchmark Target

Each `lading_payload` source module has a matching benchmark target. Use `--bench <name>` to run **only** the relevant benchmark instead of the full suite.

| Source module | `--bench` target | Fingerprint config dir |
|---|---|---|
| `apache_common.rs` | `apache_common` | `ci/fingerprints/apache_common/` |
| `ascii.rs` | `ascii` | `ci/fingerprints/ascii/` |
| `block.rs` | `block` | *(none — use json)* |
| `datadog_logs.rs` | `datadog_logs` | `ci/fingerprints/datadog_logs/` |
| `dogstatsd.rs` | `dogstatsd` | `ci/fingerprints/dogstatsd/` |
| `fluent.rs` | `fluent` | `ci/fingerprints/fluent/` |
| `json.rs` | `json` | `ci/fingerprints/json/` |
| `opentelemetry_log.rs` | `opentelemetry_log` | `ci/fingerprints/otel_logs/` |
| `opentelemetry_metric.rs` | `opentelemetry_metric` | `ci/fingerprints/otel_metrics/` |
| `opentelemetry_traces.rs` | `opentelemetry_traces` | `ci/fingerprints/otel_traces/` |
| `splunk_hec.rs` | `splunk_hec` | `ci/fingerprints/splunk_hec/` |
| `syslog.rs` | `syslog` | `ci/fingerprints/syslog/` |
| `trace_agent.rs` | `trace_agent` | `ci/fingerprints/trace_agent_v04/` |

Set these once and use them throughout:

```bash
BENCH=<target> # e.g. json, syslog, dogstatsd
CONFIG=ci/fingerprints/<config_dir>/lading.yaml
```
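
For example, to hunt in the JSON serializer, the table above gives:

```bash
BENCH=json
CONFIG=ci/fingerprints/json/lading.yaml
```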

### Stage 1: Micro-benchmark Baseline

Use `cargo criterion` for micro-benchmarks.
Run **only** the benchmark for your target:

```bash
cargo criterion 2>&1 | tee /tmp/criterion-baseline.log
cargo criterion --bench "$BENCH" 2>&1 | tee /tmp/criterion-baseline.log
```

**Note:** Criterion stores baseline data automatically for later comparison.
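
If you also want an explicit copy of the raw measurements, a minimal sketch (assuming Criterion's default `target/criterion` output directory):

```bash
# Optional: snapshot Criterion's data directory so the baseline numbers
# survive later runs. Path assumed to be the default target/criterion layout.
cp -r target/criterion "/tmp/criterion-data-baseline-${BENCH}"
```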

### Stage 2: Macro-benchmark Baseline

Choose a config file that exercises your target code path:
Use the matching fingerprint config:

```bash
# Common configs: ci/fingerprints/{json,syslog,dogstatsd}/lading.yaml
CONFIG=ci/fingerprints/json/lading.yaml
cargo build --release --bin payloadtool
hyperfine --warmup 3 --runs 10 --export-json /tmp/baseline.json \
"./target/release/payloadtool $CONFIG"
@@ -145,10 +172,10 @@ ci/validate

### Stage 1: Micro-benchmarks (inner loops)

Re-run the same criterion benchmarks with your changes:
Re-run the **same** benchmark target with your changes:

```bash
cargo criterion 2>&1 | tee /tmp/criterion-optimized.log
cargo criterion --bench "$BENCH" 2>&1 | tee /tmp/criterion-optimized.log
```

Note: Criterion automatically compares against the last run and reports percentage changes.
@@ -166,11 +193,9 @@ Example output looks like: `time: [1.2345 ms 1.2456 ms 1.2567 ms] change: [-5.12

### Stage 2: Macro-benchmarks (end-to-end payloadtool)

Only run this if Stage 1 showed improvement. Use the SAME config as baseline:
Only run this if Stage 1 showed improvement. Use the SAME `$CONFIG` as Phase 3:

```bash
# Use same CONFIG as Phase 3
CONFIG=ci/fingerprints/json/lading.yaml
cargo build --release --bin payloadtool
hyperfine --warmup 3 --export-json /tmp/optimized.json \
"./target/release/payloadtool $CONFIG"
32 changes: 18 additions & 14 deletions lading_payload/benches/apache_common.rs
@@ -1,6 +1,6 @@
//! Benchmarks for Apache Common log payload generation.

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use lading_payload::{Serialize, apache_common};
use rand::{SeedableRng, rngs::SmallRng};
use std::time::Duration;
@@ -12,23 +12,27 @@ fn apache_common_setup(c: &mut Criterion) {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let _ac = apache_common::ApacheCommon::new(&mut rng);
})
});
});
}

fn apache_common_all(c: &mut Criterion) {
let mut group = c.benchmark_group("apache_common_all");
fn apache_common_throughput(c: &mut Criterion) {
let mut group = c.benchmark_group("apache_common_throughput");
for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
group.throughput(Throughput::Bytes(*size as u64));
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let mut ac = apache_common::ApacheCommon::new(&mut rng);
let mut writer = Vec::with_capacity(size);

ac.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
});
b.iter_batched(
|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let ac = apache_common::ApacheCommon::new(&mut rng);
(rng, ac, Vec::with_capacity(size))
},
|(rng, mut ac, mut writer)| {
ac.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
},
BatchSize::PerIteration,
);
});
}
group.finish();
@@ -37,7 +41,7 @@ fn apache_common_all(c: &mut Criterion) {
criterion_group!(
name = setup_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(10))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = apache_common_setup,
);
@@ -47,7 +51,7 @@ criterion_group!(
config = Criterion::default()
.measurement_time(Duration::from_secs(30))
.warm_up_time(Duration::from_secs(1));
targets = apache_common_all,
targets = apache_common_throughput,
);

criterion_main!(setup_benches, throughput_benches);
32 changes: 18 additions & 14 deletions lading_payload/benches/ascii.rs
@@ -1,6 +1,6 @@
//! Benchmarks for ASCII payload generation.

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use lading_payload::{Serialize, ascii};
use rand::{SeedableRng, rngs::SmallRng};
use std::time::Duration;
@@ -12,23 +12,27 @@ fn ascii_setup(c: &mut Criterion) {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let _dd = ascii::Ascii::new(&mut rng);
})
});
});
}

fn ascii_all(c: &mut Criterion) {
let mut group = c.benchmark_group("ascii_all");
fn ascii_throughput(c: &mut Criterion) {
let mut group = c.benchmark_group("ascii_throughput");
for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
group.throughput(Throughput::Bytes(*size as u64));
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let mut asc = ascii::Ascii::new(&mut rng);
let mut writer = Vec::with_capacity(size);

asc.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
});
b.iter_batched(
|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let asc = ascii::Ascii::new(&mut rng);
(rng, asc, Vec::with_capacity(size))
},
|(rng, mut asc, mut writer)| {
asc.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
},
BatchSize::PerIteration,
);
});
}
group.finish();
@@ -37,7 +41,7 @@ fn ascii_all(c: &mut Criterion) {
criterion_group!(
name = setup_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(10))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = ascii_setup,
);
@@ -47,7 +51,7 @@ criterion_group!(
config = Criterion::default()
.measurement_time(Duration::from_secs(30))
.warm_up_time(Duration::from_secs(1));
targets = ascii_all,
targets = ascii_throughput,
);

criterion_main!(setup_benches, throughput_benches);
4 changes: 2 additions & 2 deletions lading_payload/benches/block.rs
@@ -157,15 +157,15 @@ fn cache_handle_creation(c: &mut Criterion) {
criterion_group!(
name = setup_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(30))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = cache_setup,
);

criterion_group!(
name = operations_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(10))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = cache_advance, cache_peek, cache_total_size,
cache_read_at, cache_handle_creation,
32 changes: 18 additions & 14 deletions lading_payload/benches/datadog_logs.rs
@@ -1,6 +1,6 @@
//! Benchmarks for Datadog Logs payload generation.

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use lading_payload::{DatadogLog, Serialize};
use rand::{SeedableRng, rngs::SmallRng};
use std::time::Duration;
@@ -12,23 +12,27 @@ fn datadog_logs_setup(c: &mut Criterion) {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let _dd = DatadogLog::new(&mut rng);
})
});
});
}

fn datadog_logs_all(c: &mut Criterion) {
let mut group = c.benchmark_group("datadog_logs_all");
fn datadog_logs_throughput(c: &mut Criterion) {
let mut group = c.benchmark_group("datadog_logs_throughput");
for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
group.throughput(Throughput::Bytes(*size as u64));
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let mut dd = DatadogLog::new(&mut rng);
let mut writer = Vec::with_capacity(size);

dd.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
});
b.iter_batched(
|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let dd = DatadogLog::new(&mut rng);
(rng, dd, Vec::with_capacity(size))
},
|(rng, mut dd, mut writer)| {
dd.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
},
BatchSize::PerIteration,
);
});
}
group.finish();
@@ -37,7 +41,7 @@ fn datadog_logs_all(c: &mut Criterion) {
criterion_group!(
name = setup_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(10))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = datadog_logs_setup,
);
@@ -47,7 +51,7 @@ criterion_group!(
config = Criterion::default()
.measurement_time(Duration::from_secs(30))
.warm_up_time(Duration::from_secs(1));
targets = datadog_logs_all,
targets = datadog_logs_throughput,
);

criterion_main!(setup_benches, throughput_benches);
36 changes: 20 additions & 16 deletions lading_payload/benches/dogstatsd.rs
@@ -1,6 +1,6 @@
//! Benchmarks for DogStatsD payload generation.
//! Benchmarks for `DogStatsD` payload generation.

use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use lading_payload::{Serialize, dogstatsd};
use rand::{SeedableRng, rngs::SmallRng};
use std::time::Duration;
@@ -12,24 +12,28 @@ fn dogstatsd_setup(c: &mut Criterion) {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let _dd = dogstatsd::DogStatsD::default(&mut rng);
})
});
});
}

fn dogstatsd_all(c: &mut Criterion) {
let mut group = c.benchmark_group("dogstatsd_all");
fn dogstatsd_throughput(c: &mut Criterion) {
let mut group = c.benchmark_group("dogstatsd_throughput");
for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
group.throughput(Throughput::Bytes(*size as u64));
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter(|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let mut dd =
dogstatsd::DogStatsD::default(&mut rng).expect("failed to create DogStatsD");
let mut writer = Vec::with_capacity(size);

dd.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
});
b.iter_batched(
|| {
let mut rng = SmallRng::seed_from_u64(19_690_716);
let dd = dogstatsd::DogStatsD::default(&mut rng)
.expect("failed to create DogStatsD");
(rng, dd, Vec::with_capacity(size))
},
|(rng, mut dd, mut writer)| {
dd.to_bytes(rng, size, &mut writer)
.expect("failed to convert to bytes");
},
BatchSize::PerIteration,
);
});
}
group.finish();
@@ -38,7 +42,7 @@ fn dogstatsd_all(c: &mut Criterion) {
criterion_group!(
name = setup_benches;
config = Criterion::default()
.measurement_time(Duration::from_secs(10))
.measurement_time(Duration::from_secs(5))
.warm_up_time(Duration::from_secs(1));
targets = dogstatsd_setup,
);
@@ -48,7 +52,7 @@ criterion_group!(
config = Criterion::default()
.measurement_time(Duration::from_secs(30))
.warm_up_time(Duration::from_secs(1));
targets = dogstatsd_all,
targets = dogstatsd_throughput,
);

criterion_main!(setup_benches, throughput_benches);