32 changes: 18 additions & 14 deletions lading_payload/benches/apache_common.rs
@@ -1,6 +1,6 @@
 //! Benchmarks for Apache Common log payload generation.
 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
+use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
 use lading_payload::{Serialize, apache_common};
 use rand::{SeedableRng, rngs::SmallRng};
 use std::time::Duration;
@@ -12,23 +12,27 @@ fn apache_common_setup(c: &mut Criterion) {
         b.iter(|| {
             let mut rng = SmallRng::seed_from_u64(19_690_716);
             let _ac = apache_common::ApacheCommon::new(&mut rng);
-        })
+        });
     });
 }
 
-fn apache_common_all(c: &mut Criterion) {
-    let mut group = c.benchmark_group("apache_common_all");
+fn apache_common_throughput(c: &mut Criterion) {
+    let mut group = c.benchmark_group("apache_common_throughput");
     for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
         group.throughput(Throughput::Bytes(*size as u64));
         group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
-            b.iter(|| {
-                let mut rng = SmallRng::seed_from_u64(19_690_716);
-                let mut ac = apache_common::ApacheCommon::new(&mut rng);
-                let mut writer = Vec::with_capacity(size);
-
-                ac.to_bytes(rng, size, &mut writer)
-                    .expect("failed to convert to bytes");
-            });
+            b.iter_batched(
+                || {
+                    let mut rng = SmallRng::seed_from_u64(19_690_716);
+                    let ac = apache_common::ApacheCommon::new(&mut rng);
+                    (rng, ac, Vec::with_capacity(size))
+                },
+                |(rng, mut ac, mut writer)| {
+                    ac.to_bytes(rng, size, &mut writer)
+                        .expect("failed to convert to bytes");
+                },
+                BatchSize::PerIteration,
+            );
         });
     }
     group.finish();
@@ -37,7 +41,7 @@ fn apache_common_all(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = apache_common_setup,
 );
@@ -47,7 +51,7 @@ criterion_group!(
     config = Criterion::default()
         .measurement_time(Duration::from_secs(30))
         .warm_up_time(Duration::from_secs(1));
-    targets = apache_common_all,
+    targets = apache_common_throughput,
 );
 
 criterion_main!(setup_benches, throughput_benches);
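
The same conversion repeats in every benchmark file below: `Bencher::iter` becomes `Bencher::iter_batched`, so RNG seeding, generator construction, and writer allocation move into a setup closure that Criterion runs outside the timed routine. A minimal, self-contained sketch of that pattern follows; the benchmark name and the byte-filling workload are placeholder assumptions for illustration, not code from this PR.

```rust
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};

fn iter_batched_sketch(c: &mut Criterion) {
    c.bench_function("iter_batched_sketch", |b| {
        b.iter_batched(
            // Setup closure: runs before every iteration but is not measured.
            || vec![0u8; 1024],
            // Routine closure: only this body contributes to the timing.
            |mut buf| {
                for byte in &mut buf {
                    *byte = byte.wrapping_add(1);
                }
                buf
            },
            // PerIteration hands each timed call a freshly built input.
            BatchSize::PerIteration,
        );
    });
}

criterion_group!(sketch_benches, iter_batched_sketch);
criterion_main!(sketch_benches);
```

`BatchSize::PerIteration` trades some measurement overhead for isolation: each timed call sees a fresh RNG and an empty writer, so results for one payload size are not skewed by state carried over from earlier iterations.
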
32 changes: 18 additions & 14 deletions lading_payload/benches/ascii.rs
@@ -1,6 +1,6 @@
 //! Benchmarks for ASCII payload generation.
 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
+use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
 use lading_payload::{Serialize, ascii};
 use rand::{SeedableRng, rngs::SmallRng};
 use std::time::Duration;
@@ -12,23 +12,27 @@ fn ascii_setup(c: &mut Criterion) {
         b.iter(|| {
             let mut rng = SmallRng::seed_from_u64(19_690_716);
             let _dd = ascii::Ascii::new(&mut rng);
-        })
+        });
     });
 }
 
-fn ascii_all(c: &mut Criterion) {
-    let mut group = c.benchmark_group("ascii_all");
+fn ascii_throughput(c: &mut Criterion) {
+    let mut group = c.benchmark_group("ascii_throughput");
     for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
         group.throughput(Throughput::Bytes(*size as u64));
         group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
-            b.iter(|| {
-                let mut rng = SmallRng::seed_from_u64(19_690_716);
-                let mut asc = ascii::Ascii::new(&mut rng);
-                let mut writer = Vec::with_capacity(size);
-
-                asc.to_bytes(rng, size, &mut writer)
-                    .expect("failed to convert to bytes");
-            });
+            b.iter_batched(
+                || {
+                    let mut rng = SmallRng::seed_from_u64(19_690_716);
+                    let asc = ascii::Ascii::new(&mut rng);
+                    (rng, asc, Vec::with_capacity(size))
+                },
+                |(rng, mut asc, mut writer)| {
+                    asc.to_bytes(rng, size, &mut writer)
+                        .expect("failed to convert to bytes");
+                },
+                BatchSize::PerIteration,
+            );
         });
     }
     group.finish();
@@ -37,7 +41,7 @@ fn ascii_all(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
        .warm_up_time(Duration::from_secs(1));
     targets = ascii_setup,
 );
@@ -47,7 +51,7 @@ criterion_group!(
     config = Criterion::default()
         .measurement_time(Duration::from_secs(30))
         .warm_up_time(Duration::from_secs(1));
-    targets = ascii_all,
+    targets = ascii_throughput,
 );
 
 criterion_main!(setup_benches, throughput_benches);
4 changes: 2 additions & 2 deletions lading_payload/benches/block.rs
@@ -157,15 +157,15 @@ fn cache_handle_creation(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(30))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = cache_setup,
 );
 
 criterion_group!(
     name = operations_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = cache_advance, cache_peek, cache_total_size,
         cache_read_at, cache_handle_creation,
32 changes: 18 additions & 14 deletions lading_payload/benches/datadog_logs.rs
@@ -1,6 +1,6 @@
 //! Benchmarks for Datadog Logs payload generation.
 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
+use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
 use lading_payload::{DatadogLog, Serialize};
 use rand::{SeedableRng, rngs::SmallRng};
 use std::time::Duration;
@@ -12,23 +12,27 @@ fn datadog_logs_setup(c: &mut Criterion) {
         b.iter(|| {
             let mut rng = SmallRng::seed_from_u64(19_690_716);
             let _dd = DatadogLog::new(&mut rng);
-        })
+        });
     });
 }
 
-fn datadog_logs_all(c: &mut Criterion) {
-    let mut group = c.benchmark_group("datadog_logs_all");
+fn datadog_logs_throughput(c: &mut Criterion) {
+    let mut group = c.benchmark_group("datadog_logs_throughput");
     for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
         group.throughput(Throughput::Bytes(*size as u64));
         group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
-            b.iter(|| {
-                let mut rng = SmallRng::seed_from_u64(19_690_716);
-                let mut dd = DatadogLog::new(&mut rng);
-                let mut writer = Vec::with_capacity(size);
-
-                dd.to_bytes(rng, size, &mut writer)
-                    .expect("failed to convert to bytes");
-            });
+            b.iter_batched(
+                || {
+                    let mut rng = SmallRng::seed_from_u64(19_690_716);
+                    let dd = DatadogLog::new(&mut rng);
+                    (rng, dd, Vec::with_capacity(size))
+                },
+                |(rng, mut dd, mut writer)| {
+                    dd.to_bytes(rng, size, &mut writer)
+                        .expect("failed to convert to bytes");
+                },
+                BatchSize::PerIteration,
+            );
         });
     }
     group.finish();
@@ -37,7 +41,7 @@ fn datadog_logs_all(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = datadog_logs_setup,
 );
@@ -47,7 +51,7 @@ criterion_group!(
     config = Criterion::default()
         .measurement_time(Duration::from_secs(30))
         .warm_up_time(Duration::from_secs(1));
-    targets = datadog_logs_all,
+    targets = datadog_logs_throughput,
 );
 
 criterion_main!(setup_benches, throughput_benches);
36 changes: 20 additions & 16 deletions lading_payload/benches/dogstatsd.rs
@@ -1,6 +1,6 @@
-//! Benchmarks for DogStatsD payload generation.
+//! Benchmarks for `DogStatsD` payload generation.
 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
+use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
 use lading_payload::{Serialize, dogstatsd};
 use rand::{SeedableRng, rngs::SmallRng};
 use std::time::Duration;
@@ -12,24 +12,28 @@ fn dogstatsd_setup(c: &mut Criterion) {
         b.iter(|| {
             let mut rng = SmallRng::seed_from_u64(19_690_716);
             let _dd = dogstatsd::DogStatsD::default(&mut rng);
-        })
+        });
     });
 }
 
-fn dogstatsd_all(c: &mut Criterion) {
-    let mut group = c.benchmark_group("dogstatsd_all");
+fn dogstatsd_throughput(c: &mut Criterion) {
+    let mut group = c.benchmark_group("dogstatsd_throughput");
     for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
         group.throughput(Throughput::Bytes(*size as u64));
         group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
-            b.iter(|| {
-                let mut rng = SmallRng::seed_from_u64(19_690_716);
-                let mut dd =
-                    dogstatsd::DogStatsD::default(&mut rng).expect("failed to create DogStatsD");
-                let mut writer = Vec::with_capacity(size);
-
-                dd.to_bytes(rng, size, &mut writer)
-                    .expect("failed to convert to bytes");
-            });
+            b.iter_batched(
+                || {
+                    let mut rng = SmallRng::seed_from_u64(19_690_716);
+                    let dd = dogstatsd::DogStatsD::default(&mut rng)
+                        .expect("failed to create DogStatsD");
+                    (rng, dd, Vec::with_capacity(size))
+                },
+                |(rng, mut dd, mut writer)| {
+                    dd.to_bytes(rng, size, &mut writer)
+                        .expect("failed to convert to bytes");
+                },
+                BatchSize::PerIteration,
+            );
         });
     }
     group.finish();
@@ -38,7 +42,7 @@ fn dogstatsd_all(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = dogstatsd_setup,
 );
@@ -48,7 +52,7 @@ criterion_group!(
     config = Criterion::default()
         .measurement_time(Duration::from_secs(30))
         .warm_up_time(Duration::from_secs(1));
-    targets = dogstatsd_all,
+    targets = dogstatsd_throughput,
 );
 
 criterion_main!(setup_benches, throughput_benches);
34 changes: 19 additions & 15 deletions lading_payload/benches/fluent.rs
@@ -1,6 +1,6 @@
 //! Benchmarks for Fluent payload generation.
 
-use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
+use criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
 use lading_payload::{Fluent, Serialize};
 use rand::{SeedableRng, rngs::SmallRng};
 use std::time::Duration;
@@ -12,23 +12,27 @@ fn fluent_setup(c: &mut Criterion) {
         b.iter(|| {
             let mut rng = SmallRng::seed_from_u64(19_690_716);
             let _f = Fluent::new(&mut rng);
-        })
+        });
     });
 }
 
-fn fluent_all(c: &mut Criterion) {
-    let mut group = c.benchmark_group("fluent_all");
-    for size in &[MIB, 10 * MIB, 100 * MIB, 1_000 * MIB] {
+fn fluent_throughput(c: &mut Criterion) {
+    let mut group = c.benchmark_group("fluent_throughput");
+    for size in &[MIB, 10 * MIB, 100 * MIB] {
         group.throughput(Throughput::Bytes(*size as u64));
         group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
-            b.iter(|| {
-                let mut rng = SmallRng::seed_from_u64(19_690_716);
-                let mut ta = Fluent::new(&mut rng);
-                let mut writer = Vec::with_capacity(size);
-
-                ta.to_bytes(rng, size, &mut writer)
-                    .expect("failed to convert to bytes");
-            });
+            b.iter_batched(
+                || {
+                    let mut rng = SmallRng::seed_from_u64(19_690_716);
+                    let ta = Fluent::new(&mut rng);
+                    (rng, ta, Vec::with_capacity(size))
+                },
+                |(rng, mut ta, mut writer)| {
+                    ta.to_bytes(rng, size, &mut writer)
+                        .expect("failed to convert to bytes");
+                },
+                BatchSize::PerIteration,
+            );
         });
     }
     group.finish();
@@ -37,7 +41,7 @@ fn fluent_all(c: &mut Criterion) {
 criterion_group!(
     name = setup_benches;
     config = Criterion::default()
-        .measurement_time(Duration::from_secs(10))
+        .measurement_time(Duration::from_secs(5))
         .warm_up_time(Duration::from_secs(1));
     targets = fluent_setup,
 );
@@ -47,7 +51,7 @@ criterion_group!(
     config = Criterion::default()
         .measurement_time(Duration::from_secs(30))
         .warm_up_time(Duration::from_secs(1));
-    targets = fluent_all,
+    targets = fluent_throughput,
 );
 
 criterion_main!(setup_benches, throughput_benches);