What is the difference between criterion::BenchmarkId::new and from_parameter for parameterized benchmarks?
BenchmarkId::new creates a benchmark identifier from a function name and explicit parameter value, while from_parameter derives the identifier entirely from a single parameter that implements Display. Both create unique identifiers for parameterized benchmarks, but new separates the function name from the parameter value, giving you explicit control over how the benchmark appears in reports. from_parameter is more concise when the parameter itself provides all needed context.
Basic Parameterized Benchmark
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn parameterized_benchmark(c: &mut Criterion) {
    // Benchmark the same sort routine at several input sizes; each size gets
    // its own entry under the "sort_performance" group.
    let mut group = c.benchmark_group("sort_performance");
    for size in [10, 100, 1000, 10000].iter() {
        // BenchmarkId::new takes an explicit function name plus the parameter,
        // producing IDs of the form sort_performance/sort/<size>.
        let id = BenchmarkId::new("sort", size);
        group.bench_function(id, |b| {
            // Strictly descending input: a hard case for many sorts.
            let data: Vec<i32> = (0..*size).rev().collect();
            b.iter(|| {
                let mut sorted = data.clone();
                sorted.sort();
                // Return the result so Criterion observes the sorted vector.
                sorted
            });
        });
    }
    group.finish();
}
criterion_group!(benches, parameterized_benchmark);
criterion_main!(benches);
BenchmarkId::new("sort", size) creates an identifier with function name "sort" and parameter value size.
Using from_parameter
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn from_parameter_example(c: &mut Criterion) {
    // Same sweep as the BenchmarkId::new example, but the identifier is
    // derived entirely from the parameter (IDs: sort_performance/<size>).
    let mut group = c.benchmark_group("sort_performance");
    for size in [10, 100, 1000, 10000].iter() {
        // Using from_parameter - parameter must implement Display
        group.bench_function(
            BenchmarkId::from_parameter(size),
            |b| {
                let data: Vec<i32> = (0..*size).rev().collect();
                b.iter(|| {
                    let mut sorted = data.clone();
                    sorted.sort();
                    // Return the sorted vector so Criterion can black-box the
                    // result (matches the first example; previously the closure
                    // ended with `sorted.sort()`, returning `()` and dropping
                    // the benchmarked work unobserved).
                    sorted
                });
            }
        );
    }
    group.finish();
}
criterion_group!(benches, from_parameter_example);
criterion_main!(benches);
from_parameter(size) uses only the parameter value as the identifier; the parameter must implement Display.
Difference in Report Output
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn output_comparison(c: &mut Criterion) {
    // Demonstrates how each constructor shapes the final benchmark ID.
    let mut group = c.benchmark_group("comparison");
    let size = 1000;

    // new("sort", size): group "comparison", function "sort", parameter "1000"
    // -> full ID "comparison/sort/1000".
    let named = BenchmarkId::new("sort", size);
    group.bench_function(named, |b| b.iter(|| size));

    // from_parameter(size): group "comparison", no function name, parameter
    // "1000" -> full ID "comparison/1000".
    let bare = BenchmarkId::from_parameter(size);
    group.bench_function(bare, |b| b.iter(|| size));

    group.finish();
}
// Report output example:
//
//   With BenchmarkId::new("sort", 1000):
//     comparison/sort/1000
//         time: [1.2345 us 1.2500 us 1.2654 us]
//
//   With BenchmarkId::from_parameter(1000):
//     comparison/1000
//         time: [1.2345 us 1.2500 us 1.2654 us]
The function name appears in reports when using new, but not with from_parameter.
When to Use Each Approach
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn when_to_use_each(c: &mut Criterion) {
    // BenchmarkId::new shines when several functions share the same parameter:
    // reports then show a per-algorithm breakdown at every size. Use it when
    // (1) multiple functions are compared, (2) the parameter alone does not
    // identify the benchmark, or (3) reports need explicit function names.
    let mut group = c.benchmark_group("algorithms");
    for size in [100, 1000, 10000].iter() {
        // Each algorithm gets a named entry at this size, so reports make it
        // obvious which implementation and which input size produced a result.
        group.bench_function(BenchmarkId::new("bubble_sort", size), |b| {
            let data: Vec<i32> = (0..*size).rev().collect();
            b.iter(|| bubble_sort(data.clone()))
        });
        group.bench_function(BenchmarkId::new("quick_sort", size), |b| {
            let data: Vec<i32> = (0..*size).rev().collect();
            b.iter(|| quick_sort(data.clone()))
        });
        group.bench_function(BenchmarkId::new("merge_sort", size), |b| {
            let data: Vec<i32> = (0..*size).rev().collect();
            b.iter(|| merge_sort(data.clone()))
        });
    }
    group.finish();
}
/// Classic bubble sort: repeatedly swaps adjacent out-of-order elements.
/// After pass `p`, the largest `p + 1` elements sit in place at the tail,
/// so each pass scans one fewer element. Returns the vector sorted ascending.
fn bubble_sort(mut data: Vec<i32>) -> Vec<i32> {
    let len = data.len();
    for pass in 0..len {
        // Everything past `len - 1 - pass` is already sorted.
        for idx in 0..len - 1 - pass {
            if data[idx] > data[idx + 1] {
                data.swap(idx, idx + 1);
            }
        }
    }
    data
}
/// Stand-in "quick sort" for the examples: delegates to the standard
/// library's sort rather than a handwritten partition scheme.
fn quick_sort(data: Vec<i32>) -> Vec<i32> {
    let mut sorted = data;
    sorted.sort();
    sorted
}
/// Stand-in "merge sort" for the examples: delegates to the standard
/// library's stable (merge-based) sort.
fn merge_sort(data: Vec<i32>) -> Vec<i32> {
    let mut sorted = data;
    sorted.sort();
    sorted
}
Use new when benchmarking multiple functions with the same parameters for direct comparison.
Single Function with from_parameter
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn single_function_benchmark(c: &mut Criterion) {
    // from_parameter suits the case where (1) only one function is tested,
    // (2) the parameter fully describes the run, and (3) brevity is preferred.
    // IDs come out as hash_lookup/<size>.
    let mut group = c.benchmark_group("hash_lookup");
    for size in [10, 100, 1000, 10000, 100000].iter() {
        group.bench_function(BenchmarkId::from_parameter(size), |b| {
            let map = create_hashmap(*size);
            // Probe a key from the middle of the populated range.
            b.iter(|| map.get(&(size / 2)));
        });
    }
    group.finish();
}
/// Builds a map of `size` entries where key `i` maps to `"value_<i>"`.
fn create_hashmap(size: usize) -> std::collections::HashMap<usize, String> {
    let mut map = std::collections::HashMap::with_capacity(size);
    for key in 0..size {
        map.insert(key, format!("value_{}", key));
    }
    map
}
criterion_group!(benches, single_function_benchmark);
criterion_main!(benches);
Use from_parameter when testing one function across multiple parameter values.
Custom Parameter Types
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
use std::fmt;
// Custom parameter type that implements Display
#[derive(Debug, Clone)]
struct AlgorithmConfig {
    // Algorithm label (e.g. "parallel"); leading segment of the Display output.
    name: String,
    // Worker thread count; rendered as "<n>t" by the Display impl below.
    thread_count: usize,
    // Items per work chunk; rendered as "<n>c" by the Display impl below.
    chunk_size: usize,
}
impl fmt::Display for AlgorithmConfig {
    // Renders as "<name>-<threads>t-<chunk>c" (e.g. "parallel-2t-100c") so
    // from_parameter produces compact, readable benchmark IDs.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!(
            "{}-{}t-{}c",
            self.name, self.thread_count, self.chunk_size
        ))
    }
}
fn custom_parameter_type(c: &mut Criterion) {
    // from_parameter accepts any Display type, so a config struct can serve
    // directly as the benchmark parameter.
    let mut group = c.benchmark_group("parallel_processing");
    let configs = vec![
        AlgorithmConfig { name: "parallel".into(), thread_count: 2, chunk_size: 100 },
        AlgorithmConfig { name: "parallel".into(), thread_count: 4, chunk_size: 100 },
        AlgorithmConfig { name: "parallel".into(), thread_count: 8, chunk_size: 100 },
        AlgorithmConfig { name: "parallel".into(), thread_count: 8, chunk_size: 500 },
    ];
    for config in configs {
        // The Display impl yields IDs like "parallel_processing/parallel-2t-100c".
        group.bench_function(BenchmarkId::from_parameter(&config), |b| {
            b.iter(|| process_with_config(&config));
        });
    }
    group.finish();
}
/// Simulated workload: cost scales with threads times chunk size.
fn process_with_config(config: &AlgorithmConfig) -> usize {
    let AlgorithmConfig { thread_count, chunk_size, .. } = config;
    thread_count * chunk_size
}
Custom types with Display implementation enable rich parameter representations.
Mixing Both Approaches
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn mixed_approach(c: &mut Criterion) {
    // Both constructors may appear in one group, though consistent naming
    // keeps the resulting report easier to scan.
    let mut group = c.benchmark_group("mixed_example");

    // Bare parameter -> ID "mixed_example/100".
    group.bench_function(BenchmarkId::from_parameter(100), |b| b.iter(|| 100));

    // Named benchmark, same parameter -> ID "mixed_example/named_benchmark/100".
    group.bench_function(BenchmarkId::new("named_benchmark", 100), |b| b.iter(|| 100));

    // Reports list these as distinct benchmarks, yet the group-level analysis
    // may still treat them as related.
    group.finish();
}
// Best practice: be consistent within a benchmark group
// Either use new() for all, or from_parameter() for all
// Mixing can make reports harder to read
Consistency within a benchmark group improves report readability.
Multiple Parameters
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn multiple_parameters(c: &mut Criterion) {
    // from_parameter accepts only a single value, so a multi-dimensional sweep
    // is encoded by formatting the dimensions into one string for new().
    let mut group = c.benchmark_group("matrix_operations");
    let sizes = [10, 100, 1000];
    let threads = [1, 2, 4, 8];
    for &size in &sizes {
        for &thread_count in &threads {
            // One readable parameter per (size, threads) combination.
            let param = format!("{}x{}", size, thread_count);
            group.bench_function(BenchmarkId::new("matrix_multiply", param), |b| {
                // Placeholder for a threaded matrix multiplication.
                b.iter(|| size * thread_count);
            });
        }
    }
    group.finish();
}
// Alternative: use a struct with Display
#[derive(Debug, Clone)]
struct MatrixConfig {
    // Matrix dimension; first segment of the "NxM" Display output below.
    size: usize,
    // Worker thread count; second segment of the "NxM" Display output below.
    threads: usize,
}
impl std::fmt::Display for MatrixConfig {
    // Formats as "<size>x<threads>" (e.g. "100x4") for compact benchmark IDs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("{}x{}", self.size, self.threads))
    }
}
fn multiple_parameters_struct(c: &mut Criterion) {
    // Alternative to string formatting: a struct whose Display impl encodes
    // every dimension, keeping the call sites clean and consistent.
    let mut group = c.benchmark_group("matrix_operations_v2");
    let configs: Vec<MatrixConfig> = vec![
        MatrixConfig { size: 10, threads: 1 },
        MatrixConfig { size: 10, threads: 4 },
        MatrixConfig { size: 100, threads: 1 },
        MatrixConfig { size: 100, threads: 4 },
    ];
    for config in configs {
        group.bench_function(BenchmarkId::from_parameter(&config), |b| {
            // Placeholder matrix-multiplication workload.
            b.iter(|| config.size * config.threads);
        });
    }
    group.finish();
}
For multiple parameters, use new with formatted strings or a struct with Display.
Practical Comparison Table
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn summary_table(c: &mut Criterion) {
    // +-------------------+---------------------------+----------------------+
    // | Aspect            | BenchmarkId::new          | from_parameter       |
    // +-------------------+---------------------------+----------------------+
    // | Parameters        | function: &str, param: T  | param: T             |
    // | Function name     | Included in ID            | Not included         |
    // | Report display    | group/function/param      | group/param          |
    // | Use case          | Multiple functions        | Single function      |
    // | Verbosity         | More explicit             | More concise         |
    // | Multiple params   | Combine manually          | Use Display struct   |
    // | Type constraint   | param: Display            | param: Display       |
    // +-------------------+---------------------------+----------------------+
    //
    // BenchmarkId::new("sort", 1000)
    // - Shows function name in reports
    // - Useful when comparing multiple algorithms
    // - Clear separation of function and parameter
    // - Full ID: group/sort/1000
    //
    // BenchmarkId::from_parameter(1000)
    // - Only shows parameter value
    // - Useful for single-function benchmarks
    // - Simpler syntax
    // - Full ID: group/1000
    //
    // Both require the parameter to implement Display
    // Both produce BenchmarkId that can be used interchangeably
}
criterion_group!(benches, summary_table);
criterion_main!(benches);
The choice depends on whether you need function names in reports.
Real-World Pattern
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
// Common pattern: benchmark a system across multiple dimensions
fn real_world_pattern(c: &mut Criterion) {
    // Scenario: sweep a simulated database query across three dimensions —
    // query shape x data size x cache setting.
    let mut group = c.benchmark_group("database_queries");
    let query_types = ["simple", "join", "aggregate"];
    let sizes = [100, 1000, 10000];
    let cache_enabled = [true, false];
    for query_type in query_types {
        for &size in &sizes {
            for &cache in &cache_enabled {
                // Encode all three dimensions into one descriptive parameter,
                // e.g. "join-1000-cached".
                let cache_str = if cache { "cached" } else { "uncached" };
                let param = format!("{}-{}-{}", query_type, size, cache_str);
                group.bench_function(BenchmarkId::new("query", param), |b| {
                    b.iter(|| execute_query(query_type, size, cache));
                });
            }
        }
    }
    group.finish();
}
/// Simulated query cost model: the base cost is `size` scaled down by a
/// divisor chosen per query shape; a cache hit cuts the cost by 10x.
/// Unrecognized query types fall back to the full `size` as base cost.
fn execute_query(query_type: &str, size: usize, cache: bool) -> usize {
    let divisor = match query_type {
        "simple" => 100,
        "join" => 10,
        "aggregate" => 20,
        _ => 1,
    };
    let base_time = size / divisor;
    if cache { base_time / 10 } else { base_time }
}
// This produces clear benchmark IDs like:
// database_queries/query/simple-100-cached
// database_queries/query/simple-100-uncached
// database_queries/query/join-1000-cached
// etc.
Real-world benchmarks often need descriptive parameter combinations.
Synthesis
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn complete_guide_summary(c: &mut Criterion) {
    // BenchmarkId::new("function", param)
    // - Creates ID with function name and parameter
    // - Report shows: group/function/param
    // - Best for: comparing multiple functions
    // - Use when: you want clear function names in reports
    //
    // BenchmarkId::from_parameter(param)
    // - Creates ID from parameter only
    // - Report shows: group/param
    // - Best for: single function with varying inputs
    // - Use when: parameter provides all necessary context
    //
    // The key difference is report organization:
    // - new() creates a hierarchy: group > function > parameter
    // - from_parameter() creates: group > parameter
    //
    // For multi-algorithm comparison:
    //   Use new("algorithm_name", size) so reports show algorithm comparison
    // For single-algorithm scaling:
    //   Use from_parameter(size) for simpler syntax
    //
    // Both require Display for the parameter
    // Both produce equivalent BenchmarkId values internally
}
// Key insight:
// BenchmarkId::new gives you structured reports with function names,
// while from_parameter gives simpler IDs. The choice affects how
// Criterion organizes and displays results. Use new() when comparing
// multiple algorithms or approaches; use from_parameter() when testing
// a single algorithm's behavior across different inputs.
Key insight: Both methods create BenchmarkId values, but new includes function names in report output while from_parameter uses only the parameter value. The difference is primarily about how reports are organized: new("sort", 1000) produces group/sort/1000, while from_parameter(1000) produces group/1000. Use new when comparing multiple functions across the same parameters — reports will group by function, making comparisons clear. Use from_parameter when benchmarking a single function across various inputs — the simpler ID is sufficient context. For complex parameters (multiple dimensions), create a struct that implements Display for clean, readable benchmark identifiers.
