Loading page…
Rust walkthroughs
Loading page…
Scoped threads allow you to spawn threads that can borrow data from the parent scope, rather than requiring owned data with 'static lifetime. This is achieved through std::thread::scope, which guarantees all spawned threads complete before the scope exits.
Key benefits:
- No 'static data required — threads can reference local variables.
- No Arc needed — share stack data directly without reference counting.

Before scoped threads (Rust 1.63+), sharing data with threads required Arc or moving ownership. Now you can borrow stack data safely within a scope.
The scope function takes a closure that receives a &Scope reference, which provides spawn() to create scoped threads.
use std::thread;
fn main() {
let data = vec![1, 2, 3, 4, 5];
// Spawn threads that borrow `data` from the parent scope
thread::scope(|s| {
for i in 0..5 {
// Each thread borrows data - no Arc needed!
s.spawn(move || {
println!("Element {}: {}", i, data[i]);
});
}
}); // All threads are joined here before scope exits
println!("All threads completed, data is still valid: {:?}", data);
}use std::thread;
/// Sums `data` by splitting it into up to four chunks, one scoped
/// thread per chunk, then adding the partial sums.
///
/// Returns 0 for an empty slice.
fn parallel_sum(data: &[i32]) -> i32 {
    // Empty input would make chunk_size zero, which `chunks` rejects.
    if data.is_empty() {
        return 0;
    }
    const NUM_THREADS: usize = 4;
    // Ceiling division so every element lands in some chunk.
    let chunk_size = (data.len() + NUM_THREADS - 1) / NUM_THREADS;
    let mut sums = vec![0; NUM_THREADS]; // One sum per thread
    thread::scope(|s| {
        // `zip` hands each thread a distinct `&mut` cell, so no two
        // threads ever alias the same element of `sums`.
        for (sum_slot, chunk) in sums.iter_mut().zip(data.chunks(chunk_size)) {
            s.spawn(move || {
                *sum_slot = chunk.iter().sum();
            });
        }
    });
    sums.iter().sum()
}
/// Sums 1..=100 in parallel and prints the (known) answer.
fn main() {
    let values: Vec<i32> = (1..=100).collect();
    let total = parallel_sum(&values);
    println!("Sum of 1..100: {}", total); // 5050
}
use std::thread;
use std::sync::Mutex;
fn main() {
let counter = Mutex::new(0);
let data = vec![10, 20, 30, 40, 50];
thread::scope(|s| {
for value in &data {
s.spawn(|| {
let mut num = counter.lock().unwrap();
*num += value;
});
}
});
println!("Total: {}", *counter.lock().unwrap()); // 150
}use std::thread;
/// Uppercases a list of words, one scoped thread per word, and
/// collects the thread return values in order.
fn main() {
    let data = vec!["hello", "world", "rust", "scoped", "threads"];
    let results: Vec<String> = thread::scope(|s| {
        // Spawn every thread first, keeping the handles, so the work
        // actually runs concurrently.
        let spawned: Vec<_> = data
            .iter()
            .map(|&word| s.spawn(move || word.to_uppercase()))
            .collect();
        // Then wait for each thread and gather what it returned.
        spawned.into_iter().map(|h| h.join().unwrap()).collect()
    });
    println!("Results: {:?}", results);
}
use std::thread;
/// Applies `f` to every element of `items`, one scoped thread per
/// element, and returns the results in input order.
fn parallel_map<T, U, F>(items: &[T], f: F) -> Vec<U>
where
    T: Sync,
    U: Send,
    F: Fn(&T) -> U + Sync,
{
    // Borrow `f` once so each `move` closure copies the reference;
    // moving `f` itself would only work for the first iteration.
    let f = &f;
    thread::scope(|s| {
        items
            .iter()
            // `move` copies the `&T` and `&F` into the thread closure so
            // it does not borrow the short-lived `item` binding.
            .map(|item| s.spawn(move || f(item)))
            // Collect the handles first so all threads run before any join.
            .collect::<Vec<_>>()
            .into_iter()
            .map(|handle| handle.join().unwrap())
            .collect()
    })
}
/// Drives `parallel_map` twice with different mapping functions.
fn main() {
    let numbers = vec![1, 2, 3, 4, 5, 6, 7, 8];
    // Square every element in parallel.
    let square = |n: &i32| n * n;
    println!("Squares: {:?}", parallel_map(&numbers, square));
    // Cube every element in parallel.
    let cube = |n: &i32| n * n * n;
    println!("Cubes: {:?}", parallel_map(&numbers, cube));
}
use std::thread;
use std::sync::Arc;
fn main() {
let data = vec![1, 2, 3, 4, 5];
// Regular thread requires Arc for shared data
println!("=== Regular Thread (requires Arc) ===");
{
let data = Arc::new(data.clone());
let handles: Vec<_> = (0..5)
.map(|i| {
let data = Arc::clone(&data);
thread::spawn(move || {
println!("Regular thread {}: {}", i, data[i]);
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
// Scoped thread can borrow directly
println!("\n=== Scoped Thread (borrows directly) ===");
{
thread::scope(|s| {
for i in 0..5 {
s.spawn(move || {
println!("Scoped thread {}: {}", i, data[i]);
});
}
});
}
// Data is still available after scope
println!("\nData after scoped threads: {:?}", data);
}use std::thread;
use std::fs;
fn main() {
let files = vec![
"src/main.rs",
"Cargo.toml",
];
let results = thread::scope(|s| {
files.iter()
.map(|path| {
s.spawn(move || {
fs::read_to_string(path)
.map(|content| (path.to_string(), content.lines().count()))
})
})
.collect::<Vec<_>>()
});
for handle in results {
match handle.join().unwrap() {
Ok((path, lines)) => println!("{}: {} lines", path, lines),
Err(e) => println!("Error: {}", e),
}
}
}use std::thread;
/// Shows that scopes nest: each outer scoped thread opens its own
/// inner scope with three more threads.
fn main() {
    let outer_data = vec![10, 20, 30];
    thread::scope(|outer| {
        for (i, val) in outer_data.iter().enumerate() {
            outer.spawn(move || {
                println!("Outer thread {}: processing {}", i, val);
                // Each outer thread fans out into its own nested scope.
                let copies = vec![*val; 3];
                thread::scope(|inner| {
                    for (j, item) in copies.iter().enumerate() {
                        inner.spawn(move || {
                            println!(" Inner thread {}.{}: {}", i, j, item);
                        });
                    }
                });
            });
        }
    });
    println!("All nested scopes completed");
}
use std::thread;
type Matrix = Vec<Vec<i32>>;
/// Multiplies `a` (m x n) by `b` (n x p), computing each output row on
/// its own scoped thread. Rows are produced as independent thread
/// return values, so no shared mutable state is required.
///
/// Returns an empty matrix when either operand is empty (the original
/// indexing of `a[0]`/`b[0]` would panic in that case). Dimensions are
/// otherwise assumed compatible — `a[0].len() == b.len()`.
fn matrix_multiply(a: &Matrix, b: &Matrix) -> Matrix {
    if a.is_empty() || b.is_empty() {
        return Vec::new();
    }
    let rows_a = a.len();
    let cols_a = a[0].len();
    let cols_b = b[0].len();
    thread::scope(|s| {
        // One thread per output row; each returns its finished row.
        let handles: Vec<_> = (0..rows_a)
            .map(|i| {
                s.spawn(move || {
                    (0..cols_b)
                        .map(|j| (0..cols_a).map(|k| a[i][k] * b[k][j]).sum())
                        .collect::<Vec<i32>>()
                })
            })
            .collect();
        handles.into_iter().map(|h| h.join().unwrap()).collect()
    })
}
/// Multiplies a 2x3 matrix by a 3x2 matrix and prints the 2x2 product.
fn main() {
    let a = vec![vec![1, 2, 3], vec![4, 5, 6]];
    let b = vec![vec![7, 8], vec![9, 10], vec![11, 12]];
    let product = matrix_multiply(&a, &b);
    for row in &product {
        println!("{:?}", row);
    }
}
use std::thread;
/// Each scoped thread returns a `Result`; `join()` hands it back to
/// the parent, which reports successes and failures separately.
fn main() {
    let data = vec![1, 2, 3, 4, 5];
    let results: Vec<Result<i32, String>> = thread::scope(|s| {
        // Spawn everything first so the threads run concurrently.
        let handles: Vec<_> = data
            .iter()
            .map(|&n| {
                s.spawn(move || {
                    if n == 3 {
                        Err("Three is not allowed!".to_string())
                    } else {
                        Ok(n * 2)
                    }
                })
            })
            .collect();
        // Then join each one, propagating its Result unchanged.
        handles.into_iter().map(|h| h.join().unwrap()).collect()
    });
    for result in results {
        match result {
            Ok(value) => println!("Success: {}", value),
            Err(e) => println!("Error: {}", e),
        }
    }
}
use std::thread;
use std::sync::Mutex;
/// Applies `f` to every element of `items` using up to `num_threads`
/// scoped worker threads, returning the results in input order.
///
/// Returns an empty Vec for empty input; `num_threads == 0` is treated
/// as 1 (the original computed a zero chunk size and panicked in both
/// cases). Chunks are tagged with their index so the caller-visible
/// order no longer depends on thread scheduling.
fn process_in_parallel<T, R, F>(items: &[T], num_threads: usize, f: F) -> Vec<R>
where
    T: Sync,
    R: Send,
    F: Fn(&T) -> R + Sync,
{
    if items.is_empty() {
        return Vec::new();
    }
    let num_threads = num_threads.max(1);
    // Ceiling division so every element is covered.
    let chunk_size = (items.len() + num_threads - 1) / num_threads;
    let results = Mutex::new(Vec::with_capacity(num_threads));
    thread::scope(|s| {
        // Reborrow so the `move` closures copy these references instead
        // of trying to move `f`/`results` into the first thread.
        let f = &f;
        let results = &results;
        for (idx, chunk) in items.chunks(chunk_size).enumerate() {
            // `move` copies the chunk slice into the thread; a non-move
            // closure would borrow the loop-local binding.
            s.spawn(move || {
                let chunk_results: Vec<R> = chunk.iter().map(f).collect();
                results.lock().unwrap().push((idx, chunk_results));
            });
        }
    });
    // Restore input order, then flatten the per-chunk vectors.
    let mut tagged = results.into_inner().unwrap();
    tagged.sort_by_key(|&(idx, _)| idx);
    tagged.into_iter().flat_map(|(_, rs)| rs).collect()
}
/// Feeds 20 fake URLs through the parallel processor with 4 workers.
fn main() {
    let urls: Vec<String> = (0..20).map(|i| format!("https://example.com/{}", i)).collect();
    let results = process_in_parallel(&urls, 4, |url| {
        // Simulate work
        format!("Fetched: {}", url)
    });
    println!("Processed {} items", results.len());
}
use std::thread;
use std::sync::mpsc;
/// Scoped producer threads send doubled values over an mpsc channel;
/// the parent drains the receiver after the scope joins everything.
fn main() {
    let data = vec![1, 2, 3, 4, 5];
    let (tx, rx) = mpsc::channel();
    thread::scope(|s| {
        // One producer thread per element, each owning a Sender clone.
        for &value in &data {
            let sender = tx.clone();
            s.spawn(move || {
                sender.send(value * 2).unwrap();
            });
        }
        // Close the original Sender so rx.iter() can terminate once
        // every producer clone has been dropped.
        drop(tx);
    });
    // All producers have joined; drain whatever they sent.
    let results: Vec<_> = rx.iter().collect();
    println!("Results: {:?}", results);
}
| Feature | Scoped Threads | Regular Threads |
|---------|---------------|-----------------|
| Borrow stack data | ✅ Yes | ❌ No (requires 'static) |
| Requires Arc | ❌ No | ✅ Often needed |
| Automatic join | ✅ Yes | ❌ Manual .join() |
| Lifetime flexibility | ✅ Can use local data | ❌ Owned only |
Key Methods:
| Method | Description |
|--------|-------------|
| thread::scope(|s| {...}) | Create a scope for spawning threads |
| s.spawn(|| {...}) | Spawn a thread within the scope |
| handle.join() | Wait for thread to complete, get result |
When to Use Scoped Threads:
- Parallel work needs to borrow local data, and you want to avoid Arc overhead for simple sharing.

Key Points:
- Scoped threads can borrow non-'static data from the enclosing scope.
- No Arc is needed when sharing stack data.
- spawn() returns a join handle like regular threads; join() returns the thread's result as a Result.
- All threads are joined automatically when the scope ends.
- Requires Rust 1.63+ (use the crossbeam crate for older versions).