Loading page…
Rust walkthroughs
Loading page…
The parking_lot crate provides faster, smaller, and more featureful synchronization primitives compared to the standard library. It offers Mutex, RwLock, Condvar, and other primitives with better performance characteristics, smaller memory footprint, and additional features like fairness guarantees and deadlock detection (in debug mode). The crate is designed as a drop-in replacement for std::sync primitives.
Key components:
- Mutex&lt;T&gt; — mutual exclusion lock with poisoning immunity
- RwLock&lt;T&gt; — reader-writer lock with better fairness
- Condvar — condition variable for signaling
- ReentrantMutex — mutex that can be locked recursively
- Once — one-time initialization primitive

# Cargo.toml
[dependencies]
parking_lot = "0.12"

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
/// Ten threads each increment a shared counter behind a parking_lot Mutex,
/// then the final value (10) is printed.
fn main() {
    let counter = Arc::new(Mutex::new(0));
    // Build the handle list with an iterator chain instead of push-in-a-loop.
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                // parking_lot's lock() hands back the guard directly — no Result.
                *counter.lock() += 1;
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Result: {}", *counter.lock());
}
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
// Demonstrates parking_lot Mutex with a Vec, a String shared across threads,
// and a struct payload. All lock() calls return the guard directly.
fn main() {
// Basic mutex usage
let data = Mutex::new(vec![1, 2, 3]);
// Lock and modify
// The inner scope drops the guard so the println below can re-lock.
{
let mut guard = data.lock();
guard.push(4);
guard.push(5);
}
println!("Data: {:?}", *data.lock());
// Shared data across threads
let shared = Arc::new(Mutex::new(String::from("Hello")));
let mut handles = vec![];
for i in 0..5 {
let shared = Arc::clone(&shared);
handles.push(thread::spawn(move || {
// No unwrap: parking_lot has no poisoning, lock() always yields a guard.
let mut s = shared.lock();
s.push_str(&format!(" {}", i));
}));
}
for handle in handles {
handle.join().unwrap();
}
// The appended numbers may appear in any order (thread scheduling).
println!("Result: {}", *shared.lock());
// Mutex with complex data
struct BankAccount {
balance: f64,
owner: String,
}
let account = Mutex::new(BankAccount {
balance: 100.0,
owner: "Alice".to_string(),
});
// Deposit
{
let mut acc = account.lock();
acc.balance += 50.0;
}
// Withdraw
// Each operation takes the lock in its own scope so it is released promptly.
{
let mut acc = account.lock();
if acc.balance >= 25.0 {
acc.balance -= 25.0;
}
}
let acc = account.lock();
println!("Account: {} has ${:.2}", acc.owner, acc.balance);
}use parking_lot::Mutex;
use std::sync;
// Side-by-side comparison of parking_lot::Mutex and std::sync::Mutex.
fn main() {
// parking_lot Mutex
let pl_mutex = Mutex::new(42);
{
let guard = pl_mutex.lock();
// No unwrap needed - parking_lot doesn't poison
println!("parking_lot value: {}", *guard);
}
// std Mutex
let std_mutex = sync::Mutex::new(42);
{
let guard = std_mutex.lock().unwrap(); // unwrap needed (poisoning)
println!("std value: {}", *guard);
}
// Key differences:
// 1. parking_lot::Mutex::lock() returns MutexGuard directly (no Result)
// 2. No poisoning - lock always succeeds
// 3. Smaller memory footprint
// 4. Faster in most cases
// 5. NOT strictly fair by default — the plain Mutex may let a releasing
//    thread barge back in (see the FairMutex example elsewhere in this
//    file for strict FIFO ordering)
}
// Poisoning behavior comparison
// std poisons a mutex when a thread panics while holding it; parking_lot
// simply releases the lock.
fn poisoning_example() {
use std::sync::Arc;
use std::thread;
// std Mutex - poisoning
let std_mutex = Arc::new(sync::Mutex::new(0));
let std_clone = Arc::clone(&std_mutex);
let handle = thread::spawn(move || {
let _guard = std_clone.lock().unwrap();
panic!("Thread panicked while holding lock");
});
// join() returns Err because the spawned thread panicked.
if handle.join().is_err() {
// Lock is poisoned
match std_mutex.lock() {
Ok(guard) => println!("Unexpected success: {}", *guard),
Err(poisoned) => {
println!("Lock poisoned, recovering...");
// PoisonError::into_inner yields the guard despite the poison flag.
let guard = poisoned.into_inner();
println!("Recovered value: {}", *guard);
}
}
}
// parking_lot Mutex - no poisoning
let pl_mutex = Arc::new(Mutex::new(0));
let pl_clone = Arc::clone(&pl_mutex);
let handle = thread::spawn(move || {
let _guard = pl_clone.lock();
panic!("Thread panicked while holding lock");
});
if handle.join().is_err() {
// Lock is NOT poisoned - can use directly
let guard = pl_mutex.lock();
println!("Value after panic: {}", *guard);
}
}use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;
// Demonstrates parking_lot RwLock: shared readers, exclusive writer.
fn main() {
// Basic RwLock usage
let data = RwLock::new(vec![1, 2, 3]);
// Multiple readers
{
let r1 = data.read();
let r2 = data.read(); // OK - multiple readers allowed
println!("Readers: {:?} and {:?}", *r1, *r2);
}
// Writer
// Write access is exclusive; the scope releases it before the read below.
{
let mut w = data.write();
w.push(4);
}
println!("Data: {:?}", *data.read());
// Concurrent reads
let shared = Arc::new(RwLock::new((0..100).collect::<Vec<i32>>()));
let mut handles = vec![];
for i in 0..5 {
let shared = Arc::clone(&shared);
handles.push(thread::spawn(move || {
// All five threads can hold the read lock at the same time.
let data = shared.read();
let sum: i32 = data.iter().sum();
println!("Thread {}: sum = {}", i, sum);
}));
}
for handle in handles {
handle.join().unwrap();
}
// Writer with upgradable read
// NOTE(review): despite the label, this drops the read guard and then takes
// a fresh write lock — not an atomic upgrade. Another thread could write in
// between. See upgradable_read_example() for the real upgrade API.
let lock = RwLock::new(10);
{
// Start with read access
let r = lock.read();
println!("Initial value: {}", *r);
drop(r);
// Upgrade to write
let mut w = lock.write();
*w += 5;
println!("Modified value: {}", *w);
}
}
// RwLock with upgradable reads
fn upgradable_read_example() {
    use parking_lot::{RwLock, RwLockUpgradableReadGuard};
    let lock = RwLock::new(0);
    // An upgradable read coexists with plain readers but can be atomically
    // upgraded to a write lock without ever releasing it.
    {
        let r = lock.upgradable_read();
        if *r < 10 {
            // FIX: upgrade() is an associated function on the guard type
            // (RwLockUpgradableReadGuard), not on RwLock. It consumes the
            // read guard and returns a write guard.
            let mut w = RwLockUpgradableReadGuard::upgrade(r);
            *w = 10;
        }
    }
    println!("Value: {}", *lock.read());
}
use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::thread;
// Condvar demo: a consumer blocks until a producer flips a flag.
fn main() {
// Simple producer-consumer
// The tuple bundles the flag with the condvar announcing changes to it.
let pair = Arc::new((Mutex::new(false), Condvar::new()));
let pair2 = Arc::clone(&pair);
// Consumer thread
let consumer = thread::spawn(move || {
let (lock, cvar) = &*pair2;
let mut started = lock.lock();
// Loop, not `if`: condvar waits can wake spuriously, so re-check the flag.
while !*started {
// parking_lot's wait() takes the guard by &mut and re-locks on wakeup.
cvar.wait(&mut started);
}
println!("Consumer: started!");
});
// Producer thread
thread::sleep(std::time::Duration::from_millis(100));
{
let (lock, cvar) = &*pair;
let mut started = lock.lock();
*started = true;
// Wake the consumer; it re-checks the flag under the lock.
cvar.notify_one();
}
consumer.join().unwrap();
// Bounded queue example
bounded_queue_example();
}
// Classic bounded producer/consumer queue built from a Mutex and two Condvars.
fn bounded_queue_example() {
// Fixed-capacity FIFO buffer; protected by the mutex in SharedQueue.
struct Queue<T> {
items: Vec<T>,
capacity: usize,
}
// Producers sleep on not_full; consumers sleep on not_empty.
struct SharedQueue<T> {
queue: Mutex<Queue<T>>,
not_empty: Condvar,
not_full: Condvar,
}
impl<T> SharedQueue<T> {
fn new(capacity: usize) -> Self {
Self {
queue: Mutex::new(Queue {
items: Vec::with_capacity(capacity),
capacity,
}),
not_empty: Condvar::new(),
not_full: Condvar::new(),
}
}
// Block until there is room, append, and wake one waiting consumer.
fn push(&self, item: T) {
let mut queue = self.queue.lock();
// while-loop re-checks the predicate after every (possibly spurious) wakeup.
while queue.items.len() == queue.capacity {
self.not_full.wait(&mut queue);
}
queue.items.push(item);
self.not_empty.notify_one();
}
// Block until an item exists, remove the oldest, and wake one producer.
fn pop(&self) -> T {
let mut queue = self.queue.lock();
while queue.items.is_empty() {
self.not_empty.wait(&mut queue);
}
// remove(0) shifts the whole Vec (O(n)); fine for a demo — a VecDeque
// front-pop would be O(1).
let item = queue.items.remove(0);
self.not_full.notify_one();
item
}
}
// Capacity 3 forces the producer to block partway through its 10 pushes.
let queue = Arc::new(SharedQueue::new(3));
let producer_queue = Arc::clone(&queue);
let producer = thread::spawn(move || {
for i in 0..10 {
producer_queue.push(i);
println!("Produced: {}", i);
}
});
let consumer_queue = Arc::clone(&queue);
let consumer = thread::spawn(move || {
for _ in 0..10 {
let item = consumer_queue.pop();
println!("Consumed: {}", item);
}
});
producer.join().unwrap();
consumer.join().unwrap();
}use parking_lot::ReentrantMutex;
/// ReentrantMutex demo: nested locking, a recursive function, and a callback
/// pattern that re-enters the lock.
fn main() {
    // ReentrantMutex can be locked multiple times by the same thread
    let mutex = ReentrantMutex::new(0);
    {
        let guard1 = mutex.lock();
        {
            let guard2 = mutex.lock(); // OK - same thread
            {
                let guard3 = mutex.lock(); // OK - same thread
                println!("Nested locks: {}, {}, {}", *guard1, *guard2, *guard3);
            }
        }
    }
    // Useful for recursive functions
    let data = ReentrantMutex::new(vec![1, 2, 3]);
    fn recursive_sum(data: &ReentrantMutex<Vec<i32>>, start: usize, end: usize) -> i32 {
        if start >= end {
            return 0;
        }
        if end - start == 1 {
            let guard = data.lock();
            return guard[start];
        }
        let mid = (start + end) / 2;
        let left = recursive_sum(data, start, mid);
        let right = recursive_sum(data, mid, end);
        // Can lock again - same thread
        let guard = data.lock();
        println!("Summing indices {}..{} with value {}", start, end, guard[start]);
        left + right
    }
    let sum = recursive_sum(&data, 0, 3);
    println!("Sum: {}", sum);
    // Callback patterns.
    // FIX: ReentrantMutexGuard only implements Deref (shared &T access) —
    // handing out &mut T would create aliasing mutable borrows when the lock
    // is re-entered. So the original `f(&mut *guard)` cannot compile. Use
    // interior mutability (Cell) to mutate through a reentrant lock.
    use std::cell::Cell;
    let callback_mutex = ReentrantMutex::new(Cell::new(0));
    fn with_callback<F>(mutex: &ReentrantMutex<Cell<i32>>, f: F)
    where
        F: FnOnce(&Cell<i32>),
    {
        let guard = mutex.lock();
        f(&guard);
    }
    with_callback(&callback_mutex, |value| {
        value.set(value.get() + 1);
        // Callback might want to lock again
        with_callback(&callback_mutex, |v| {
            v.set(v.get() + 1);
        });
    });
    println!("Callback result: {}", callback_mutex.lock().get());
}
use parking_lot::{Once, OnceCell};
use std::sync::Arc;
use std::thread;
fn main() {
// Once - one-time initialization
static INIT: Once = Once::new();
static mut VALUE: i32 = 0;
INIT.call_once(|| {
unsafe { VALUE = 42; }
println!("Initialized!");
});
// Subsequent calls do nothing
INIT.call_once(|| {
println!("This won't print");
});
println!("Value: {}", unsafe { VALUE });
// OnceCell - lazy initialization
let cell = OnceCell::new();
assert!(cell.get().is_none());
// Initialize
cell.get_or_init(|| {
println!("Computing value...");
42
});
println!("Cell value: {:?}", cell.get());
// Won't recompute
cell.get_or_init(|| {
println!("This won't print");
100
});
println!("Cell value: {:?}", cell.get());
// Thread-safe initialization
once_cell_threaded();
}
fn once_cell_threaded() {
let cell = Arc::new(OnceCell::new());
let mut handles = vec![];
for i in 0..5 {
let cell = Arc::clone(&cell);
handles.push(thread::spawn(move || {
let value = cell.get_or_init(|| {
println!("Thread {} initializing", i);
thread::sleep(std::time::Duration::from_millis(100));
i * 10
});
println!("Thread {} got value: {}", i, value);
}));
}
for handle in handles {
handle.join().unwrap();
}
}use parking_lot::FairMutex;
use std::sync::Arc;
use std::thread;
/// FairMutex demo: a fair lock hands itself to waiting threads in strict
/// FIFO order, trading a little throughput for freedom from starvation.
fn main() {
    let mutex = FairMutex::new(0);
    {
        let mut guard = mutex.lock();
        *guard += 1;
    }
    // Plain Mutex: faster, but a releasing thread may immediately re-acquire
    // and starve waiters. FairMutex: strict queue order. Reach for FairMutex
    // when fairness is critical — priority inversion, soft real-time work.
    let fair = Arc::new(FairMutex::new(vec![]));
    // Spawn five contenders via an iterator chain rather than push-in-a-loop.
    let handles: Vec<_> = (0..5)
        .map(|i| {
            let fair = Arc::clone(&fair);
            thread::spawn(move || {
                let mut guard = fair.lock();
                guard.push(i);
                println!("Thread {} acquired lock", i);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Result: {:?}", *fair.lock());
}
use parking_lot::{Mutex, MappedMutexGuard};
/// MutexGuard::map demo: lock the whole state once, then hand out a guard
/// scoped to a single field.
fn main() {
    struct AppState {
        users: Vec<String>,
        settings: std::collections::HashMap<String, String>,
        counter: u32,
    }
    let state = Mutex::new(AppState {
        users: vec!["Alice".to_string(), "Bob".to_string()],
        settings: std::collections::HashMap::new(),
        counter: 0,
    });
    // Lock the entire state but only access a part
    {
        // FIX: the binding must be `mut` — push() goes through DerefMut,
        // which requires a mutable guard binding.
        let mut users: MappedMutexGuard<Vec<String>> =
            MutexGuard::map(state.lock(), |s| &mut s.users);
        users.push("Charlie".to_string());
        println!("Users: {:?}", *users);
    }
    // The rest of the state is still accessible
    {
        let mut guard = state.lock();
        guard.counter += 1;
        println!("Counter: {}", guard.counter);
    }
    // Another mapped guard
    {
        // FIX: likewise needs a `mut` binding for insert().
        let mut settings = MutexGuard::map(state.lock(), |s| &mut s.settings);
        settings.insert("theme".to_string(), "dark".to_string());
        println!("Settings: {:?}", *settings);
    }
}
// Import needed for MutexGuard::map
use parking_lot::MutexGuard;
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
// In debug builds, parking_lot can detect deadlocks
// Enable with feature flag: parking_lot = { version = "0.12", features = ["deadlock_detection"] }
// Classic two-lock deadlock: each thread takes the locks in opposite order.
fn main() {
// This would deadlock without deadlock detection
let a = Arc::new(Mutex::new(0));
let b = Arc::new(Mutex::new(0));
let a1 = Arc::clone(&a);
let b1 = Arc::clone(&b);
let a2 = Arc::clone(&a);
let b2 = Arc::clone(&b);
// Thread 1: lock a, then b
let t1 = thread::spawn(move || {
let _g1 = a1.lock();
// Sleep long enough that thread 2 grabs b first, guaranteeing the cycle.
thread::sleep(std::time::Duration::from_millis(100));
let _g2 = b1.lock(); // Will wait forever
});
// Thread 2: lock b, then a
let t2 = thread::spawn(move || {
let _g1 = b2.lock();
thread::sleep(std::time::Duration::from_millis(100));
let _g2 = a2.lock(); // Will wait forever
});
// With deadlock detection, the program will panic with a useful message
// Without it, the program hangs
// Comment out to avoid actual deadlock in this example:
// t1.join().unwrap();
// t2.join().unwrap();
// t1/t2 are deliberately never joined: the deadlocked threads are simply
// abandoned when main returns.
println!("Skipped deadlock demonstration");
}
// Proper lock ordering to avoid deadlocks
fn proper_locking() {
    let a = Arc::new(Mutex::new(0));
    let b = Arc::new(Mutex::new(0));
    // Strategy 1: always lock in the same order across all threads.
    let guard_a = a.lock();
    let guard_b = b.lock();
    // Release before the try_lock demo — parking_lot's Mutex is not
    // reentrant, so try_lock on a lock this thread already holds returns None.
    drop(guard_b);
    drop(guard_a);
    // Strategy 2: use try_lock to avoid blocking forever.
    // FIX: parking_lot's try_lock returns Option<MutexGuard>, not
    // Result, so the original `if let Ok(...)` could not compile.
    if let Some(_guard_a) = a.try_lock() {
        if let Some(_guard_b) = b.try_lock() {
            // Both locks acquired
        }
    }
}
use parking_lot::{Mutex, RwLock};
use std::time::Duration;
use std::thread;
fn main() {
// try_lock for Mutex
let mutex = Mutex::new(0);
// Non-blocking lock attempt
match mutex.try_lock() {
Some(guard) => {
println!("Got lock immediately: {}", *guard);
}
None => {
println!("Lock is held by another thread");
}
}
// try_lock_for - wait with timeout
{
let guard = mutex.lock();
thread::spawn(move || {
thread::sleep(Duration::from_millis(200));
drop(guard);
});
// Won't get lock (held for 200ms)
match mutex.try_lock_for(Duration::from_millis(50)) {
Some(guard) => println!("Got lock: {}", *guard),
None => println!("Timeout waiting for lock"),
}
thread::sleep(Duration::from_millis(200));
}
// Now lock is free
match mutex.try_lock_for(Duration::from_millis(50)) {
Some(guard) => println!("Got lock now: {}", *guard),
None => println!("Still can't get lock"),
}
// RwLock try operations
let rwlock = RwLock::new(0);
match rwlock.try_read() {
Some(guard) => println!("Got read lock: {}", *guard),
None => println!("Can't get read lock"),
}
match rwlock.try_write() {
Some(guard) => println!("Got write lock: {}", *guard),
None => println!("Can't get write lock"),
}
match rwlock.try_upgradable_read() {
Some(guard) => println!("Got upgradable read: {}", *guard),
None => println!("Can't get upgradable read"),
}
}use parking_lot::RwLock;
use std::collections::HashMap;
use std::sync::Arc;
use std::thread;
// Thread-safe read-through cache: RwLock lets many threads hit get()
// concurrently while writes take exclusive access.
struct Cache<K, V> {
data: RwLock<HashMap<K, V>>,
}
impl<K, V> Cache<K, V>
where
K: std::hash::Hash + Eq + Clone + Send + 'static,
V: Clone + Send + 'static,
{
fn new() -> Self {
Self {
data: RwLock::new(HashMap::new()),
}
}
// Clone the value out so the read lock is released before returning.
fn get(&self, key: &K) -> Option<V> {
let data = self.data.read();
data.get(key).cloned()
}
fn insert(&self, key: K, value: V) {
let mut data = self.data.write();
data.insert(key, value);
}
// Returns the removed value, if the key was present.
fn remove(&self, key: &K) -> Option<V> {
let mut data = self.data.write();
data.remove(key)
}
fn len(&self) -> usize {
let data = self.data.read();
data.len()
}
// Return the cached value for `key`, computing it with `f` on a miss.
// Optimistic pattern: cheap shared read first, exclusive write only on miss.
fn get_or_insert<F>(&self, key: K, f: F) -> V
where
F: FnOnce() -> V,
{
// Check if exists (read lock)
{
let data = self.data.read();
if let Some(value) = data.get(&key) {
return value.clone();
}
}
// Not found, need to insert (write lock)
let mut data = self.data.write();
// Double-check (another thread might have inserted)
// entry() re-checks under the write lock, so `f` runs at most once even
// when several threads miss concurrently.
data.entry(key).or_insert_with(f).clone()
}
}
// Exercise the cache from ten threads sharing three keys, so most calls
// are cache hits and at most three values are ever computed.
fn main() {
let cache = Arc::new(Cache::new());
let mut handles = vec![];
for i in 0..10 {
let cache = Arc::clone(&cache);
handles.push(thread::spawn(move || {
// i % 3 folds ten threads onto three distinct keys.
let key = format!("key{}", i % 3);
// Get or insert
let value = cache.get_or_insert(key.clone(), || {
println!("Computing value for {}", key);
format!("value_for_{}", key)
});
println!("Thread {} got: {}", i, value);
}));
}
for handle in handles {
handle.join().unwrap();
}
// Only the three distinct keys were inserted.
println!("Cache size: {}", cache.len());
}use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::time::Duration;
// One pooled connection; `in_use` is only read/written under the pool's mutex.
struct Connection {
id: usize,
in_use: bool,
}
// Fixed-size connection pool; `available` is signaled on every release so
// blocked acquirers can retry.
struct ConnectionPool {
connections: Mutex<Vec<Connection>>,
available: Condvar,
}
impl ConnectionPool {
    /// Create a pool of `size` connections, all initially free.
    fn new(size: usize) -> Self {
        let connections = (0..size)
            .map(|id| Connection { id, in_use: false })
            .collect();
        Self {
            connections: Mutex::new(connections),
            available: Condvar::new(),
        }
    }
    /// Block until a connection is free, mark it in use, and return a guard
    /// that releases it back to the pool on drop.
    fn acquire(&self) -> ConnectionGuard {
        let mut connections = self.connections.lock();
        // Loop re-checks after every wakeup (spurious wakeups are possible).
        loop {
            if let Some(conn) = connections.iter_mut().find(|c| !c.in_use) {
                conn.in_use = true;
                let id = conn.id;
                return ConnectionGuard {
                    id,
                    pool: self as *const ConnectionPool,
                };
            }
            self.available.wait(&mut connections);
        }
    }
    /// Like `acquire`, but give up and return None once `timeout` elapses.
    fn acquire_timeout(&self, timeout: Duration) -> Option<ConnectionGuard> {
        // FIX: parking_lot's Condvar::wait_until takes an Instant deadline
        // and returns a WaitTimeoutResult. The original passed a closure
        // (confusing it with std's wait_while) that additionally re-borrowed
        // `connections` while it was mutably borrowed by the wait call —
        // neither would compile.
        let deadline = std::time::Instant::now() + timeout;
        let mut connections = self.connections.lock();
        loop {
            if let Some(conn) = connections.iter_mut().find(|c| !c.in_use) {
                conn.in_use = true;
                let id = conn.id;
                return Some(ConnectionGuard {
                    id,
                    pool: self as *const ConnectionPool,
                });
            }
            // Sleep until notified or the deadline passes; then re-check.
            if self.available.wait_until(&mut connections, deadline).timed_out() {
                return None;
            }
        }
    }
    /// Return connection `id` to the pool and wake one waiting acquirer.
    fn release(&self, id: usize) {
        let mut connections = self.connections.lock();
        if let Some(conn) = connections.iter_mut().find(|c| c.id == id) {
            conn.in_use = false;
            self.available.notify_one();
        }
    }
}
// RAII handle to a pooled connection: Drop returns it to the pool.
// Holds a raw pointer to the pool, so the guard must not outlive the pool —
// nothing in the type enforces this (a lifetime or an Arc<ConnectionPool>
// field would make it safe).
struct ConnectionGuard {
id: usize,
pool: *const ConnectionPool,
}
impl ConnectionGuard {
// Id of the underlying pooled connection.
fn id(&self) -> usize {
self.id
}
}
impl Drop for ConnectionGuard {
fn drop(&mut self) {
// SAFETY(review): assumes the pool outlives every guard. True in the
// example below (the pool lives in an Arc for the whole run), but
// unchecked by the compiler — confirm at every call site.
unsafe {
(*self.pool).release(self.id);
}
}
}
// Ten threads contend for three pooled connections; acquire() blocks while
// all connections are busy and wakes as guards are dropped.
fn main() {
let pool = Arc::new(ConnectionPool::new(3));
let mut handles = vec![];
for i in 0..10 {
let pool = Arc::clone(&pool);
handles.push(std::thread::spawn(move || {
let conn = pool.acquire();
println!("Thread {} acquired connection {}", i, conn.id());
std::thread::sleep(Duration::from_millis(100));
println!("Thread {} releasing connection {}", i, conn.id());
// Explicit drop runs ConnectionGuard::drop, which releases the
// connection and notifies one waiter.
drop(conn);
}));
}
for handle in handles {
handle.join().unwrap();
}
println!("All done");
}use parking_lot::{Mutex as PlMutex, RwLock as PlRwLock};
use std::sync::{Mutex as StdMutex, RwLock as StdRwLock};
use std::sync::Arc;
use std::time::Instant;
// Naive wall-clock comparison: four threads each perform ITERATIONS/THREADS
// locked increments against each primitive. For meaningful numbers, run a
// release build; a benchmark harness such as criterion would be more rigorous.
fn main() {
const ITERATIONS: u32 = 1_000_000;
const THREADS: u32 = 4;
// parking_lot Mutex
{
let mutex = Arc::new(PlMutex::new(0u32));
let start = Instant::now();
let handles: Vec<_> = (0..THREADS)
.map(|_| {
let mutex = Arc::clone(&mutex);
std::thread::spawn(move || {
for _ in 0..ITERATIONS / THREADS {
let mut guard = mutex.lock();
*guard += 1;
}
})
})
.collect();
for handle in handles {
handle.join().unwrap();
}
let elapsed = start.elapsed();
println!("parking_lot Mutex: {:?} (result: {})", elapsed, *mutex.lock());
}
// std Mutex
{
let mutex = Arc::new(StdMutex::new(0u32));
let start = Instant::now();
let handles: Vec<_> = (0..THREADS)
.map(|_| {
let mutex = Arc::clone(&mutex);
std::thread::spawn(move || {
for _ in 0..ITERATIONS / THREADS {
// std's lock() returns a Result because of poisoning.
let mut guard = mutex.lock().unwrap();
*guard += 1;
}
})
})
.collect();
for handle in handles {
handle.join().unwrap();
}
let elapsed = start.elapsed();
println!("std Mutex: {:?} (result: {})", elapsed, *mutex.lock().unwrap());
}
// parking_lot RwLock
// Write-only workload: exercises only the exclusive path of each RwLock.
{
let rwlock = Arc::new(PlRwLock::new(0u32));
let start = Instant::now();
let handles: Vec<_> = (0..THREADS)
.map(|_| {
let rwlock = Arc::clone(&rwlock);
std::thread::spawn(move || {
for _ in 0..ITERATIONS / THREADS {
let mut guard = rwlock.write();
*guard += 1;
}
})
})
.collect();
for handle in handles {
handle.join().unwrap();
}
let elapsed = start.elapsed();
println!("parking_lot RwLock: {:?} (result: {})", elapsed, *rwlock.read());
}
// std RwLock
{
let rwlock = Arc::new(StdRwLock::new(0u32));
let start = Instant::now();
let handles: Vec<_> = (0..THREADS)
.map(|_| {
let rwlock = Arc::clone(&rwlock);
std::thread::spawn(move || {
for _ in 0..ITERATIONS / THREADS {
let mut guard = rwlock.write().unwrap();
*guard += 1;
}
})
})
.collect();
for handle in handles {
handle.join().unwrap();
}
let elapsed = start.elapsed();
println!("std RwLock: {:?} (result: {})", elapsed, *rwlock.read().unwrap());
}
}

Summary:
- parking_lot provides faster, smaller synchronization primitives than std::sync
- Mutex&lt;T&gt; is a drop-in replacement with no poisoning
- RwLock&lt;T&gt; allows multiple readers or one writer, with upgradable reads
- ReentrantMutex allows recursive locking by the same thread
- Condvar enables thread signaling and waiting
- Once and OnceCell provide one-time initialization
- FairMutex guarantees FIFO ordering to prevent starvation
- try_lock(), try_lock_for(), and try_lock_until() for non-blocking attempts
- deadlock_detection feature for debug builds
- MutexGuard::map() to create guards to parts of locked data
- lock() returns guard directly, not Result