How do I use high-performance concurrency primitives with parking_lot in Rust?
Walkthrough
The parking_lot crate provides replacement synchronization primitives for the standard library's Mutex, RwLock, Condvar, and others. It offers smaller memory footprint, faster performance in many scenarios, and additional features like deadlock detection. The crate gets its name from its "parking" mechanism—threads are put to sleep (parked) when waiting for locks rather than spinning. It's designed to be a drop-in replacement for std::sync types with a compatible API.
Key concepts:
- Mutex — smaller and faster than std::sync::Mutex
- RwLock — improved read-write lock with better fairness
- Condvar — condition variable with efficient thread parking
- Deadlock Detection — optional feature to detect deadlocks at runtime
- Memory Efficient — uses less memory than standard library equivalents
Code Example
# Cargo.toml
[dependencies]
parking_lot = "0.12"

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

/// Ten threads each take the mutex once and add one to a shared counter;
/// the printed final value is always 10.
fn main() {
    let counter = Arc::new(Mutex::new(0));
    let workers: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                // lock() hands back the guard directly — no Result, no poisoning.
                *counter.lock() += 1;
            })
        })
        .collect();
    for worker in workers {
        worker.join().unwrap();
    }
    println!("Final value: {}", *counter.lock());
}
Basic Mutex Usage
use parking_lot::Mutex;

/// Basic lock/unlock plus a non-blocking try_lock attempt.
fn main() {
    let mutex = Mutex::new(42);

    // Lock returns a MutexGuard directly (no Result — parking_lot never poisons).
    {
        let mut guard = mutex.lock();
        *guard += 1;
        println!("Value: {}", *guard);
    } // guard dropped here, releasing the lock

    // Non-blocking attempt: Some(guard) on success, None if contended.
    match mutex.try_lock() {
        Some(guard) => println!("Got lock: {}", *guard),
        None => {}
    }
}
Mutex vs std::sync::Mutex
use parking_lot::Mutex as PlMutex;
use std::sync::Mutex as StdMutex;

/// Side-by-side API comparison of the two Mutex types.
fn main() {
    // parking_lot: lock() returns the guard directly — no poisoning concept.
    let pl_mutex = PlMutex::new(42);
    let _guard = pl_mutex.lock();

    // std: lock() returns a Result because a panicking holder poisons the lock.
    let std_mutex = StdMutex::new(42);
    let _guard = std_mutex.lock().unwrap();

    println!("parking_lot: smaller, no poisoning, often faster");
}
RwLock for Read-Heavy Workloads
use parking_lot::RwLock;
use std::thread;
use std::sync::Arc;

/// Several readers share the lock concurrently; a writer then takes
/// exclusive access to append an element.
fn main() {
    let lock = Arc::new(RwLock::new(vec![1, 2, 3]));

    // Multiple readers can hold the lock simultaneously.
    let read_handles: Vec<_> = (0..3)
        .map(|i| {
            let lock = Arc::clone(&lock);
            thread::spawn(move || {
                let rguard = lock.read();
                println!("Reader {}: {:?}", i, *rguard);
            })
        })
        .collect();
    for handle in read_handles {
        handle.join().unwrap();
    }

    // A writer blocks all readers. The binding must be `mut` to mutate
    // through the guard (the original example was missing the `mut`).
    let mut wguard = lock.write();
    wguard.push(4);
    drop(wguard);
    println!("After write: {:?}", *lock.read());
}
RwLock Read vs Write Guards
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

/// Read, write, and upgradable-read guards on a parking_lot RwLock.
fn main() {
    let lock = RwLock::new(10);

    // Read lock — any number of concurrent readers.
    {
        let r1 = lock.read();
        let r2 = lock.read(); // works: multiple readers
        println!("Read 1: {}, Read 2: {}", *r1, *r2);
    }

    // Write lock — exclusive access.
    {
        let mut w = lock.write();
        *w += 5;
        println!("After write: {}", *w);
    }

    // Upgradable read lock: coexists with plain readers but can later be
    // upgraded to a write lock without releasing.
    {
        let r = lock.upgradable_read();
        println!("Upgradable read: {}", *r);
        // Upgrade is an associated function on the *guard* type
        // (RwLockUpgradableReadGuard), not on RwLock — the original called
        // a nonexistent RwLock::upgrade.
        let mut w = RwLockUpgradableReadGuard::upgrade(r);
        *w *= 2;
        println!("After upgrade: {}", *w);
    }
}
Condvar for Thread Signaling
use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::thread;

/// One thread waits on a condition variable; the main thread flips the
/// flag under the lock and signals it.
fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair_clone = Arc::clone(&pair);

    // Waiter: block until the flag becomes true. The while-loop re-checks
    // the predicate to cope with spurious wakeups.
    let waiter = thread::spawn(move || {
        let (lock, cvar) = &*pair_clone;
        let mut started = lock.lock();
        while !*started {
            // parking_lot's Condvar::wait takes &mut guard (std consumes it).
            cvar.wait(&mut started);
        }
        println!("Waiter: condition met!");
    });

    // Notifier: set the flag while holding the lock, then wake one waiter.
    *pair.0.lock() = true;
    pair.1.notify_one();

    waiter.join().unwrap();
}
Condvar with Timeout
use parking_lot::{Mutex, Condvar};
use std::time::{Duration, Instant};

/// Waiting with a deadline: wait_until returns a WaitTimeoutResult that
/// reports whether the deadline elapsed before any notification arrived.
fn main() {
    let pair = (Mutex::new(false), Condvar::new());
    let (lock, cvar) = &pair;

    let mut guard = lock.lock();
    let start = Instant::now();
    let deadline = Instant::now() + Duration::from_millis(100);

    // Nobody ever notifies, so this returns only once the deadline passes.
    let result = cvar.wait_until(&mut guard, deadline);

    println!("Wait timed out: {}", result.timed_out());
    println!("Elapsed: {:?}", start.elapsed());
}
ReentrantMutex
use parking_lot::ReentrantMutex;

/// A ReentrantMutex may be locked repeatedly by the thread that already
/// holds it; its guard only hands out shared (&T) access.
fn main() {
    let mutex = ReentrantMutex::new(0);

    let outer = mutex.lock();
    println!("First lock: {}", *outer);

    // Re-locking on the same thread does not deadlock — that is the whole
    // point of a reentrant mutex.
    let inner = mutex.lock();
    println!("Second lock: {}", *inner);
    drop(inner);
    drop(outer);

    println!("Value: {}", *mutex.lock());
}
// Example: a recursive function that re-locks the mutex at every depth.
/// Prints the locked Vec at each level of recursion. Because ReentrantMutex
/// allows same-thread re-locking, the guard can stay alive across the
/// recursive call without deadlocking (a plain Mutex would hang here).
/// The original dropped the guard before recursing, which contradicted its
/// own comment and never actually exercised reentrancy.
fn recursive_function(mutex: &ReentrantMutex<Vec<i32>>, depth: u32) {
    if depth == 0 {
        return;
    }
    let guard = mutex.lock();
    println!("Depth {}: {:?}", depth, *guard);
    // Recurse while still holding the lock — the inner call's mutex.lock()
    // succeeds because this thread already owns the mutex.
    recursive_function(mutex, depth - 1);
    drop(guard);
}
Once for One-Time Initialization
use parking_lot::Once;
use std::ptr::addr_of;

static mut GLOBAL_DATA: Option<String> = None;
static INIT: Once = Once::new();

/// Lazily initializes GLOBAL_DATA exactly once, even under concurrent calls.
fn get_data() -> &'static str {
    INIT.call_once(|| unsafe {
        GLOBAL_DATA = Some("Initialized!".to_string());
    });
    // SAFETY: call_once guarantees the write above happened-before this
    // read, and nothing mutates GLOBAL_DATA afterwards. addr_of! reads
    // through a raw pointer instead of taking a direct shared reference to
    // a `static mut` (which newer editions reject).
    unsafe { (*addr_of!(GLOBAL_DATA)).as_ref().unwrap() }
}

fn main() {
    // parking_lot::Once has no `is_completed()`; completion is queried via
    // `state().done()`.
    println!("Before: initialized = {}", INIT.state().done());
    println!("Data: {}", get_data());
    println!("After: initialized = {}", INIT.state().done());
}
Barrier for Thread Synchronization
// parking_lot does not provide a Barrier type — use the one from std.
use std::sync::{Arc, Barrier};
use std::thread;

/// Three threads rendezvous at a barrier: every "before" line prints
/// before any "after" line.
fn main() {
    let barrier = Arc::new(Barrier::new(3));
    let mut handles = vec![];
    for i in 0..3 {
        let barrier = Arc::clone(&barrier);
        handles.push(thread::spawn(move || {
            println!("Thread {} before barrier", i);
            // Blocks until all three threads have reached this point.
            barrier.wait();
            println!("Thread {} after barrier", i);
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
}
Semaphore
// parking_lot does not ship a Semaphore type (the original example called a
// nonexistent API). Build a minimal counting semaphore from Mutex + Condvar.
use parking_lot::{Condvar, Mutex};

struct Semaphore {
    permits: Mutex<usize>,
    released: Condvar,
}

impl Semaphore {
    /// Creates a semaphore with `count` permits.
    fn new(count: usize) -> Self {
        Self { permits: Mutex::new(count), released: Condvar::new() }
    }

    /// Takes a permit without blocking; returns false when none are left.
    fn try_acquire(&self) -> bool {
        let mut n = self.permits.lock();
        if *n == 0 {
            false
        } else {
            *n -= 1;
            true
        }
    }

    /// Returns a permit and wakes one blocked waiter.
    fn release(&self) {
        *self.permits.lock() += 1;
        self.released.notify_one();
    }
}

fn main() {
    // Semaphore with 2 permits.
    let sem = Semaphore::new(2);
    let permit1 = sem.try_acquire();
    let permit2 = sem.try_acquire();
    let permit3 = sem.try_acquire(); // fails: only 2 permits exist
    println!("Permit 1: {:?}", permit1);
    println!("Permit 2: {:?}", permit2);
    println!("Permit 3: {:?}", permit3);
    sem.release(); // return a permit
    let permit4 = sem.try_acquire();
    println!("Permit 4 (after returning): {:?}", permit4);
}
Fair Mutex
use parking_lot::FairMutex;
use std::sync::Arc;
use std::thread;

/// FairMutex hands the lock to waiting threads in FIFO order instead of
/// letting a releasing thread barge back in. (The original import listed a
/// nonexistent `MutexFair` type, which has been removed.)
fn main() {
    let mutex = Arc::new(FairMutex::new(0));
    let handles: Vec<_> = (0..5)
        .map(|i| {
            let mutex = Arc::clone(&mutex);
            thread::spawn(move || {
                let mut guard = mutex.lock();
                println!("Thread {} acquired lock", i);
                *guard += 1;
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Final value: {}", *mutex.lock());
}
Deadlock Detection
# Cargo.toml
[dependencies]
parking_lot = { version = "0.12", features = ["deadlock_detection"] }

use parking_lot::Mutex;
use std::thread;
use std::sync::Arc;

/// Demonstrates the classic lock-order-inversion deadlock: thread 1 holds
/// A and wants B while thread 2 holds B and wants A. The original version
/// moved references to stack locals into `thread::spawn`, which does not
/// compile (spawn requires 'static captures); Arc fixes the ownership.
fn main() {
    let a = Arc::new(Mutex::new(1));
    let b = Arc::new(Mutex::new(2));

    let (a1, b1) = (Arc::clone(&a), Arc::clone(&b));
    let _h1 = thread::spawn(move || {
        let _a = a1.lock();
        thread::sleep(std::time::Duration::from_millis(100));
        let _b = b1.lock(); // would deadlock
    });
    let _h2 = thread::spawn(move || {
        let _b = b.lock();
        thread::sleep(std::time::Duration::from_millis(100));
        let _a = a.lock(); // would deadlock
    });

    // With the `deadlock_detection` feature enabled, a watchdog thread can
    // periodically call parking_lot::deadlock::check_deadlock() to report
    // deadlocked threads. In practice, prefer a consistent lock order or
    // try_lock_for with a timeout.
    println!("Warning: The above would deadlock without timeout");
    // Joining the handles would hang forever:
    // _h1.join().unwrap();
    // _h2.join().unwrap();
}
Producer-Consumer Pattern
use parking_lot::{Mutex, Condvar};
use std::collections::VecDeque;
use std::sync::Arc;
use std::thread;
// Unbounded blocking queue: the Mutex protects the buffer and the Condvar
// wakes consumers that are blocked on an empty queue.
struct Queue<T> {
data: Mutex<VecDeque<T>>,
not_empty: Condvar,
}
impl<T> Queue<T> {
// Creates an empty queue.
fn new() -> Self {
Self {
data: Mutex::new(VecDeque::new()),
not_empty: Condvar::new(),
}
}
// Appends an item, then wakes one blocked consumer (if any). The lock
// guard is a temporary, so it is released before notify_one runs.
fn push(&self, item: T) {
self.data.lock().push_back(item);
self.not_empty.notify_one();
}
// Blocks until an item is available, then removes and returns it.
// The while-loop re-checks the predicate after each wakeup to handle
// spurious wakeups; wait atomically releases the lock and re-acquires
// it before returning.
fn pop(&self) -> T {
let mut guard = self.data.lock();
while guard.is_empty() {
self.not_empty.wait(&mut guard);
}
guard.pop_front().unwrap()
}
}
// One producer pushes five items with a small delay; one consumer pops
// five items, blocking whenever the queue is momentarily empty.
fn main() {
let queue = Arc::new(Queue::new());
let producer = {
let queue = Arc::clone(&queue);
thread::spawn(move || {
for i in 0..5 {
queue.push(i);
println!("Produced: {}", i);
thread::sleep(std::time::Duration::from_millis(50));
}
})
};
let consumer = {
let queue = Arc::clone(&queue);
thread::spawn(move || {
for _ in 0..5 {
let item = queue.pop();
println!("Consumed: {}", item);
}
})
};
producer.join().unwrap();
consumer.join().unwrap();
}Thread-Safe Cache
use parking_lot::RwLock;
use std::collections::HashMap;
use std::sync::Arc;
// Thread-safe map: many concurrent readers; writers take exclusive access.
// Values are cloned out so no guard escapes the method.
struct Cache<K, V> {
data: RwLock<HashMap<K, V>>,
}
impl<K: std::hash::Hash + Eq + Clone, V: Clone> Cache<K, V> {
// Creates an empty cache.
fn new() -> Self {
Self {
data: RwLock::new(HashMap::new()),
}
}
// Returns a clone of the value for `key`, holding only a read lock.
fn get(&self, key: &K) -> Option<V> {
self.data.read().get(key).cloned()
}
// Inserts or overwrites under the write lock.
fn insert(&self, key: K, value: V) {
self.data.write().insert(key, value);
}
// Removes and returns the value, if present (write lock).
fn remove(&self, key: &K) -> Option<V> {
self.data.write().remove(key)
}
// Number of entries (read lock).
fn len(&self) -> usize {
self.data.read().len()
}
}
/// Inserts two String-keyed entries and reads them back. The original
/// inserted `&str` keys but looked them up with `String` keys, which does
/// not type-check; keys are now String on both sides.
fn main() {
    let cache = Arc::new(Cache::new());
    cache.insert("a".to_string(), 1);
    cache.insert("b".to_string(), 2);
    println!("a: {:?}", cache.get(&"a".to_string()));
    println!("b: {:?}", cache.get(&"b".to_string()));
    println!("len: {}", cache.len());
}
Lock Statistics
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

/// Five threads each perform 1000 locked increments; the final value is
/// always 5000. (A stray `stats` mutex that was created but never used in
/// the original has been removed.)
fn main() {
    let mutex = Arc::new(Mutex::new(0));
    let handles: Vec<_> = (0..5)
        .map(|_| {
            let mutex = Arc::clone(&mutex);
            thread::spawn(move || {
                for _ in 0..1000 {
                    let mut guard = mutex.lock();
                    *guard += 1;
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Final value: {}", *mutex.lock());
}
Custom Types in Mutex
use parking_lot::Mutex;
use std::sync::Arc;

/// Any Send type can live inside a Mutex; the guard derefs to the struct,
/// so its methods can be called straight through it.
#[derive(Debug)]
struct SharedState {
    counter: u64,
    messages: Vec<String>,
}

impl SharedState {
    fn new() -> Self {
        Self { counter: 0, messages: Vec::new() }
    }

    /// Bumps the counter by one.
    fn increment(&mut self) {
        self.counter += 1;
    }

    /// Records a message.
    fn add_message(&mut self, msg: String) {
        self.messages.push(msg);
    }
}

fn main() {
    let state = Arc::new(Mutex::new(SharedState::new()));

    // Mutate through the guard; the lock is released when `guard` drops.
    {
        let mut guard = state.lock();
        guard.increment();
        guard.add_message("Hello".to_string());
    }

    // Read back and print the whole struct.
    println!("State: {:?}", *state.lock());
}
Avoiding Lock Contention
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

/// Spreads a counter across 16 independently-locked shards so threads
/// rarely contend on the same mutex.
fn main() {
    const SHARDS: usize = 16;
    let shards: Vec<_> = (0..SHARDS).map(|_| Arc::new(Mutex::new(0))).collect();

    let handles: Vec<_> = (0..100)
        .map(|i| {
            let shards = shards.clone();
            thread::spawn(move || {
                // Each thread touches only one shard, chosen by index.
                *shards[i % SHARDS].lock() += 1;
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    // Aggregate across shards for the grand total (always 100).
    let total: i32 = shards.iter().map(|s| *s.lock()).sum();
    println!("Total: {}", total);
}
RwLock Write Lock Downgrade
use parking_lot::{RwLock, RwLockWriteGuard};

/// Atomically converts a write guard into a read guard so no other writer
/// can sneak in between. `downgrade` is an associated function on the
/// guard type (RwLockWriteGuard), not on RwLock — the original called a
/// nonexistent `RwLock::downgrade`.
fn main() {
    let lock = RwLock::new(42);

    let write_guard = lock.write();
    println!("Write guard: {}", *write_guard);

    // Downgrade to a read lock without ever releasing the lock.
    let read_guard = RwLockWriteGuard::downgrade(write_guard);
    println!("Read guard after downgrade: {}", *read_guard);

    // Other readers may now join.
    let read_guard2 = lock.read();
    println!("Second reader: {}", *read_guard2);
}
MappedMutexGuard
use parking_lot::{Mutex, MutexGuard, MappedMutexGuard};

struct Data {
    value: i32,
    name: String,
}

/// MutexGuard::map projects a guard onto one field, yielding a
/// MappedMutexGuard that exposes only that field while the lock stays
/// held. (The original called a nonexistent `Mutex::map_lock`.)
fn main() {
    let data = Mutex::new(Data {
        value: 42,
        name: "test".to_string(),
    });

    // Lock the whole struct, then narrow the guard to the `value` field.
    let guard = data.lock();
    let value_guard: MappedMutexGuard<'_, i32> = MutexGuard::map(guard, |d| &mut d.value);
    println!("Value: {}", *value_guard);
    // value_guard gives access only to the i32 field.
}
Timeout on Try Lock
use parking_lot::Mutex;
use std::time::Duration;

/// Tries to take the lock, giving up after `timeout`.
///
/// parking_lot ships exactly this behavior as `Mutex::try_lock_for`, which
/// parks the waiting thread instead of spinning on try_lock/yield, so we
/// delegate to it rather than hand-rolling the loop.
fn try_lock_with_timeout(mutex: &Mutex<i32>, timeout: Duration) -> Option<parking_lot::MutexGuard<i32>> {
    mutex.try_lock_for(timeout)
}

fn main() {
    let mutex = Mutex::new(42);
    match try_lock_with_timeout(&mutex, Duration::from_millis(100)) {
        Some(guard) => println!("Got lock: {}", *guard),
        None => println!("Timeout acquiring lock"),
    }
}
Summary
- parking_lot::Mutex is smaller and faster than std::sync::Mutex
- No lock poisoning — lock() returns the guard directly, not a Result
- RwLock supports upgradable reads and write-lock downgrades
- Condvar provides efficient thread signaling with a wait/notify pattern
- ReentrantMutex allows the same thread to lock multiple times
- std::sync::Barrier synchronizes multiple threads at a point; a counting semaphore can be built from Mutex + Condvar
- Once ensures one-time initialization
- Enable the deadlock_detection feature for debugging deadlocks
- try_lock() attempts acquisition without blocking; try_lock_for() adds a timeout
- Use RwLock for read-heavy workloads with multiple readers
- Perfect for: high-performance synchronization, database connection pools, caches, thread-safe state management
