How do I use high-performance locks with parking_lot in Rust?

Walkthrough

The parking_lot crate provides alternative synchronization primitives that are smaller, faster, and more flexible than those in the standard library. Its Mutex and RwLock types use a "parking" mechanism where threads are put to sleep and woken up efficiently, rather than spinning. The crate also provides Once, Condvar, ReentrantMutex, and other utilities. These locks use eventual fairness — unfair by default for throughput, with periodic forced fair unlocks to prevent thread starvation — and they don't poison on panic, making them more ergonomic than std::sync primitives.

Key concepts:

  1. Mutex — mutual exclusion lock, smaller and faster than std::sync::Mutex
  2. RwLock — reader-writer lock with better performance characteristics
  3. Eventual fairness — periodic forced fair unlocks prevent starvation (FairMutex gives strict FIFO)
  4. No poisoning — locks don't become poisoned on panic, you always get the data
  5. Condvar — condition variable for signaling between threads

Code Example

# Cargo.toml
[dependencies]
parking_lot = "0.12"
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

fn main() {
    // Shared counter: Arc for shared ownership, Mutex for exclusive access.
    let counter = Arc::new(Mutex::new(0));

    // Ten workers, each bumping the counter exactly once.
    let workers: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                // parking_lot's lock() hands back the guard directly (no Result).
                let mut num = counter.lock();
                *num += 1;
            })
        })
        .collect();

    for worker in workers {
        worker.join().unwrap();
    }

    println!("Result: {}", *counter.lock());
}

Basic Mutex Operations

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

fn main() {
    // A mutex guarding a plain integer.
    let mutex = Mutex::new(42);

    // Scope the guard so the lock is released at the closing brace.
    {
        let guard = mutex.lock();
        println!("Value: {}", *guard);

        // guard is a MutexGuard<i32>; DerefMut gives &mut i32 for mutation.
    }

    // The lock is free again here.

    // Non-blocking attempt: try_lock() yields Option<MutexGuard>.
    if let Some(guard) = mutex.try_lock() {
        println!("Got lock: {}", *guard);
    } else {
        println!("Lock is held by another thread");
    }

    // Query the lock state without acquiring it.
    println!("Is locked: {}", mutex.is_locked());
}

Mutex vs std::sync::Mutex

use parking_lot::Mutex as PlMutex;
use std::sync::Mutex as StdMutex;
use std::sync::Arc;
use std::thread;

fn main() {
    // parking_lot::Mutex — lock() hands back the guard itself.
    let pl_mutex = Arc::new(PlMutex::new(0));
    {
        let guard = pl_mutex.lock();
        println!("parking_lot value: {}", *guard);
        // A panic while locked does NOT poison the mutex.
    }

    // std::sync::Mutex — lock() returns a Result to surface poisoning.
    let std_mutex = Arc::new(StdMutex::new(0));
    {
        let guard = std_mutex.lock().unwrap(); // Must handle potential poison
        println!("std value: {}", *guard);
    }

    // API differences demonstrated above:
    //  - parking_lot::Mutex::lock() -> MutexGuard (no poisoning)
    //  - std::sync::Mutex::lock()  -> LockResult<MutexGuard>
    //  - parking_lot's mutex is one byte; std's wraps an OS primitive
    println!("\nDifferences:");
    println!("- parking_lot: lock() -> MutexGuard");
    println!("- std: lock() -> LockResult<MutexGuard>");
    println!("- parking_lot: no poisoning");
    println!("- parking_lot: smaller memory footprint");
}

RwLock for Reader-Writer Locks

use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);

    // Any number of read guards may coexist.
    {
        let r1 = lock.read();
        let r2 = lock.read();
        let r3 = lock.read();

        println!("Readers: {:?}, {:?}, {:?}", *r1, *r2, *r3);
        // All three readers can access simultaneously
    }

    // A write guard is exclusive: no other readers or writers.
    {
        let mut w = lock.write();
        w.push(4);
        println!("After write: {:?}", *w);
    }

    // Non-blocking variants return Option instead of blocking.
    match lock.try_read() {
        Some(guard) => println!("Got read lock: {:?}", *guard),
        None => println!("Could not get read lock"),
    }

    match lock.try_write() {
        // Underscore prefix: the guard is intentionally unused here
        // (fixes the unused-variable warning in the original).
        Some(_guard) => println!("Got write lock"),
        None => println!("Could not get write lock"),
    }
}

RwLock with Multiple Threads

use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;

fn main() {
    let data = Arc::new(RwLock::new(vec![]));
    let mut handles = vec![];

    // Writers: each briefly takes the exclusive lock to append one item.
    for i in 0..3 {
        let data = Arc::clone(&data);
        handles.push(thread::spawn(move || {
            let mut writer = data.write();
            writer.push(format!("Writer {}", i));
            println!("Writer {} added item", i);
        }));
    }

    // Readers: many can observe the vector at the same time.
    for i in 0..5 {
        let data = Arc::clone(&data);
        handles.push(thread::spawn(move || {
            let snapshot = data.read();
            println!("Reader {} saw: {:?}", i, *snapshot);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }

    println!("\nFinal data: {:?}", *data.read());
}

Condvar for Thread Signaling

use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::thread;

fn main() {
    // Shared (flag, condvar) pair: the flag is the actual condition,
    // the condvar is only the wake-up mechanism.
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let waiter_pair = Arc::clone(&pair);

    // Waiter: blocks until the flag flips to true.
    let handle = thread::spawn(move || {
        let (lock, cvar) = &*waiter_pair;
        let mut started = lock.lock();

        println!("Waiting for signal...");
        // The loop guards against spurious wakeups.
        while !*started {
            cvar.wait(&mut started);
        }
        println!("Received signal, proceeding!");
    });

    // Give the waiter a moment to block on the condvar.
    thread::sleep(std::time::Duration::from_millis(100));

    // Signaler: set the flag under the lock, then wake the waiter.
    {
        let (lock, cvar) = &*pair;
        let mut started = lock.lock();
        *started = true;
        cvar.notify_one();
        println!("Sent signal");
    }

    handle.join().unwrap();
}

Condvar with Predicate

use parking_lot::{Mutex, Condvar};
use std::collections::VecDeque;
use std::sync::Arc;
use std::thread;
use std::time::Duration;

/// A fixed-capacity FIFO queue; callers wrap it in a Mutex + Condvar.
struct BoundedQueue {
    // VecDeque gives O(1) pops from the front; Vec::remove(0) is O(n).
    items: VecDeque<i32>,
    capacity: usize,
}

impl BoundedQueue {
    /// Create an empty queue that holds at most `capacity` items.
    fn new(capacity: usize) -> Self {
        // Preallocate: the queue never grows past `capacity`.
        Self { items: VecDeque::with_capacity(capacity), capacity }
    }

    fn is_full(&self) -> bool {
        self.items.len() >= self.capacity
    }

    fn is_empty(&self) -> bool {
        self.items.is_empty()
    }
}

fn main() {
    let queue = Arc::new((Mutex::new(BoundedQueue::new(3)), Condvar::new()));

    let producer_queue = Arc::clone(&queue);
    let producer = thread::spawn(move || {
        let (lock, cvar) = &*producer_queue;
        for i in 0..10 {
            let mut q = lock.lock();

            // Condvar waits can wake spuriously — always re-check in a loop.
            while q.is_full() {
                println!("Producer: queue full, waiting...");
                cvar.wait(&mut q);
            }

            q.items.push_back(i);
            println!("Producer: added {}", i);
            cvar.notify_all(); // Wake any waiting consumer
        }
    });

    let consumer_queue = Arc::clone(&queue);
    let consumer = thread::spawn(move || {
        let (lock, cvar) = &*consumer_queue;
        for _ in 0..10 {
            let mut q = lock.lock();

            while q.is_empty() {
                println!("Consumer: queue empty, waiting...");
                cvar.wait(&mut q);
            }

            let item = q.items.pop_front().expect("checked non-empty above");
            println!("Consumer: got {}", item);
            cvar.notify_all(); // Wake the producer
        }
    });

    producer.join().unwrap();
    consumer.join().unwrap();
}

Once for One-Time Initialization

use parking_lot::Once;
use std::sync::Arc;
use std::thread;
 
// NOTE(review): `static mut` is discouraged (newer compilers warn when
// references to it are taken); std::sync::OnceLock<String> would avoid both
// unsafe blocks entirely. Kept as-is here to demonstrate parking_lot::Once.
static mut CONFIG: Option<String> = None;
static INIT: Once = Once::new();
 
// Lazily initialize CONFIG exactly once, no matter how many threads race here.
fn get_config() -> &'static str {
    INIT.call_once(|| {
        println!("Initializing config...");
        // Simulate expensive initialization
        std::thread::sleep(std::time::Duration::from_millis(100));
        unsafe {
            CONFIG = Some("production".to_string());
        }
    });
    
    // Sound only because call_once guarantees the write above happened-before
    // this read, and CONFIG is never mutated again afterwards.
    unsafe { CONFIG.as_ref().unwrap() }
}
 
fn main() {
    // Five threads race to initialize; call_once runs the closure only once.
    let handles: Vec<_> = (0..5)
        .map(|_| {
            thread::spawn(|| {
                let config = get_config();
                println!("Got config: {}", config);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}

Once vs std::sync::Once

use parking_lot::Once as PlOnce;
use std::sync::Once as StdOnce;

fn main() {
    // parking_lot::Once
    let pl_once = PlOnce::new();
    pl_once.call_once(|| println!("parking_lot: This runs once"));

    // state() exposes whether (and how) the closure already ran.
    println!("parking_lot once called: {}", pl_once.state().done());

    // std::sync::Once
    let std_once = StdOnce::new();
    std_once.call_once(|| println!("std: This runs once"));

    // std's Once only exposes is_completed(); parking_lot's OnceState
    // distinguishes New / InProgress / Done / Poisoned.

    println!("\nparking_lot::Once advantages:");
    println!("- Can check if already executed");
    println!("- Can get state information");
    println!("- Smaller memory footprint");
}

FairMutex and Lock Fairness

use parking_lot::{Mutex, FairMutex};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    // NOTE: parking_lot's regular Mutex is NOT strictly fair. It is unfair
    // by default (better throughput) and relies on "eventual fairness": a
    // forced fair unlock roughly every 0.5 ms hands the lock to the longest
    // waiter, which prevents starvation without FIFO overhead.
    // FairMutex always uses fair (FIFO) unlocking — use it when strict
    // hand-off order matters. (The original example used Mutex but claimed
    // it was fair by default, which is incorrect.)
    let mutex = Arc::new(FairMutex::new(0));

    let m1 = Arc::clone(&mutex);
    let m2 = Arc::clone(&mutex);

    let h1 = thread::spawn(move || {
        for _ in 0..3 {
            let _guard = m1.lock();
            println!("Thread 1 has lock");
            thread::sleep(Duration::from_millis(10));
        }
    });

    let h2 = thread::spawn(move || {
        for _ in 0..3 {
            let _guard = m2.lock();
            println!("Thread 2 has lock");
            thread::sleep(Duration::from_millis(10));
        }
    });

    h1.join().unwrap();
    h2.join().unwrap();

    println!("\nAll threads completed fairly");
}

Deadlock Detection

// Enable deadlock detection with feature flag:
// [dependencies]
// parking_lot = { version = "0.12", features = ["deadlock_detection"] }

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;

fn main() {
    // NOTE: the feature alone does NOT abort deadlocked programs. You must
    // poll `parking_lot::deadlock::check_deadlock()` from a watchdog thread;
    // it returns the currently deadlocked threads with their backtraces.
    //
    // Watchdog sketch (requires the `deadlock_detection` feature):
    // thread::spawn(|| loop {
    //     thread::sleep(std::time::Duration::from_secs(1));
    //     let deadlocks = parking_lot::deadlock::check_deadlock();
    //     if !deadlocks.is_empty() {
    //         for t in deadlocks.iter().flatten() {
    //             eprintln!("Deadlocked thread {:?}:\n{:?}", t.thread_id(), t.backtrace());
    //         }
    //         panic!("{} deadlocks detected", deadlocks.len());
    //     }
    // });

    println!("Deadlock detection example (commented out to avoid hanging):");

    // Classic lock-order inversion that the watchdog would report:
    // let m1 = Arc::new(Mutex::new(1));
    // let m2 = Arc::new(Mutex::new(2));
    //
    // let m1_clone = Arc::clone(&m1);
    // let m2_clone = Arc::clone(&m2);
    //
    // let h1 = thread::spawn(move || {
    //     let _g1 = m1_clone.lock();
    //     thread::sleep(std::time::Duration::from_millis(100));
    //     let _g2 = m2_clone.lock(); // Will deadlock
    // });
    //
    // let h2 = thread::spawn(move || {
    //     let _g2 = m2.lock();
    //     thread::sleep(std::time::Duration::from_millis(100));
    //     let _g1 = m1.lock(); // Will deadlock
    // });
    //
    // h1.join().unwrap();
    // h2.join().unwrap();

    println!("Enable 'deadlock_detection' and poll deadlock::check_deadlock()");
    println!("from a watchdog thread to log backtraces of deadlocked threads");
}

Semaphore for Resource Limiting

use parking_lot::Semaphore;
use std::sync::Arc;
use std::thread;
 
fn main() {
    // Create semaphore with 3 permits
    let sem = Arc::new(Semaphore::new(3));
    let mut handles = vec![];
    
    // Spawn 10 threads but only 3 can run concurrently
    for i in 0..10 {
        let sem = Arc::clone(&sem);
        handles.push(thread::spawn(move || {
            println!("Thread {} waiting for permit", i);
            
            let permit = sem.acquire();
            println!("Thread {} got permit", i);
            
            // Simulate work
            thread::sleep(std::time::Duration::from_millis(100));
            
            println!("Thread {} releasing permit", i);
            drop(permit);
        }));
    }
    
    for handle in handles {
        handle.join().unwrap();
    }
    
    println!("All threads completed");
}

MappedMutexGuard for Partial Access

use parking_lot::{Mutex, MappedMutexGuard};
 
struct Config {
    name: String,
    version: String,
    settings: Settings,
}
 
struct Settings {
    debug: bool,
    verbose: bool,
}
 
fn main() {
    let config = Mutex::new(Config {
        name: "MyApp".to_string(),
        version: "1.0.0".to_string(),
        settings: Settings {
            debug: false,
            verbose: false,
        },
    });
    
    // Lock and get a guard to the whole config
    let guard = config.lock();
    
    // Access nested field
    println!("Name: {}", guard.name);
    
    // Create a mapped guard that only accesses a part
    let settings_guard: MappedMutexGuard<Settings> = MutexGuard::map(guard, |c| &mut c.settings);
    
    // Now we can modify just settings
    settings_guard.debug = true;
    settings_guard.verbose = true;
    
    println!("Debug: {}, Verbose: {}", settings_guard.debug, settings_guard.verbose);
}

ReentrantMutex for Recursive Locking

use parking_lot::ReentrantMutex;

fn main() {
    // A ReentrantMutex may be locked repeatedly by the thread that holds it.
    let mutex = ReentrantMutex::new(0);

    let outer = mutex.lock();
    println!("First lock: {}", *outer);

    {
        let middle = mutex.lock(); // same thread: does not deadlock
        println!("Second lock (same thread): {}", *middle);

        {
            let inner = mutex.lock();
            println!("Third lock (same thread): {}", *inner);
        }
    }

    // The mutex is only truly released once every guard has dropped.
    println!("All locks released when guards drop");
}

ReentrantMutex for Recursive Functions

use parking_lot::ReentrantMutex;
use std::sync::Arc;

struct Tree {
    value: i32,
    children: Vec<Tree>,
}

impl Tree {
    fn new(value: i32) -> Self {
        Self { value, children: vec![] }
    }

    fn add_child(&mut self, child: Tree) {
        self.children.push(child);
    }
}

/// Print every node in the tree, re-locking the mutex at each node to
/// demonstrate that ReentrantMutex allows recursive locking.
///
/// Fix vs. original: the original called `process_tree(tree, depth + 1)`
/// for each child, which re-processed the ROOT instead of the child and
/// recursed forever (stack overflow). We now walk the actual child nodes.
fn process_tree(tree: &ReentrantMutex<Tree>, depth: usize) {
    let guard = tree.lock();
    process_node(tree, &guard, depth);
}

/// Visit `node`, re-acquiring the tree's lock to show reentrancy.
/// Re-locking here would deadlock with a regular Mutex!
fn process_node(tree: &ReentrantMutex<Tree>, node: &Tree, depth: usize) {
    let _guard = tree.lock(); // recursive lock: same thread already holds it

    println!("{}Node value: {}", "  ".repeat(depth), node.value);

    for child in &node.children {
        process_node(tree, child, depth + 1);
    }
}

fn main() {
    let mut root = Tree::new(1);
    root.add_child(Tree::new(2));
    root.add_child(Tree::new(3));
    root.children[0].add_child(Tree::new(4));

    let tree = ReentrantMutex::new(root);

    // This works because ReentrantMutex allows recursive locking
    process_tree(&tree, 0);
}

RwLock Read vs Upgradable Read

use parking_lot::RwLock;
 
fn main() {
    let lock = RwLock::new(0);
    
    // Regular read lock - multiple readers allowed
    let r1 = lock.read();
    let r2 = lock.read();
    println!("Multiple readers: {}, {}", *r1, *r2);
    drop(r1);
    drop(r2);
    
    // Upgradable read - can be upgraded to write
    let upgradable = lock.upgradable_read();
    println!("Upgradable read: {}", *upgradable);
    
    // Can read while holding upgradable
    let r3 = lock.read(); // This works!
    println!("Can still read: {}", *r3);
    drop(r3);
    
    // Upgrade to write lock (blocks new readers)
    let mut write = RwLockUpgradableReadGuard::upgrade(upgradable);
    *write += 1;
    println!("After upgrade and write: {}", *write);
}

Performance Comparison

use parking_lot::Mutex as PlMutex;
use std::sync::Mutex as StdMutex;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
 
/// Time `threads` workers each doing `iterations` lock/increment cycles
/// against a parking_lot mutex; returns elapsed microseconds.
fn benchmark_parking_lot(threads: usize, iterations: usize) -> u128 {
    let counter = Arc::new(PlMutex::new(0u64));

    let start = Instant::now();

    let workers: Vec<_> = (0..threads)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..iterations {
                    let mut guard = counter.lock();
                    *guard += 1;
                }
            })
        })
        .collect();

    for worker in workers {
        worker.join().unwrap();
    }

    start.elapsed().as_micros()
}
 
/// Same workload as benchmark_parking_lot, but against std::sync::Mutex;
/// returns elapsed microseconds.
fn benchmark_std(threads: usize, iterations: usize) -> u128 {
    let counter = Arc::new(StdMutex::new(0u64));

    let start = Instant::now();

    let workers: Vec<_> = (0..threads)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..iterations {
                    // std's lock() returns a Result because of poisoning.
                    let mut guard = counter.lock().unwrap();
                    *guard += 1;
                }
            })
        })
        .collect();

    for worker in workers {
        worker.join().unwrap();
    }

    start.elapsed().as_micros()
}
 
fn main() {
    let threads = 4;
    let iterations = 100_000;

    println!("Benchmark: {} threads, {} iterations each", threads, iterations);
    println!();

    // Run both workloads back to back.
    let pl_time = benchmark_parking_lot(threads, iterations);
    let std_time = benchmark_std(threads, iterations);

    println!("parking_lot::Mutex: {} microseconds", pl_time);
    println!("std::sync::Mutex:    {} microseconds", std_time);
    println!();

    if pl_time < std_time {
        // Ratio of the two timings, rounded to two decimal places.
        let speedup = (std_time as f64 / pl_time as f64 * 100.0).round() / 100.0;
        println!("parking_lot is ~{}x faster", speedup);
    }
}

Thread-Safe Counter

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
 
/// A plain signed counter; thread-safety comes from the surrounding Mutex.
struct Counter {
    value: i64,
}

impl Counter {
    /// Start counting from zero.
    fn new() -> Self {
        Self { value: 0 }
    }
    
    /// Add one and return the new value.
    fn increment(&mut self) -> i64 {
        self.value += 1;
        self.value
    }
    
    /// Subtract one and return the new value.
    fn decrement(&mut self) -> i64 {
        self.value -= 1;
        self.value
    }
    
    /// Current value.
    fn get(&self) -> i64 {
        self.value
    }
}
 
fn main() {
    let counter = Arc::new(Mutex::new(Counter::new()));

    // Ten threads, one thousand increments each → expect 10_000.
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    counter.lock().increment();
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    println!("Final count: {}", counter.lock().get());
}

Global State with Lazy Initialization

use parking_lot::Mutex;
use std::sync::Arc;
 
// Global state with parking_lot
// NOTE(review): parking_lot::Mutex::new is `const`, so a plain
// `static GLOBAL_STATE: Mutex<GlobalState>` would work without lazy_static
// (and std::sync::LazyLock is the modern lazy alternative). The Arc adds
// nothing for a static — statics are already 'static and shareable.
lazy_static::lazy_static! {
    static ref GLOBAL_STATE: Arc<Mutex<GlobalState>> = Arc::new(Mutex::new(GlobalState::new()));
}
 
/// Process-wide counters guarded by the global mutex.
struct GlobalState {
    connections: u64,
    requests: u64,
}

impl GlobalState {
    /// `const` so the state can initialize a `static` directly —
    /// parking_lot::Mutex::new is also const, making lazy_static optional.
    const fn new() -> Self {
        Self { connections: 0, requests: 0 }
    }
}
 
/// Bump the global connection counter.
fn increment_connections() {
    GLOBAL_STATE.lock().connections += 1;
}

/// Bump the global request counter.
fn increment_requests() {
    GLOBAL_STATE.lock().requests += 1;
}

/// Snapshot of (connections, requests) taken under a single lock.
fn get_stats() -> (u64, u64) {
    let guard = GLOBAL_STATE.lock();
    (guard.connections, guard.requests)
}
 
fn main() {
    // Record one connection and two requests.
    increment_connections();
    for _ in 0..2 {
        increment_requests();
    }

    let (connections, requests) = get_stats();
    println!("Connections: {}, Requests: {}", connections, requests);
}

Avoiding Lock Guard Issues

use parking_lot::Mutex;

fn main() {
    let mutex = Mutex::new(vec![1, 2, 3]);

    // CORRECT: keep the guard alive only for the mutation itself.
    {
        let mut guard = mutex.lock();
        guard.push(4);
    }

    // INCORRECT pattern illustrated: doing unrelated work while locked
    // stretches the critical section and causes contention.
    {
        let mut guard = mutex.lock();
        guard.push(5);
        // Don't do expensive work while holding lock!
        // expensive_computation();  // BAD
    }

    // CORRECT: clone the data out, drop the lock, then compute.
    let data: Vec<i32> = mutex.lock().clone();
    let sum: i32 = data.iter().sum();
    println!("Sum: {}", sum);

    // CORRECT: fine-grained critical section, released at the brace.
    {
        let mut guard = mutex.lock();
        guard.push(6);
    }

    println!("Final data: {:?}", *mutex.lock());
}

Real-World Example: Connection Pool

use parking_lot::Mutex;
use std::sync::Arc;
use std::collections::VecDeque;
 
#[derive(Debug)]
struct Connection {
    id: usize,
}

impl Connection {
    /// Construct (and log) a connection with the given id.
    fn new(id: usize) -> Self {
        println!("Creating connection {}", id);
        Connection { id }
    }
    
    /// Pretend to run `query`, returning a description of what happened.
    fn execute(&self, query: &str) -> String {
        format!("Connection {} executed: {}", self.id, query)
    }
}
 
/// A fixed-size pool of reusable connections; callers wrap it in a Mutex.
struct ConnectionPool {
    available: VecDeque<Connection>,
    in_use: usize,
    max_size: usize,
}

impl ConnectionPool {
    /// Eagerly create `size` connections, all initially available.
    fn new(size: usize) -> Self {
        Self {
            available: (0..size).map(Connection::new).collect(),
            in_use: 0,
            max_size: size,
        }
    }
    
    /// Hand out a connection if one is free; None when the pool is drained.
    fn acquire(&mut self) -> Option<Connection> {
        let conn = self.available.pop_front()?;
        self.in_use += 1;
        Some(conn)
    }
    
    /// Return a previously acquired connection to the pool.
    fn release(&mut self, conn: Connection) {
        self.available.push_back(conn);
        self.in_use -= 1;
    }
    
    /// (available, in_use) counts.
    fn stats(&self) -> (usize, usize) {
        (self.available.len(), self.in_use)
    }
}
 
fn main() {
    let pool = Arc::new(Mutex::new(ConnectionPool::new(5)));

    // Take a connection out (the lock is held only for the hand-off).
    let conn = pool.lock().acquire().expect("No available connections");
    println!("Acquired connection {}", conn.id);

    // Run the query without holding the pool lock.
    let result = conn.execute("SELECT * FROM users");
    println!("Result: {}", result);

    // Hand the connection back.
    pool.lock().release(conn);

    // Inspect the pool afterwards.
    let (available, in_use) = pool.lock().stats();
    println!("Pool stats: {} available, {} in use", available, in_use);
}

Summary

  • parking_lot::Mutex<T> is smaller and faster than std::sync::Mutex<T>
  • parking_lot::RwLock<T> provides better read/write lock performance
  • Locks use eventual fairness — periodic forced fair unlocks prevent starvation (FairMutex for strict FIFO)
  • No poisoning — lock() returns guard directly, not Result
  • try_lock() returns Option<MutexGuard> — None if the lock is held
  • Condvar pairs with Mutex for thread signaling
  • Once ensures code runs exactly once across threads
  • parking_lot has no built-in Semaphore — build one from Mutex + Condvar to limit concurrency
  • ReentrantMutex allows same thread to lock multiple times
  • Enable deadlock_detection feature to catch deadlocks at runtime
  • Use MappedMutexGuard to access nested fields
  • Always release locks as soon as possible to minimize contention
  • Clone data out of locks before doing expensive operations
  • Perfect for: high-performance concurrent data structures, connection pools, caches, counters