How do I work with Parking Lot for High-Performance Synchronization in Rust?

Walkthrough

Parking Lot provides faster synchronization primitives than the standard library. It implements a "parking lot" thread-scheduling algorithm where threads wait on a central queue, leading to better performance and smaller memory footprint.

Key concepts:

  • Mutex — Faster mutual exclusion lock
  • RwLock — Faster reader-writer lock
  • Condvar — Condition variable for signaling
  • Once — One-time initialization
  • Fairness — Prevents thread starvation

When to use Parking Lot:

  • High-contention scenarios
  • Performance-critical code
  • When you need fair locking
  • Smaller memory footprint needed
  • When you need lock timeouts

When NOT to use Parking Lot:

  • Simple projects (std is fine)
  • Async code (use tokio::sync instead)
  • When std performance is adequate

Code Examples

Basic Mutex

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
 
fn main() {
    // Ten threads each increment a shared counter behind a parking_lot Mutex.
    let counter = Arc::new(Mutex::new(0));

    let workers: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                // lock() returns the guard directly — no Result to unwrap.
                *counter.lock() += 1;
            })
        })
        .collect();

    for worker in workers {
        worker.join().unwrap();
    }

    println!("Final value: {}", *counter.lock());  // 10
}

RwLock for Multiple Readers

use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;
 
fn main() {
    // Several readers and one writer share a Vec behind a parking_lot RwLock.
    let shared = Arc::new(RwLock::new(vec![1, 2, 3]));
    let mut workers = Vec::new();

    // Readers may hold the lock concurrently.
    for id in 0..5 {
        let shared = Arc::clone(&shared);
        workers.push(thread::spawn(move || {
            println!("Reader {}: {:?}", id, *shared.read());
        }));
    }

    // The writer takes exclusive access.
    let writer_shared = Arc::clone(&shared);
    workers.push(thread::spawn(move || {
        let mut guard = writer_shared.write();
        guard.push(4);
        println!("Writer added element");
    }));

    for worker in workers {
        worker.join().unwrap();
    }
}

Try Lock (Non-Blocking)

use parking_lot::Mutex;
 
fn main() {
    let mutex = Mutex::new(42);

    // try_lock() never blocks: Some(guard) on success, None if contended.
    match mutex.try_lock() {
        Some(guard) => println!("Got lock: {}", *guard),
        None => println!("Lock is held"),
    }

    // lock() blocks until the mutex becomes available.
    println!("Value: {}", *mutex.lock());
}

Lock with Timeout

use parking_lot::Mutex;
use std::time::Duration;
 
fn main() {
    let mutex = Mutex::new(0);

    // BUG FIX: parking_lot's Mutex has no `lock_timeout` method (and guards
    // have no `is_locked`). The timed-lock API is `try_lock_for`, which
    // returns Option<MutexGuard>: Some(guard) if acquired within the
    // duration, None if the timeout elapsed.
    match mutex.try_lock_for(Duration::from_millis(100)) {
        Some(guard) => {
            println!("Acquired lock: {}", *guard);
        }
        None => {
            println!("Failed to acquire within timeout");
        }
    }
}

Condition Variable

use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::thread;
 
/// Shared state for the producer/consumer example below: `condvar`
/// signals changes to the integer guarded by `data`.
struct Shared {
    data: Mutex<i32>,
    condvar: Condvar,
}
 
fn main() {
    let shared = Arc::new(Shared {
        data: Mutex::new(0),
        condvar: Condvar::new(),
    });

    // Consumer: block on the condvar until the value reaches 10.
    let consumer = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || {
            let mut guard = shared.data.lock();
            // The loop guards against spurious wakeups; wait() atomically
            // releases the mutex and reacquires it before returning.
            while *guard < 10 {
                shared.condvar.wait(&mut guard);
            }
            println!("Consumer got: {}", *guard);
        })
    };

    // Producer: publish a value, then wake every waiter.
    let producer = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || {
            *shared.data.lock() = 42;
            shared.condvar.notify_all();
            println!("Producer set value");
        })
    };

    consumer.join().unwrap();
    producer.join().unwrap();
}

Once for One-Time Initialization

use parking_lot::Once;
 
static mut SINGLETON: Option<String> = None;
static INIT: Once = Once::new();
 
fn get_singleton() -> &'static str {
    INIT.call_once(|| {
        unsafe { SINGLETON = Some("Initialized".to_string()); }
    });
    unsafe { SINGLETON.as_ref().unwrap() }
}
 
fn main() {
    println!("Value: {}", get_singleton());
    println!("Value: {}", get_singleton());  // No reinitialization
}

Fair Locking

use parking_lot::Mutex;
use std::sync::Arc;
use std::thread;
 
fn main() {
    let mutex = Arc::new(Mutex::new(0));
    
    // NOTE(review): parking_lot's Mutex is *eventually fair*, not strict
    // FIFO — a releasing thread normally lets a barging thread win (for
    // throughput), but periodically performs a fair handoff so that no
    // waiter starves. Do not rely on "first to request gets it first".
    
    let handles: Vec<_> = (0..5)
        .map(|i| {
            let mutex = Arc::clone(&mutex);
            thread::spawn(move || {
                let _guard = mutex.lock();
                println!("Thread {} acquired lock", i);
                // Simulate work while holding the lock
                thread::sleep(std::time::Duration::from_millis(10));
            })
        })
        .collect();
    
    for handle in handles {
        handle.join().unwrap();
    }
}

RwLock Read Recursive

use parking_lot::RwLock;
 
fn main() {
    let lock = RwLock::new(42);
    
    // Recursive read lock (same thread can re-lock)
    let guard1 = lock.read();
    let guard2 = lock.read_recursive();  // Allows recursive reads
    
    println!("Value: {}", *guard1);
    println!("Value: {}", *guard2);
    
    // Upgradable read
    let upgradable = lock.upgradable_read();
    println!("Upgradable read: {}", *upgradable);
    
    // Upgrade to write
    let mut write = RwLockUpgradableReadGuard::upgrade(upgradable);
    *write = 100;
    println!("After upgrade: {}", *write);
}
 
use parking_lot::RwLockUpgradableReadGuard;

Barrier for Thread Coordination

// BUG FIX: parking_lot does not provide a Barrier type. The standard
// library's std::sync::Barrier works fine alongside parking_lot locks.
use std::sync::{Arc, Barrier};
use std::thread;

fn main() {
    let barrier = Arc::new(Barrier::new(3));
    let mut handles = vec![];

    for i in 0..3 {
        let barrier = Arc::clone(&barrier);
        handles.push(thread::spawn(move || {
            println!("Thread {} before barrier", i);
            // wait() blocks until all 3 participants have arrived.
            barrier.wait();
            println!("Thread {} after barrier", i);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }
}

Semaphore for Resource Limiting

use parking_lot::Semaphore;
use std::sync::Arc;
use std::thread;
 
fn main() {
    let sem = Arc::new(Semaphore::new(3));  // Max 3 concurrent
    let mut handles = vec![];
    
    for i in 0..10 {
        let sem = Arc::clone(&sem);
        handles.push(thread::spawn(move || {
            let _permit = sem.acquire();
            println!("Thread {} acquired permit", i);
            thread::sleep(std::time::Duration::from_millis(100));
            // Permit released when dropped
        }));
    }
    
    for handle in handles {
        handle.join().unwrap();
    }
}

Deadlock Detection (Debug)

// Enable deadlock detection in Cargo.toml:
// parking_lot = { version = "0.12", features = ["deadlock_detection"] }

use parking_lot::Mutex;
use std::thread;

fn main() {
    // Two locks acquired in opposite orders by two threads is the classic
    // scenario the detector is meant to catch.
    let _m1 = Mutex::new(0);
    let _m2 = Mutex::new(0);

    // With the feature enabled, spawn a background checker thread that
    // polls for cycles (this is the pattern from the parking_lot docs):
    //
    // thread::spawn(|| loop {
    //     std::thread::sleep(std::time::Duration::from_secs(1));
    //     let deadlocks = parking_lot::deadlock::check_deadlock();
    //     for threads in &deadlocks {
    //         eprintln!("deadlock involving {} threads", threads.len());
    //     }
    // });

    let worker = thread::spawn(|| {
        // Potential deadlock scenario would go here.
    });
    // BUG FIX: the original leaked `h1` without joining it.
    worker.join().unwrap();
}

Comparing with std::sync

use parking_lot::Mutex as PlMutex;
use std::sync::Mutex as StdMutex;
use std::time::Instant;
 
/// Times `iterations` uncontended lock/increment cycles on a parking_lot
/// Mutex and returns the elapsed time in nanoseconds.
fn benchmark_parking_lot(iterations: u32) -> u128 {
    let mutex = PlMutex::new(0u64);
    let start = Instant::now();

    (0..iterations).for_each(|_| {
        *mutex.lock() += 1;
    });

    start.elapsed().as_nanos()
}
 
/// Times `iterations` uncontended lock/increment cycles on a std::sync
/// Mutex and returns the elapsed time in nanoseconds.
fn benchmark_std(iterations: u32) -> u128 {
    let mutex = StdMutex::new(0u64);
    let start = Instant::now();

    (0..iterations).for_each(|_| {
        *mutex.lock().unwrap() += 1;
    });

    start.elapsed().as_nanos()
}
 
fn main() {
    // Single-threaded, uncontended comparison — parking_lot's edge is
    // largest under contention, so treat these numbers as a rough guide.
    const ITERATIONS: u32 = 100_000;

    println!("parking_lot: {} ns", benchmark_parking_lot(ITERATIONS));
    println!("std::sync:   {} ns", benchmark_std(ITERATIONS));
}

Mapped MutexGuard

use parking_lot::{Mutex, MappedMutexGuard};
 
struct Config {
    settings: Settings,
    version: u32,
}
 
struct Settings {
    debug: bool,
    log_level: String,
}
 
fn main() {
    let config = Mutex::new(Config {
        settings: Settings {
            debug: true,
            log_level: "info".to_string(),
        },
        version: 1,
    });
    
    // Lock and map to inner field
    let settings_guard: MappedMutexGuard<Settings> = MutexGuard::map(config.lock(), |c| &mut c.settings);
    
    println!("Debug: {}", settings_guard.debug);
}
 
use parking_lot::MutexGuard;

Reentrant Mutex

use parking_lot::ReentrantMutex;
 
fn main() {
    let mutex = ReentrantMutex::new(0);
    
    // Same thread can acquire multiple times without deadlocking.
    // Note: the guard only grants shared (&T) access, so re-entrancy
    // can never hand out two mutable references.
    let guard1 = mutex.lock();
    let guard2 = mutex.lock();  // Same thread - OK!
    
    println!("Value: {}", *guard2);
    
    // Guards may be dropped in ANY order (reverse order is not required);
    // the lock is released once this thread's last guard goes away.
    // Explicit drops are shown only for clarity.
    drop(guard2);
    drop(guard1);
}

Fair RwLock

use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;
 
fn main() {
    let lock = Arc::new(RwLock::new(0));

    // parking_lot's RwLock queues fairly enough that a steady stream of
    // readers does not starve the writer.
    let readers: Vec<_> = (0..3)
        .map(|id| {
            let lock = Arc::clone(&lock);
            thread::spawn(move || {
                for _ in 0..5 {
                    println!("Reader {}: {}", id, *lock.read());
                }
            })
        })
        .collect();

    let writer = {
        let lock = Arc::clone(&lock);
        thread::spawn(move || {
            for _ in 0..3 {
                let mut guard = lock.write();
                *guard += 1;
                println!("Writer: {}", *guard);
            }
        })
    };

    for reader in readers {
        reader.join().unwrap();
    }
    writer.join().unwrap();
}

Raw Mutex Operations

use parking_lot::RawMutex;
 
fn main() {
    let raw = RawMutex::INIT;
    
    // Low-level lock operations
    unsafe {
        raw.lock();
        // Critical section
        raw.unlock();
    }
}

Thread Parking

use parking_lot::{Mutex, Condvar};
use std::sync::Arc;
use std::thread;
 
fn main() {
    // A (flag, condvar) pair: the main thread parks until the flag flips.
    let pair = Arc::new((Mutex::new(false), Condvar::new()));

    let worker_pair = Arc::clone(&pair);
    thread::spawn(move || {
        let (lock, cvar) = &*worker_pair;
        *lock.lock() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut started = lock.lock();
    // Loop handles spurious wakeups.
    while !*started {
        cvar.wait(&mut started);
    }
    println!("Thread has started!");
}

Summary

Parking Lot Key Imports:

use parking_lot::{Mutex, RwLock, Condvar, Once, ReentrantMutex};
use parking_lot::{MutexGuard, MappedMutexGuard, RwLockUpgradableReadGuard};
// Note: parking_lot has no Barrier (use std::sync::Barrier) and no
// Semaphore (build one from Mutex + Condvar, or use tokio::sync::Semaphore).

Main Types:

Type Description
Mutex<T> Mutual exclusion lock
RwLock<T> Reader-writer lock
ReentrantMutex<T> Lock same thread can re-acquire
Condvar Condition variable
Once One-time initialization
Barrier Synchronization point — not in parking_lot; use std::sync::Barrier
Semaphore Resource limiting — not in parking_lot; build from Mutex + Condvar or use tokio::sync::Semaphore

Key Methods:

// Mutex
mutex.lock();              // Block until acquired
mutex.try_lock();         // Option<MutexGuard>
mutex.try_lock_for(dur);  // Timed lock — Option<MutexGuard>
 
// RwLock
rwlock.read();            // Read guard
rwlock.write();           // Write guard
rwlock.upgradable_read(); // Can upgrade to write
rwlock.try_read();        // Non-blocking

Comparison with std:

Feature parking_lot std::sync
Performance Faster Good
Memory Smaller Larger
Fairness Eventually fair No guarantee
Try lock Yes Yes
Timeout Yes No (Mutex)
Deadlock detection Debug feature No
Reentrant Yes No
Result handling No unwrap needed Must unwrap

Key Points:

  • parking_lot is faster than std for high-contention
  • No .unwrap() needed on lock (unlike std::sync::Mutex)
  • Supports lock timeouts
  • Fair locking prevents starvation
  • Smaller memory footprint
  • Use RwLock for read-heavy workloads
  • Use Condvar for signaling between threads
  • Enable deadlock_detection feature for debugging
  • For async code, use tokio::sync instead