Loading page…
Rust walkthroughs
Loading page…
What is the difference between tokio::sync::Mutex and parking_lot::Mutex, and when should each be used in async code? The fundamental difference is that tokio::sync::Mutex is async-aware and yields the task when contended, while parking_lot::Mutex is a blocking mutex that will block the entire thread. In async code, blocking a thread can stall the executor and prevent other tasks from running, making the choice between these mutexes critical for application performance and correctness.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// tokio::sync::Mutex - async lock
async fn tokio_mutex_example() {
    let shared = Arc::new(TokioMutex::new(42));
    // Acquiring the lock is an async operation: if another task holds it,
    // this task yields to the executor instead of blocking the thread.
    let guard = shared.lock().await;
    println!("Value: {}", *guard);
}
// parking_lot::Mutex - blocking lock
fn parking_lot_mutex_example() {
    let shared = Arc::new(ParkingLotMutex::new(42));
    // Acquiring the lock is synchronous: if another thread holds it,
    // the entire calling thread is parked until the lock is free.
    let guard = shared.lock();
    println!("Value: {}", *guard);
}The .await on tokio's mutex signals that the task can yield, while parking_lot's mutex blocks synchronously.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
/// Two tasks contend for one async mutex; the waiter yields instead of
/// blocking a worker thread.
async fn contended_tokio_mutex() {
    let mutex = Arc::new(TokioMutex::new(0));

    let first = Arc::clone(&mutex);
    let handle_a = tokio::spawn(async move {
        let mut value = first.lock().await;
        // The task yields at this await while still holding the lock
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
        *value += 1;
    });

    let second = Arc::clone(&mutex);
    let handle_b = tokio::spawn(async move {
        // Locking while handle_a holds the mutex makes this task yield,
        // which lets the executor run other work in the meantime.
        let mut value = second.lock().await;
        *value += 1;
    });

    handle_a.await.unwrap();
    handle_b.await.unwrap();
    println!("Final value: {}", *mutex.lock().await); // 2
}
// Same workload as above, but with a blocking mutex held across an await.
// NOTE(review): parking_lot's MutexGuard is !Send (unless the crate's
// `send_guard` feature is enabled), so holding it across the sleep .await
// inside tokio::spawn makes the future !Send — this example will not
// compile as written, which itself demonstrates why blocking guards and
// .await don't mix.
async fn contended_parking_lot_mutex() {
let mutex = Arc::new(ParkingLotMutex::new(0));
let m1 = Arc::clone(&mutex);
let h1 = tokio::spawn(async move {
// Acquires the blocking lock, then sleeps while still holding it.
let mut guard = m1.lock();
// Blocks thread! Other tasks on same thread can't run
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
*guard += 1;
});
let m2 = Arc::clone(&mutex);
let h2 = tokio::spawn(async move {
// Tries to lock - will block thread
let mut guard = m2.lock();
*guard += 1;
});
h1.await.unwrap();
h2.await.unwrap();
}With tokio's mutex, the task yields and the executor can run other tasks. With parking_lot, the thread blocks.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// Demonstrates how blocking locks starve a small worker pool.
// NOTE(review): as in the previous example, holding parking_lot's !Send
// guard across the sleep .await inside tokio::spawn is rejected by the
// compiler; the snippet illustrates the conceptual problem rather than
// runnable code.
#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn blocking_mutex_problem() {
let mutex = Arc::new(ParkingLotMutex::new(0));
// Spawn many tasks that need the mutex
let mut handles = vec![];
for i in 0..10 {
let m = Arc::clone(&mutex);
handles.push(tokio::spawn(async move {
// `guard` is held (not read) for the duration of the sleep.
let guard = m.lock(); // Blocks thread
// While holding lock, do async work
tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
println!("Task {} got lock", i);
}));
}
// Problem: With only 2 worker threads and blocking locks,
// we can deadlock or severely limit concurrency
// - 2 threads can hold the mutex
// - But they're blocked in sleep, not yielding
// - Other tasks pile up waiting
for h in handles {
h.await.unwrap();
}
}
/// Same workload as the previous example, but with tokio's async mutex:
/// contended tasks yield to the executor instead of blocking one of the
/// two worker threads.
#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn async_mutex_solution() {
    let mutex = Arc::new(TokioMutex::new(0));
    let mut handles = vec![];
    for i in 0..10 {
        let m = Arc::clone(&mutex);
        handles.push(tokio::spawn(async move {
            // FIX: bind as `_guard` — the guard is held for the duration
            // of the sleep but never read, so the plain `guard` binding
            // triggered an unused-variable warning.
            let _guard = m.lock().await; // Yields if contended
            tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
            println!("Task {} got lock", i);
        }));
    }
    // With async mutex:
    // - Tasks yield while waiting for lock
    // - Executor can run other work
    // - Better concurrency
    for h in handles {
        h.await.unwrap();
    }
}Blocking mutexes can cause executor stalls in async contexts.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// USE PARKING_LOT MUTEX when:
// 1. The critical section is short and synchronous
// 2. No async operations while holding the lock
// 3. Performance is critical
/// Cache protected by a blocking mutex — appropriate because every
/// critical section below is short and fully synchronous.
struct Cache {
    data: ParkingLotMutex<Vec<String>>, // Short, sync operations only
}

impl Cache {
    /// Returns a clone of the item at `index`, if present.
    fn get(&self, index: usize) -> Option<String> {
        let guard = self.data.lock();
        guard.get(index).cloned() // Quick, synchronous
    }

    /// Appends `item` to the cache.
    fn add(&self, item: String) {
        // BUG FIX: the guard must be bound mutably — `Vec::push` takes
        // `&mut self` through DerefMut, so the original immutable
        // `let guard` did not compile.
        let mut guard = self.data.lock();
        guard.push(item); // Quick, synchronous
    }
}
// USE TOKIO MUTEX when:
// 1. Need to hold lock across await points
// 2. Long-running operations while locked
// 3. Critical section includes async work
/// Buffer protected by an async mutex — appropriate because the lock is
/// deliberately held across an await (the disk flush) below.
struct AsyncBuffer {
    data: TokioMutex<Vec<u8>>,
}

impl AsyncBuffer {
    /// Copies the buffered bytes, flushes the copy to disk, then clears
    /// the buffer, returning the copy. The async lock is held across the
    /// flush await on purpose so no writer can interleave between the
    /// flush and the clear.
    async fn process_and_clear(&self) -> Vec<u8> {
        let mut guard = self.data.lock().await;
        let result = guard.clone();
        // Async operation while holding lock - safe with tokio::sync::Mutex
        self.flush_to_disk(&result).await;
        guard.clear();
        result
    }

    /// Simulated async disk write. FIX: the parameter is unused in the
    /// simulation, so it is underscore-prefixed to avoid a warning.
    async fn flush_to_disk(&self, _data: &[u8]) {
        tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
    }
}Choose based on whether you hold the lock across await points.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
use std::time::Instant;
/// Times ITERATIONS uncontended lock/increment cycles on each mutex type.
#[tokio::main]
async fn performance_comparison() {
    const ITERATIONS: u32 = 100_000;

    // Uncontended tokio mutex
    let async_counter = Arc::new(TokioMutex::new(0u32));
    let started = Instant::now();
    for _ in 0..ITERATIONS {
        *async_counter.lock().await += 1;
    }
    let tokio_time = started.elapsed();

    // Uncontended parking_lot mutex
    let sync_counter = Arc::new(ParkingLotMutex::new(0u32));
    let started = Instant::now();
    for _ in 0..ITERATIONS {
        *sync_counter.lock() += 1;
    }
    let parking_lot_time = started.elapsed();

    println!("Tokio mutex: {:?}", tokio_time);
    println!("Parking lot mutex: {:?}", parking_lot_time);
    // parking_lot is typically 2-10x faster for uncontended locks
    // because it avoids async overhead
}Parking_lot is significantly faster when you don't need async semantics.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// CORRECT: Using tokio mutex when holding across await
/// Correct pattern: async mutex, because the lock is held across an await.
struct SharedState {
    counter: TokioMutex<u32>,
}

impl SharedState {
    /// Increments the counter and notifies listeners of the new value
    /// while still holding the lock — safe with tokio's async mutex.
    async fn increment_and_notify(&self) {
        let mut guard = self.counter.lock().await;
        *guard += 1;
        // Holding lock across await - OK with tokio mutex
        self.notify(*guard).await;
    }

    /// Simulated async notification. FIX: the parameter is unused in the
    /// stub, so it is underscore-prefixed to avoid a warning.
    async fn notify(&self, _value: u32) {
        tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
    }
}
// INCORRECT: Using parking_lot mutex across await
struct BadSharedState {
counter: ParkingLotMutex<u32>,
}
impl BadSharedState {
// Anti-pattern on purpose: awaits a notification while STILL holding the
// blocking guard, stalling the executor thread for the duration.
// NOTE(review): this compiles only because the future is never required
// to be Send here; handing it to tokio::spawn would fail, since
// parking_lot's guard is !Send.
async fn increment_and_notify(&self) {
let mut guard = self.counter.lock();
*guard += 1;
// BAD: Holding blocking lock across await
// This blocks the executor thread
self.notify(*guard).await;
// Guard dropped here
}
// Simulated async notification; `value` is unused in this stub.
async fn notify(&self, value: u32) {
tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
}
}Never hold a blocking mutex across an await point.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
/// Shows how guard scope controls how long the async lock is held.
async fn lock_scope_tokio() {
    let mutex = Arc::new(TokioMutex::new(vec![1, 2, 3]));
    // Lock scope is clear
    {
        let mut guard = mutex.lock().await;
        guard.push(4);
        // Can await here - the guard is an async-mutex guard
        some_async_work().await;
        guard.push(5);
    }
    // Lock released at the end of the scope above

    let guard = mutex.lock().await;
    // BUG FIX: the original had `guard.len()` as the tail expression of a
    // function returning `()`, which is a type error; discard the value.
    let _len = guard.len(); // Guard dropped at end of function
}
/// Same scoping idea with a blocking mutex: keep the guard inside a
/// tight, await-free scope.
async fn lock_scope_parking_lot() {
    let mutex = Arc::new(ParkingLotMutex::new(vec![1, 2, 3]));
    // Lock scope
    {
        let mut guard = mutex.lock();
        guard.push(4);
        // CANNOT await here - would block executor
        // some_async_work().await; // BAD!
        guard.push(5);
    }
    // Lock released at the end of the scope above

    let guard = mutex.lock();
    // BUG FIX: the original had `guard.len()` as the tail expression of a
    // function returning `()`, which is a type error; discard the value.
    let _len = guard.len(); // Guard dropped at end of function
}
/// Stand-in for any awaitable workload: a 1 ms async sleep.
async fn some_async_work() {
    let pause = tokio::time::Duration::from_millis(1);
    tokio::time::sleep(pause).await;
}The scope of the lock guard matters for async code.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// Pattern: Async-safe shared state
struct AsyncService {
state: Arc<TokioMutex<ServiceState>>,
}
struct ServiceState {
connections: Vec<String>,
request_count: u64,
}
impl AsyncService {
async fn handle_request(&self, conn: String) {
let mut state = self.state.lock().await;
state.connections.push(conn);
state.request_count += 1;
// Maybe do async work while locked
if state.request_count % 100 == 0 {
self.log_metrics(&*state).await;
}
}
async fn log_metrics(&self, state: &ServiceState) {
// Async logging
}
}
// Pattern: Fast sync state with parking_lot
/// Hit/miss counters behind blocking mutexes — every critical section is
/// a single synchronous increment or read.
struct FastCache {
    hits: ParkingLotMutex<u64>,
    misses: ParkingLotMutex<u64>,
}

impl FastCache {
    /// Bump the hit counter (short, synchronous critical section).
    fn record_hit(&self) {
        *self.hits.lock() += 1;
    }

    /// Bump the miss counter (short, synchronous critical section).
    fn record_miss(&self) {
        *self.misses.lock() += 1;
    }

    /// Returns (hits, misses). Each counter is locked briefly in turn,
    /// so the pair is not an atomic snapshot of both values at once.
    fn stats(&self) -> (u64, u64) {
        (*self.hits.lock(), *self.misses.lock())
    }
}Match the mutex type to the access pattern.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
/// Non-blocking acquisition attempt on tokio's mutex.
async fn try_lock_tokio() {
    let mutex = Arc::new(TokioMutex::new(42));
    // tokio::sync::Mutex::try_lock returns a Result immediately,
    // without awaiting
    if let Ok(guard) = mutex.try_lock() {
        println!("Got lock: {}", *guard);
    } else {
        println!("Lock is held, doing other work");
        // Free to do async work here - we never blocked
        some_async_work().await;
    }
}
/// Non-blocking acquisition attempt on parking_lot's mutex.
fn try_lock_parking_lot() {
    let mutex = Arc::new(ParkingLotMutex::new(42));
    // parking_lot::Mutex::try_lock returns an Option immediately,
    // without parking the thread
    if let Some(guard) = mutex.try_lock() {
        println!("Got lock: {}", *guard);
    } else {
        println!("Lock is held");
    }
}
/// Stand-in for any awaitable workload: a 1 ms async sleep.
async fn some_async_work() {
    let pause = tokio::time::Duration::from_millis(1);
    tokio::time::sleep(pause).await;
}Both support non-blocking lock attempts.
use tokio::sync::RwLock as TokioRwLock;
use parking_lot::RwLock as ParkingLotRwLock;
use std::sync::Arc;
// Same principles apply to RwLock
/// The same async-vs-blocking distinction applies to reader-writer locks.
async fn rwlock_example() {
    // tokio RwLock - async-aware
    let shared = Arc::new(TokioRwLock::new(vec![1, 2, 3]));
    // Any number of readers may hold the lock at the same time
    let reader_a = shared.read().await;
    let reader_b = shared.read().await; // OK - concurrent reads
    drop(reader_a);
    drop(reader_b);
    // A writer requires exclusive access
    let mut writer = shared.write().await;
    writer.push(4);

    // parking_lot RwLock - blocking
    let shared = Arc::new(ParkingLotRwLock::new(vec![1, 2, 3]));
    let reader_a = shared.read();
    let reader_b = shared.read(); // OK - concurrent reads (blocking)
    drop(reader_a);
    drop(reader_b);
    let mut writer = shared.write(); // Blocks thread
    writer.push(4);
}RwLock variants follow the same pattern as Mutex.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// Both can deadlock with multiple mutexes
// Classic lock-ordering (ABBA) deadlock with two async mutexes: h1 takes
// m1 then m2, h2 takes m2 then m1. NOTE(review): the handles are never
// awaited and the guards are otherwise unused, so this is purely
// illustrative — the deadlock only manifests if the tasks interleave.
async fn deadlock_tokio() {
let m1 = Arc::new(TokioMutex::new(0));
let m2 = Arc::new(TokioMutex::new(0));
let m1a = Arc::clone(&m1);
let m2a = Arc::clone(&m2);
let h1 = tokio::spawn(async move {
let g1 = m1a.lock().await;
let g2 = m2a.lock().await; // Deadlock if h2 holds m2
});
let m1b = Arc::clone(&m1);
let m2b = Arc::clone(&m2);
let h2 = tokio::spawn(async move {
let g2 = m2b.lock().await;
let g1 = m1b.lock().await; // Deadlock if h1 holds m1
});
}
// The same ABBA lock ordering with blocking mutexes and OS threads.
// NOTE(review): the JoinHandles are never joined, so the function may
// return before the threads ever contend; guards are intentionally unused.
fn deadlock_parking_lot() {
let m1 = Arc::new(ParkingLotMutex::new(0));
let m2 = Arc::new(ParkingLotMutex::new(0));
let m1a = Arc::clone(&m1);
let m2a = Arc::clone(&m2);
let h1 = std::thread::spawn(move || {
let g1 = m1a.lock();
let g2 = m2a.lock(); // Deadlock
});
let m1b = Arc::clone(&m1);
let m2b = Arc::clone(&m2);
let h2 = std::thread::spawn(move || {
let g2 = m2b.lock();
let g1 = m1b.lock(); // Deadlock
});
}Both mutexes can deadlock; the async nature doesn't prevent circular waits.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
use std::sync::Arc;
// Use parking_lot for fast stats/counters
// Use tokio for state held across async operations
/// Mixes both mutex types in one service: blocking locks for short sync
/// metric updates, an async lock for state held across await points.
struct Service {
    // Fast metrics - parking_lot (short, synchronous critical sections)
    request_count: ParkingLotMutex<u64>,
    error_count: ParkingLotMutex<u64>,
    // Async state - tokio (lock may be held across await points)
    connections: TokioMutex<Vec<Connection>>,
}

struct Connection {
    id: u64,
    address: String,
}

impl Service {
    /// Handles one request: bumps the sync metric (no await while the
    /// blocking guard lives), then processes the connection list while
    /// holding the async lock across an await.
    async fn handle_request(&self) {
        // Quick metric update - guard dropped before any await
        *self.request_count.lock() += 1;
        // Async state update - can await while the tokio guard is held
        let mut conns = self.connections.lock().await;
        self.process_concurrent(&mut conns).await;
    }

    /// Simulated async processing. FIX: the parameter is unused in the
    /// stub, so it is underscore-prefixed to avoid a warning.
    async fn process_concurrent(&self, _conns: &mut Vec<Connection>) {
        tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
    }

    /// Fast synchronous snapshot of (requests, errors); each counter is
    /// locked briefly in turn, so the pair is not an atomic snapshot.
    fn metrics(&self) -> (u64, u64) {
        (*self.request_count.lock(), *self.error_count.lock())
    }
}Use each mutex type for its appropriate use case in the same application.
use tokio::sync::Mutex as TokioMutex;
use parking_lot::Mutex as ParkingLotMutex;
// tokio's MutexGuard is Send (for Send data), so a future holding it can move
// between worker threads. The concern below is not Send-ness but concurrency:
// holding any guard across an await delays every other task that needs the lock.
#[tokio::main(flavor = "current_thread")] // Single-threaded
async fn single_threaded_issue() {
let mutex = Arc::new(TokioMutex::new(0));
let guard = mutex.lock().await;
// This is problematic in single-threaded runtime
// The guard is held across await, blocking the only thread
some_async_work().await;
drop(guard);
}
// In multi-threaded runtime, this works but reduces concurrency
#[tokio::main(flavor = "multi_thread")]
async fn multi_threaded_ok() {
let mutex = Arc::new(TokioMutex::new(0));
let guard = mutex.lock().await;
// OK - other threads can handle other tasks
some_async_work().await;
drop(guard);
}
/// Stand-in for any awaitable workload: a 1 ms async sleep.
async fn some_async_work() {
    let pause = tokio::time::Duration::from_millis(1);
    tokio::time::sleep(pause).await;
}Single-threaded runtimes have special considerations for async mutexes.
The choice between tokio::sync::Mutex and parking_lot::Mutex depends on how you use the lock:
Comparison table:
| Characteristic | tokio::sync::Mutex | parking_lot::Mutex |
|---------------|-------------------|-------------------|
| Lock acquisition | Async (.await) | Blocking |
| Contended behavior | Yields task | Blocks thread |
| Performance (uncontended) | Slower | Faster |
| Hold across .await | ✅ Safe | ❌ Blocks executor |
| Short critical sections | Overkill | Ideal |
| Executor impact | None | Can starve other tasks |
Decision guide:
// Use parking_lot::Mutex when:
// - Critical section is short and synchronous
// - No async operations while holding lock
// - Performance is critical
let mutex = parking_lot::Mutex::new(data);
let guard = mutex.lock();
guard.update(); // Quick sync operation
drop(guard);
// Use tokio::sync::Mutex when:
// - Must hold lock across await points
// - Critical section includes async work
// - Lock might be held for extended periods
let mutex = tokio::sync::Mutex::new(data);
let guard = mutex.lock().await;
guard.update();
some_async_work().await; // OK to await while holding
guard.update_more();
drop(guard);Key insight: The primary consideration is whether you hold the lock across .await points. If you do, use tokio::sync::Mutex. If you don't, parking_lot::Mutex offers better performance.