Rust walkthroughs
An RwLock (read-write lock) is a synchronization primitive that allows multiple readers OR a single writer to access data concurrently. Unlike a Mutex which provides exclusive access to a single thread, RwLock distinguishes between read and write access.
Key characteristics: any number of threads may hold read access at the same time; write access is exclusive (one writer, no concurrent readers); a writer blocks until every read guard has been released.
RwLock is ideal when: data is read far more often than it is written, so concurrent readers can proceed in parallel instead of serializing on an exclusive lock.
The standard library provides std::sync::RwLock, but for production code, consider parking_lot::RwLock for better performance.
use std::sync::RwLock;
/// Demonstrates the basic RwLock contract: many concurrent readers,
/// then one exclusive writer.
fn main() {
    let shared = RwLock::new(5);

    // Two read guards may coexist — read access is shared.
    {
        let first = shared.read().unwrap();
        let second = shared.read().unwrap();
        println!("Reader 1: {}", *first);
        println!("Reader 2: {}", *second);
    } // Both read guards dropped here.

    // A write guard is exclusive; it can only be taken once no readers remain.
    {
        let mut guard = shared.write().unwrap();
        *guard += 1;
        println!("Writer: {}", *guard);
    }
}
use std::sync::{Arc, RwLock};
use std::thread;
use std::collections::HashMap;
/// A thread-safe string-to-string cache: a HashMap guarded by an RwLock,
/// so lookups can proceed concurrently while inserts/removals are exclusive.
struct Cache {
    data: RwLock<HashMap<String, String>>,
}

impl Cache {
    /// Creates an empty cache.
    fn new() -> Self {
        Cache {
            data: RwLock::new(HashMap::new()),
        }
    }

    /// Returns a clone of the value stored under `key`, if any (read lock).
    fn get(&self, key: &str) -> Option<String> {
        self.data.read().unwrap().get(key).cloned()
    }

    /// Inserts or replaces the value under `key` (write lock).
    fn insert(&self, key: String, value: String) {
        self.data.write().unwrap().insert(key, value);
    }

    /// Removes `key`, returning the previous value if one existed (write lock).
    fn remove(&self, key: &str) -> Option<String> {
        self.data.write().unwrap().remove(key)
    }
}
/// Shares a Cache across threads via Arc: five readers race a single writer.
fn main() {
    let cache = Arc::new(Cache::new());

    // Seed the cache before any thread starts.
    cache.insert("config".to_string(), "default".to_string());

    let mut handles = Vec::new();

    // Readers: each looks up "config" and reports what it observed.
    for i in 0..5 {
        let reader_cache = Arc::clone(&cache);
        handles.push(thread::spawn(move || {
            match reader_cache.get("config") {
                Some(value) => println!("Reader {}: got config = {}", i, value),
                None => println!("Reader {}: config not found", i),
            }
        }));
    }

    // One writer replaces the value while the readers run.
    let writer_cache = Arc::clone(&cache);
    let writer = thread::spawn(move || {
        writer_cache.insert("config".to_string(), "updated".to_string());
        println!("Writer: updated config");
    });

    for handle in handles {
        handle.join().unwrap();
    }
    writer.join().unwrap();
}
use std::sync::{Arc, RwLock};
use std::thread;
/// Shows readers overlapping in time while a writer waits for them to finish.
fn main() {
    let shared = Arc::new(RwLock::new(vec![1, 2, 3]));
    let mut readers = Vec::new();

    // Three readers hold the read lock at the same time.
    for id in 0..3 {
        let handle_data = Arc::clone(&shared);
        readers.push(thread::spawn(move || {
            let guard = handle_data.read().unwrap();
            println!("Reader {}: {:?}", id, *guard);
            // Keep the read lock held briefly so the readers overlap.
            thread::sleep(std::time::Duration::from_millis(100));
            println!("Reader {} done", id);
        }));
    }

    // Small delay so the readers grab their locks before the writer tries.
    thread::sleep(std::time::Duration::from_millis(10));

    // The writer blocks until every read guard has been dropped.
    let writer_data = Arc::clone(&shared);
    let writer = thread::spawn(move || {
        println!("Writer: trying to acquire write lock...");
        let mut guard = writer_data.write().unwrap();
        println!("Writer: acquired write lock");
        guard.push(4);
        println!("Writer: added element");
    });

    for reader in readers {
        reader.join().unwrap();
    }
    writer.join().unwrap();
    println!("Final data: {:?}", *shared.read().unwrap());
}
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
/// Demonstrates non-blocking lock attempts with try_read / try_write
/// while another thread holds the write lock.
fn main() {
    let value = Arc::new(RwLock::new(0));

    // Writer grabs the lock and holds it for half a second.
    let writer_handle = {
        let value = Arc::clone(&value);
        thread::spawn(move || {
            let mut guard = value.write().unwrap();
            println!("Writer acquired lock");
            // Hold the write lock so the try_* calls below fail.
            thread::sleep(Duration::from_millis(500));
            *guard = 42;
            println!("Writer done");
        })
    };

    // Give the writer time to take the lock before the reader tries.
    thread::sleep(Duration::from_millis(50));

    let reader_handle = {
        let value = Arc::clone(&value);
        thread::spawn(move || {
            // Non-blocking read attempt — fails while the writer holds the lock.
            match value.try_read() {
                Ok(guard) => println!("Got read: {}", *guard),
                Err(e) => println!("Could not read: {}", e),
            }
            // Non-blocking write attempt — same outcome.
            match value.try_write() {
                Ok(mut guard) => {
                    *guard = 100;
                    println!("Got write, set to 100");
                }
                Err(e) => println!("Could not write: {}", e),
            }
        })
    };

    writer_handle.join().unwrap();
    reader_handle.join().unwrap();
    println!("Final value: {}", *value.read().unwrap());
}
use std::sync::{Arc, RwLock};
/// Demonstrates the "optimistic read, then write with re-check" pattern,
/// since std's RwLock cannot upgrade a read lock to a write lock.
fn main() {
    // Bring HashMap into scope locally so this example is self-contained
    // (in the original listing the `use` line was detached from the snippet).
    use std::collections::HashMap;

    // Standard RwLock doesn't support upgrading: you must release the read
    // lock before acquiring the write lock. That gap can admit races, so the
    // state must be re-checked under the write lock.
    let data = Arc::new(RwLock::new(HashMap::<String, i32>::new()));

    // Returns the cached value for `key`, inserting `default` first if absent.
    fn get_or_insert(cache: &RwLock<HashMap<String, i32>>, key: &str, default: i32) -> i32 {
        // Fast path: shared read lock.
        {
            let read_guard = cache.read().unwrap();
            if let Some(&value) = read_guard.get(key) {
                return value;
            }
        } // Read lock released here.

        // Slow path: exclusive write lock. The entry API re-checks and inserts
        // in a single lookup (another thread may have inserted in the gap),
        // replacing the original's redundant second get(key).unwrap().
        let mut write_guard = cache.write().unwrap();
        *write_guard.entry(key.to_string()).or_insert(default)
    }

    let result = get_or_insert(&data, "count", 0);
    println!("Result: {}", result);
}
use std::collections::HashMap;
// Add to Cargo.toml:
// [dependencies]
// parking_lot = "0.12"
// parking_lot provides a better RwLock:
// - No poisoning (no unwrap needed)
// - Smaller memory footprint
// - Faster performance
// - Supports read lock upgrading
use parking_lot::RwLock;
use std::sync::Arc;
use std::thread;
/// parking_lot demo: guards are returned directly (no poisoning, no unwrap).
fn main() {
    let data = Arc::new(RwLock::new(vec![1, 2, 3]));

    let data_clone = Arc::clone(&data);
    let handle = thread::spawn(move || {
        // parking_lot's read() yields the guard directly — no Result to unwrap.
        let read_guard = data_clone.read();
        println!("Reader: {:?}", *read_guard);
    });
    handle.join().unwrap();

    // No unwrap needed with parking_lot.
    // FIX: the write guard must be dropped before data.read() below. Guards
    // release their lock on Drop (end of scope, not last use), so the original
    // code still held the write lock while evaluating data.read() in the final
    // println! — deadlocking the thread against itself (parking_lot's RwLock
    // is not reentrant). Scoping the guard releases the lock first.
    {
        let mut write_guard = data.write();
        write_guard.push(4);
    }
    println!("Final: {:?}", *data.read());
}
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Instant;
/// An in-memory user table of (id, name) pairs behind an RwLock,
/// so concurrent lookups do not serialize against each other.
struct Database {
    users: RwLock<Vec<(u32, String)>>,
}

impl Database {
    /// Builds a database pre-populated with users 0..999 named "user_<id>".
    fn new() -> Self {
        let users: Vec<(u32, String)> = (0..1000)
            .map(|i| (i, format!("user_{}", i)))
            .collect();
        Database {
            users: RwLock::new(users),
        }
    }

    /// Linear-scans for `id` under a read lock; returns a clone of the name.
    fn find_user(&self, id: u32) -> Option<String> {
        let guard = self.users.read().unwrap();
        guard
            .iter()
            .find(|(uid, _)| *uid == id)
            .map(|(_, name)| name.clone())
    }

    /// Appends a user under an exclusive write lock.
    fn add_user(&self, id: u32, name: String) {
        self.users.write().unwrap().push((id, name));
    }

    /// Number of users currently stored (read lock).
    fn user_count(&self) -> usize {
        self.users.read().unwrap().len()
    }
}
/// Stress demo: ten reader threads scan the database while one writer
/// trickles in new users.
fn main() {
    let db = Arc::new(Database::new());
    let start = Instant::now();
    let mut workers = Vec::new();

    // Reader threads: 100 lookups each, with ids spread across the table.
    for reader_id in 0..10 {
        let handle_db = Arc::clone(&db);
        workers.push(thread::spawn(move || {
            for step in 0..100 {
                let id = (reader_id * 100 + step) % 1000;
                if let Some(name) = handle_db.find_user(id as u32) {
                    // Stand-in for real per-user work.
                    let _ = name.len();
                }
            }
        }));
    }

    // Single writer thread: appends ten new users with small pauses.
    let writer_db = Arc::clone(&db);
    workers.push(thread::spawn(move || {
        for new_id in 1000..1010 {
            writer_db.add_user(new_id, format!("new_user_{}", new_id));
            thread::sleep(std::time::Duration::from_micros(100));
        }
    }));

    for worker in workers {
        worker.join().unwrap();
    }
    println!("Total users: {}", db.user_count());
    println!("Time: {:?}", start.elapsed());
}
use std::sync::{Arc, RwLock};
use std::thread;
/// Lock-ordering discipline: both threads take data1 before data2,
/// which rules out the classic ABBA deadlock.
fn main() {
    let data1 = Arc::new(RwLock::new(0));
    let data2 = Arc::new(RwLock::new(0));

    let first = {
        let d1 = Arc::clone(&data1);
        let d2 = Arc::clone(&data2);
        thread::spawn(move || {
            // Acquire in the agreed order: data1, then data2.
            let mut g1 = d1.write().unwrap();
            let mut g2 = d2.write().unwrap();
            *g1 += 1;
            *g2 += 1;
        })
    };

    let second = {
        let d1 = Arc::clone(&data1);
        let d2 = Arc::clone(&data2);
        thread::spawn(move || {
            // Identical order to the first thread — deadlock is impossible.
            let mut g1 = d1.write().unwrap();
            let mut g2 = d2.write().unwrap();
            *g1 += 10;
            *g2 += 10;
        })
    };

    first.join().unwrap();
    second.join().unwrap();
    println!("data1: {}, data2: {}", *data1.read().unwrap(), *data2.read().unwrap());
}
use std::sync::{Arc, RwLock, Mutex};
use std::thread;
use std::time::Instant;
/// Micro-benchmark: NUM_READERS threads each perform NUM_READS uncontended
/// reads, once through an RwLock and once through a Mutex.
///
/// NOTE: results vary by platform and std implementation; RwLock's read path
/// is not guaranteed to beat Mutex, so the ratio is reported neutrally.
fn main() {
    const NUM_READERS: usize = 10;
    const NUM_READS: usize = 100_000;

    // --- RwLock: all threads can share read access. ---
    let rwlock_data = Arc::new(RwLock::new(0i32));
    let start = Instant::now();
    let mut handles = vec![];
    for _ in 0..NUM_READERS {
        let data = Arc::clone(&rwlock_data);
        handles.push(thread::spawn(move || {
            for _ in 0..NUM_READS {
                let _ = *data.read().unwrap();
            }
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
    let rwlock_time = start.elapsed();

    // --- Mutex: every read serializes on the exclusive lock. ---
    let mutex_data = Arc::new(Mutex::new(0i32));
    let start = Instant::now();
    let mut handles = vec![];
    for _ in 0..NUM_READERS {
        let data = Arc::clone(&mutex_data);
        handles.push(thread::spawn(move || {
            for _ in 0..NUM_READS {
                let _ = *data.lock().unwrap();
            }
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
    let mutex_time = start.elapsed();

    println!("RwLock: {:?}", rwlock_time);
    println!("Mutex: {:?}", mutex_time);
    // FIX: the original printed "RwLock is {}x faster", an unconditional claim
    // that is false whenever the measured ratio is below 1.0. Report the raw
    // ratio instead and let the reader interpret it.
    println!(
        "Mutex/RwLock time ratio for this read-heavy workload: {:.2}x",
        mutex_time.as_secs_f64() / rwlock_time.as_secs_f64()
    );
}
| Method | Description | Blocks When |
|--------|-------------|-------------|
| read() | Acquire read lock | Write lock held |
| try_read() | Non-blocking read attempt | Would block |
| write() | Acquire write lock | Any lock held |
| try_write() | Non-blocking write attempt | Would block |
RwLock vs Mutex:
| Feature | RwLock | Mutex | |---------|--------|-------| | Concurrent readers | ✅ Multiple | ❌ Only one | | Write exclusivity | ✅ Exclusive | ✅ Exclusive | | Memory overhead | Higher | Lower | | Best for | Read-heavy | Write-heavy or equal R/W |
When to Use RwLock: read-heavy shared state — configuration, caches, lookup tables — where many threads read concurrently and writes are comparatively rare.
When to Use Mutex Instead: write-heavy or balanced read/write workloads, or when you want the simpler, lower-overhead primitive.
Key Points:
- Multiple readers OR one writer — never both at the same time.
- std's RwLock cannot upgrade a read lock to a write lock; release the read guard first and re-check under the write lock.
- Use try_read() and try_write() to avoid blocking.
- Consider parking_lot::RwLock for better performance.