Loading page…
Rust walkthroughs
Loading page…
Atomic types in Rust provide lock-free thread-safe operations on primitive values. They are located in std::sync::atomic and enable safe concurrent access without the overhead of mutexes for simple operations.
Key atomic types:
AtomicBool — Boolean value with atomic operations; AtomicI32, AtomicI64, AtomicU32, AtomicU64 — Integer types; AtomicIsize, AtomicUsize — Pointer-sized integers; AtomicPtr<T> — Raw pointer with atomic operations. Each atomic type provides methods like:
load() / store() — Read and write values; swap() — Atomically swap values; compare_exchange() — Conditional swap (CAS operation); fetch_add() / fetch_sub() — Atomic arithmetic; fetch_and() / fetch_or() / fetch_xor() — Atomic bitwise operations. All atomic operations require an Ordering parameter that specifies memory ordering constraints:
Relaxed — No ordering guarantees, only atomicity; Release — for stores: earlier reads and writes in the same thread cannot be reordered after this store; Acquire — for loads: later reads and writes in the same thread cannot be reordered before this load; AcqRel — Combines Acquire and Release; SeqCst — Sequentially consistent (strongest guarantee).
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Arc;
use std::thread;
/// Entry point: demonstrates a lock-free shared counter.
/// Ten threads each perform 1000 atomic increments; because fetch_add
/// is a single atomic read-modify-write, the result is always 10_000.
fn main() {
    let counter = Arc::new(AtomicI32::new(0));

    // Spawn 10 threads, each incrementing 1000 times
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let shared = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    // Atomically increment; no lock needed.
                    shared.fetch_add(1, Ordering::SeqCst);
                }
            })
        })
        .collect();

    // Block until every worker has finished before reading the result.
    for handle in handles {
        handle.join().unwrap();
    }

    println!("Final counter: {}", counter.load(Ordering::SeqCst));
    // Always 10000, no race conditions!
}
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
/// Entry point: uses an AtomicBool as a cooperative stop flag.
/// The worker polls the flag each iteration and exits once the main
/// thread clears it.
fn main() {
    let running = Arc::new(AtomicBool::new(true));
    let flag_for_worker = Arc::clone(&running);

    // Worker thread that runs until flag is set
    let worker = thread::spawn(move || {
        let mut count = 0;
        // Relaxed suffices: only the flag itself is communicated, and
        // no other data is published through it.
        while flag_for_worker.load(Ordering::Relaxed) {
            count += 1;
            thread::sleep(Duration::from_millis(1));
        }
        println!("Worker stopped after {} iterations", count);
    });

    // Let worker run for a bit
    thread::sleep(Duration::from_millis(50));
    // Signal the worker to stop
    running.store(false, Ordering::Relaxed);
    worker.join().unwrap();
    println!("Worker has stopped");
}
use std::sync::atomic::{AtomicI32, Ordering};
/// Entry point: walks through compare_exchange (CAS) semantics.
/// compare_exchange(expected, new, success_ordering, failure_ordering)
/// returns Ok(previous) when the swap happened and Err(actual) when the
/// current value did not match `expected`.
fn main() {
    let value = AtomicI32::new(5);

    // Try to change 5 to 10 (should succeed)
    if let Err(v) = value.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst) {
        println!("Failed, value was {}", v);
    } else {
        println!("Successfully changed 5 to 10");
    }

    // Try to change 5 to 20 (should fail, value is now 10)
    if let Err(v) = value.compare_exchange(5, 20, Ordering::SeqCst, Ordering::SeqCst) {
        println!("Failed, value was {} (expected 5)", v);
    } else {
        println!("Successfully changed 5 to 20");
    }

    println!("Final value: {}", value.load(Ordering::SeqCst));
}
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;
/// A thread-safe monotonic counter backed by an AtomicU64.
struct AtomicCounter {
    value: AtomicU64,
}

impl AtomicCounter {
    /// Creates a counter starting at zero.
    fn new() -> Self {
        AtomicCounter { value: AtomicU64::new(0) }
    }

    /// Atomically adds one and returns the value *before* the
    /// increment (fetch_add semantics).
    fn increment(&self) -> u64 {
        self.value.fetch_add(1, Ordering::SeqCst)
    }

    /// Returns the current count.
    fn get(&self) -> u64 {
        self.value.load(Ordering::SeqCst)
    }

    /// Resets the count back to zero.
    fn reset(&self) {
        self.value.store(0, Ordering::SeqCst);
    }
}
/// Entry point: five threads share one AtomicCounter and increment it
/// 1000 times each, for a final count of 5000.
fn main() {
    let counter = Arc::new(AtomicCounter::new());

    let handles: Vec<_> = (0..5)
        .map(|_| {
            let shared = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    shared.increment();
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    println!("Counter value: {}", counter.get()); // 5000
}
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
/// A minimal test-and-set spin lock built on AtomicBool.
/// `lock` busy-waits, so it is only suitable for very short critical
/// sections; there is no RAII guard and no poisoning.
struct SpinLock {
    locked: AtomicBool,
}

impl SpinLock {
    /// Creates the lock in the unlocked state.
    fn new() -> Self {
        SpinLock { locked: AtomicBool::new(false) }
    }

    /// Acquires the lock, spinning until it becomes available.
    fn lock(&self) {
        loop {
            // CAS false -> true. Acquire on success orders the critical
            // section after acquisition; failure needs no ordering.
            let acquired = self
                .locked
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok();
            if acquired {
                return;
            }
            // Tell the CPU we are in a spin-wait loop.
            std::hint::spin_loop();
        }
    }

    /// Releases the lock. Release ordering publishes the critical
    /// section's writes to the next acquirer.
    fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }
}
/// Entry point: five threads contend for the SpinLock; each holds it
/// for 100ms, so the acquire/release messages never interleave.
fn main() {
    let lock = Arc::new(SpinLock::new());
    let mut handles = vec![];

    for i in 0..5 {
        let shared = Arc::clone(&lock);
        let worker = thread::spawn(move || {
            shared.lock();
            println!("Thread {} acquired lock", i);
            thread::sleep(std::time::Duration::from_millis(100));
            println!("Thread {} releasing lock", i);
            shared.unlock();
        });
        handles.push(worker);
    }

    for handle in handles {
        handle.join().unwrap();
    }
}
use std::sync::atomic::{AtomicI32, Ordering};
/// Entry point: tours the atomic fetch_* read-modify-write helpers.
/// Each fetch_* call applies its operation atomically and returns the
/// value that was stored *before* the update.
fn main() {
    let counter = AtomicI32::new(10);

    // fetch_add returns the OLD value
    let previous = counter.fetch_add(5, Ordering::SeqCst);
    println!("Added 5, old value was {}, new value is {}", previous, counter.load(Ordering::SeqCst));

    let previous = counter.fetch_sub(3, Ordering::SeqCst);
    println!("Subtracted 3, old value was {}, new value is {}", previous, counter.load(Ordering::SeqCst));

    // Atomic bitwise AND
    let bits = AtomicI32::new(0b1111);
    let previous = bits.fetch_and(0b1010, Ordering::SeqCst);
    println!("AND with 0b1010, old was {:b}, new is {:b}", previous, bits.load(Ordering::SeqCst));

    // Atomic bitwise OR
    let bits = AtomicI32::new(0b1000);
    let previous = bits.fetch_or(0b0010, Ordering::SeqCst);
    println!("OR with 0b0010, old was {:b}, new is {:b}", previous, bits.load(Ordering::SeqCst));

    // Atomic bitwise XOR
    let bits = AtomicI32::new(0b1100);
    let previous = bits.fetch_xor(0b0110, Ordering::SeqCst);
    println!("XOR with 0b0110, old was {:b}, new is {:b}", previous, bits.load(Ordering::SeqCst));

    // fetch_max keeps the larger of the current value and the argument.
    let clamped = AtomicI32::new(5);
    clamped.fetch_max(10, Ordering::SeqCst);
    println!("After fetch_max(10): {}", clamped.load(Ordering::SeqCst));
}
use std::sync::atomic::{AtomicBool, AtomicI32, Ordering};
use std::sync::Arc;
use std::thread;
/// Entry point: contrasts Relaxed, Acquire/Release, and SeqCst orderings.
fn main() {
    // Relaxed: Only guarantees atomicity, no ordering
    let relaxed_counter = AtomicI32::new(0);
    relaxed_counter.fetch_add(1, Ordering::Relaxed);

    // Acquire/Release: establishes a happens-before relationship between
    // the writer's Release store and the reader's Acquire load.
    let shared = Arc::new((AtomicBool::new(false), AtomicI32::new(0)));
    let for_writer = Arc::clone(&shared);

    let writer = thread::spawn(move || {
        for_writer.1.store(42, Ordering::Relaxed); // Write data
        for_writer.0.store(true, Ordering::Release); // Publish: data is ready
    });

    let reader = thread::spawn(move || {
        // Spin until the Acquire load observes the Release store.
        while !shared.0.load(Ordering::Acquire) {
            std::hint::spin_loop();
        }
        // Guaranteed to see 42 because of the acquire/release pair
        println!("Data: {}", shared.1.load(Ordering::Relaxed));
    });

    writer.join().unwrap();
    reader.join().unwrap();

    // SeqCst: Strongest guarantee, total order of all SeqCst operations
    // Use when in doubt, but it may be slower than necessary
    println!("\nOrdering strength (weakest to strongest):");
    println!("Relaxed < Acquire/Release < AcqRel < SeqCst");
}
use std::sync::atomic::{AtomicPtr, Ordering};
use std::ptr;
// A singly-linked list node for the lock-free AtomicPtr example below.
struct Node {
// Payload stored in this node.
value: i32,
// Raw (non-atomic) pointer to the next node; null marks the tail.
next: *mut Node,
}
// Demonstrates AtomicPtr by pushing nodes onto the head of a tiny
// lock-free linked list with swap and a compare_exchange retry loop.
fn main() {
let head = AtomicPtr::new(ptr::null_mut());
// Create a new node (leaked to a raw pointer so it can be shared)
let new_node = Box::into_raw(Box::new(Node { value: 1, next: ptr::null_mut() }));
// Atomically swap the head pointer; swap returns the previous head
let old_head = head.swap(new_node, Ordering::SeqCst);
if old_head.is_null() {
println!("First node added");
}
// Add another node
let second_node = Box::into_raw(Box::new(Node { value: 2, next: ptr::null_mut() }));
// Use compare_exchange to atomically insert: link to the observed
// head, then CAS; retry if another thread moved head in between.
loop {
let current = head.load(Ordering::SeqCst);
// SAFETY: second_node came from Box::into_raw above and is not yet
// published, so this thread has exclusive access to its fields.
unsafe { (*second_node).next = current; }
match head.compare_exchange(current, second_node, Ordering::SeqCst, Ordering::SeqCst) {
Ok(_) => break,
Err(_) => continue, // Another thread modified head, retry
}
}
// Read and print the whole list, following `next` until null
let current = head.load(Ordering::SeqCst);
// SAFETY: every pointer in the list was produced by Box::into_raw and
// has not been freed yet; traversal stops at the null tail.
unsafe {
let mut node = current;
while !node.is_null() {
println!("Node value: {}", (*node).value);
node = (*node).next;
}
}
// Cleanup (in real code, you'd need proper memory reclamation)
// SAFETY: both pointers were created by Box::into_raw and each is
// reclaimed exactly once here, after all other uses.
unsafe {
let _ = Box::from_raw(new_node);
let _ = Box::from_raw(second_node);
}
}use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
/// One-time lazy initialization with an atomic fast-path flag.
///
/// Invariant: `initialized` is true only AFTER `value` holds Some(_),
/// so readers that observe the flag can safely unwrap the value.
struct LazyInit<T> {
    // Fast-path flag; stored with Release only after `value` is set.
    initialized: AtomicBool,
    // The lazily created value; the mutex serializes initialization.
    value: std::sync::Mutex<Option<T>>,
}

impl<T: Clone + Send> LazyInit<T> {
    /// Creates an empty, uninitialized holder.
    fn new() -> Self {
        Self {
            initialized: AtomicBool::new(false),
            value: std::sync::Mutex::new(None),
        }
    }

    /// Returns a clone of the value, running `init` at most once.
    ///
    /// Bug fix: the original flipped `initialized` to true with a
    /// compare_exchange BEFORE running `init`, so a concurrent caller
    /// could take the fast path (or lose the CAS and fall through)
    /// while `value` was still None and panic on `unwrap()`. Here the
    /// mutex serializes initialization, and the flag is published only
    /// after the value is actually in place.
    fn get_or_init<F: FnOnce() -> T>(&self, init: F) -> T {
        // Fast path: Acquire pairs with the Release store below, so a
        // true flag guarantees the initialized value is visible.
        if self.initialized.load(Ordering::Acquire) {
            return self.value.lock().unwrap().clone().unwrap();
        }
        // Slow path: the first thread in initializes; later threads
        // block on the mutex until the value exists.
        let mut guard = self.value.lock().unwrap();
        if guard.is_none() {
            *guard = Some(init());
            // Publish only after the value has been written.
            self.initialized.store(true, Ordering::Release);
        }
        guard.clone().unwrap()
    }
}
/// Entry point: five threads race to initialize the shared LazyInit;
/// the init closure runs once and every thread receives the same value.
fn main() {
    let lazy = Arc::new(LazyInit::<String>::new());
    let mut handles = vec![];

    for i in 0..5 {
        let shared = Arc::clone(&lazy);
        handles.push(thread::spawn(move || {
            let value = shared.get_or_init(|| {
                println!("Thread {} is initializing", i);
                "Initialized!".to_string()
            });
            println!("Thread {} got: {}", i, value);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }
}
| Atomic Type | Description | Common Use Case |
|-------------|-------------|-----------------|
| AtomicBool | Boolean flag | Stop signals, flags |
| AtomicI32/64 | Signed integers | Counters, IDs |
| AtomicU32/64 | Unsigned integers | Counters, indices |
| AtomicPtr<T> | Raw pointer | Lock-free data structures |
| AtomicUsize | Pointer-sized | Indexing, sizes |
Ordering Cheat Sheet:
| Ordering | Use Case |
|----------|----------|
| Relaxed | Simple counters, statistics |
| Release | Publishing data to other threads |
| Acquire | Reading published data |
| AcqRel | Combined read-modify-write |
| SeqCst | When in doubt, strongest guarantee |
Key Points:
SeqCst is safest but may be overkill; Acquire/Release is often sufficient. Use compare_exchange for lock-free algorithms (CAS loops). For shared state more complex than simple counters and flags, prefer Mutex or RwLock instead.