Loading page…
Rust walkthroughs
Loading page…
Arc (Atomic Reference Counting) is a thread-safe version of Rc<T> that allows multiple ownership of the same data across threads. It uses atomic operations for reference counting, making it safe to share data between threads.
Key concepts:
- Arc<T> implements Send and Sync when T: Send + Sync
- Multiple Arc pointers can point to the same data
- Arc only gives out &T, not &mut T
- Slower than Rc due to atomic operations
When to use Arc:
When NOT to use Arc:
- Single-threaded code (use Rc for lower overhead)
- When you need mutation (combine with Mutex or RwLock)
use std::sync::Arc;
use std::thread;
/// Demonstrates basic Arc cloning: two handles, one shared allocation.
fn main() {
    let shared = Arc::new(vec![1, 2, 3, 4, 5]);
    // Cloning an Arc bumps the reference count; the Vec itself is not copied.
    let second_handle = Arc::clone(&shared);
    // Dereferencing either handle reaches the same underlying data.
    println!("Original: {:?}", *shared);
    println!("Clone: {:?}", *second_handle);
    // Two strong handles exist at this point.
    println!("Reference count: {}", Arc::strong_count(&shared));
}
use std::sync::Arc;
use std::thread;
/// Shares an immutable Vec across three reader threads via Arc.
fn main() {
    let data = Arc::new(vec![1, 2, 3, 4, 5]);
    let mut handles = Vec::new();
    for i in 0..3 {
        // Each thread gets its own handle to the same allocation.
        let per_thread = Arc::clone(&data);
        handles.push(thread::spawn(move || {
            println!("Thread {}: data = {:?}", i, *per_thread);
        }));
    }
    // Wait for every reader before inspecting the count.
    for handle in handles {
        handle.join().unwrap();
    }
    println!("All threads complete, ref count: {}", Arc::strong_count(&data));
}
use std::sync::{Arc, Mutex};
use std::thread;
/// Ten threads increment a shared counter behind Arc<Mutex<i32>>.
fn main() {
    let counter = Arc::new(Mutex::new(0));
    let mut workers = Vec::new();
    for _ in 0..10 {
        let counter = Arc::clone(&counter);
        workers.push(thread::spawn(move || {
            // Lock, bump, release (the guard drops at the end of the closure).
            *counter.lock().unwrap() += 1;
        }));
    }
    for worker in workers {
        worker.join().unwrap();
    }
    println!("Final counter: {}", *counter.lock().unwrap());
}
use std::sync::{Arc, RwLock};
use std::thread;
/// Several readers and one writer share a Vec behind Arc<RwLock<..>>.
fn main() {
    let data = Arc::new(RwLock::new(vec![1, 2, 3]));
    let mut handles = Vec::new();
    // Reader threads: read locks may be held concurrently.
    for i in 0..3 {
        let reader_data = Arc::clone(&data);
        handles.push(thread::spawn(move || {
            let read = reader_data.read().unwrap();
            println!("Reader {}: {:?}", i, *read);
        }));
    }
    // One writer thread: a write lock is exclusive.
    let writer_data = Arc::clone(&data);
    handles.push(thread::spawn(move || {
        writer_data.write().unwrap().push(4);
        println!("Writer: added 4");
    }));
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Final data: {:?}", *data.read().unwrap());
}
use std::sync::Arc;
use std::thread;
/// Read-only configuration shared across threads with a plain Arc
/// (no lock needed because nothing mutates it).
#[derive(Debug)]
struct Config {
    host: String,
    port: u16,
    debug: bool,
}

fn main() {
    let config = Arc::new(Config {
        host: String::from("localhost"),
        port: 8080,
        debug: true,
    });
    let mut handles = Vec::new();
    for i in 0..3 {
        // Cheap clone: only the refcount is bumped, not the Config.
        let config = Arc::clone(&config);
        handles.push(thread::spawn(move || {
            println!("Thread {} using config: {}:{}",
                i, config.host, config.port);
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
}
use std::sync::{Arc, Weak};
/// Walks through strong/weak reference counts on a single Arc.
fn main() {
    let strong = Arc::new(42);
    println!("After creation: strong={}, weak={}",
        Arc::strong_count(&strong), Arc::weak_count(&strong));
    // A second strong handle.
    let strong2 = Arc::clone(&strong);
    println!("After clone: strong={}, weak={}",
        Arc::strong_count(&strong), Arc::weak_count(&strong));
    // A weak handle is counted separately and does not keep the value alive.
    let weak = Arc::downgrade(&strong);
    println!("After downgrade: strong={}, weak={}",
        Arc::strong_count(&strong), Arc::weak_count(&strong));
    // Upgrading succeeds while any strong handle remains; the upgraded
    // handle itself contributes one to the strong count for this scope.
    if let Some(_upgraded) = weak.upgrade() {
        println!("Upgraded: strong={}, weak={}",
            Arc::strong_count(&strong), Arc::weak_count(&strong));
    }
    drop(strong2);
    drop(strong);
    // Every strong handle is gone: the value was dropped, so upgrade() is None.
    println!("After drops: weak can upgrade? {}", weak.upgrade().is_some());
}
use std::sync::{Arc, Weak, Mutex};
/// A pair of linked nodes: strong `next` pointers, weak `prev` pointers,
/// so forward+backward links never form a strong reference cycle (leak).
struct Node {
    value: i32,
    next: Mutex<Option<Arc<Node>>>,
    prev: Mutex<Option<Weak<Node>>>, // Use Weak to avoid cycles
}

impl Node {
    /// Allocates a detached node holding `value`.
    fn new(value: i32) -> Arc<Self> {
        Arc::new(Self {
            value,
            next: Mutex::new(None),
            prev: Mutex::new(None),
        })
    }
}

/// Wires `first -> second` strongly and `second -> first` weakly.
fn link(first: &Arc<Node>, second: &Arc<Node>) {
    *first.next.lock().unwrap() = Some(Arc::clone(second));
    *second.prev.lock().unwrap() = Some(Arc::downgrade(first));
}

fn main() {
    let a = Node::new(1);
    let b = Node::new(2);
    link(&a, &b);
    println!("a.value = {}", a.value);
    println!("b.value = {}", b.value);
    // `a` stays at count 1 because b.prev is weak;
    // `b` is at count 2 (held by `b` and by a.next).
    println!("a ref count: {}", Arc::strong_count(&a));
    println!("b ref count: {}", Arc::strong_count(&b));
}
use std::sync::Arc;
/// Shows Arc::try_unwrap: succeeds only for a uniquely-owned Arc.
fn main() {
    // Sole owner: try_unwrap hands back the inner value.
    let arc = Arc::new(42);
    match Arc::try_unwrap(arc) {
        Ok(value) => println!("Got ownership: {}", value),
        Err(arc) => println!("Multiple references, still: {}", *arc),
    }
    // With a second handle alive, try_unwrap returns the Arc in Err.
    let arc = Arc::new(100);
    let _keep_alive = Arc::clone(&arc);
    match Arc::try_unwrap(arc) {
        Ok(value) => println!("Got ownership: {}", value),
        Err(arc) => println!("Multiple references, still: {}", *arc),
    }
}
use std::sync::Arc;
/// Shows Arc::into_inner: yields Some only when this is the last reference.
fn main() {
    let arc = Arc::new(42);
    // Unique owner, so the inner value comes out.
    match Arc::into_inner(arc) {
        Some(value) => println!("Got inner value: {}", value),
        None => {}
    }
    let arc = Arc::new(100);
    let _clone = Arc::clone(&arc);
    // A second handle exists, so into_inner yields None.
    if let Some(value) = Arc::into_inner(arc) {
        println!("Got inner: {}", value);
    } else {
        println!("Multiple references exist");
    }
}
use std::sync::Arc;
use std::thread;
// NOTE(review): Task is declared but never used in this listing;
// kept as-is for compatibility with the original example.
struct Task {
    id: usize,
    name: String,
}

/// Five worker threads each push one value into a shared Vec.
fn main() {
    let shared_state = Arc::new(std::sync::Mutex::new(Vec::<usize>::new()));
    let mut workers = Vec::new();
    for i in 0..5 {
        let state = Arc::clone(&shared_state);
        workers.push(thread::spawn(move || {
            // Simulate work before touching the shared Vec.
            thread::sleep(std::time::Duration::from_millis(10));
            state.lock().unwrap().push(i * 2);
        }));
    }
    for worker in workers {
        worker.join().unwrap();
    }
    let result = shared_state.lock().unwrap();
    println!("Results: {:?}", *result);
}
use std::sync::Arc;
use std::rc::Rc;
// Fix: the original listing called `thread::spawn` without importing
// `std::thread` (its only imports were Arc and Rc), so it did not compile.
use std::thread;

/// Contrasts Rc (single-threaded only) with Arc (thread-safe).
fn main() {
    // Rc - single-threaded only
    let rc = Rc::new(42);
    let rc_clone = Rc::clone(&rc);
    println!("Rc count: {}", Rc::strong_count(&rc));
    // Arc - thread-safe
    let arc = Arc::new(42);
    let arc_clone = Arc::clone(&arc);
    println!("Arc count: {}", Arc::strong_count(&arc));
    // Rc cannot be sent to threads
    // thread::spawn(move || {
    //     println!("{}", *rc_clone); // ERROR: Rc is not Send
    // });
    // Arc can be sent to threads
    thread::spawn(move || {
        println!("Arc in thread: {}", *arc_clone);
    }).join().unwrap();
}
use std::sync::{Arc, RwLock};
use std::collections::HashMap;
use std::thread;
/// A minimal thread-safe cache: a HashMap guarded by an RwLock so many
/// readers can look up concurrently while writers get exclusive access.
struct Cache<K, V> {
    data: RwLock<HashMap<K, V>>,
}

impl<K, V> Cache<K, V>
where
    K: std::hash::Hash + Eq + Clone + Send + 'static,
    V: Clone + Send + 'static,
{
    /// Creates an empty cache.
    fn new() -> Self {
        Self {
            data: RwLock::new(HashMap::new()),
        }
    }

    /// Returns a clone of the cached value, if present (shared read lock).
    fn get(&self, key: &K) -> Option<V> {
        self.data.read().unwrap().get(key).cloned()
    }

    /// Inserts or replaces a value (exclusive write lock).
    fn insert(&self, key: K, value: V) {
        self.data.write().unwrap().insert(key, value);
    }
}

fn main() {
    let cache = Arc::new(Cache::<String, i32>::new());
    // Populate before spawning any readers.
    cache.insert(String::from("one"), 1);
    cache.insert(String::from("two"), 2);
    let mut handles = Vec::new();
    for i in 0..3 {
        let cache = Arc::clone(&cache);
        handles.push(thread::spawn(move || {
            if let Some(value) = cache.get(&String::from("one")) {
                println!("Thread {} got: {}", i, value);
            }
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
}
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
/// Ten threads bump a lock-free counter with atomic fetch_add.
fn main() {
    let counter = Arc::new(AtomicUsize::new(0));
    let mut handles = Vec::new();
    for _ in 0..10 {
        let counter = Arc::clone(&counter);
        handles.push(thread::spawn(move || {
            // Atomic increment: no Mutex required for a plain counter.
            counter.fetch_add(1, Ordering::SeqCst);
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Final count: {}", counter.load(Ordering::SeqCst));
}
use std::sync::{Arc, Mutex};
use std::thread;
/// A registered callback: boxed closure that is Send so it may fire on any thread.
type Listener = Box<dyn Fn(&str) + Send>;

/// Minimal synchronous pub/sub: listeners stored behind a Mutex.
struct EventEmitter {
    listeners: Mutex<Vec<Listener>>,
}

impl EventEmitter {
    /// Creates an emitter with no listeners.
    fn new() -> Self {
        Self {
            listeners: Mutex::new(Vec::new()),
        }
    }

    /// Registers a listener to be invoked on every subsequent emit.
    fn subscribe(&self, listener: Listener) {
        self.listeners.lock().unwrap().push(listener);
    }

    /// Invokes every registered listener with `event`, in subscription order.
    fn emit(&self, event: &str) {
        let listeners = self.listeners.lock().unwrap();
        for listener in listeners.iter() {
            listener(event);
        }
    }
}

fn main() {
    let emitter = Arc::new(EventEmitter::new());
    // Subscribe from the main thread...
    emitter.subscribe(Box::new(|event| {
        println!("Listener 1: {}", event);
    }));
    // ...then emit from a worker thread through a cloned handle.
    let emitter_clone = Arc::clone(&emitter);
    thread::spawn(move || {
        emitter_clone.emit("Hello from thread!");
    }).join().unwrap();
}
use std::sync::{Arc, Mutex};
/// A pretend database connection, identified by id.
struct Connection {
    id: usize,
}

impl Connection {
    fn new(id: usize) -> Self {
        println!("Creating connection {}", id);
        Self { id }
    }

    fn query(&self, sql: &str) {
        println!("Connection {} executing: {}", self.id, sql);
    }
}

/// Fixed-size pool: connections are checked out by popping and
/// returned by pushing, all under a single Mutex.
struct ConnectionPool {
    connections: Mutex<Vec<Connection>>,
    // NOTE(review): next_id is written but never read in this listing —
    // presumably intended for growing the pool; kept for compatibility.
    next_id: usize,
}

impl ConnectionPool {
    /// Eagerly creates `size` connections with ids 0..size.
    fn new(size: usize) -> Self {
        let connections: Vec<Connection> = (0..size).map(Connection::new).collect();
        Self {
            connections: Mutex::new(connections),
            next_id: size,
        }
    }

    /// Checks a connection out, or None if the pool is exhausted.
    fn get_connection(&self) -> Option<Connection> {
        self.connections.lock().unwrap().pop()
    }

    /// Puts a connection back so it can be reused.
    fn return_connection(&self, conn: Connection) {
        self.connections.lock().unwrap().push(conn);
    }
}

fn main() {
    let pool = Arc::new(ConnectionPool::new(3));
    let pool_clone = Arc::clone(&pool);
    if let Some(conn) = pool_clone.get_connection() {
        conn.query("SELECT * FROM users");
        pool_clone.return_connection(conn);
    }
}
use std::sync::Arc;
// Demonstrates that an Arc<T> handle itself is only pointer-sized;
// the reference counts and the data live in the heap allocation it points to.
fn main() {
// Arc has pointer-sized overhead
println!("Size of Arc<i32>: {} bytes", std::mem::size_of::<Arc<i32>>());
println!("Size of Arc<String>: {} bytes", std::mem::size_of::<Arc<String>>());
println!("Size of Arc<Vec<i32>>: {} bytes", std::mem::size_of::<Arc<Vec<i32>>>());
// Internal layout:
// - Pointer to heap allocation
// - Heap contains: strong_count, weak_count, data
let arc = Arc::new(42i32);
// as_ptr exposes the address of the shared data without changing any count.
println!("Arc points to: {:?}", Arc::as_ptr(&arc));
}Arc Methods:
| Method | Description |
|--------|-------------|
| new(value) | Create a new Arc |
| clone(&self) | Create another reference (increment count) |
| strong_count(&self) | Get number of strong references |
| weak_count(&self) | Get number of weak references |
| downgrade(&self) | Create a Weak reference |
| try_unwrap(arc) | Try to get ownership |
| into_inner(arc) | Get inner if only reference |
| as_ptr(&self) | Get raw pointer to data |
| ptr_eq(a, b) | Check if two Arcs point to same data |
Arc vs Rc Comparison:
| Feature | Rc | Arc |
|---------|-----|-----|
| Thread-safe | No | Yes |
| Send | No | Yes (when T: Send + Sync) |
| Sync | No | Yes (when T: Send + Sync) |
| Overhead | Lower | Higher (atomic ops) |
| Use case | Single-threaded | Multi-threaded |
Common Arc Patterns:
| Pattern | Combination | Use Case |
|---------|-------------|----------|
| Shared config | Arc<Config> | Read-only config |
| Shared counter | Arc<Mutex<T>> | Mutable shared state |
| Shared collection | Arc<RwLock<Vec<T>>> | Read-heavy shared data |
| Atomic counter | Arc<AtomicUsize> | Lock-free counter |
| Weak references | Arc + Weak | Avoid cycles |
Key Points:
- Arc<T> is the thread-safe version of Rc<T>
- Arc is Send and Sync when T: Send + Sync
- Slower than Rc due to atomic overhead
- Arc only provides &T (immutable reference)
- Combine with Mutex or RwLock for mutable access
- Use Weak<T> to avoid reference cycles
- Arc::try_unwrap() and Arc::into_inner() for ownership extraction
- Prefer Rc for single-threaded contexts