Loading page…
Rust walkthroughs
Loading page…
How does lru::LruCache::get_mut differ from lru::LruCache::get in terms of cache ordering side effects?

Both lru::LruCache::get and lru::LruCache::get_mut promote the accessed entry to the "most recently used" position when the entry is found, updating the cache's ordering. The key difference is that get_mut returns a mutable reference allowing modification of the value, while get returns an immutable reference. Both methods have the same side effect on cache ordering: they move the accessed key to the most-recently-used position. This is intentional behavior—LRU cache ordering should reflect access patterns, and both reads and writes are meaningful accesses that should update recency. If you need to inspect without promoting, you must use peek or peek_mut instead.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Demonstrates baseline LRU ordering: insertion order determines recency.
fn basic_lru_behavior() {
    let capacity = NonZeroUsize::new(3).unwrap();
    let mut cache = LruCache::<&str, u32>::new(capacity);
    // Each put makes its key the most recently used entry.
    for (key, value) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(key, value);
    }
    // Recency order is now [a, b, c], least recent first.
    println!("LRU: {:?}", cache.peek_lru()); // Some(("a", 1))
    println!("MRU: {:?}", cache.peek_mru()); // Some(("c", 3))
}
The LRU cache tracks recency order, with the least recently used at one end and most recently used at the other.
get: Access and Promote

use lru::LruCache;
use std::num::NonZeroUsize;
/// Shows that `get` promotes the accessed entry to most recently used.
fn get_promotes() {
    let mut cache = LruCache::<&str, u32>::new(NonZeroUsize::new(3).unwrap());
    for (key, value) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(key, value);
    }
    // Recency order: [a, b, c] (a is LRU, c is MRU).
    // Reading "a" moves it to the most-recently-used slot.
    let value = cache.get(&"a");
    println!("Value: {:?}", value); // Some(&1)
    // Recency order is now [b, c, a] (b is LRU, a is MRU).
    println!("LRU: {:?}", cache.peek_lru()); // Some(("b", 2))
    println!("MRU: {:?}", cache.peek_mru()); // Some(("a", 1))
}
get retrieves the value and promotes the entry to most recently used.
get_mut: Same Ordering Side Effect

use lru::LruCache;
use std::num::NonZeroUsize;
fn get_mut_promotes() {
let mut cache: LruCache<&str, u32> = LruCache::new(NonZeroUsize::new(3).unwrap());
cache.put("a", 1);
cache.put("b", 2);
cache.put("c", 3);
// Order: [a, b, c] (a is LRU, c is MRU)
// Access "a" with get_mut - it also becomes most recently used
if let Some(value) = cache.get_mut(&"a") {
*value += 10; // Modify the value
}
// Order changed: [b, c, a] (b is LRU, a is MRU)
println!("LRU: {:?}", cache.peek_lru()); // Some(("b", 2))
println!("MRU: {:?}", cache.peek_mru()); // Some(("a", 11))
// The value was modified AND the entry was promoted
}get_mut also promotes the entryâthe ordering side effect is identical to get.
use lru::LruCache;
use std::num::NonZeroUsize;
fn ordering_comparison() {
// Test with get
let mut cache1: LruCache<&str, u32> = LruCache::new(NonZeroUsize::new(3).unwrap());
cache1.put("a", 1);
cache1.put("b", 2);
cache1.put("c", 3);
cache1.get(&"a");
let order1: Vec<_> = cache1.iter().map(|(k, _)| *k).collect();
println!("After get: {:?}", order1); // ["c", "b", "a"] (MRU to LRU in iter)
// Test with get_mut
let mut cache2: LruCache<&str, u32> = LruCache::new(NonZeroUsize::new(3).unwrap());
cache2.put("a", 1);
cache2.put("b", 2);
cache2.put("c", 3);
cache2.get_mut(&"a");
let order2: Vec<_> = cache2.iter().map(|(k, _)| *k).collect();
println!("After get_mut: {:?}", order2); // ["c", "b", "a"]
// Both produce the same ordering
assert_eq!(order1, order2);
}Both methods produce identical ordering changesâboth promote the accessed entry.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Contrasts the reference types returned by `get` and `get_mut`.
fn mutability_difference() {
    let mut cache = LruCache::<&str, Vec<u32>>::new(NonZeroUsize::new(3).unwrap());
    cache.put("items", vec![1, 2, 3]);
    // get hands back a shared (read-only) reference.
    if let Some(v) = cache.get(&"items") {
        println!("Items: {:?}", v);
        // v.push(4); // Error: cannot mutate immutable reference
    }
    // get_mut hands back an exclusive reference, so the value can change.
    if let Some(list) = cache.get_mut(&"items") {
        list.push(4); // Can modify
        list.push(5); // Multiple modifications
    }
    println!("Modified items: {:?}", cache.get(&"items")); // Some([1, 2, 3, 4, 5])
}
The key difference is that get_mut allows modification of the cached value.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Demonstrates that `peek` and `peek_mut` leave the recency order untouched.
fn peek_no_promote() {
    let mut cache = LruCache::<&str, u32>::new(NonZeroUsize::new(3).unwrap());
    for (key, value) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(key, value);
    }
    // Recency order: [a, b, c].
    // peek reads without promoting.
    let value = cache.peek(&"a");
    println!("Peeked: {:?}", value); // Some(&1)
    // peek_mut writes without promoting, either.
    if let Some(slot) = cache.peek_mut(&"a") {
        *slot += 10;
    }
    // Recency order is still [a, b, c].
    println!("LRU: {:?}", cache.peek_lru()); // Some(("a", 11)) - still "a"
    println!("MRU: {:?}", cache.peek_mru()); // Some(("c", 3)) - still "c"
    // Compare: get/get_mut would have promoted "a" to MRU.
}
Use peek and peek_mut to access entries without affecting cache ordering.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Shows that `peek_mut` keeps an entry in line for eviction while updating it.
fn eviction_order_scenario() {
    let mut cache = LruCache::<&str, u32>::new(NonZeroUsize::new(3).unwrap());
    for (key, value) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(key, value);
    }
    // Recency order: [a, b, c] - "a" is first in line for eviction.
    // Update "a" while keeping it the eviction candidate:
    if let Some(slot) = cache.peek_mut(&"a") {
        *slot += 10; // Update without promoting
    }
    // Inserting a fourth item evicts "a", which is still the LRU entry.
    cache.put("d", 4);
    assert!(!cache.contains(&"a"));
    for key in ["b", "c", "d"] {
        assert!(cache.contains(&key));
    }
    // Had we used get_mut, "a" would have been promoted and "b" evicted instead.
}
Using peek_mut preserves the eviction order, which matters when you need to modify values without changing their recency.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Read paths: `get` counts as an access, `peek` does not.
fn read_only_patterns() {
    let mut cache = LruCache::<&str, Data>::new(NonZeroUsize::new(3).unwrap());
    cache.put("key1", Data { value: 42, accessed: 0 });
    cache.put("key2", Data { value: 100, accessed: 0 });
    // Reading through get still promotes the entry.
    if let Some(data) = cache.get(&"key1") {
        println!("Value: {}", data.value);
        // The reference is shared, so the data cannot be modified here.
    }
    // Reading through peek leaves the recency order alone.
    if let Some(data) = cache.peek(&"key2") {
        println!("Value: {}", data.value);
        // Cache order unchanged
    }
}

/// Payload stored in the cache for the read-only examples.
#[derive(Clone)]
struct Data {
    value: u32,    // example payload
    accessed: u32, // spare counter, unused in this snippet
}
Choose get vs peek based on whether the access should update recency.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Write paths: `get_mut` promotes the entry, `peek_mut` does not.
///
/// Fixed: the original called `get_mut("key1")` while the cache held only
/// "key1" and inserted "key2"/"key3" afterwards. That left "key1" as the LRU
/// entry, so `put("key4")` evicted "key1" - not "key2" - and the final
/// assertion failed. Filling the cache before the `get_mut` call makes the
/// promotion observable and the eviction match the comments.
fn write_patterns() {
    let mut cache: LruCache<&str, Data> = LruCache::new(NonZeroUsize::new(3).unwrap());
    cache.put("key1", Data { value: 42, reads: 0 });
    cache.put("key2", Data { value: 10, reads: 0 });
    cache.put("key3", Data { value: 20, reads: 0 });
    // Order: [key1, key2, key3] (key1 is LRU).
    // get_mut - promotes AND allows modification
    if let Some(data) = cache.get_mut(&"key1") {
        data.value += 1;
        data.reads += 1;
    }
    // "key1" is now MRU; order: [key2, key3, key1] (key2 is LRU).
    // peek_mut - modify WITHOUT promoting
    if let Some(data) = cache.peek_mut(&"key2") {
        data.reads += 1;
    }
    // Order unchanged - "key2" is still LRU.
    // Adding a new item evicts key2 (still LRU).
    cache.put("key4", Data { value: 30, reads: 0 });
    assert!(!cache.contains(&"key2"));
}

/// Payload with a per-entry read counter for the write examples.
struct Data {
    value: u32,
    reads: u32,
}
Choose get_mut vs peek_mut based on whether modifications should update recency.
use lru::LruCache;
use std::num::NonZeroUsize;
/// One-stop summary of the four access methods and their ordering effects.
fn method_summary() {
    let mut cache = LruCache::<&str, u32>::new(NonZeroUsize::new(3).unwrap());
    cache.put("key", 1);

    // get(&key) -> Option<&V>: promotes the key to MRU when found.
    // Use when reading should count as an access that refreshes recency.
    let _ = cache.get(&"key");

    // get_mut(&key) -> Option<&mut V>: promotes the key to MRU when found.
    // Use when a modification should count as an access.
    if let Some(slot) = cache.get_mut(&"key") {
        *slot += 1;
    }

    // peek(&key) -> Option<&V>: never promotes.
    // Use when reading must not disturb the eviction order.
    let _ = cache.peek(&"key");

    // peek_mut(&key) -> Option<&mut V>: never promotes.
    // Use when writing must not disturb the eviction order.
    if let Some(slot) = cache.peek_mut(&"key") {
        *slot += 1;
    }
}
Each method serves a specific use case regarding mutability and ordering effects.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Shows how promotions from get/get_mut surface in iteration order.
fn iteration_order() {
    let mut cache = LruCache::<&str, u32>::new(NonZeroUsize::new(4).unwrap());
    for (key, value) in [("a", 1), ("b", 2), ("c", 3), ("d", 4)] {
        cache.put(key, value);
    }
    // iter() walks entries from most to least recently used.
    let snapshot: Vec<&str> = cache.iter().map(|(k, _)| *k).collect();
    println!("MRU to LRU: {:?}", snapshot); // ["d", "c", "b", "a"]
    // Reading "a" pushes it to the front of iteration.
    cache.get(&"a");
    let snapshot: Vec<&str> = cache.iter().map(|(k, _)| *k).collect();
    println!("After get: {:?}", snapshot); // ["a", "d", "c", "b"]
    // "a" is now MRU, "b" is now LRU.
    // A mutable access to "c" triggers exactly the same kind of reordering.
    cache.get_mut(&"c");
    let snapshot: Vec<&str> = cache.iter().map(|(k, _)| *k).collect();
    println!("After get_mut: {:?}", snapshot); // ["c", "a", "d", "b"]
    // "c" is now MRU, "b" is still LRU.
}
Both get and get_mut move the accessed key to the front of iteration (MRU position).
use lru::LruCache;
use std::num::NonZeroUsize;
/// Simple hit/miss counters for the example cache.
struct CacheMetrics {
    hits: u64,
    misses: u64,
}

/// Cached payload carrying its own access counter.
struct Item {
    data: String,
    access_count: u32,
}

/// End-to-end example combining promotion-aware and promotion-free updates.
fn access_pattern_example() {
    let mut cache: LruCache<String, Item> = LruCache::new(NonZeroUsize::new(100).unwrap());
    let mut metrics = CacheMetrics { hits: 0, misses: 0 };

    // Fetch `key`, inserting a fresh entry on a miss, and bump its counter.
    //
    // Fixed: the returned `&Item` borrows from `cache`, but with three
    // reference parameters lifetime elision cannot choose an output lifetime,
    // so the original signature did not compile. The explicit `'a` ties the
    // return value to the cache borrow.
    fn get_or_insert<'a>(
        cache: &'a mut LruCache<String, Item>,
        key: &str,
        metrics: &mut CacheMetrics,
    ) -> &'a Item {
        if cache.contains(key) {
            metrics.hits += 1;
        } else {
            metrics.misses += 1;
            cache.put(key.to_string(), Item {
                data: format!("data for {}", key),
                access_count: 0,
            });
        }
        // get_mut both promotes the entry and lets us bump the counter.
        let item = cache
            .get_mut(key)
            .expect("entry was just inserted or confirmed present");
        item.access_count += 1;
        item
    }

    // Update stats without promoting - the entry stays an eviction candidate.
    fn update_stats_only(cache: &mut LruCache<String, Item>, key: &str) {
        if let Some(item) = cache.peek_mut(key) {
            item.access_count += 1;
            // Key position unchanged - won't be kept just for stats
        }
    }

    // Populate the cache and tally hits/misses.
    for i in 0..5 {
        let key = format!("key{}", i);
        get_or_insert(&mut cache, &key, &mut metrics);
    }
    println!("Hits: {}, Misses: {}", metrics.hits, metrics.misses);
}
Real-world caches often need to choose between updating recency or preserving eviction order.
| Method | Returns | Promotes Entry | Use Case |
|--------|---------|----------------|----------|
| get | Option<&V> | Yes | Read with access counting |
| get_mut | Option<&mut V> | Yes | Modify with access counting |
| peek | Option<&V> | No | Read without affecting eviction |
| peek_mut | Option<&mut V> | No | Modify without affecting eviction |
lru::LruCache::get and lru::LruCache::get_mut share identical ordering side effects—both promote the accessed entry to most recently used. The difference lies solely in mutability:
get returns Option<&V>: Read-only access, promotes entry to MRU position. Use when reading should count as an access that keeps the item in cache longer.
get_mut returns Option<&mut V>: Mutable access, promotes entry to MRU position. Use when modifying should count as an access. The mutation itself doesn't affect ordering—only the access does.
Key insight: LRU caches model access recency, and both reads and writes are accesses. If you need to access (read or write) an entry without affecting recency, use peek or peek_mut instead. These "look without touching" methods are essential for scenarios such as updating bookkeeping fields, debugging, or inspecting eviction candidates without perturbing the eviction order.
The naming convention (get/get_mut vs peek/peek_mut) clearly signals whether the operation affects eviction ordering: "get" implies retrieval that matters for the cache algorithm, while "peek" implies looking without consequence.