What is the purpose of lru::LruCache::iter_mut for modifying cached values without affecting LRU order?

iter_mut provides mutable access to cached values while intentionally avoiding any LRU order updates, allowing modifications to entries without marking them as recently used. This is essential when you need to update cached data—such as incrementing counters, modifying nested fields, or updating metadata—without promoting those entries to the "most recently used" position in the cache. (Note: the snippets below use the pre-0.8 constructor signature `LruCache::new(capacity: usize)`; in lru 0.8 and later the capacity is a `NonZeroUsize`, e.g. `LruCache::new(NonZeroUsize::new(3).unwrap())`.)

The LRU Cache and Order Semantics

use lru::LruCache;
 
fn lru_basics() {
    // An LRU cache drops the least recently used entry once capacity is hit.
    let mut cache: LruCache<&str, i32> = LruCache::new(3);

    for (key, value) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(key, value);
    }

    // Recency order is now: c (most recent), b, a (least recent).

    // Looking up 'a' with get() promotes it to most recently used.
    let _ = cache.get(&"a");

    // Recency order is now: a (most recent), c, b (least recent).

    // Inserting a fourth entry at this point would evict 'b'.
}

LRU caches track access order to determine which entries to evict.

Order Updates During Normal Access

use lru::LruCache;
 
fn order_updates() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);

    // Fill the cache; each put makes its key the most recent.
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5)] {
        cache.put(k, v);
    }
    // Recency: e, d, c, b, a (most to least recent)

    // get() promotes the entry it finds.
    let _ = cache.get(&"a");
    // Recency: a, e, d, c, b

    // peek() reads without promoting.
    let _ = cache.peek(&"c");
    // Recency unchanged: a, e, d, c, b

    // put() on an existing key replaces the value AND promotes the key.
    cache.put("b", 20);
    // Recency: b, a, e, d, c
}

get updates LRU order, peek doesn't—this pattern extends to iterators.

The iter_mut Method

use lru::LruCache;
 
fn iter_mut_example() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);

    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    cache.put("d", 4);
    cache.put("e", 5);

    // Recency order: e, d, c, b, a

    // iter_mut gives mutable access to every value WITHOUT touching the
    // recency order — no entry is promoted by the iteration.
    // (The key is unused, so bind it to `_` to avoid an unused-variable warning.)
    for (_, value) in cache.iter_mut() {
        *value *= 10;  // Modify values in place
    }

    // Values changed, recency order still: e, d, c, b, a

    // NOTE: get() itself promotes, so these two asserts move 'a' and then
    // 'b' to the front as a side effect of verifying the values.
    assert_eq!(cache.get(&"a"), Some(&10));
    assert_eq!(cache.get(&"b"), Some(&20));

    // Recency order is now: b, a, e, d, c — changed by the get() calls
    // above, NOT by the iter_mut pass.
}

iter_mut allows modifying values while preserving LRU order.

Why Order Matters in LRU Caches

use lru::LruCache;
 
fn why_order_matters() {
    let mut cache: LruCache<&str, Vec<i32>> = LruCache::new(3);

    cache.put("a", vec![1, 2, 3]);
    cache.put("b", vec![4, 5, 6]);
    cache.put("c", vec![7, 8, 9]);

    // Recency order: c (most), b, a (least)

    // Modifying through get_mut() promotes the entry as a side effect:
    if let Some(vec) = cache.get_mut(&"a") {
        vec.push(4);  // Modifies value
    }
    // Recency order is now: a (most), c, b (least).
    // 'a' was promoted to most recently used even though nothing "read" it.

    // That promotion may be unwanted for internal bookkeeping updates.

    // Adding a new entry now evicts 'b', not 'a':
    cache.put("d", vec![10, 11, 12]);
    // 'b' is evicted, even though 'a' was only modified, not "used".
}

Using get_mut to modify values promotes entries, which may not be desired.

Use Case: Counters and Statistics

use lru::LruCache;
 
fn counter_example() {
    // Cache stores request counts for different users.
    let mut cache: LruCache<u64, u64> = LruCache::new(100);

    cache.put(1, 0);  // User 1
    cache.put(2, 0);  // User 2
    cache.put(3, 0);  // User 3

    // Process requests, incrementing counters. The counter bump is
    // maintenance, not a data access, so it must NOT promote the entry —
    // recency should reflect actual data access, not counter updates.
    for user_id in [1, 1, 2, 1, 3, 2, 1, 1] {
        // For a single known key, peek_mut() is the right tool: O(1)
        // mutable access without updating the LRU order. (A scan via
        // iter_mut().find_map() would also avoid promotion, but costs
        // O(n) per lookup; iter_mut is for bulk passes over all entries.)
        if let Some(count) = cache.peek_mut(&user_id) {
            *count += 1;
        }
    }

    // Recency still reflects the original inserts/gets, not counter
    // updates. This is crucial: counter maintenance is different from
    // data access.
}

Counter updates are maintenance operations, not "uses" of cached data.

Use Case: Metadata Updates

use lru::LruCache;
 
// A cached value bundled with bookkeeping metadata; used to illustrate
// updating via get_mut (a real access) vs iter_mut (background maintenance).
struct CacheEntry {
    data: String,                           // the cached payload itself
    access_count: u64,                      // number of times the data was accessed
    last_modified: std::time::Instant,      // timestamp of the last update
}
 
fn metadata_example() {
    let mut cache: LruCache<String, CacheEntry> = LruCache::new(50);

    cache.put("key1".to_string(), CacheEntry {
        data: "important data".to_string(),
        access_count: 0,
        last_modified: std::time::Instant::now(),
    });

    // When the actual data is accessed, get_mut() is correct: the access
    // SHOULD promote the entry. Look up by &str — since String: Borrow<str>,
    // no temporary String needs to be allocated for the key.
    if let Some(entry) = cache.get_mut("key1") {
        entry.access_count += 1;  // This IS an access
    }
    // Recency updated: key1 is now most recent.

    // For background metadata updates, iter_mut leaves the order alone.
    for (_, entry) in cache.iter_mut() {
        entry.last_modified = std::time::Instant::now();  // Background update
    }
    // Recency NOT updated.
}

Background operations shouldn't affect the "recency" semantics of cached entries.

Use Case: Bulk Modifications

use lru::LruCache;
 
fn bulk_modification() {
    let mut cache: LruCache<String, i32> = LruCache::new(100);

    // Insert key_0 through key_99; the last insert ends up most recent.
    for i in 0..100 {
        cache.put(format!("key_{}", i), i);
    }
    // Recency: key_99 (most) ... key_0 (least)

    // Bulk-update a subset in place; iter_mut never promotes anything.
    for (k, v) in cache.iter_mut() {
        if k.starts_with("key_5") {
            *v += 1000;
        }
    }
    // Every key_5* value updated, recency order untouched.

    // Contrast: the same updates through get_mut also promote each key.
    for k in ["key_50", "key_51", "key_52"] {
        if let Some(v) = cache.get_mut(k) {
            *v += 1000;
        }
    }
    // Recency changed: key_52 most recent, then key_51, then key_50.
}

Bulk modifications shouldn't disrupt the access pattern of the cache.

iter vs iter_mut vs get_mut

use lru::LruCache;
 
fn iterator_comparison() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);

    // iter: immutable access, no order update.
    for (k, v) in cache.iter() {
        println!("{}: {}", k, v);
        // v cannot be modified here
    }
    // Order unchanged.

    // iter_mut: mutable access, still no order update.
    // (The key is unused here, so bind it to `_` to avoid a warning.)
    for (_, v) in cache.iter_mut() {
        *v += 10;  // Can modify v
    }
    // Order unchanged.

    // get_mut: mutable access to a SINGLE entry, and it DOES update order.
    if let Some(v) = cache.get_mut(&"a") {
        *v += 100;
    }
    // Order changed: 'a' promoted to most recent.
}

Three access patterns: immutable read, mutable without promotion, mutable with promotion.

The Internal Implementation

use lru::LruCache;
 
// The internal structure of LruCache:
// - A HashMap for O(1) key lookup
// - A doubly-linked list for LRU order
 
// iter_mut iterates through the linked list
// It returns references to values without modifying the list
 
// Sketch of why iter_mut can skip order updates, given LruCache's
// HashMap-plus-doubly-linked-list internals described above.
fn implementation_insight() {
    // The linked list maintains:
    // - "Most recently used" at the front
    // - "Least recently used" at the back

    // get/get_mut operations:
    // 1. Find node in HashMap
    // 2. Detach node from current position
    // 3. Attach node at front (most recent)

    // iter_mut operations:
    // 1. Walk the nodes from front to back
    // 2. Hand out mutable references to the values
    // 3. DO NOT modify the linked list structure

    // This is why iter_mut doesn't affect order:
    // the linked list structure remains unchanged.
}

iter_mut avoids the linked-list modifications that get_mut performs.

Iteration Order

use lru::LruCache;
 
fn iteration_order() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);

    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);

    let _ = cache.get(&"a");  // Promote 'a' (discard the returned Option)

    // Recency order: a (most), c, b (least)

    // Both iter and iter_mut walk entries from most to least recent.
    // Each item is a (&key, &value) pair — note the references.
    let items: Vec<_> = cache.iter().collect();
    assert_eq!(items, vec![(&"a", &1), (&"c", &3), (&"b", &2)]);

    // The iteration order matches the LRU order:
    // most recent first, least recent last.
}

Both iter and iter_mut iterate in most-recent to least-recent order.

Combined Operations

use lru::LruCache;
 
fn combined_operations() {
    let mut cache: LruCache<String, i32> = LruCache::new(10);

    // Populate the cache with key_0 .. key_9.
    for i in 0..10 {
        cache.put(format!("key_{}", i), i);
    }

    // Mark a couple of entries as "hot" (frequently accessed).
    let _ = cache.get(&"key_0".to_string());  // promoted
    let _ = cache.get(&"key_1".to_string());  // promoted

    // Background task: decay every counter without promoting anything.
    for (_, value) in cache.iter_mut() {
        *value = (*value as f64 * 0.9) as i32;  // decay
    }
    // Recency unchanged: key_1, key_0, key_9, ... key_2

    // New inserts will evict the genuinely "cold" entries,
    // not the ones the background pass just modified via iter_mut.
}

Background maintenance doesn't interfere with the access pattern.

When to Use Each Method

use lru::LruCache;
 
fn method_selection() {
    let mut cache: LruCache<String, Vec<i32>> = LruCache::new(100);

    // Use get() when:
    // - You're retrieving data for actual use
    // - Access should count as "recent use"
    // - The entry should be promoted
    //
    // Look up by &str: String: Borrow<str>, so no temporary String needs
    // to be allocated for the key. Bind unused results to `_`-prefixed
    // names to avoid warnings.
    let _data = cache.get("key");
    // Correct: reading data for processing.

    // Use get_mut() when:
    // - You're modifying data based on user/external action
    // - The modification IS an access
    // - The entry should be promoted
    if let Some(vec) = cache.get_mut("key") {
        vec.push(42);  // User added data
    }
    // Correct: user action should promote the entry.

    // Use peek() when:
    // - You need to check a value without affecting order
    // - Inspection/debugging
    let _data = cache.peek("key");

    // Use iter_mut() when:
    // - Bulk updates or maintenance
    // - Background operations
    // - Statistics/metadata updates
    // - Order should NOT change
    for (_, vec) in cache.iter_mut() {
        vec.retain(|&x| x > 0);  // Cleanup operation
    }
    // Correct: cleanup is not a "use".
}

Choose based on whether the operation represents an "access" or maintenance.

Thread Safety Considerations

use lru::LruCache;
use std::sync::Mutex;
 
// LruCache is not thread-safe by default
// Use Mutex for multi-threaded access
 
fn thread_safety() {
    // LruCache has no internal synchronization; wrap it in a Mutex
    // to share it across threads.
    let cache = Mutex::new(LruCache::<String, i32>::new(100));

    // iter_mut needs &mut self, so we hold the lock for the whole pass.
    {
        let mut guard = cache.lock().unwrap();
        for (_, value) in guard.iter_mut() {
            *value += 1;
        }
    } // guard dropped here — lock released after iteration completes

    // For concurrent access patterns, consider:
    // - Using a concurrent cache library
    // - Minimizing lock duration
    // - Batch updates outside the lock when possible
}

iter_mut requires exclusive access, which means holding a lock during iteration.

Synthesis

Comparison table:

Method Access Type Order Update Use Case
get Immutable Yes Reading data for use
get_mut Mutable Yes Modifying data as access
peek Immutable No Inspection without promotion
peek_mut Mutable No Modification without promotion
iter Immutable No Read all entries
iter_mut Mutable No Modify all entries

Key insight: iter_mut exists specifically for the case where you need to modify cached values without treating those modifications as "accesses" that would promote entries in the LRU order. This is crucial for maintaining accurate access semantics—counters, metadata, background cleanup, and statistics updates shouldn't interfere with the cache's eviction decisions. The LRU order should reflect actual data access patterns (what a user or system actively requested), not maintenance operations performed on that data. Use iter_mut for any modification that is "maintenance" rather than "use"; use get_mut when the modification itself is an access that should update recency.