What are the trade-offs between lru::LruCache::get_mut and get for modifying cached entries after retrieval?

get returns an immutable reference to the cached value and marks the entry as most-recently-used, while get_mut returns a mutable reference that allows in-place modification of the cached value—but both methods update the LRU order on access. The primary distinction is mutability: get_mut enables modification without removing and reinserting, while get only permits reading. Both have the same ordering semantics—accessing the key promotes it to most-recently-used. For read-only access, get is sufficient; for updating cached values, get_mut avoids the overhead of removal and reinsertion while maintaining the cache's LRU invariant.

The Fundamental Difference

use lru::LruCache;
 
/// Demonstrates the core contrast: `get` yields a read-only reference,
/// `get_mut` yields one that permits in-place edits.
fn basic_difference() {
    let mut cache: LruCache<&str, String> = LruCache::new(3);
    cache.put("key1", "value1".to_string());
    cache.put("key2", "value2".to_string());

    // Read-only access: `get` hands back `&String`, so the cached
    // value cannot be changed through it.
    match cache.get(&"key1") {
        Some(value) => println!("Got: {}", value),
        None => {}
    }

    // Mutable access: `get_mut` hands back `&mut String`, so the
    // value can be edited right where it sits in the cache.
    match cache.get_mut(&"key2") {
        Some(value) => value.push_str("_modified"),
        None => {}
    }
}

get returns Option<&V>; get_mut returns Option<&mut V>.

LRU Order Updates

use lru::LruCache;
 
/// Shows that `get` and `get_mut` both promote the accessed key to
/// most-recently-used; they differ only in reference mutability.
fn lru_order_updates() {
    let mut cache: LruCache<&str, i32> = LruCache::new(3);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(k, v);
    }

    // Current order, oldest first: a, b, c

    // A plain `get` counts as a use: "a" is promoted to newest.
    cache.get(&"a");
    // Oldest first is now: b, c, a

    // `get_mut` promotes in exactly the same way.
    if let Some(v) = cache.get_mut(&"b") {
        *v += 10;
    }
    // Oldest first is now: c, a, b

    // Ordering behavior is identical for both methods;
    // the difference is purely the mutability of the borrow.
}

Both get and get_mut promote accessed keys to most-recently-used.

In-Place Modification

use lru::LruCache;
 
/// Contrasts the pop/put round trip with a direct in-place edit
/// through `get_mut`.
fn in_place_modification() {
    let mut cache: LruCache<u32, Vec<String>> = LruCache::new(100);
    cache.put(1, vec!["a".to_string(), "b".to_string()]);

    // The roundabout route: pop the value out, change it, put it back.
    // Works, but pays for a removal plus a reinsertion.
    let key = 1;
    if let Some(mut entries) = cache.pop(&key) {
        entries.push("c".to_string());
        cache.put(key, entries);
    }

    // Start over with the original value.
    cache.put(1, vec!["a".to_string(), "b".to_string()]);

    // The direct route: borrow mutably and edit where the value lives.
    // Nothing is removed, nothing is reinserted.
    if let Some(entries) = cache.get_mut(&1) {
        entries.push("c".to_string());
    }
}

get_mut avoids removal and reinsertion overhead for modifications.

Borrow Checker Implications

use lru::LruCache;
 
/// Borrow-checker implications of the `lru` crate's API.
///
/// NOTE(fix): the original claimed `get` "allows multiple immutable
/// borrows". That is wrong for this crate: `LruCache::get` takes
/// `&mut self`, because it must move the entry to the front of the
/// recency list. Every `get`/`get_mut`/`peek_mut` call therefore
/// borrows the cache exclusively; only the *returned* reference
/// differs in mutability. `peek` is the method that takes `&self`.
fn borrow_checker() {
    let mut cache: LruCache<&str, i32> = LruCache::new(10);
    cache.put("a", 1);
    cache.put("b", 2);

    // Sequential accesses are fine: each exclusive borrow of the cache
    // ends at the last use of the returned reference (NLL).
    let a = cache.get(&"a");
    let _ = a;
    let b = cache.get(&"b");
    let _ = b;

    // Borrows cannot overlap. While `a_mut` is live:
    let a_mut = cache.get_mut(&"a");
    // let b_mut = cache.get_mut(&"b"); // ERROR: cache already borrowed
    // let c = cache.get(&"c");         // ERROR: cache already borrowed mutably

    if let Some(val) = a_mut {
        *val += 1;
    }

    // `a_mut` is no longer used, so the cache can be borrowed again.
    let b_mut = cache.get_mut(&"b");
    let _ = b_mut;

    // For shared, read-only access that neither mutates the value nor
    // the LRU order, use `peek` — it is the only accessor taking `&self`.
}

In the lru crate, both get and get_mut take &mut self — each must update the LRU order — so both borrow the cache exclusively; only peek takes &self and permits shared immutable borrows.

The Mutable Reference Challenge

use lru::LruCache;
 
/// While the `&mut V` from `get_mut` is alive, the whole cache is
/// mutably borrowed and no other access is possible.
fn mutable_reference_challenge() {
    let mut cache: LruCache<&str, String> = LruCache::new(10);
    cache.put("key", "value".to_string());

    // Any attempt to touch the cache while `value` is live fails.
    if let Some(value) = cache.get_mut(&"key") {
        // let other = cache.get(&"other"); // ERROR: cache already borrowed

        // Finish the mutation first; the borrow ends with this scope.
        value.push_str("_modified");
    }

    // The mutable borrow is gone, so the cache is usable again.
    let other = cache.get(&"key");
    let _ = other;
}
 
// Pattern: scope the mutable reference
/// Pattern: confine the `get_mut` borrow to an inner block so the
/// cache becomes usable again as soon as the edit is done.
fn scoped_mutation() {
    let mut cache: LruCache<&str, Vec<i32>> = LruCache::new(10);
    cache.put("nums", vec![1, 2, 3]);

    {
        // The mutable borrow lives only inside these braces.
        let nums = cache.get_mut(&"nums").unwrap();
        nums.push(4);
    } // borrow released here

    // With the borrow over, the cache accepts further operations.
    cache.put("other", vec![5, 6]);
}

Hold get_mut references for minimal scope to avoid blocking other cache operations.

Updating Complex Cached Values

use lru::LruCache;
 
/// Example of a multi-field cached value; `get_mut` lets all fields
/// be updated in place through a single cache access.
struct CachedData {
    // Incremented on each update (see `update_complex_value`).
    count: u32,
    // Set to `Instant::now()` whenever the entry is touched.
    last_accessed: std::time::Instant,
    // Free-form annotation appended to by updates.
    metadata: String,
}
 
/// Updating several fields of a cached struct through one `get_mut`
/// borrow — no remove/reinsert cycle needed.
fn update_complex_value() {
    let mut cache: LruCache<String, CachedData> = LruCache::new(100);
    let initial = CachedData {
        count: 0,
        last_accessed: std::time::Instant::now(),
        metadata: String::new(),
    };
    cache.put("user:1".to_string(), initial);

    // One lookup, several field writes: the whole struct is edited
    // in place through a single mutable borrow.
    if let Some(data) = cache.get_mut(&"user:1".to_string()) {
        data.count += 1;
        data.last_accessed = std::time::Instant::now();
        data.metadata.push_str("updated");
    }
}

get_mut enables atomic updates to multiple fields of cached structures.

Comparison with peek and peek_mut

use lru::LruCache;
 
/// `get`/`get_mut` promote the entry; `peek`/`peek_mut` leave the
/// LRU order untouched.
fn peek_vs_get() {
    let mut cache: LruCache<&str, i32> = LruCache::new(3);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(k, v);
    }
    // Oldest first: a, b, c

    // get/get_mut: access AND promote.
    cache.get(&"a");
    // Oldest first: b, c, a

    // peek/peek_mut: access WITHOUT promoting.
    cache.peek(&"b");
    // Still: b, c, a

    if let Some(v) = cache.peek_mut(&"c") {
        *v += 10;
    }
    // Still: b, c, a

    // Reach for peek when inspection shouldn't influence eviction.
}
 
/// Decision guide for the four access methods.
///
/// NOTE(fix): the original referenced an undefined type `Data`, so the
/// snippet could not compile; a local alias makes it concrete.
fn choosing_access_method() {
    type Data = Vec<u8>; // stand-in for your cached value type
    let _cache: LruCache<String, Data> = LruCache::new(100);

    // get: Read access, promote to most recently used
    // Use when: Access should refresh the entry's lifetime

    // get_mut: Write access, promote to most recently used
    // Use when: Modification should refresh the entry's lifetime

    // peek: Read access, don't promote
    // Use when: Just checking, don't want to affect eviction

    // peek_mut: Write access, don't promote
    // Use when: Modifying but don't want to refresh (e.g., stats update)
}

get/get_mut update LRU order; peek/peek_mut don't.

When to Use Each Method

use lru::LruCache;
 
fn method_selection() {
    let mut cache: LruCache<String, Vec<String>> = LruCache::new(100);
    
    // get: Read-only access that refreshes the entry
    fn read_and_refresh(cache: &LruCache<String, Vec<String>>, key: &str) -> Option<&Vec<String>> {
        cache.get(&key.to_string())
        // Entry promoted to most recently used
    }
    
    // get_mut: Modify access that refreshes the entry
    fn modify_and_refresh(cache: &mut LruCache<String, Vec<String>>, key: &str, item: String) {
        if let Some(vec) = cache.get_mut(&key.to_string()) {
            vec.push(item);
        }
        // Entry promoted to most recently used
    }
    
    // peek: Read-only access that doesn't refresh
    fn read_no_refresh(cache: &LruCache<String, Vec<String>>, key: &str) -> Option<&Vec<String>> {
        cache.peek(&key.to_string())
        // Entry position unchanged
    }
    
    // peek_mut: Modify access that doesn't refresh
    fn modify_no_refresh(cache: &mut LruCache<String, Vec<String>>, key: &str, item: String) {
        if let Some(vec) = cache.peek_mut(&key.to_string()) {
            vec.push(item);
        }
        // Entry position unchanged
    }
}

Choose based on whether you need mutation and whether access should affect eviction order.

Eviction Behavior Differences

use lru::LruCache;
 
/// Eviction consequences: `get_mut` shields an entry from eviction by
/// promoting it; `peek_mut` leaves it exposed.
fn eviction_behavior() {
    let mut cache: LruCache<&str, i32> = LruCache::new(3);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(k, v);
    }
    // Oldest first: a, b, c

    // get_mut promotes "a" to the newest slot.
    if let Some(v) = cache.get_mut(&"a") {
        *v += 1;
    }
    // Oldest first: b, c, a

    // The cache is full, so inserting "d" evicts the current oldest:
    // that's "b" — not "a", since "a" was just refreshed.
    cache.put("d", 4);
    assert!(!cache.contains(&"b"));
    assert!(cache.contains(&"a"));

    // peek_mut edits "c" but leaves it in the oldest slot.
    if let Some(v) = cache.peek_mut(&"c") {
        *v += 1;
    }
    // Oldest first, unchanged: c, a, d

    // "c" is still oldest, so it goes when "e" arrives.
    cache.put("e", 5);
    assert!(!cache.contains(&"c"));
}

get_mut promotes entries (prevents eviction); peek_mut doesn't.

Performance Considerations

use lru::LruCache;
 
/// Cost comparison: in-place edit via `get_mut` vs the pop/put round trip.
fn performance() {
    let mut cache: LruCache<u32, String> = LruCache::new(1000);

    // Seed the cache.
    for i in 0..100 {
        cache.put(i, format!("value_{}", i));
    }

    // In-place path: one hash lookup, the edit happens where the value
    // already lives, and the LRU update is just pointer manipulation.
    if let Some(value) = cache.get_mut(&50) {
        value.push_str("_modified");
    }

    // Round-trip path: pop then put hashes the key twice and pays for
    // both a removal and a reinsertion.
    if let Some(mut value) = cache.pop(&50) {
        value.push_str("_modified");
        cache.put(50, value);
    }
}

get_mut is more efficient than remove-modify-reinsert patterns.

Pattern: Conditional Updates

use lru::LruCache;
 
/// Conditional and chained updates through `get_mut`.
///
/// NOTE(fix): the original used a single `LruCache<String, i32>` but then
/// called `is_empty()`, `push('_')`, `push_str` and `len()` on the `i32`
/// values — type errors. The string examples now use a `String`-valued
/// cache; the counter example keeps the `i32` cache.
fn conditional_updates() {
    // Numeric cache for the counter example.
    let mut counters: LruCache<String, i32> = LruCache::new(100);
    counters.put("counter".to_string(), 0);

    // Conditional update with get_mut
    if let Some(counter) = counters.get_mut(&"counter".to_string()) {
        if *counter < 10 {
            *counter += 1;
        }
    }

    // String-valued cache for the remaining examples.
    let mut cache: LruCache<String, String> = LruCache::new(100);
    cache.put("key".to_string(), String::new());

    // Early exit pattern
    if let Some(value) = cache.get_mut(&"key".to_string()) {
        if value.is_empty() {
            // Do nothing if empty
        } else {
            value.push('_');
        }
    }

    // Chained operations
    cache.get_mut(&"key".to_string())
        .map(|v| {
            v.push_str("modified");
            v.len()
        });
}

get_mut works well with conditional logic and chained operations.

Thread Safety Considerations

use lru::LruCache;
use std::sync::Mutex;
 
// LruCache is not thread-safe by default
// For concurrent access, wrap in Mutex
 
/// `LruCache` is not thread-safe; wrap it in a `Mutex` and keep the
/// `get_mut` borrow inside the lock scope.
fn threaded_cache() {
    let cache = Mutex::new(LruCache::<u32, String>::new(100));

    // get_mut needs `&mut LruCache`, which only the lock guard provides,
    // so the mutation must happen while the guard is held.
    {
        let mut guard = cache.lock().unwrap();
        if let Some(value) = guard.get_mut(&1) {
            value.push_str("_modified");
        }
    } // guard dropped here: lock released

    // The `&mut` value cannot outlive the guard, so a get_mut reference
    // can never be held across the unlock.
}
 
// Alternative: Use lru crate with thread-safe wrapper
// Or use dashmap-style concurrent cache for high contention

get_mut requires &mut self, so concurrent access requires external synchronization.

Real-World Use Case: Caching Computed Values

use lru::LruCache;
 
/// LRU-backed cache of expensive computation results, keyed by a
/// string identifier.
struct ComputationCache {
    // Bounded store of previously computed results.
    cache: LruCache<String, ComputedResult>,
}
 
/// A cached result plus bookkeeping fields maintained on access.
struct ComputedResult {
    // The computed value itself.
    value: f64,
    // Incremented on every access, hit or miss (see `get_or_compute`).
    computation_count: u32,
    // Timestamp of the most recent computation or hit.
    last_computed: std::time::Instant,
}
 
impl ComputationCache {
    /// Creates a cache holding at most `capacity` results.
    fn new(capacity: usize) -> Self {
        Self {
            cache: LruCache::new(capacity),
        }
    }

    /// Returns the value for `key`, computing and inserting it on a miss.
    /// Hits update the stats via `peek_mut`, deliberately leaving the
    /// entry's position in the LRU order untouched.
    fn get_or_compute(&mut self, key: &str) -> f64 {
        let owned = key.to_string();
        if let Some(hit) = self.cache.peek_mut(&owned) {
            // Refresh stats without promoting the entry.
            hit.computation_count += 1;
            hit.last_computed = std::time::Instant::now();
            return hit.value;
        }

        // Miss: do the expensive work and record the result.
        let value = self.expensive_computation(key);
        let entry = ComputedResult {
            value,
            computation_count: 1,
            last_computed: std::time::Instant::now(),
        };
        self.cache.put(owned, entry);
        value
    }

    /// Like a hit in `get_or_compute`, but via `get_mut`, so the access
    /// also promotes the entry to most recently used.
    fn access_with_refresh(&mut self, key: &str) -> Option<f64> {
        match self.cache.get_mut(&key.to_string()) {
            Some(result) => {
                result.computation_count += 1;
                Some(result.value)
            }
            None => None,
        }
    }

    /// Stand-in for a costly calculation.
    fn expensive_computation(&self, _key: &str) -> f64 {
        42.0
    }
}

Use peek_mut for internal stats that shouldn't affect eviction; get_mut for access that should.

Synthesis

Quick reference:

use lru::LruCache;
 
let mut cache: LruCache<String, Data> = LruCache::new(100);
 
// get: Read access, promotes entry to most recently used
// Returns: Option<&V>
// Use when: Reading and access should refresh lifetime
let value: Option<&Data> = cache.get(&key);
 
// get_mut: Write access, promotes entry to most recently used
// Returns: Option<&mut V>
// Use when: Modifying and access should refresh lifetime
let value: Option<&mut Data> = cache.get_mut(&key);
 
// peek: Read access, does NOT promote entry
// Returns: Option<&V>
// Use when: Reading without affecting eviction order
let value: Option<&Data> = cache.peek(&key);
 
// peek_mut: Write access, does NOT promote entry
// Returns: Option<&mut V>
// Use when: Modifying without affecting eviction order
let value: Option<&mut Data> = cache.peek_mut(&key);
 
// Key differences:
// 1. get vs get_mut: Mutability of returned reference
// 2. get/peek: Whether LRU order is updated
// 3. get_mut vs peek_mut: Both allow mutation, differ in order update
 
// Performance comparison:
// get_mut: O(1) lookup + O(1) order update + in-place modification
// remove + modify + insert: O(1) lookup + O(1) removal + O(1) insert + allocation

Key insight: get and get_mut differ only in the mutability of the returned reference—both promote the accessed key to most-recently-used, and both take &mut self. Use get_mut when you need to modify a cached value in place, avoiding the overhead of removal and reinsertion. For modification without affecting eviction order, use peek_mut. The borrow checker requires that get_mut borrows be scoped tightly since the mutable reference locks the entire cache. In concurrent contexts, every access method except peek requires exclusive access, so a Mutex-guarded cache must be locked for get, get_mut, and peek_mut alike; choose peek_mut over get_mut when an update should not disturb the LRU ordering, and keep each lock scope as short as possible.