What is the difference between lru::LruCache::promote and get_mut for manual vs automatic LRU ordering?
lru::LruCache::get_mut automatically promotes the accessed entry to the most-recently-used position when retrieved, while promote explicitly moves an entry to the front without retrieving its value—enabling manual control over LRU ordering when you want to reorder entries without touching (or paying for) the stored value, or when implementing custom caching policies. The key distinction is that get_mut combines retrieval with automatic promotion, while promote is a dedicated reordering operation that works with the key alone: both perform a hash lookup, but promote never produces a reference to the value.
Basic LRU Cache Behavior
use lru::LruCache;
use std::num::NonZeroUsize;
fn basic_lru() {
    // A capacity-3 cache; recency order runs least- to most-recently-used.
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());

    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // Recency order: a <- b <- c ("c" is most recent)

    // get() retrieves the value AND promotes the entry automatically.
    let _value = cache.get(&"a");
    // Recency order now: b <- c <- a ("a" is most recent)
}LRU caches maintain recency order with most recent at the front.
get_mut: Access and Promote
use lru::LruCache;
use std::num::NonZeroUsize;
fn get_mut_example() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // Recency order: a <- b <- c

    // get_mut both retrieves a mutable reference AND promotes the entry.
    if let Some(slot) = cache.get_mut(&"b") {
        *slot += 10; // mutate in place; entry is now most recent
    }
    // Recency order: a <- c <- b

    // The value was modified and the entry promoted in a single call.
    assert_eq!(cache.get(&"b"), Some(&12));
}get_mut returns a mutable reference and promotes the entry to most-recent.
promote: Reorder Without Retrieval
use lru::LruCache;
use std::num::NonZeroUsize;
fn promote_example() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());
    for (k, v) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(k, v);
    }
    // Recency order: a <- b <- c

    // promote() only reorders — it returns a bool, never a value.
    let _was_present = cache.promote(&"b");
    // Recency order: a <- c <- b ("b" is now most recent)

    // The stored value is untouched; peek() reads it without promoting.
    assert_eq!(cache.peek(&"b"), Some(&2));
}promote moves an entry to the front without retrieving its value.
Comparing get_mut and promote
use lru::LruCache;
use std::num::NonZeroUsize;
fn comparison() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());
    for (k, v) in [("a", 1), ("b", 2), ("c", 3)] {
        cache.put(k, v);
    }
    // Recency order: a <- b <- c

    // get_mut: retrieves Option<&mut V>, permits modification, and promotes.
    if let Some(slot) = cache.get_mut(&"a") {
        *slot = 10;
    }
    // Recency order: b <- c <- a

    // promote: no retrieval, no modification — just reorders.
    // Returns true when the key existed.
    let existed = cache.promote(&"c");
    // Recency order: b <- a <- c
    assert!(existed);
}get_mut combines retrieval with promotion; promote only reorders.
When to Use promote
use lru::LruCache;
use std::num::NonZeroUsize;
/// Situations where promote (reorder-only) beats a value-returning access.
fn promote_use_cases() {
    // Keys are `String`, so lookups may use any `Q` with `String: Borrow<Q>`
    // (i.e. `str` or `String`) — but NOT `&str` itself.
    let mut cache: LruCache<String, Vec<u8>> =
        LruCache::new(NonZeroUsize::new(100).unwrap());

    // Case 1: Manual promotion based on external policy —
    //   when access patterns are determined outside the cache.

    // Case 2: Batch promotion without value access —
    //   promote multiple entries without touching their values.
    let keys_to_promote = vec!["key1", "key2", "key3"];
    for key in keys_to_promote {
        // `key` is `&str`, which `String` borrows as `str` — pass it directly.
        // (The original wrote `promote(&key)`, i.e. `&&str`, which fails to
        // compile because `String: Borrow<&str>` does not hold.)
        cache.promote(key);
    }

    // Case 3: Probationary entries —
    //   graduate entries from a probationary tier without an "access".

    // Case 4: Prefetch promotion —
    //   promote entries likely to be used soon, without the cost of
    //   deserializing their values.
}Use promote when you need to reorder without the value.
peek: Access Without Promotion
use lru::LruCache;
use std::num::NonZeroUsize;
fn peek_example() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // Recency order: a <- b <- c

    // peek reads without promoting.
    let val = cache.peek(&"b");
    assert_eq!(val, Some(&2));
    // Order unchanged: a <- b <- c — "b" was read but not promoted.

    // peek_mut gives mutable access, still without promoting.
    if let Some(slot) = cache.peek_mut(&"a") {
        *slot += 1;
    }
    // Order still unchanged: a <- b <- c (value changed, position didn't).

    // Contrast with get_mut, which DOES promote:
    if let Some(slot) = cache.get_mut(&"a") {
        *slot += 1;
    }
    // Order now: b <- c <- a ("a" promoted)
}peek and peek_mut access values without promoting entries.
Manual LRU Ordering Control
use lru::LruCache;
use std::num::NonZeroUsize;
/// Proactively reorder an entry we expect to be accessed soon.
fn manual_ordering() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(5).unwrap());

    // Populate the cache. `String::leak` deliberately leaks each key to obtain
    // a `&'static str` (`&'static mut str` coerces to `&str`). Fine for an
    // example; a real program would use owned `String` keys instead.
    // (The original wrote `cache.put(&key.leak(), i)` — that extra `&` makes
    // a `&&mut str`, which is not the `&str` key type and fails to compile.)
    for i in 0..5 {
        let key = format!("key{}", i);
        cache.put(key.leak(), i);
    }
    // Recency order: key0 <- key1 <- key2 <- key3 <- key4

    // Scenario: we predict "key2" will be accessed soon, so promote it
    // proactively without touching its value.
    cache.promote(&"key2");
    // Recency order: key0 <- key1 <- key3 <- key4 <- key2

    // A later get() still promotes, but it is effectively a no-op for
    // ordering — the entry is already most recent.
    let _ = cache.get(&"key2");
}Use promote for proactive reordering based on access predictions.
peek_mut vs get_mut vs promote
use lru::LruCache;
use std::num::NonZeroUsize;
fn method_comparison() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(3).unwrap());
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);

    // | Method   | Returns Value | Modifies Value | Promotes Entry |
    // |----------|---------------|----------------|----------------|
    // | get      | Yes (&V)      | No             | Yes            |
    // | get_mut  | Yes (&mut V)  | Yes            | Yes            |
    // | peek     | Yes (&V)      | No             | No             |
    // | peek_mut | Yes (&mut V)  | Yes            | No             |
    // | promote  | No            | No             | Yes            |

    // get_mut: access + modify + promote — yields Option<&mut i32>.
    let _a = cache.get_mut(&"a");

    // peek_mut: access + modify, NO promotion — yields Option<&mut i32>.
    let _b = cache.peek_mut(&"b");

    // promote: no access, no modification, promotion only — yields bool.
    let _c_existed = cache.promote(&"c");
}Choose the method based on what operations you need.
Implementing Custom Cache Policies
use lru::LruCache;
use std::num::NonZeroUsize;
/// A two-level cache: a small "hot" LRU in front of a larger "cold" LRU.
/// Entries found only in `cold` are re-inserted into `hot` on access.
struct TieredCache {
hot: LruCache<String, Vec<u8>>,
cold: LruCache<String, Vec<u8>>,
}
impl TieredCache {
    fn new() -> Self {
        TieredCache {
            hot: LruCache::new(NonZeroUsize::new(100).unwrap()),
            cold: LruCache::new(NonZeroUsize::new(1000).unwrap()),
        }
    }

    /// Look up `key`, checking the hot tier first, then the cold tier.
    /// A cold hit is moved into the hot tier (promotion between levels).
    fn get(&mut self, key: &str) -> Option<&Vec<u8>> {
        if self.hot.contains(key) {
            // get() also refreshes recency within the hot tier.
            return self.hot.get(key);
        }
        if self.cold.contains(key) {
            // Move the entry cold -> hot. pop() hands back the owned value,
            // so no clone is needed (the original peeked, cloned, then popped).
            let value = self.cold.pop(key)?;
            self.hot.put(key.to_string(), value);
            return self.hot.get(key);
        }
        None
    }

    /// Insert into the hot tier, demoting hot's LRU entry to cold when full.
    fn put(&mut self, key: String, value: Vec<u8>) {
        // LruCache::put evicts silently when at capacity, so `len() > cap()`
        // can never be observed afterwards — the original's post-insert check
        // was dead code and the evicted entry was silently dropped. Capture
        // the demotion candidate *before* inserting instead.
        if self.hot.len() == self.hot.cap().get() && !self.hot.contains(&key) {
            if let Some((old_key, old_value)) = self.hot.pop_lru() {
                self.cold.put(old_key, old_value);
            }
        }
        self.hot.put(key, value);
    }

    /// Explicitly move an entry from cold to hot without a normal access.
    fn promote_cold_to_hot(&mut self, key: &str) {
        // pop() yields the owned value directly; no peek + clone required.
        if let Some(value) = self.cold.pop(key) {
            self.hot.put(key.to_string(), value);
        }
    }
}Tiered caches use promote for moving entries between cache levels.
Probationary Cache Pattern
use lru::LruCache;
use std::num::NonZeroUsize;
/// A segmented LRU: new entries enter `probationary` and graduate to `main`
/// only once re-accessed, shielding `main` from one-hit-wonder keys.
struct ProbationaryCache {
probationary: LruCache<String, String>,
main: LruCache<String, String>,
}
impl ProbationaryCache {
    /// Access `key`: a hit in `main` just refreshes recency (auto-promote);
    /// a hit in `probationary` graduates the entry into `main`
    /// (the second-access rule of a segmented LRU).
    fn access(&mut self, key: &str) -> Option<&String> {
        if self.main.contains(key) {
            return self.main.get(key);
        }
        if self.probationary.contains(key) {
            // Second access: move probationary -> main. pop() returns the
            // owned value, so no clone is needed.
            let value = self.probationary.pop(key)?;
            self.main.put(key.to_string(), value);
            return self.main.get(key);
        }
        None
    }

    /// New entries always start in the probationary segment.
    fn insert(&mut self, key: String, value: String) {
        self.probationary.put(key, value);
    }

    /// Manually graduate `key` from probationary to main without it counting
    /// as an "access". The original body only called
    /// `probationary.promote(key)`, which reorders *within* the probationary
    /// segment and never reaches `main` — contradicting this method's name
    /// and documentation. Actually move the entry instead.
    fn promote_to_main(&mut self, key: &str) {
        if let Some(value) = self.probationary.pop(key) {
            self.main.put(key.to_string(), value);
        }
    }
}Probationary caches use promote to move entries between cache levels.
Prefetch and Promotion
use lru::LruCache;
use std::num::NonZeroUsize;
/// Warm predicted-next entries to the front of the LRU list without paying
/// for any value access.
fn prefetch_pattern() {
    let mut cache: LruCache<String, Data> = LruCache::new(NonZeroUsize::new(100).unwrap());

    // Prefetch: promote entries that will likely be accessed soon.
    // This moves them to the front without touching their values.
    let predicted_keys = vec!["key_a", "key_b", "key_c"];
    for key in predicted_keys {
        // Iterate by value so `key` is `&str`, which `String` borrows as
        // `str`. (The original iterated `&predicted_keys`, yielding `&&str`;
        // `String: Borrow<&str>` does not hold, so that fails to compile.)
        cache.promote(key);
    }

    // The predicted keys now sit at the front; a later access finds them
    // already most-recent. This is useful when:
    // - access patterns are known in advance,
    // - value access is expensive (e.g. deserialization),
    // - soon-to-be-accessed entries must be shielded from eviction.
}
/// Placeholder payload type for the prefetch example.
struct Data {
bytes: Vec<u8>, // potentially large, so value access may be costly
}Prefetch promotes entries before they're accessed.
Batch Operations
use lru::LruCache;
use std::num::NonZeroUsize;
fn batch_operations() {
    let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(10).unwrap());
    for i in 0..10 {
        // Leak each formatted key to obtain the &'static str the map expects.
        cache.put(Box::leak(format!("key{}", i).into_boxed_str()), i);
    }
    // Recency order: key0 <- key1 <- ... <- key9

    // Batch promotion — reorder several entries without reading any value.
    for key in ["key3", "key7", "key1"] {
        cache.promote(&key);
    }
    // Those keys now sit at the front ("key1" most recent).

    // Compare with a batch of get_mut calls, which also fetch the values.
    for key in ["key2", "key5"] {
        cache.get_mut(&key);
    }
}Batch operations can use promote for efficiency.
Thread Safety Considerations
use lru::LruCache;
use std::num::NonZeroUsize;
use std::sync::Mutex;
// LruCache is not thread-safe by default
// Use Mutex for thread-safe access
/// Mutex-guarded LRU cache: the lock lets `&self` methods mutate the cache
/// (every LRU access, even a read, reorders the internal list).
struct ThreadSafeCache {
cache: Mutex<LruCache<String, Vec<u8>>>,
}
impl ThreadSafeCache {
    /// Clone the value out under the lock; promotes the entry (normal access).
    fn get(&self, key: &str) -> Option<Vec<u8>> {
        self.cache.lock().unwrap().get(key).cloned()
    }

    /// Reorder only; reports whether the key was present.
    fn promote(&self, key: &str) -> bool {
        self.cache.lock().unwrap().promote(key)
    }

    /// Clone the value out without promoting the entry.
    fn peek(&self, key: &str) -> Option<Vec<u8>> {
        self.cache.lock().unwrap().peek(key).cloned()
    }
}Combine promote with synchronization for thread-safe patterns.
Performance Implications
use lru::LruCache;
use std::num::NonZeroUsize;
// Why promote can be cheaper than get_mut when only the ordering matters.
fn performance_notes() {
let mut cache: LruCache<&str, HeavyValue> = LruCache::new(NonZeroUsize::new(1000).unwrap());
// get_mut: hash lookup + entry moved to the LRU front + a &mut V returned.
//   The caller then typically dereferences the value — which may be
//   expensive for large payloads or lazily-computed data.
// promote: hash lookup + entry moved to the LRU front — and nothing else.
//   No value reference is produced, so nothing tempts a costly access.
// Prefer promote when:
// - you only care about the recency ordering,
// - touching the value would be costly,
// - you are warming the cache ahead of real accesses.
}
/// Stand-in for a value that is expensive to read or deserialize.
struct HeavyValue {
data: Vec<u8>, // Potentially large
}promote avoids value access overhead.
Complete Example: Cache Warming
use lru::LruCache;
use std::num::NonZeroUsize;
/// Wrapper demonstrating cache warming: promote known-important keys so they
/// survive eviction before real traffic arrives.
struct WarmedCache {
cache: LruCache<String, String>,
}
impl WarmedCache {
    fn new(capacity: usize) -> Self {
        WarmedCache {
            cache: LruCache::new(NonZeroUsize::new(capacity).unwrap()),
        }
    }

    /// Promote each listed key (if present) to most-recently-used without
    /// triggering any value access or side effects. Absent keys are a no-op.
    fn warm(&mut self, keys: &[&str]) {
        // Destructure with `&key` so `key` is `&str`: iterating `&[&str]`
        // yields `&&str`, and `String: Borrow<&str>` does not hold, so the
        // original `contains(key)` / `promote(key)` failed to compile.
        for &key in keys {
            if self.cache.contains(key) {
                self.cache.promote(key);
            }
            // promote() would simply return false for a missing key; the
            // contains() guard just documents intent.
        }
    }

    /// Normal access — promotes automatically.
    fn get(&mut self, key: &str) -> Option<&String> {
        self.cache.get(key)
    }

    /// Read without promoting.
    fn get_without_promote(&mut self, key: &str) -> Option<&String> {
        self.cache.peek(key)
    }

    /// Mutate without promoting.
    fn get_without_promote_mut(&mut self, key: &str) -> Option<&mut String> {
        self.cache.peek_mut(key)
    }

    fn put(&mut self, key: String, value: String) {
        self.cache.put(key, value);
    }
}Cache warming uses promote to establish desired ordering.
Synthesis
Quick reference:
| Method | Returns Value | Modifies Value | Promotes Entry | Use Case |
|---|---|---|---|---|
get |
Option<&V> |
No | Yes | Normal access |
get_mut |
Option<&mut V> |
Yes | Yes | Access and modify |
peek |
Option<&V> |
No | No | Inspect without reordering |
peek_mut |
Option<&mut V> |
Yes | No | Modify without reordering |
promote |
bool |
No | Yes | Reorder only |
When to use each:
use lru::LruCache;
use std::num::NonZeroUsize;
// (Illustrative snippet — statements shown at top level for brevity; in a
// real program they would live inside a function.)
let mut cache: LruCache<&str, i32> = LruCache::new(NonZeroUsize::new(10).unwrap());
// get_mut: Access value and mark as recently used
if let Some(val) = cache.get_mut(&"key") {
*val += 1; // Modify and promote
}
// promote: Mark as recently used without value access
cache.promote(&"key"); // Just reorder
// peek_mut: Modify without affecting LRU order
if let Some(val) = cache.peek_mut(&"key") {
*val += 1; // Modify but don't promote
}
// peek: Inspect without affecting LRU order
if let Some(val) = cache.peek(&"key") {
println!("{}", val); // Read but don't promote
}Key insight: get_mut provides automatic LRU ordering—every access promotes the entry to most-recently-used. This is the right choice for standard cache access patterns where access frequency determines importance. promote provides manual ordering control when you need to reorder entries without value access—useful for cache warming (pre-populating important entries), implementing tiered caches (moving entries between hot and cold tiers), batch operations (reordering multiple entries efficiently), or when value access has side effects you want to avoid. The peek and peek_mut methods complete the matrix by providing value access without promotion, useful for inspection or modification that shouldn't affect recency ranking. The choice between automatic (get_mut) and manual (promote) ordering depends on whether you're implementing a standard LRU pattern or a custom caching policy with specific ordering requirements.
