Rust walkthroughs
lru::LruCache::promote and automatic promotion on access?

lru::LruCache automatically promotes accessed entries to the most-recently-used position when using get or get_mut, moving them to the front of the eviction queue. The promote method explicitly moves an entry to the front without accessing its value, useful when you want to update the cache's priority ordering without triggering side effects associated with access. The trade-off centers on control versus convenience: automatic promotion happens transparently on every access, while explicit promotion requires manual intervention but allows fine-grained control over when and why entries are prioritized.
use lru::LruCache;
use std::num::NonZeroUsize;
fn basic_cache() {
let mut cache: LruCache<String, String> = LruCache::new(NonZeroUsize::new(3).unwrap());
cache.put("a".to_string(), "value_a".to_string());
cache.put("b".to_string(), "value_b".to_string());
cache.put("c".to_string(), "value_c".to_string());
// Cache order: a (LRU) <- b <- c (MRU)
// Accessing 'a' promotes it to MRU
cache.get(&"a".to_string());
// Cache order now: b (LRU) <- c <- a (MRU)
}Access through get automatically promotes entries to most-recently-used.
use lru::LruCache;
use std::num::NonZeroUsize;
fn automatic_promotion() {
let mut cache: LruCache<u32, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
cache.put(1, "one".to_string());
cache.put(2, "two".to_string());
cache.put(3, "three".to_string());
cache.put(4, "four".to_string());
// Order: 1 (LRU) <- 2 <- 3 <- 4 (MRU)
// Access entry 2
let value = cache.get(&2);
// Entry 2 is automatically promoted
// Order: 1 (LRU) <- 3 <- 4 <- 2 (MRU)
// Next eviction removes entry 1
cache.put(5, "five".to_string());
assert!(!cache.contains(&1)); // Entry 1 evicted
assert!(cache.contains(&2)); // Entry 2 still present
}get and get_mut automatically promote entries without explicit action.
use lru::LruCache;
use std::num::NonZeroUsize;
fn explicit_promote() {
let mut cache: LruCache<u32, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
cache.put(1, "one".to_string());
cache.put(2, "two".to_string());
cache.put(3, "three".to_string());
cache.put(4, "four".to_string());
// Order: 1 (LRU) <- 2 <- 3 <- 4 (MRU)
// Promote entry 2 without accessing its value
cache.promote(&2);
// Order: 1 (LRU) <- 3 <- 4 <- 2 (MRU)
// Entry 2 is now MRU, but we never accessed its value
}promote moves an entry to MRU without retrieving its value.
use lru::LruCache;
use std::num::NonZeroUsize;
fn promotion_use_cases() {
// Use case 1: Promoting based on external signals
// - Background task receives hint that data is important
// - Promote without triggering access callbacks
// Use case 2: Prefetching scenarios
// - Data loaded preemptively
// - Mark as recently used to prevent immediate eviction
// Use case 3: Custom access tracking
// - Cache wants to track "real" accesses separately
// - Promote manually for specific scenarios
let mut cache: LruCache<String, Vec<u8>> = LruCache::new(NonZeroUsize::new(10).unwrap());
// Prefetch data
cache.put("key".to_string(), vec![1, 2, 3]);
// Mark as important (but user hasn't accessed it)
cache.promote(&"key".to_string());
}Explicit promotion is useful when promotion should be independent of access.
use lru::LruCache;
use std::num::NonZeroUsize;
// Pairs an LRU cache with a counter of user-facing accesses, so that
// maintenance operations can reorder entries without inflating metrics.
struct CacheWithMetrics {
    cache: LruCache<String, Data>,
    // Number of reads made through `get`; `promote_without_metrics` bypasses it.
    access_count: usize,
}
// Payload type stored in CacheWithMetrics.
// NOTE(review): `value` is not read anywhere in the visible code — confirm intent.
struct Data {
    value: String,
}
impl CacheWithMetrics {
    /// User-facing read: bumps the access counter, then lets the inner
    /// cache's `get` promote the entry as a side effect.
    fn get(&mut self, key: &str) -> Option<&Data> {
        self.access_count += 1;
        self.cache.get(key)
    }

    /// Reorders the cache without touching the access counter, so
    /// background maintenance never skews the metric.
    fn promote_without_metrics(&mut self, key: &str) {
        self.cache.promote(key);
    }
}
fn side_effects_example() {
let mut cache = CacheWithMetrics {
cache: LruCache::new(NonZeroUsize::new(10).unwrap()),
access_count: 0,
};
// Real user access
cache.get(&"key".to_string());
assert_eq!(cache.access_count, 1);
// Background maintenance - promote without counting
cache.promote_without_metrics(&"key".to_string());
assert_eq!(cache.access_count, 1); // Still 1
}promote allows ordering updates without triggering access-related side effects.
use lru::LruCache;
use std::num::NonZeroUsize;
fn performance_comparison() {
let mut cache: LruCache<u32, String> = LruCache::new(NonZeroUsize::new(1000).unwrap());
// Populate cache
for i in 0..1000 {
cache.put(i, format!("value_{}", i));
}
// Automatic promotion via get
// - Returns reference to value
// - Updates LRU order
// - O(1) operation
let _ = cache.get(&500);
// Explicit promotion
// - Does not return value
// - Updates LRU order
// - O(1) operation
cache.promote(&501);
// Both are O(1), but promote avoids the value lookup
}Both get and promote are O(1) operations; promote skips value retrieval.
use lru::LruCache;
use std::num::NonZeroUsize;
fn contains_vs_promote() {
let mut cache: LruCache<String, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
cache.put("a".to_string(), "value".to_string());
cache.put("b".to_string(), "value".to_string());
cache.put("c".to_string(), "value".to_string());
// contains does NOT promote
let exists = cache.contains(&"a".to_string());
// Order unchanged: a (LRU) <- b <- c (MRU)
// promote DOES promote
cache.promote(&"a".to_string());
// Order changed: b (LRU) <- c <- a (MRU)
}contains checks existence without promoting; promote updates order.
use lru::LruCache;
use std::num::NonZeroUsize;
fn peek_without_promotion() {
let mut cache: LruCache<String, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
cache.put("a".to_string(), "value_a".to_string());
cache.put("b".to_string(), "value_b".to_string());
cache.put("c".to_string(), "value_c".to_string());
// peek() returns value without promoting
let value = cache.peek(&"a".to_string());
// Order unchanged: a (LRU) <- b <- c (MRU)
// get() returns value AND promotes
let value = cache.get(&"a".to_string());
// Order changed: b (LRU) <- c <- a (MRU)
}peek accesses the value without promoting; get both accesses and promotes.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Inspects a value via the non-promoting `peek` and promotes only when
/// the inspection says the entry deserves it.
fn peek_then_promote() {
    let mut cache: LruCache<String, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
    cache.put("key".to_string(), "value".to_string());
    // Decide from a non-promoting read; the borrow ends before promote.
    let worth_promoting = cache
        .peek(&"key".to_string())
        .map(|value| should_promote(value))
        .unwrap_or(false);
    if worth_promoting {
        cache.promote(&"key".to_string());
    }
}
/// An entry qualifies for promotion once its value is longer than three
/// bytes.
fn should_promote(value: &str) -> bool {
    let threshold = 3;
    value.len() > threshold
}
// Separate peek and promote allows conditional promotion based on value.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Background refresh via `peek_mut` (no reorder), then a conditional,
/// explicit `promote` based on the entry's own flag.
fn explicit_promotion_pattern() {
    let mut cache: LruCache<String, Data> = LruCache::new(NonZeroUsize::new(100).unwrap());
    for i in 0..100 {
        cache.put(format!("key_{}", i), Data::new(i));
    }
    // Background refresh mutates in place; peek_mut leaves the LRU order
    // untouched.
    if let Some(entry) = cache.peek_mut(&"key_50".to_string()) {
        entry.refresh();
    }
    // Promote only when a non-promoting read says the entry matters.
    let important = cache
        .peek(&"key_50".to_string())
        .map(|d| d.is_important)
        .unwrap_or(false);
    if important {
        cache.promote(&"key_50".to_string());
    }
}
// Cached payload carrying an importance flag used to decide promotion.
struct Data {
    id: usize,
    is_important: bool,
}
impl Data {
fn new(id: usize) -> Self {
Self { id, is_important: id % 10 == 0 }
}
fn refresh(&mut self) {
// Update data without promoting
}
}peek_mut allows mutation without promotion; promote can be called separately.
use lru::LruCache;
use std::num::NonZeroUsize;
/// Prefetch flow: insert ahead of time, then promote so the entry is
/// still resident when the real request lands.
fn prefetch_with_promotion() {
    let mut cache: LruCache<String, Vec<u8>> = LruCache::new(NonZeroUsize::new(10).unwrap());
    // Load data expected to be needed shortly and park it in the cache.
    let prefetched = load_data("future_key");
    cache.put("future_key".to_string(), prefetched);
    // Shield it from eviction until the user actually asks for it.
    cache.promote(&"future_key".to_string());
    // On the later real access the entry is a hit (not evicted) and the
    // access is counted normally.
}
/// Stand-in loader; a real implementation would fetch the bytes for
/// `key` from a backing store.
fn load_data(key: &str) -> Vec<u8> {
    // `key` is intentionally unused in this stub.
    let _ = key;
    vec![1, 2, 3, 4]
}
// Prefetched entries can be promoted to prevent premature eviction.
use lru::LruCache;
use std::num::NonZeroUsize;
fn batch_promotion() {
let mut cache: LruCache<u32, String> = LruCache::new(NonZeroUsize::new(100).unwrap());
for i in 0..100 {
cache.put(i, format!("value_{}", i));
}
// Promote multiple entries without accessing values
let important_keys = vec![10, 20, 30, 40, 50];
for key in important_keys {
cache.promote(&key);
}
// Last promoted key is now MRU
// All promoted keys are more recently used than non-promoted
}Multiple entries can be promoted in batch operations.
use lru::LruCache;
use std::num::NonZeroUsize;
fn access_patterns() {
let mut cache: LruCache<String, String> = LruCache::new(NonZeroUsize::new(4).unwrap());
cache.put("a".to_string(), "value".to_string());
cache.put("b".to_string(), "value".to_string());
cache.put("c".to_string(), "value".to_string());
cache.put("d".to_string(), "value".to_string());
// Pattern 1: get (access + promote)
let _ = cache.get(&"a".to_string());
// Entry 'a' promoted, access counted
// Pattern 2: peek + promote (conditional)
if let Some(v) = cache.peek(&"b".to_string()) {
if v.len() > 3 {
cache.promote(&"b".to_string());
}
}
// Entry 'b' promoted only if condition met
// Pattern 3: contains (check only)
let _ = cache.contains(&"c".to_string());
// Entry 'c' not promoted
// Pattern 4: promote (order only)
cache.promote(&"d".to_string());
// Entry 'd' promoted without any value access
}Different methods provide different combinations of access and promotion.
use lru::LruCache;
use std::num::NonZeroUsize;
// LRU cache whose entries carry refresh metadata (see CacheEntry below).
struct TimedCache {
    cache: LruCache<String, CacheEntry>,
}
// A cached value plus bookkeeping for background refreshes.
struct CacheEntry {
    data: String,
    // When the entry was last refreshed by background maintenance.
    last_refreshed: std::time::Instant,
    // NOTE(review): not read or written anywhere in the visible code — confirm intent.
    access_count: usize,
}
impl TimedCache {
fn new(capacity: usize) -> Self {
Self {
cache: LruCache::new(NonZeroUsize::new(capacity).unwrap()),
}
}
// User access: get data AND promote
fn get(&mut self, key: &str) -> Option<&str> {
let entry = self.cache.get(key)?;
Some(&entry.data)
}
// Background refresh: update timestamp WITHOUT promoting
fn refresh_if_stale(&mut self, key: &str) {
let now = std::time::Instant::now();
let should_refresh = self.cache.peek(key)
.map(|e| now.duration_since(e.last_refreshed).as_secs() > 60)
.unwrap_or(false);
if should_refresh {
// Update entry without promoting
if let Some(entry) = self.cache.peek_mut(key) {
entry.last_refreshed = now;
// Entry remains at same position in LRU order
}
}
}
// Preemptive promotion: mark as important
fn mark_important(&mut self, key: &str) {
// Promote without counting as user access
self.cache.promote(key);
}
}Background operations use peek/promote to avoid affecting user-facing metrics.
use lru::LruCache;
use std::num::NonZeroUsize;
// Cache wrapper that tracks aggregate access metrics out-of-band.
struct MonitoredCache {
    cache: LruCache<String, CacheValue>,
    // Total user-facing reads (incremented by user_access).
    total_accesses: usize,
    // Distinct keys ever read by a user.
    unique_keys_accessed: std::collections::HashSet<String>,
}
// Payload plus a per-entry counter.
struct CacheValue {
    data: Vec<u8>,
    // NOTE(review): never updated in the visible code — `get` hands out a
    // shared reference, so this cannot be bumped there; confirm intent.
    internal_accesses: usize,
}
impl MonitoredCache {
// User access: tracked in metrics
fn user_access(&mut self, key: &str) -> Option<&Vec<u8>> {
self.total_accesses += 1;
self.unique_keys_accessed.insert(key.to_string());
// Automatic promotion on get
self.cache.get(key).map(|v| {
// Can't mutate access count through get
&v.data
})
}
// Internal scan: update stats without promoting
fn update_internal_stats(&mut self) {
// Iterate without promoting
for (key, value) in self.cache.iter() {
// Access value for stats
let _ = &value.data;
// Entry not promoted
}
}
// Preemptive promotion for known hot keys
fn promote_hot_keys(&mut self, hot_keys: &[&str]) {
for key in hot_keys {
if self.cache.peek(key).is_some() {
// Promote hot key without counting as access
self.cache.promote(&key.to_string());
}
}
}
}Separating promotion from access allows accurate metrics tracking.
use lru::LruCache;
use std::num::NonZeroUsize;
// Cache that is pre-populated ("warmed") with predicted keys.
struct WarmCache {
    cache: LruCache<String, CachedData>,
}
// Warmed payload.
struct CachedData {
    content: String,
    // NOTE(review): set by load_from_source but never read in the visible
    // code — confirm intent.
    warmed: bool,
}
impl WarmCache {
// Warm cache with predicted keys
fn warm(&mut self, predicted_keys: &[String]) {
for key in predicted_keys {
if self.cache.peek(key).is_none() {
// Load and insert without promoting
let data = self.load_from_source(key);
self.cache.put(key.clone(), data);
// Don't promote - actual user will do that
}
}
}
// Promote warmed entries expected to be used soon
fn prepare_for_access(&mut self, soon_keys: &[String]) {
for key in soon_keys {
// Promote entries that will be accessed
// but don't count as actual access
self.cache.promote(key);
}
}
// Real user access
fn get(&mut self, key: &str) -> Option<&str> {
self.cache.get(key).map(|d| &d.content)
}
fn load_from_source(&self, key: &str) -> CachedData {
CachedData {
content: format!("content for {}", key),
warmed: true,
}
}
}Cache warming uses promote to prioritize entries without counting as user access.
Access methods comparison:
| Method | Returns Value | Promotes Entry | Use Case |
|--------|--------------|----------------|----------|
| get | Yes | Yes | Normal access with LRU update |
| get_mut | Yes (mutable) | Yes | Mutation with LRU update |
| peek | Yes | No | Inspect without affecting order |
| peek_mut | Yes (mutable) | No | Mutate without affecting order |
| contains | No | No | Check existence only |
| promote | No | Yes | Explicit order update |
When to use each approach:
| Scenario | Approach |
|----------|----------|
| Normal cache access | get (automatic promotion) |
| Background refresh | peek_mut (no promotion) |
| Prefetch warming | promote after insert |
| Conditional promotion | peek + promote |
| Metrics tracking | peek for internal access |
Trade-offs:
| Aspect | Automatic Promotion | Explicit Promotion |
|--------|---------------------|--------------------|
| Control | Limited | Full |
| Convenience | High | Lower |
| Side effects | Always triggered | Manual control |
| Metrics impact | Every access counted | Separate from ordering |
| Performance | Same O(1) | Same O(1) |
Key insight: The lru::LruCache provides both automatic promotion (through get and get_mut) and explicit promotion (through promote), giving developers control over when cache ordering changes versus when entries are "accessed" from a metrics perspective. Automatic promotion is simpler and sufficient for basic use cases, but explicit promotion with promote enables scenarios like background maintenance (updating cache order without counting as user access), prefetching (marking entries as important before actual use), and conditional promotion based on entry characteristics. The related peek and peek_mut methods provide access without promotion, completing the toolkit for separating cache ordering from entry access.