Loading page…
Rust walkthroughs
Loading page…
How does lru::LruCache::iter provide ordered iteration over cached entries? lru::LruCache::iter returns an iterator over cache entries in most-recently-used to least-recently-used order, reflecting the internal LRU ordering that determines which entries are evicted when capacity is exceeded. The iterator yields (&K, &V) pairs, allowing inspection of both keys and values while maintaining the cache's internal invariants—iterating does not update access times or change the order. Internally, the LRU cache maintains a doubly-linked list alongside a hash map, where the linked list order represents recency of access; iter walks this list from most to least recently used. This ordering enables patterns like inspecting which items would be evicted next or implementing warm-up by pre-populating with known-hot entries.
use lru::LruCache;
// Demonstrates iter()'s ordering: the most-recently-used entry comes first.
// NOTE(review): recent versions of the lru crate require `NonZeroUsize` in
// `LruCache::new`; the plain `usize` here implies an older release — confirm.
fn main() {
let mut cache: LruCache<&str, i32> = LruCache::new(5);
cache.put("a", 1);
cache.put("b", 2);
cache.put("c", 3);
cache.put("d", 4);
cache.put("e", 5);
// iter() yields entries newest-first — the REVERSE of insertion order.
// Nothing has been re-accessed yet, so recency is purely the put() order.
for (key, value) in cache.iter() {
println!("{}: {}", key, value);
}
// Output order: e, d, c, b, a (most recent to least recent)
}
iter() walks from the most recently used entry to the least recently used.
use lru::LruCache;
/// Shows that get() promotes an entry to most-recently-used, which is
/// visible the next time the cache is iterated.
fn main() {
    let mut recent: LruCache<&str, i32> = LruCache::new(5);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5)] {
        recent.put(k, v);
    }
    // Recency right after the puts: e -> d -> c -> b -> a.
    recent.get(&"a"); // "a" is promoted to the front: a -> e -> d -> c -> b
    recent.get(&"c"); // "c" is promoted next:        c -> a -> e -> d -> b
    println!("After accesses:");
    // iter() walks the recency list front (MRU) to back (LRU),
    // so both promotions above show up here.
    for (key, value) in recent.iter() {
        println!("{}: {}", key, value);
    }
    // Expected output: c: 3, a: 1, e: 5, d: 4, b: 2
}
Accessing entries moves them to the front of the iteration order.
use lru::LruCache;
fn main() {
let mut cache: LruCache<&str, i32> = LruCache::new(5);
cache.put("a", 1);
cache.put("b", 2);
cache.put("c", 3);
// Store order: c -> b -> a
// Iterating does NOT update access times
for (key, _value) in cache.iter() {
println!("Key: {}", key);
// Even though we're "accessing" each entry,
// the order doesn't change
}
// Order is still: c -> b -> a
// Iter doesn't count as an "access" for LRU purposes
let mut iter = cache.iter();
let first = iter.next();
println!("First: {:?}", first); // ("c", 3)
// Now access "a"
cache.get(&"a");
// New order: a -> c -> b
// Iterating again shows the change
println!("After accessing 'a':");
for (key, value) in cache.iter() {
println!("{}: {}", key, value);
}
}iter() is read-onlyâit doesn't update LRU order or access times.
use lru::LruCache;
/// Illustrates the internal layout described by the crate: a hash map for
/// O(1) lookup plus a doubly-linked list whose head is the MRU entry and
/// whose tail is the LRU entry (the next eviction victim).
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(3);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // Full: the recency list reads c <-> b <-> a (head c, tail a).
    // Inserting past capacity drops the tail entry, "a".
    cache.put("d", 4);
    // The list is now d <-> c <-> b; "a" is gone.
    for (key, value) in cache.iter() {
        println!("{}: {}", key, value);
    }
    // Prints: d: 4, c: 3, b: 2
}
The doubly-linked list maintains order; iter() walks from head to tail.
use lru::LruCache;
/// Prints the cache's entries in eviction order (least- to most-recently
/// used) by collecting iter()'s MRU-first output and walking it backwards.
fn show_eviction_order<K, V>(cache: &LruCache<K, V>)
where
    K: std::fmt::Debug,
    V: std::fmt::Debug,
{
    println!("Eviction order (LRU to MRU):");
    let snapshot: Vec<_> = cache.iter().collect();
    snapshot.into_iter().rev().for_each(|(key, value)| {
        println!(" {:?}: {:?}", key, value);
    });
}
/// Populates a cache, promotes two entries, and prints who gets evicted next.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5)] {
        cache.put(k, v);
    }
    // Promote "a" then "c"; recency becomes c -> a -> e -> d -> b,
    // so the eviction queue (reverse of iteration) is b -> d -> e -> a -> c.
    cache.get(&"a");
    cache.get(&"c");
    show_eviction_order(&cache);
}
The last entries in iteration order are eviction candidates.
use lru::LruCache;
/// Shows that iter_mut() gives mutable access to values while leaving the
/// recency order exactly as it was.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // Double every value in place; this is not an "access" in the LRU sense.
    cache.iter_mut().for_each(|(_key, value)| *value *= 2);
    // Values changed, order did not: still c -> b -> a.
    for (key, value) in cache.iter() {
        println!("{}: {}", key, value);
    }
    // Prints: c: 6, b: 4, a: 2
}
iter_mut() modifies values without updating access times.
use lru::LruCache;
// Demonstrates into_iter(), which consumes the cache and yields owned pairs.
fn main() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
cache.put("a".to_string(), 1);
cache.put("b".to_string(), 2);
cache.put("c".to_string(), 3);
// into_iter consumes the cache and yields owned (K, V) pairs.
// NOTE(review): lru's IntoIter is implemented via pop_lru(), so it yields
// entries LEAST-recently-used first — the opposite end from iter(). The
// original comment claimed output "c: 3, b: 2, a: 1"; the expected output
// is a, b, c. Verify against the lru version in use.
let entries: Vec<(String, i32)> = cache.into_iter().collect();
for (key, value) in entries {
println!("{}: {}", key, value);
}
// Expected output: a: 1, b: 2, c: 3 (LRU first — see note above)
}
into_iter() consumes the cache and yields owned entries.
use lru::LruCache;
/// Standard iterator adaptors compose with iter() to project out keys,
/// values, or a single matching entry.
fn main() {
    let mut lookup: LruCache<&str, i32> = LruCache::new(5);
    lookup.put("a", 1);
    lookup.put("b", 2);
    lookup.put("c", 3);
    // Just the keys, MRU first.
    let keys: Vec<&str> = lookup.iter().map(|(k, _)| *k).collect();
    println!("Keys: {:?}", keys);
    // Just the values, MRU first.
    let values: Vec<i32> = lookup.iter().map(|(_, v)| *v).collect();
    println!("Values: {:?}", values);
    // Linear scan for one entry; find() does not touch the LRU order.
    let found = lookup.iter().find(|(k, _)| *k == "b");
    println!("Found 'b': {:?}", found);
}
Use iterator adaptors to extract keys or values as needed.
use lru::LruCache;
/// Previews eviction candidates: the tail of iter()'s MRU-first walk is the
/// first entry the cache will drop when it overflows.
fn main() {
    let mut cache: LruCache<&str, Vec<i32>> = LruCache::new(3);
    cache.put("a", vec![1, 2, 3]);
    cache.put("b", vec![4, 5, 6]);
    cache.put("c", vec![7, 8, 9]);
    // Print the `count` entries closest to eviction (LRU end first).
    // Fixed: the original also required `K: Copy`, but only `Debug` is used —
    // the iterator hands out references and nothing is copied.
    fn show_lru_entries<K, V>(cache: &LruCache<K, V>, count: usize)
    where
        K: std::fmt::Debug,
        V: std::fmt::Debug,
    {
        println!("Next {} entries to be evicted:", count);
        let entries: Vec<_> = cache.iter().collect();
        for (key, value) in entries.into_iter().rev().take(count) {
            println!(" {:?}: {:?}", key, value);
        }
    }
    show_lru_entries(&cache, 2);
    // Capacity is 3, so two more puts evict "a" and then "b".
    cache.put("d", vec![10, 11]);
    cache.put("e", vec![12, 13]);
    println!("\nAfter adding 'd' and 'e':");
    for (key, value) in cache.iter() {
        println!("{}: {:?}", key, value);
    }
}
Iteration helps understand what entries will be evicted next.
use lru::LruCache;
/// Size queries: len(), cap(), and is_empty() answer in O(1) without walking
/// the entries; iter().count() reaches the same number the slow way.
fn main() {
    let mut sizes: LruCache<&str, i32> = LruCache::new(5);
    sizes.put("a", 1);
    sizes.put("b", 2);
    sizes.put("c", 3);
    println!("Size: {}", sizes.len()); // entries currently stored
    println!("Capacity: {}", sizes.cap()); // maximum before eviction kicks in
    println!("Count via iter: {}", sizes.iter().count());
    println!("Is empty: {}", sizes.is_empty());
    // Fraction of capacity in use, reported as a percentage.
    let usage = sizes.len() as f64 / sizes.cap() as f64;
    println!("Capacity usage: {:.0}%", usage * 100.0);
}
len(), cap(), and is_empty() provide size information without iteration.
use lru::LruCache;
/// Dumps the cache contents MRU-first, tagging the first entry as most
/// recent and the last as the next eviction victim.
fn debug_cache<K, V>(cache: &LruCache<K, V>)
where
    K: std::fmt::Debug,
    V: std::fmt::Debug,
{
    println!("Cache state ({} / {} entries):", cache.len(), cache.cap());
    println!("MRU to LRU order:");
    // saturating_sub keeps this safe for an empty cache (loop body never runs).
    let last = cache.len().saturating_sub(1);
    for (i, (key, value)) in cache.iter().enumerate() {
        let marker = match i {
            0 => " (most recent)",
            i if i == last => " (next to evict)",
            _ => "",
        };
        println!(" {:?}: {:?}{}", key, value, marker);
    }
}
/// Fills a cache, promotes two entries, then dumps the resulting order.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(4);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4)] {
        cache.put(k, v);
    }
    // Promote "a" then "b"; recency becomes b -> a -> d -> c.
    cache.get(&"a");
    cache.get(&"b");
    debug_cache(&cache);
}
iter() is useful for debugging cache state and ordering.
use lru::LruCache;
/// Picks the MRU and LRU entries straight off the iterator: the front is the
/// most recent, the back is the next to be evicted.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5)] {
        cache.put(k, v);
    }
    cache.get(&"a"); // promote "a" to the front
    // Front of the iterator = most recently used.
    println!("MRU: {:?}", cache.iter().next());
    // Back of the iterator = least recently used.
    println!("LRU: {:?}", cache.iter().last());
    // The iterator is double-ended, so nth_back reaches the same entry.
    println!("LRU (nth_back): {:?}", cache.iter().nth_back(0));
    // One position in from the back = second eviction candidate.
    println!("Second LRU: {:?}", cache.iter().nth_back(1));
}
First element is MRU; last element is LRU (next to evict).
use lru::LruCache;
use std::collections::VecDeque;
struct CacheSnapshot<K, V> {
entries: VecDeque<(K, V)>,
capacity: usize,
}
impl<K: Clone, V: Clone> CacheSnapshot<K, V> {
fn from_cache(cache: &LruCache<K, V>) -> Self
where
K: std::hash::Hash + Eq,
{
let entries: VecDeque<_> = cache
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
CacheSnapshot {
entries,
capacity: cache.cap(),
}
}
fn show(&self)
where
K: std::fmt::Debug,
V: std::fmt::Debug,
{
println!("Snapshot (capacity {}):", self.capacity);
for (key, value) in &self.entries {
println!(" {:?}: {:?}", key, value);
}
}
}
/// Builds a snapshot of a small cache and prints it.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    // The snapshot is an independent copy: mutating `cache` after this point
    // would not change what show() prints.
    CacheSnapshot::from_cache(&cache).show();
}
Clone entries during iteration to create snapshots independent of the cache.
use lru::LruCache;
use std::sync::{Arc, Mutex};
use std::thread;
/// Shares an LruCache across threads.
///
/// LruCache is not thread-safe on its own, so it is wrapped in a Mutex; the
/// Mutex is wrapped in an Arc so each spawned thread owns a handle that
/// satisfies thread::spawn's 'static bound. (The original captured a borrow
/// of the stack-local `cache` in `move` closures, which does not compile —
/// spawned threads may outlive the frame that owns the borrow.)
fn main() {
    let cache = Arc::new(Mutex::new(LruCache::<i32, String>::new(100)));
    let mut handles = vec![];
    for i in 0..5 {
        let cache = Arc::clone(&cache);
        let handle = thread::spawn(move || {
            // Hold the lock only for the duration of the insert.
            cache.lock().unwrap().put(i, format!("value-{}", i));
        });
        handles.push(handle);
    }
    for handle in handles {
        handle.join().unwrap();
    }
    // All writers have finished; lock once more to iterate.
    let cache = cache.lock().unwrap();
    for (key, value) in cache.iter() {
        println!("{}: {}", key, value);
    }
}
LruCache requires external synchronization for concurrent access.
use lru::LruCache;
/// Loads `items` into `cache` so that items[0] ends up most recently used.
/// put() always marks its entry as the newest, so inserting in reverse makes
/// the recency list's front match the front of `items`.
fn prepopulate_cache(cache: &mut LruCache<String, Vec<u8>>, items: Vec<(String, Vec<u8>)>) {
    for (key, value) in items.into_iter().rev() {
        cache.put(key, value);
    }
}
/// Restores `cache` from `snapshot`, whose entries must be ordered LRU to
/// MRU: inserting forward makes the final element the most recently used.
///
/// Fixed: dropped the unused `K: Clone` / `V: Clone` bounds — the pairs are
/// moved into the cache, never cloned, and relaxing bounds is backward
/// compatible for all existing callers.
fn warm_cache_from_snapshot<K, V>(
    cache: &mut LruCache<K, V>,
    snapshot: Vec<(K, V)>
) where
    K: std::hash::Hash + Eq,
{
    for (key, value) in snapshot {
        cache.put(key, value);
    }
}
/// Warms a cache with items of known "hotness": the last put wins the MRU
/// slot, so the coldest item is inserted first.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    let ranked = vec![
        ("cold", 1), // inserted first -> ends up LRU, evicted first
        ("warm", 2),
        ("hot", 3), // inserted last -> ends up MRU
    ];
    for (key, value) in ranked {
        cache.put(key, value);
    }
    // Iteration confirms the order: hot -> warm -> cold.
    for (key, value) in cache.iter() {
        println!("{}: {}", key, value);
    }
}
Insertion order determines initial LRU ordering.
use lru::LruCache;
/// Traces how get() and put() reshape the recency order by printing the
/// cache between steps.
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(4);
    for (k, v) in [("a", 1), ("b", 2), ("c", 3), ("d", 4)] {
        cache.put(k, v);
    }
    println!("Initial state:");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
    // get() promotes "a" to the MRU position: a -> d -> c -> b.
    cache.get(&"a");
    println!("\nAfter accessing 'a':");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
    // The cache is full, so this put evicts the current LRU entry ("b").
    cache.put("e", 5);
    println!("\nAfter adding 'e' (evicts LRU):");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
}
iter() helps visualize how operations affect cache ordering.
use lru::LruCache;
/// Contrasts get() (promotes the entry) with peek()/peek_mut() (read or
/// mutate the value while leaving the recency order alone).
fn main() {
    let mut cache: LruCache<&str, i32> = LruCache::new(5);
    cache.put("a", 1);
    cache.put("b", 2);
    cache.put("c", 3);
    cache.get(&"a"); // promotes "a" to MRU
    cache.peek(&"b"); // reads "b" without promoting it
    println!("After get('a') and peek('b'):");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
    // Prints a, c, b: "a" moved to the front, "b" stayed put.
    // peek_mut() mutates the value but, like peek(), leaves the order alone.
    if let Some(value) = cache.peek_mut(&"c") {
        *value *= 2;
    }
}
peek() and peek_mut() access values without updating LRU order.
use lru::LruCache;
/// A summary of a cache's state: size, capacity, the entries at both ends of
/// the recency list, and utilization.
#[derive(Debug)]
struct CacheInsight<K, V> {
    total_entries: usize,
    capacity: usize,
    most_recently_used: Option<(K, V)>, // front of iter(); None if empty
    least_recently_used: Option<(K, V)>, // back of iter(); None if empty
    usage_percent: f64,
}
impl<K: Clone, V: Clone> CacheInsight<K, V> {
    /// Builds a summary by reading both ends of iter()'s MRU-first walk.
    fn analyze(cache: &LruCache<K, V>) -> Self
    where
        K: std::hash::Hash + Eq,
    {
        // The iterator is double-ended, so both ends are reachable without
        // collecting the whole walk into a Vec first.
        let most_recently_used = cache
            .iter()
            .next()
            .map(|(k, v)| (k.clone(), v.clone()));
        let least_recently_used = cache
            .iter()
            .next_back()
            .map(|(k, v)| (k.clone(), v.clone()));
        CacheInsight {
            total_entries: cache.len(),
            capacity: cache.cap(),
            most_recently_used,
            least_recently_used,
            usage_percent: (cache.len() as f64 / cache.cap() as f64) * 100.0,
        }
    }
}
/// Populates a cache to 75% capacity and prints an analysis of it.
fn main() {
    // Fixed: key on owned Strings instead of `Box::leak(key.into_boxed_str())`,
    // which permanently leaked one heap allocation per key just to obtain a
    // &'static str. CacheInsight::analyze is generic, so it works unchanged.
    let mut cache: LruCache<String, i32> = LruCache::new(100);
    for i in 0..75 {
        cache.put(format!("key-{}", i), i);
    }
    let insight = CacheInsight::analyze(&cache);
    println!("{:?}", insight);
}
iter() enables building analysis tools for cache monitoring.
use lru::LruCache;
/// Removes every entry whose value exceeds `max_value` and returns how many
/// were removed. Keys are collected up front because the cache cannot be
/// mutated while an iterator still borrows it.
fn evict_by_priority(cache: &mut LruCache<String, i32>, max_value: i32) -> usize {
    let doomed: Vec<String> = cache
        .iter()
        .filter(|(_, &value)| value > max_value)
        .map(|(key, _)| key.clone())
        .collect();
    let removed = doomed.len();
    for key in &doomed {
        cache.pop(key);
    }
    removed
}
/// Demonstrates value-based eviction: entries above a threshold are removed.
fn main() {
    let mut cache: LruCache<String, i32> = LruCache::new(10);
    for (k, v) in [("a", 5), ("b", 15), ("c", 8), ("d", 20)] {
        cache.put(k.to_string(), v);
    }
    println!("Before eviction:");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
    // Drop everything valued above 10 ("b" and "d").
    let evicted = evict_by_priority(&mut cache, 10);
    println!("\nEvicted {} entries", evicted);
    println!("\nAfter eviction:");
    for (k, v) in cache.iter() {
        println!(" {}: {}", k, v);
    }
}
Collect keys first, then evict to avoid modifying during iteration.
LruCache iteration methods:
| Method | Yields | Updates Order | Ownership |
|--------|--------|---------------|-----------|
| iter() | (&K, &V) | No | Borrowed |
| iter_mut() | (&K, &mut V) | No | Borrowed |
| into_iter() | (K, V) | N/A | Owned |
| keys() | &K | No | Borrowed |
| values() | &V | No | Borrowed |
Iteration order characteristics:
| Position | Meaning |
|----------|---------|
| First element | Most recently used (MRU) |
| Last element | Least recently used (LRU) |
| Eviction candidate | Last element |
| Newest entry | First element |
Key insight: lru::LruCache::iter provides ordered iteration from most to least recently used by walking the internal doubly-linked list that tracks access recency. This order reflects the same ordering used for eviction decisions—entries at the end of iteration are next to be evicted when capacity is exceeded. Unlike get() and put(), iteration does not update access times, making iter() safe for inspection without side effects. The iterator yields references, so the cache remains valid and can be used after iteration completes. For read-only inspection of cache state—debugging, monitoring, snapshot creation, or analysis—iter() provides a complete view of cache contents in eviction-relevant order. Combined with peek() for non-updating value access, these methods enable sophisticated cache introspection without affecting the LRU ordering that governs eviction behavior.