Loading page…
Rust walkthroughs
Loading page…
{"tool_call":"file.create","args":{"content":"# How does rayon::iter::split enable custom parallel iteration strategies beyond data-parallel patterns?
split enables custom parallel iteration by accepting a user-defined splitting function that divides data into arbitrary chunks, rather than relying on Rayon's default parallel iterator which operates on fixed-size batches or individual elements. While standard parallel iterators like par_iter() automatically distribute work across threads based on data structure, split gives you control over how data is divided, allowing parallelization of non-collection data, recursive algorithms, and divide-and-conquer patterns. Use split when you need to parallelize algorithms that don't fit the standard "process each element" pattern, such as parallel quicksort, tree traversals, or computations on ranges that don't correspond to collection indices.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn main() {\n    // split takes:\n    // 1. Initial data (seed)\n    // 2. A splitter: Fn(D) -> (D, Option<D>).\n    //    Return (left, Some(right)) to split, or (data, None) to stop splitting.\n    // The result is a ParallelIterator over the pieces; process them with map().\n    \n    let sum: i32 = split(0..100, |r| {\n        // Split the range into two halves\n        let mid = r.start + (r.end - r.start) / 2;\n        if mid == r.start {\n            (r, None) // Can't split further\n        } else {\n            (r.start..mid, Some(mid..r.end))\n        }\n    })\n    .map(|r| r.sum::<i32>()) // Process each piece\n    .sum();\n    \n    println!(\"Sum: {}\", sum); // 4950\n}\n\n\nsplit recursively divides data, processes the pieces in parallel, and combines the results.
rust\nuse rayon::prelude::*;\nuse rayon::iter::split;\n\nfn main() {\n    let data: Vec<i32> = (0..1000).collect();\n    \n    // par_iter: automatically parallelizes over elements\n    let sum1: i32 = data.par_iter().sum();\n    \n    // split: you control how data is divided\n    let sum2: i32 = split(data.as_slice(), |slice| {\n        if slice.len() > 100 {\n            let mid = slice.len() / 2;\n            (&slice[..mid], Some(&slice[mid..]))\n        } else {\n            (slice, None) // Stop splitting at small chunks\n        }\n    })\n    .map(|slice| slice.iter().sum::<i32>())\n    .sum();\n    \n    assert_eq!(sum1, sum2);\n    \n    // Key difference:\n    // par_iter: parallelism is per-element\n    // split: parallelism is per-chunk (you define chunks)\n}\n\n\npar_iter operates per-element; split operates on custom-defined chunks.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn parallel_quicksort(data: &mut [i32]) {\n    if data.len() <= 100 {\n        // Base case: sequential sort for small arrays\n        data.sort();\n        return;\n    }\n    \n    let pivot_idx = partition(data);\n    let (left, right) = data.split_at_mut(pivot_idx);\n    \n    // Recursively sort both halves in parallel using rayon::join\n    rayon::join(\n        || parallel_quicksort(left),\n        || parallel_quicksort(&mut right[1..]), // Exclude pivot\n    );\n}\n\n// Lomuto partition; returns the pivot's final index.\nfn partition(data: &mut [i32]) -> usize {\n    let last = data.len() - 1;\n    // Move the middle element to the end so it serves as the pivot;\n    // without this swap the returned index would not hold the pivot.\n    data.swap(data.len() / 2, last);\n    let pivot = data[last];\n    let mut i = 0;\n    for j in 0..last {\n        if data[j] <= pivot {\n            data.swap(i, j);\n            i += 1;\n        }\n    }\n    data.swap(i, last);\n    i\n}\n\n// Using split for divide-and-conquer\nfn parallel_sum(data: &[i32]) -> i32 {\n    split(data, |slice| {\n        if slice.len() > 1000 {\n            let mid = slice.len() / 2;\n            (&slice[..mid], Some(&slice[mid..]))\n        } else {\n            (slice, None)\n        }\n    })\n    .map(|slice| slice.iter().sum::<i32>())\n    .sum()\n}\n\nfn main() {\n    let mut data: Vec<i32> = (0..10000).rev().collect();\n    parallel_quicksort(&mut data);\n    assert!(data.windows(2).all(|w| w[0] <= w[1]));\n    \n    let data: Vec<i32> = (0..100000).collect();\n    let sum = parallel_sum(&data);\n    println!(\"Sum: {}\", sum);\n}\n\n\nsplit naturally expresses divide-and-conquer parallelism.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\n#[derive(Debug)]\nstruct TreeNode {\n    value: i32,\n    children: Vec<TreeNode>,\n}\n\n// Sequential fallback: sums an entire subtree.\nfn seq_sum(node: &TreeNode) -> i32 {\n    node.value + node.children.iter().map(seq_sum).sum::<i32>()\n}\n\nfn tree_sum(root: &TreeNode) -> i32 {\n    // A piece is (sum absorbed so far, frontier of unvisited subtrees).\n    split((0i32, vec![root]), |(sum, mut nodes): (i32, Vec<&TreeNode>)| {\n        if nodes.len() >= 2 {\n            // Split the frontier in half; the absorbed sum stays left.\n            let right = nodes.split_off(nodes.len() / 2);\n            ((sum, nodes), Some((0, right)))\n        } else if nodes.len() == 1 && !nodes[0].children.is_empty() {\n            // Descend: absorb this node's value, split its children.\n            let node = nodes[0];\n            let mid = node.children.len() / 2;\n            let left: Vec<&TreeNode> = node.children[..mid].iter().collect();\n            let right: Vec<&TreeNode> = node.children[mid..].iter().collect();\n            ((sum + node.value, left), Some((0, right)))\n        } else {\n            ((sum, nodes), None) // Leaf or empty frontier - no splitting\n        }\n    })\n    // Rayon may stop splitting early, so finish each piece sequentially.\n    .map(|(sum, nodes)| sum + nodes.iter().map(|n| seq_sum(n)).sum::<i32>())\n    .sum()\n}\n\nfn main() {\n    let tree = TreeNode {\n        value: 1,\n        children: vec![\n            TreeNode {\n                value: 2,\n                children: vec![\n                    TreeNode { value: 4, children: vec![] },\n                    TreeNode { value: 5, children: vec![] },\n                ],\n            },\n            TreeNode {\n                value: 3,\n                children: vec![\n                    TreeNode { value: 6, children: vec![] },\n                ],\n            },\n        ],\n    };\n    \n    let sum = tree_sum(&tree);\n    println!(\"Tree sum: {}\", sum); // 21\n}\n\n\nsplit enables parallel tree processing by splitting a frontier of unvisited subtrees.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\n// split works on any splittable description of work, not just collections\n// Example: parallel computation on numeric ranges\n\nfn parallel_factorial(n: u64) -> u64 {\n    split((1u64, n), |(start, end)| {\n        if end - start < 1000 {\n            ((start, end), None) // Small range - don't split further\n        } else {\n            let mid = start + (end - start) / 2;\n            ((start, mid), Some((mid + 1, end)))\n        }\n    })\n    // Sequential computation for each piece\n    .map(|(start, end)| (start..=end).product::<u64>())\n    .product()\n}\n\n// Parallel Monte Carlo simulation\nfn parallel_estimate_pi(samples: usize) -> f64 {\n    use std::sync::atomic::{AtomicU64, Ordering};\n    \n    let hits = AtomicU64::new(0);\n    \n    split(0..samples, |range| {\n        if range.end - range.start < 10_000 {\n            (range, None)\n        } else {\n            let mid = range.start + (range.end - range.start) / 2;\n            (range.start..mid, Some(mid..range.end))\n        }\n    })\n    .for_each(|range| {\n        use rand::Rng;\n        let mut rng = rand::thread_rng();\n        let mut local_hits = 0u64;\n        \n        for _ in range {\n            let x: f64 = rng.gen();\n            let y: f64 = rng.gen();\n            if x * x + y * y <= 1.0 {\n                local_hits += 1;\n            }\n        }\n        \n        hits.fetch_add(local_hits, Ordering::Relaxed);\n    });\n    \n    let hits = hits.load(Ordering::Relaxed);\n    4.0 * hits as f64 / samples as f64\n}\n\nfn main() {\n    let fact = parallel_factorial(20);\n    println!(\"20! = {}\", fact);\n    \n    let pi = parallel_estimate_pi(1_000_000);\n    println!(\"π ≈ {}\", pi);\n}\n\n\nsplit parallelizes computations on arbitrary work descriptions, not just collections.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn main() {\n    let data: Vec<i32> = (0..10000).collect();\n    \n    // par_iter with automatic chunking\n    let sum1: i32 = data.par_iter().sum();\n    \n    // split with a custom chunking strategy\n    let sum2: i32 = split(data.as_slice(), |slice| {\n        let threshold = 5000;\n        if slice.len() > threshold {\n            // Uneven split - you choose the boundary\n            let split_at = slice.len() / 3;\n            (&slice[..split_at], Some(&slice[split_at..]))\n        } else {\n            (slice, None)\n        }\n    })\n    .map(|slice| slice.iter().sum::<i32>())\n    .sum();\n    \n    // Split based on a work estimate\n    fn split_by_work(data: &[i32]) -> (&[i32], Option<&[i32]>) {\n        if data.len() < 100 {\n            return (data, None);\n        }\n        \n        // Estimate: heavier work for larger values\n        let total_work: i64 = data.iter().map(|&x| x.abs() as i64).sum();\n        let mut cumulative = 0i64;\n        \n        for (i, &x) in data.iter().enumerate() {\n            cumulative += x.abs() as i64;\n            // i > 0 guards against splitting off an empty left piece,\n            // which would make the right piece identical to the input.\n            if i > 0 && cumulative > total_work / 2 {\n                // Split where the work is balanced\n                return (&data[..i], Some(&data[i..]));\n            }\n        }\n        \n        (data, None)\n    }\n    \n    let sum3: i32 = split(data.as_slice(), split_by_work)\n        .map(|slice| slice.iter().sum::<i32>())\n        .sum();\n    \n    println!(\"Sums: {}, {}, {}\", sum1, sum2, sum3);\n}\n\n\nsplit enables workload-aware parallelization strategies.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\nuse std::collections::HashMap;\n\n// Parallel word frequency counting\nfn parallel_word_count(text: &str) -> HashMap<String, usize> {\n    split(text, |t| {\n        // Split at a word boundary near the middle\n        let bytes = t.as_bytes();\n        let len = bytes.len();\n        \n        if len >= 1000 {\n            let mid = len / 2;\n            for i in mid..len.min(mid + 100) {\n                if bytes[i].is_ascii_whitespace() {\n                    // Safe split point: ASCII whitespace is a char boundary\n                    let (left, right) = t.split_at(i + 1);\n                    return (left, Some(right));\n                }\n            }\n        }\n        \n        (t, None) // Too small, or no nearby boundary - don't split\n    })\n    .map(|t| {\n        // Count words in this chunk\n        let mut counts = HashMap::new();\n        for word in t.split_whitespace() {\n            *counts.entry(word.to_lowercase()).or_insert(0) += 1;\n        }\n        counts\n    })\n    .reduce(HashMap::new, |mut acc, map| {\n        for (word, count) in map {\n            *acc.entry(word).or_insert(0) += count;\n        }\n        acc\n    })\n}\n\nfn main() {\n    let text = \"hello world hello rust world world\";\n    let counts = parallel_word_count(text);\n    println!(\"Word counts: {:?}\", counts);\n}\n\n\nsplit enables parallel processing with custom merging logic.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\nuse rayon::join;\n\nfn main() {\n    // rayon::join: explicit parallel execution of two closures\n    let (a, b) = join(\n        || (0..1000).sum::<i32>(),\n        || (1000..2000).sum::<i32>()\n    );\n    println!(\"join result: {}, {}\", a, b);\n    \n    // split: automatic recursive parallelism\n    let sum: i32 = split(0..2000u32, |range| {\n        if range.end - range.start < 100 {\n            (range, None)\n        } else {\n            let mid = range.start + (range.end - range.start) / 2;\n            (range.start..mid, Some(mid..range.end))\n        }\n    })\n    .map(|range| range.map(|i| i as i32).sum::<i32>())\n    .sum();\n    \n    println!(\"split sum: {}\", sum);\n    \n    // Key differences:\n    // join: You specify exactly what runs in parallel (2 things)\n    // split: You specify how to split, Rayon handles the rest\n    // split can spawn more parallelism recursively\n}\n\n\njoin is for fixed parallelism; split is for recursive divide-and-conquer.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn main() {\n // Combine split with par_iter for nested parallelism\n let matrix: Vec<Vec<i32>> = (0..100)\n .map(|i| (0..100).map(|j| i * 100 + j).collect())\n .collect();\n \n // Outer level: split\n // Inner level: par_iter\n let sum: i32 = split(\n &matrix,\n |rows| {\n if rows.len() > 10 {\n let mid = rows.len() / 2;\n Some((&rows[..mid], &rows[mid..]))\n } else {\n None\n }\n },\n |rows| {\n // Process row chunks with par_iter\n rows.par_iter()\n .flat_map(|row| row.iter())\n .sum()\n }\n ).sum();\n \n println!(\"Matrix sum: {}\", sum);\n \n // Split at both levels\n fn matrix_sum_nested(matrix: &[Vec<i32>]) -> i32 {\n split(\n matrix,\n |rows| {\n if rows.len() > 10 {\n let mid = rows.len() / 2;\n Some((&rows[..mid], &rows[mid..]))\n } else {\n None\n }\n },\n |rows| {\n rows.iter().map(|row| {\n split(\n row.as_slice(),\n |vals| {\n if vals.len() > 10 {\n let mid = vals.len() / 2;\n Some((&vals[..mid], &vals[mid..]))\n } else {\n None\n }\n },\n |vals| vals.iter().sum::<i32>()\n ).sum::<i32>()\n }).sum()\n }\n ).sum()\n }\n}\n\n\nsplit can be nested for multi-level parallelism.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn main() {\n    // split integrates with Rayon's work stealing\n    // Uneven work distributions are automatically balanced\n    \n    fn process_data(data: &[i32]) -> i32 {\n        split(data, |slice| {\n            // Rayon will steal work from other threads\n            // if one thread finishes early\n            if slice.len() > 100 {\n                let mid = slice.len() / 2;\n                (&slice[..mid], Some(&slice[mid..]))\n            } else {\n                (slice, None)\n            }\n        })\n        .map(|slice| {\n            // Simulate uneven work\n            let work = slice.iter().sum::<i32>();\n            // Some chunks take longer\n            if work % 2 == 0 {\n                std::thread::sleep(std::time::Duration::from_micros(1));\n            }\n            work\n        })\n        .sum()\n    }\n    \n    let data: Vec<i32> = (0..10000).collect();\n    let result = process_data(&data);\n    println!(\"Result: {}\", result);\n    \n    // Rayon automatically balances work across threads\n    // even when split creates uneven chunks\n}\n\n\nsplit integrates with Rayon's work-stealing scheduler for load balancing.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\n// split is best for:\n\n// 1. Divide-and-conquer algorithms\nfn parallel_merge_sort(data: &[i32]) -> Vec<i32> {\n if data.len() <= 100 {\n let mut result = data.to_vec();\n result.sort();\n return result;\n }\n \n split(\n data,\n |slice| {\n if slice.len() > 100 {\n let mid = slice.len() / 2;\n Some((&slice[..mid], &slice[mid..]))\n } else {\n None\n }\n },\n |slice| {\n let mut result = slice.to_vec();\n result.sort();\n result\n }\n ).reduce(Vec::new, |mut left, right| {\n // Merge two sorted vectors\n let mut result = Vec::with_capacity(left.len() + right.len());\n let mut l = 0;\n let mut r = 0;\n while l < left.len() && r < right.len() {\n if left[l] <= right[r] {\n result.push(left[l]);\n l += 1;\n } else {\n result.push(right[r]);\n r += 1;\n }\n }\n result.extend(left[l..].iter().chain(right[r..].iter()));\n result\n })\n}\n\n// 2. Tree traversal\nfn sum_tree(root: &TreeNode) -> i32 {\n split(\n root,\n |node| {\n if node.children.is_empty() {\n None\n } else {\n let mid = node.children.len() / 2;\n Some((&node.children[..mid], &node.children[mid..]))\n }\n },\n |node| node.value\n ).sum()\n}\n\n// 3. Non-uniform data structures\nfn sum_sparse(data: &SparseMatrix) -> i32 {\n split(\n data.rows(),\n |rows| {\n // Split based on non-zero count, not row count\n let nnz: usize = rows.iter().map(|r| r.nnz()).sum();\n if nnz < 1000 {\n None\n } else {\n // Find split point with balanced work\n let mut cumulative = 0;\n for (i, row) in rows.iter().enumerate() {\n cumulative += row.nnz();\n if cumulative > nnz / 2 {\n return Some((&rows[..i], &rows[i..]));\n }\n }\n None\n }\n },\n |rows| rows.iter().flat_map(|r| r.values()).sum()\n ).sum()\n}\n\n\nUse split for algorithms where parallelism isn't per-element.
rust\nuse rayon::iter::split;\nuse rayon::prelude::*;\n\nfn main() {\n    // The splitter closure returns (D, Option<D>):\n    // - (left, Some(right)): split into two parts\n    // - (data, None): don't split this piece any further\n    \n    let result: i32 = split(0..100u32, |range| {\n        // This closure decides HOW to split.\n        // Rayon calls it while the scheduler wants more parallelism;\n        // it is NOT guaranteed to run until you return None, so the\n        // processing closure must handle any piece completely.\n        if range.end - range.start < 10 {\n            (range, None) // Small enough - don't split\n        } else {\n            let mid = range.start + (range.end - range.start) / 2;\n            (range.start..mid, Some(mid..range.end))\n        }\n    })\n    // This closure processes each piece sequentially\n    .map(|range| range.map(|i| i as i32).sum::<i32>())\n    .sum(); // The final reduction combines all results\n    \n    println!(\"Result: {}\", result);\n    \n    // Important: both closures must be:\n    // - Pure (no side effects visible to other pieces)\n    // - Thread-safe (can run in parallel)\n    // - Composable (results can be combined)\n}\n\n\nThe splitter and the per-piece processor must be pure and thread-safe.
Fn(D) -> (D, Option<D>) - returns the piece to keep plus an optional split-off piece; (data, None) stops splitting\n- Piece processing: ordinary parallel iterator adaptors such as map(): Fn(D) -> R - process each piece\n- Returns a parallel iterator over the pieces\n- Automatically work-steals across threads\n- Integrates with sum(), reduce(), for_each(), etc.\n\npar_iter characteristics:\n- Operates on collections directly\n- One parallel task per element\n- Automatic chunking based on collection size\n- Simpler to use for element-wise operations\n- Less control over parallelism granularity\n\nUse split when:\n- Divide-and-conquer algorithms (mergesort, quicksort)\n- Tree or graph traversal\n- Non-uniform workloads (balance by computation, not elements)\n- Processing non-collection data (ranges, recursive structures)\n- Custom chunk boundaries (avoid breaking atoms)\n- Hierarchical data structures\n\nUse par_iter when:\n- Simple element-wise operations\n- Collection processing with uniform work\n- Map/filter/reduce patterns\n- When default parallelism is sufficient\n\nKey insight: split is Rayon's mechanism for expressing divide-and-conquer parallelism where the parallelism structure doesn't match a collection's element structure. While par_iter assumes parallelism per-element (process each item independently), split lets you define how data divides — whether that's splitting an array in half for mergesort, descending into tree branches, or dividing a number range for Monte Carlo sampling. The splitter is invoked on demand while Rayon's scheduler wants more parallelism (and splitting stops for a piece once it returns (data, None)), creating a tree of parallel tasks that the work-stealing scheduler distributes across threads; because splitting may stop early, each piece must be processed completely by the downstream adaptors. This enables parallelization strategies that would be awkward or impossible with par_iter alone, such as parallelizing recursive algorithms, processing sparse or hierarchical data, or creating work-balancing splits based on estimated computation cost rather than element count.","path":"/articles/281_rayon_iter_split.md"}}