pub trait ParallelIterator:Sized +Send { typeItem:Send;Show 59 methods
// Required method fndrive_unindexed<C>(self, consumer: C) -> C::Resultwhere C:UnindexedConsumer<Self::Item>; // Provided methods fnfor_each<OP>(self, op: OP)where OP:Fn(Self::Item) +Sync +Send { ... } fnfor_each_with<OP, T>(self, init: T, op: OP)where OP:Fn(&mut T, Self::Item) +Sync +Send, T:Send +Clone { ... } fnfor_each_init<OP, INIT, T>(self, init: INIT, op: OP)where OP:Fn(&mut T, Self::Item) +Sync +Send, INIT:Fn() -> T +Sync +Send { ... } fntry_for_each<OP, R>(self, op: OP) -> Rwhere OP:Fn(Self::Item) -> R +Sync +Send, R: Try<Output =()> +Send { ... } fntry_for_each_with<OP, T, R>(self, init: T, op: OP) -> Rwhere OP:Fn(&mut T, Self::Item) -> R +Sync +Send, T:Send +Clone, R: Try<Output =()> +Send { ... } fntry_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> Rwhere OP:Fn(&mut T, Self::Item) -> R +Sync +Send, INIT:Fn() -> T +Sync +Send, R: Try<Output =()> +Send { ... } fncount(self) ->usize { ... } fnmap<F, R>(self, map_op: F) ->Map<Self, F>where F:Fn(Self::Item) -> R +Sync +Send, R:Send { ... } fnmap_with<F, T, R>(self, init: T, map_op: F) ->MapWith<Self, T, F>where F:Fn(&mut T, Self::Item) -> R +Sync +Send, T:Send +Clone, R:Send { ... } fnmap_init<F, INIT, T, R>( self, init: INIT, map_op: F, ) ->MapInit<Self, INIT, F>where F:Fn(&mut T, Self::Item) -> R +Sync +Send, INIT:Fn() -> T +Sync +Send, R:Send { ... } fncloned<'a, T>(self) ->Cloned<Self>where T: 'a +Clone +Send, Self:ParallelIterator<Item =&'a T> { ... } fncopied<'a, T>(self) ->Copied<Self>where T: 'a +Copy +Send, Self:ParallelIterator<Item =&'a T> { ... } fninspect<OP>(self, inspect_op: OP) ->Inspect<Self, OP>where OP:Fn(&Self::Item) +Sync +Send { ... } fnupdate<F>(self, update_op: F) ->Update<Self, F>where F:Fn(&mut Self::Item) +Sync +Send { ... } fnfilter<P>(self, filter_op: P) ->Filter<Self, P>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnfilter_map<P, R>(self, filter_op: P) ->FilterMap<Self, P>where P:Fn(Self::Item) ->Option<R> +Sync +Send, R:Send { ... 
} fnflat_map<F, PI>(self, map_op: F) ->FlatMap<Self, F>where F:Fn(Self::Item) -> PI +Sync +Send, PI:IntoParallelIterator { ... } fnflat_map_iter<F, SI>(self, map_op: F) ->FlatMapIter<Self, F>where F:Fn(Self::Item) -> SI +Sync +Send, SI:IntoIterator, SI::Item:Send { ... } fnflatten(self) ->Flatten<Self>where Self::Item:IntoParallelIterator { ... } fnflatten_iter(self) ->FlattenIter<Self>where Self::Item:IntoIterator, <Self::Item asIntoIterator>::Item:Send { ... } fnreduce<OP, ID>(self, identity: ID, op: OP) -> Self::Itemwhere OP:Fn(Self::Item, Self::Item) -> Self::Item +Sync +Send, ID:Fn() -> Self::Item +Sync +Send { ... } fnreduce_with<OP>(self, op: OP) ->Option<Self::Item>where OP:Fn(Self::Item, Self::Item) -> Self::Item +Sync +Send { ... } fntry_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Itemwhere OP:Fn(T, T) -> Self::Item +Sync +Send, ID:Fn() -> T +Sync +Send, Self::Item: Try<Output = T> { ... } fntry_reduce_with<T, OP>(self, op: OP) ->Option<Self::Item>where OP:Fn(T, T) -> Self::Item +Sync +Send, Self::Item: Try<Output = T> { ... } fnfold<T, ID, F>(self, identity: ID, fold_op: F) ->Fold<Self, ID, F>where F:Fn(T, Self::Item) -> T +Sync +Send, ID:Fn() -> T +Sync +Send, T:Send { ... } fnfold_with<F, T>(self, init: T, fold_op: F) ->FoldWith<Self, T, F>where F:Fn(T, Self::Item) -> T +Sync +Send, T:Send +Clone { ... } fntry_fold<T, R, ID, F>( self, identity: ID, fold_op: F, ) ->TryFold<Self, R, ID, F>where F:Fn(T, Self::Item) -> R +Sync +Send, ID:Fn() -> T +Sync +Send, R: Try<Output = T> +Send { ... } fntry_fold_with<F, T, R>( self, init: T, fold_op: F, ) ->TryFoldWith<Self, R, F>where F:Fn(T, Self::Item) -> R +Sync +Send, R: Try<Output = T> +Send, T:Clone +Send { ... } fnsum<S>(self) -> Swhere S:Send +Sum<Self::Item> +Sum<S> { ... } fnproduct<P>(self) -> Pwhere P:Send +Product<Self::Item> +Product<P> { ... } fnmin(self) ->Option<Self::Item>where Self::Item:Ord { ... 
} fnmin_by<F>(self, f: F) ->Option<Self::Item>where F:Sync +Send +Fn(&Self::Item, &Self::Item) ->Ordering { ... } fnmin_by_key<K, F>(self, f: F) ->Option<Self::Item>where K:Ord +Send, F:Sync +Send +Fn(&Self::Item) -> K { ... } fnmax(self) ->Option<Self::Item>where Self::Item:Ord { ... } fnmax_by<F>(self, f: F) ->Option<Self::Item>where F:Sync +Send +Fn(&Self::Item, &Self::Item) ->Ordering { ... } fnmax_by_key<K, F>(self, f: F) ->Option<Self::Item>where K:Ord +Send, F:Sync +Send +Fn(&Self::Item) -> K { ... } fnchain<C>(self, chain: C) ->Chain<Self, C::Iter>where C:IntoParallelIterator<Item = Self::Item> { ... } fnfind_any<P>(self, predicate: P) ->Option<Self::Item>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnfind_first<P>(self, predicate: P) ->Option<Self::Item>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnfind_last<P>(self, predicate: P) ->Option<Self::Item>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnfind_map_any<P, R>(self, predicate: P) ->Option<R>where P:Fn(Self::Item) ->Option<R> +Sync +Send, R:Send { ... } fnfind_map_first<P, R>(self, predicate: P) ->Option<R>where P:Fn(Self::Item) ->Option<R> +Sync +Send, R:Send { ... } fnfind_map_last<P, R>(self, predicate: P) ->Option<R>where P:Fn(Self::Item) ->Option<R> +Sync +Send, R:Send { ... } fnany<P>(self, predicate: P) ->boolwhere P:Fn(Self::Item) ->bool +Sync +Send { ... } fnall<P>(self, predicate: P) ->boolwhere P:Fn(Self::Item) ->bool +Sync +Send { ... } fnwhile_some<T>(self) ->WhileSome<Self>where Self:ParallelIterator<Item =Option<T>>, T:Send { ... } fnpanic_fuse(self) ->PanicFuse<Self> { ... } fncollect<C>(self) -> Cwhere C:FromParallelIterator<Self::Item> { ... } fnunzip<A, B, FromA, FromB>(self) ->(FromA, FromB)where Self:ParallelIterator<Item =(A, B)>, FromA:Default +Send +ParallelExtend<A>, FromB:Default +Send +ParallelExtend<B>, A:Send, B:Send { ... 
} fnpartition<A, B, P>(self, predicate: P) ->(A, B)where A:Default +Send +ParallelExtend<Self::Item>, B:Default +Send +ParallelExtend<Self::Item>, P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnpartition_map<A, B, P, L, R>(self, predicate: P) ->(A, B)where A:Default +Send +ParallelExtend<L>, B:Default +Send +ParallelExtend<R>, P:Fn(Self::Item) ->Either<L, R> +Sync +Send, L:Send, R:Send { ... } fnintersperse(self, element: Self::Item) ->Intersperse<Self>where Self::Item:Clone { ... } fntake_any(self, n:usize) ->TakeAny<Self> { ... } fnskip_any(self, n:usize) ->SkipAny<Self> { ... } fntake_any_while<P>(self, predicate: P) ->TakeAnyWhile<Self, P>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fnskip_any_while<P>(self, predicate: P) ->SkipAnyWhile<Self, P>where P:Fn(&Self::Item) ->bool +Sync +Send { ... } fncollect_vec_list(self) ->LinkedList<Vec<Self::Item>> { ... } fnopt_len(&self) ->Option<usize> { ... }}
Expand description
Parallel version of the standard iterator trait.
The combinators on this trait are available on all parallel iterators. Additional methods can be found on the `IndexedParallelIterator` trait: those methods are only available for parallel iterators where the number of items is known in advance (so, e.g., after invoking `filter`, those methods become unavailable).
For examples of using parallel iterators, see the docs on the `iter` module.
Required Associated Types§
Required Methods§
Sourcefndrive_unindexed<C>(self, consumer: C) -> C::Resultwhere C:UnindexedConsumer<Self::Item>,
fndrive_unindexed<C>(self, consumer: C) -> C::Resultwhere C:UnindexedConsumer<Self::Item>,
Internal method used to define the behavior of this parallel iterator. You should not need to call this directly.
This method causes the iterator `self` to start producing items and to feed them to the consumer `consumer` one by one. It may split the consumer before doing so to create the opportunity to produce in parallel.
See the README for more details on the internals of parallel iterators.
Provided Methods§
Sourcefnfor_each<OP>(self, op: OP)
fnfor_each<OP>(self, op: OP)
Executes `OP` on each item produced by the iterator, in parallel.
§Examples
userayon::prelude::*;(0..100).into_par_iter().for_each(|x|println!("{:?}", x));
Sourcefnfor_each_with<OP, T>(self, init: T, op: OP)
fnfor_each_with<OP, T>(self, init: T, op: OP)
Executes `OP` on the given `init` value with each item produced by the iterator, in parallel.

The `init` value will be cloned only as needed to be paired with the group of items in each rayon job. It does not require the type to be `Sync`.
§Examples
usestd::sync::mpsc::channel;userayon::prelude::*;let(sender, receiver) = channel();(0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());letmutres: Vec<_> = receiver.iter().collect();res.sort();assert_eq!(&res[..],&[0,1,2,3,4])
Sourcefnfor_each_init<OP, INIT, T>(self, init: INIT, op: OP)
fnfor_each_init<OP, INIT, T>(self, init: INIT, op: OP)
Executes `OP` on a value returned by `init` with each item produced by the iterator, in parallel.

The `init` function will be called only as needed for a value to be paired with the group of items in each rayon job. There is no constraint on that returned type at all!
§Examples
userand::Rng;userayon::prelude::*;letmutv =vec![0u8;1_000_000];v.par_chunks_mut(1000) .for_each_init( || rand::thread_rng(), |rng, chunk| rng.fill(chunk), );// There's a remote chance that this will fail...foriin0u8..=255{assert!(v.contains(&i));}
Sourcefntry_for_each<OP, R>(self, op: OP) -> R
fntry_for_each<OP, R>(self, op: OP) -> R
Executes a fallible `OP` on each item produced by the iterator, in parallel.

If the `OP` returns `Result::Err` or `Option::None`, we will attempt to stop processing the rest of the items in the iterator as soon as possible, and we will return that terminating value. Otherwise, we will return an empty `Result::Ok(())` or `Option::Some(())`. If there are multiple errors in parallel, it is not specified which will be returned.
§Examples
userayon::prelude::*;usestd::io::{self, Write};// This will stop iteration early if there's any write error, like// having piped output get closed on the other end.(0..100).into_par_iter() .try_for_each(|x|writeln!(io::stdout(),"{:?}", x)) .expect("expected no write errors");
Sourcefntry_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
fntry_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
Executes a fallible `OP` on the given `init` value with each item produced by the iterator, in parallel.

This combines the `init` semantics of `for_each_with()` and the failure semantics of `try_for_each()`.
§Examples
usestd::sync::mpsc::channel;userayon::prelude::*;let(sender, receiver) = channel();(0..5).into_par_iter() .try_for_each_with(sender, |s, x| s.send(x)) .expect("expected no send errors");letmutres: Vec<_> = receiver.iter().collect();res.sort();assert_eq!(&res[..],&[0,1,2,3,4])
Sourcefntry_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
fntry_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
Executes a fallible `OP` on a value returned by `init` with each item produced by the iterator, in parallel.

This combines the `init` semantics of `for_each_init()` and the failure semantics of `try_for_each()`.
§Examples
userand::Rng;userayon::prelude::*;letmutv =vec![0u8;1_000_000];v.par_chunks_mut(1000) .try_for_each_init( || rand::thread_rng(), |rng, chunk| rng.try_fill(chunk), ) .expect("expected no rand errors");// There's a remote chance that this will fail...foriin0u8..=255{assert!(v.contains(&i));}
Sourcefncount(self) ->usize
fncount(self) ->usize
Counts the number of items in this parallel iterator.
§Examples
userayon::prelude::*;letcount = (0..100).into_par_iter().count();assert_eq!(count,100);
Sourcefnmap<F, R>(self, map_op: F) ->Map<Self, F>
fnmap<F, R>(self, map_op: F) ->Map<Self, F>
Applies `map_op` to each item of this iterator, producing a new iterator with the results.
§Examples
userayon::prelude::*;letmutpar_iter = (0..5).into_par_iter().map(|x| x *2);letdoubles: Vec<_> = par_iter.collect();assert_eq!(&doubles[..],&[0,2,4,6,8]);
Sourcefnmap_with<F, T, R>(self, init: T, map_op: F) ->MapWith<Self, T, F>
fnmap_with<F, T, R>(self, init: T, map_op: F) ->MapWith<Self, T, F>
Applies `map_op` to the given `init` value with each item of this iterator, producing a new iterator with the results.

The `init` value will be cloned only as needed to be paired with the group of items in each rayon job. It does not require the type to be `Sync`.
§Examples
usestd::sync::mpsc::channel;userayon::prelude::*;let(sender, receiver) = channel();leta: Vec<_> = (0..5) .into_par_iter()// iterating over i32.map_with(sender, |s, x| { s.send(x).unwrap();// sending i32 values through the channelx// returning i32}) .collect();// collecting the returned values into a vectorletmutb: Vec<_> = receiver.iter()// iterating over the values in the channel.collect();// and collecting themb.sort();assert_eq!(a, b);
Sourcefnmap_init<F, INIT, T, R>( self, init: INIT, map_op: F,) ->MapInit<Self, INIT, F>
fnmap_init<F, INIT, T, R>( self, init: INIT, map_op: F,) ->MapInit<Self, INIT, F>
Applies `map_op` to a value returned by `init` with each item of this iterator, producing a new iterator with the results.

The `init` function will be called only as needed for a value to be paired with the group of items in each rayon job. There is no constraint on that returned type at all!
§Examples
userand::Rng;userayon::prelude::*;leta: Vec<_> = (1i32..1_000_000) .into_par_iter() .map_init( || rand::thread_rng(),// get the thread-local RNG|rng, x|ifrng.gen() {// randomly negate items-x }else{ x }, ).collect();// There's a remote chance that this will fail...assert!(a.iter().any(|&x| x <0));assert!(a.iter().any(|&x| x >0));
Sourcefncloned<'a, T>(self) ->Cloned<Self>
fncloned<'a, T>(self) ->Cloned<Self>
Creates an iterator which clones all of its elements. This may be useful when you have an iterator over `&T`, but you need `T`, and that type implements `Clone`. See also `copied()`.
§Examples
userayon::prelude::*;leta = [1,2,3];letv_cloned: Vec<_> = a.par_iter().cloned().collect();// cloned is the same as .map(|&x| x), for integersletv_map: Vec<_> = a.par_iter().map(|&x| x).collect();assert_eq!(v_cloned,vec![1,2,3]);assert_eq!(v_map,vec![1,2,3]);
Sourcefncopied<'a, T>(self) ->Copied<Self>
fncopied<'a, T>(self) ->Copied<Self>
Creates an iterator which copies all of its elements. This may be useful when you have an iterator over `&T`, but you need `T`, and that type implements `Copy`. See also `cloned()`.
§Examples
userayon::prelude::*;leta = [1,2,3];letv_copied: Vec<_> = a.par_iter().copied().collect();// copied is the same as .map(|&x| x), for integersletv_map: Vec<_> = a.par_iter().map(|&x| x).collect();assert_eq!(v_copied,vec![1,2,3]);assert_eq!(v_map,vec![1,2,3]);
Sourcefninspect<OP>(self, inspect_op: OP) ->Inspect<Self, OP>
fninspect<OP>(self, inspect_op: OP) ->Inspect<Self, OP>
Applies `inspect_op` to a reference to each item of this iterator, producing a new iterator passing through the original items. This is often useful for debugging to see what's happening in iterator stages.
§Examples
userayon::prelude::*;leta = [1,4,2,3];// this iterator sequence is complex.letsum = a.par_iter() .cloned() .filter(|&x| x %2==0) .reduce(||0, |sum, i| sum + i);println!("{}", sum);// let's add some inspect() calls to investigate what's happeningletsum = a.par_iter() .cloned() .inspect(|x|println!("about to filter: {}", x)) .filter(|&x| x %2==0) .inspect(|x|println!("made it through filter: {}", x)) .reduce(||0, |sum, i| sum + i);println!("{}", sum);
Sourcefnupdate<F>(self, update_op: F) ->Update<Self, F>
fnupdate<F>(self, update_op: F) ->Update<Self, F>
Mutates each item of this iterator before yielding it.
§Examples
userayon::prelude::*;letpar_iter = (0..5).into_par_iter().update(|x| {*x*=2;});letdoubles: Vec<_> = par_iter.collect();assert_eq!(&doubles[..],&[0,2,4,6,8]);
Sourcefnfilter<P>(self, filter_op: P) ->Filter<Self, P>
fnfilter<P>(self, filter_op: P) ->Filter<Self, P>
Applies `filter_op` to each item of this iterator, producing a new iterator with only the items that gave `true` results.
§Examples
userayon::prelude::*;letmutpar_iter = (0..10).into_par_iter().filter(|x| x %2==0);leteven_numbers: Vec<_> = par_iter.collect();assert_eq!(&even_numbers[..],&[0,2,4,6,8]);
Sourcefnfilter_map<P, R>(self, filter_op: P) ->FilterMap<Self, P>
fnfilter_map<P, R>(self, filter_op: P) ->FilterMap<Self, P>
Applies `filter_op` to each item of this iterator to get an `Option`, producing a new iterator with only the items from `Some` results.
§Examples
userayon::prelude::*;letmutpar_iter = (0..10).into_par_iter() .filter_map(|x| {ifx %2==0{Some(x *3) }else{None} });leteven_numbers: Vec<_> = par_iter.collect();assert_eq!(&even_numbers[..],&[0,6,12,18,24]);
Sourcefnflat_map<F, PI>(self, map_op: F) ->FlatMap<Self, F>
fnflat_map<F, PI>(self, map_op: F) ->FlatMap<Self, F>
Applies `map_op` to each item of this iterator to get nested parallel iterators, producing a new parallel iterator that flattens these back into one.

See also `flat_map_iter`.
§Examples
userayon::prelude::*;leta = [[1,2], [3,4], [5,6], [7,8]];letpar_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());letvec: Vec<_> = par_iter.collect();assert_eq!(&vec[..],&[1,2,3,4,5,6,7,8]);
Sourcefnflat_map_iter<F, SI>(self, map_op: F) ->FlatMapIter<Self, F>
fnflat_map_iter<F, SI>(self, map_op: F) ->FlatMapIter<Self, F>
Applies `map_op` to each item of this iterator to get nested serial iterators, producing a new parallel iterator that flattens these back into one.
§`flat_map_iter` versus `flat_map`

These two methods are similar but behave slightly differently. With `flat_map`, each of the nested iterators must be a parallel iterator, and they will be further split up with nested parallelism. With `flat_map_iter`, each nested iterator is a sequential `Iterator`, and we only parallelize between them, while the items produced by each nested iterator are processed sequentially.

When choosing between these methods, consider whether nested parallelism suits the potential iterators at hand. If there's little computation involved, or its length is much less than the outer parallel iterator, then it may perform better to avoid the overhead of parallelism, just flattening sequentially with `flat_map_iter`. If there is a lot of computation, potentially outweighing the outer parallel iterator, then the nested parallelism of `flat_map` may be worthwhile.
§Examples
userayon::prelude::*;usestd::cell::RefCell;leta = [[1,2], [3,4], [5,6], [7,8]];letpar_iter = a.par_iter().flat_map_iter(|a| {// The serial iterator doesn't have to be thread-safe, just its items.letcell_iter = RefCell::new(a.iter().cloned()); std::iter::from_fn(move|| cell_iter.borrow_mut().next())});letvec: Vec<_> = par_iter.collect();assert_eq!(&vec[..],&[1,2,3,4,5,6,7,8]);
Sourcefnflatten(self) ->Flatten<Self>where Self::Item:IntoParallelIterator,
fnflatten(self) ->Flatten<Self>where Self::Item:IntoParallelIterator,
An adaptor that flattens parallel-iterable `Item`s into one large iterator.

See also `flatten_iter`.
§Examples
userayon::prelude::*;letx: Vec<Vec<_>> =vec![vec![1,2],vec![3,4]];lety: Vec<_> = x.into_par_iter().flatten().collect();assert_eq!(y,vec![1,2,3,4]);
Sourcefnflatten_iter(self) ->FlattenIter<Self>
fnflatten_iter(self) ->FlattenIter<Self>
An adaptor that flattens serial-iterable `Item`s into one large iterator.

See also `flatten` and the analogous comparison of `flat_map_iter` versus `flat_map`.
§Examples
userayon::prelude::*;letx: Vec<Vec<_>> =vec![vec![1,2],vec![3,4]];letiters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();lety: Vec<_> = iters.into_par_iter().flatten_iter().collect();assert_eq!(y,vec![1,2,3,4]);
Sourcefnreduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
fnreduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
Reduces the items in the iterator into one item using `op`. The argument `identity` should be a closure that can produce an "identity" value which may be inserted into the sequence as needed to create opportunities for parallel execution. So, for example, if you are doing a summation, then `identity()` ought to produce something that represents the zero for your type (but consider just calling `sum()` in that case).
§Examples
// Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`// and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`// where the first/second elements are summed separately.userayon::prelude::*;letsums = [(0,1), (5,6), (16,2), (8,9)] .par_iter()// iterating over &(i32, i32).cloned()// iterating over (i32, i32).reduce(|| (0,0),// the "identity" is 0 in both columns|a, b| (a.0+ b.0, a.1+ b.1));assert_eq!(sums, (0+5+16+8,1+6+2+9));
Note: unlike a sequential `fold` operation, the order in which `op` will be applied to reduce the result is not fully specified. So `op` should be associative or else the results will be non-deterministic. And of course `identity()` should produce a true identity.
Sourcefnreduce_with<OP>(self, op: OP) ->Option<Self::Item>
fnreduce_with<OP>(self, op: OP) ->Option<Self::Item>
Reduces the items in the iterator into one item using `op`. If the iterator is empty, `None` is returned; otherwise, `Some` is returned.

This version of `reduce` is simple but somewhat less efficient. If possible, it is better to call `reduce()`, which requires an identity element.
§Examples
userayon::prelude::*;letsums = [(0,1), (5,6), (16,2), (8,9)] .par_iter()// iterating over &(i32, i32).cloned()// iterating over (i32, i32).reduce_with(|a, b| (a.0+ b.0, a.1+ b.1)) .unwrap();assert_eq!(sums, (0+5+16+8,1+6+2+9));
Note: unlike a sequential `fold` operation, the order in which `op` will be applied to reduce the result is not fully specified. So `op` should be associative or else the results will be non-deterministic.
Sourcefntry_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
fntry_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
Reduces the items in the iterator into one item using a fallible `op`. The `identity` argument is used the same way as in `reduce()`.

If a `Result::Err` or `Option::None` item is found, or if `op` reduces to one, we will attempt to stop processing the rest of the items in the iterator as soon as possible, and we will return that terminating value. Otherwise, we will return the final reduced `Result::Ok(T)` or `Option::Some(T)`. If there are multiple errors in parallel, it is not specified which will be returned.
§Examples
userayon::prelude::*;// Compute the sum of squares, being careful about overflow.fnsum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) ->Option<i32> { iter.into_par_iter() .map(|i| i.checked_mul(i))// square each item,.try_reduce(||0, i32::checked_add)// and add them up!}assert_eq!(sum_squares(0..5),Some(0+1+4+9+16));// The sum might overflowassert_eq!(sum_squares(0..10_000),None);// Or the squares might overflow before it even reaches `try_reduce`assert_eq!(sum_squares(1_000_000..1_000_001),None);
Sourcefntry_reduce_with<T, OP>(self, op: OP) ->Option<Self::Item>
fntry_reduce_with<T, OP>(self, op: OP) ->Option<Self::Item>
Reduces the items in the iterator into one item using a fallible `op`.

Like `reduce_with()`, if the iterator is empty, `None` is returned; otherwise, `Some` is returned. Beyond that, it behaves like `try_reduce()` for handling `Err`/`None`.
For instance, withOption
items, the return value may be:
None
, the iterator was emptySome(None)
, we stopped after encounteringNone
.Some(Some(x))
, the entire iterator reduced tox
.
With `Result` items, the nesting is more obvious:

- `None`, the iterator was empty
- `Some(Err(e))`, we stopped after encountering an error `e`
- `Some(Ok(x))`, the entire iterator reduced to `x`
§Examples
userayon::prelude::*;letfiles = ["/dev/null","/does/not/exist"];// Find the biggest filefiles.into_par_iter() .map(|path| std::fs::metadata(path).map(|m| (path, m.len()))) .try_reduce_with(|a, b| {Ok(ifa.1>= b.1{ a }else{ b }) }) .expect("Some value, since the iterator is not empty") .expect_err("not found");
Sourcefnfold<T, ID, F>(self, identity: ID, fold_op: F) ->Fold<Self, ID, F>
fnfold<T, ID, F>(self, identity: ID, fold_op: F) ->Fold<Self, ID, F>
Parallel fold is similar to sequential fold except that the sequence of items may be subdivided before it is folded. Consider a list of numbers like `22 3 77 89 46`. If you used sequential fold to add them (`fold(0, |a,b| a+b)`), you would wind up first adding 0 + 22, then 22 + 3, then 25 + 77, and so forth. The parallel fold works similarly except that it first breaks up your list into sublists, and hence instead of yielding up a single sum at the end, it yields up multiple sums. The number of results is nondeterministic, as is the point where the breaks occur.

So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on our example list, we might wind up with a sequence of two numbers, like so:

```
22 3 77 89 46
      |     |
    102   135
```

Or perhaps these three numbers:

```
22 3 77 89 46
      |  |  |
    102  89 46
```

In general, Rayon will attempt to find good breaking points that keep all of your cores busy.
§Fold versus reduce

The `fold()` and `reduce()` methods each take an identity element and a combining function, but they operate rather differently.

`reduce()` requires that the identity function has the same type as the things you are iterating over, and it fully reduces the list of items into a single item. So, for example, imagine we are iterating over a list of bytes `bytes: [128_u8, 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b: u8| a + b)`, we would get an overflow. This is because `0`, `a`, and `b` here are all bytes, just like the numbers in the list (I wrote the types explicitly above, but those are the only types you can use). To avoid the overflow, we would need to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a, b| a + b)`, in which case our result would be `256`.

In contrast, with `fold()`, the identity function does not have to have the same type as the things you are iterating over, and you potentially get back many results. So, if we continue with the `bytes` example from the previous paragraph, we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to convert our bytes into `u32`. And of course we might not get back a single sum.

There is a more subtle distinction as well, though it's actually implied by the above points. When you use `reduce()`, your reduction function is sometimes called with values that were never part of your original parallel iterator (for example, both the left and right might be a partial sum). With `fold()`, in contrast, the left value in the fold function is always the accumulator, and the right value is always from your original sequence.
§Fold vs Map/Reduce

Fold makes sense if you have some operation where it is cheaper to create groups of elements at a time. For example, imagine collecting characters into a string. If you were going to use map/reduce, you might try this:
userayon::prelude::*;lets = ['a','b','c','d','e'] .par_iter() .map(|c:&char|format!("{}", c)) .reduce(|| String::new(), |muta: String, b: String| { a.push_str(&b); a });assert_eq!(s,"abcde");
Because reduce produces the same type of element as its input, you have to first map each character into a string, and then you can reduce them. This means we create one string per element in our iterator — not so great. Using `fold`, we can do this instead:
userayon::prelude::*;lets = ['a','b','c','d','e'] .par_iter() .fold(|| String::new(), |muts: String, c:&char| { s.push(*c); s }) .reduce(|| String::new(), |muta: String, b: String| { a.push_str(&b); a });assert_eq!(s,"abcde");
Now `fold` will process groups of our characters at a time, and we only make one string per group. We should wind up with some small-ish number of strings roughly proportional to the number of CPUs you have (it will ultimately depend on how busy your processors are). Note that we still need to do a reduce afterwards to combine those groups of strings into a single string.

You could use a similar trick to save partial results (e.g., a cache) or something similar.
§Combining fold with other operations

You can combine `fold` with `reduce` if you want to produce a single value. This is then roughly equivalent to a map/reduce combination in effect:
userayon::prelude::*;letbytes =0..22_u8;letsum = bytes.into_par_iter() .fold(||0_u32, |a: u32, b: u8| a + (basu32)) .sum::<u32>();assert_eq!(sum, (0..22).sum());// compare to sequential
Sourcefnfold_with<F, T>(self, init: T, fold_op: F) ->FoldWith<Self, T, F>
fnfold_with<F, T>(self, init: T, fold_op: F) ->FoldWith<Self, T, F>
Applies `fold_op` to the given `init` value with each item of this iterator, finally producing the value for further use.

This works essentially like `fold(|| init.clone(), fold_op)`, except it doesn't require the `init` type to be `Sync`, nor any other form of added synchronization.
§Examples
userayon::prelude::*;letbytes =0..22_u8;letsum = bytes.into_par_iter() .fold_with(0_u32, |a: u32, b: u8| a + (basu32)) .sum::<u32>();assert_eq!(sum, (0..22).sum());// compare to sequential
Sourcefntry_fold<T, R, ID, F>( self, identity: ID, fold_op: F,) ->TryFold<Self, R, ID, F>
fntry_fold<T, R, ID, F>( self, identity: ID, fold_op: F,) ->TryFold<Self, R, ID, F>
Performs a fallible parallel fold.

This is a variation of `fold()` for operations which can fail with `Option::None` or `Result::Err`. The first such failure stops processing the local set of items, without affecting other folds in the iterator's subdivisions.

Often, `try_fold()` will be followed by `try_reduce()` for a final reduction and global short-circuiting effect.
§Examples
userayon::prelude::*;letbytes =0..22_u8;letsum = bytes.into_par_iter() .try_fold(||0_u32, |a: u32, b: u8| a.checked_add(basu32)) .try_reduce(||0, u32::checked_add);assert_eq!(sum,Some((0..22).sum()));// compare to sequential
Sourcefntry_fold_with<F, T, R>(self, init: T, fold_op: F) ->TryFoldWith<Self, R, F>
fntry_fold_with<F, T, R>(self, init: T, fold_op: F) ->TryFoldWith<Self, R, F>
Performs a fallible parallel fold with a cloneable `init` value.

This combines the `init` semantics of `fold_with()` and the failure semantics of `try_fold()`.
userayon::prelude::*;letbytes =0..22_u8;letsum = bytes.into_par_iter() .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(basu32)) .try_reduce(||0, u32::checked_add);assert_eq!(sum,Some((0..22).sum()));// compare to sequential
Sourcefnsum<S>(self) -> S
fnsum<S>(self) -> S
Sums up the items in the iterator.

Note that the order in which items will be reduced is not specified, so if the `+` operator is not truly associative (as is the case for floating point numbers), then the results are not fully deterministic.

Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`, except that the type of `0` and the `+` operation may vary depending on the type of value being produced.
§Examples
userayon::prelude::*;leta = [1,5,7];letsum: i32 = a.par_iter().sum();assert_eq!(sum,13);
Sourcefnproduct<P>(self) -> P
fnproduct<P>(self) -> P
Multiplies all the items in the iterator.

Note that the order in which items will be reduced is not specified, so if the `*` operator is not truly associative (as is the case for floating point numbers), then the results are not fully deterministic.

Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`, except that the type of `1` and the `*` operation may vary depending on the type of value being produced.
§Examples
userayon::prelude::*;fnfactorial(n: u32) -> u32 { (1..n+1).into_par_iter().product()}assert_eq!(factorial(0),1);assert_eq!(factorial(1),1);assert_eq!(factorial(5),120);
Sourcefnmin(self) ->Option<Self::Item>
fnmin(self) ->Option<Self::Item>
Computes the minimum of all the items in the iterator. If the iterator is empty, `None` is returned; otherwise, `Some(min)` is returned.

Note that the order in which the items will be reduced is not specified, so if the `Ord` impl is not truly associative, then the results are not deterministic.

Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
§Examples
userayon::prelude::*;leta = [45,74,32];assert_eq!(a.par_iter().min(),Some(&32));letb: [i32;0] = [];assert_eq!(b.par_iter().min(),None);
Sourcefnmin_by<F>(self, f: F) ->Option<Self::Item>
fnmin_by<F>(self, f: F) ->Option<Self::Item>
Computes the minimum of all the items in the iterator with respect to the given comparison function. If the iterator is empty, `None` is returned; otherwise, `Some(min)` is returned.

Note that the order in which the items will be reduced is not specified, so if the comparison function is not associative, then the results are not deterministic.
§Examples
userayon::prelude::*;leta = [-3_i32,77,53,240, -1];assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)),Some(&-3));
Sourcefnmin_by_key<K, F>(self, f: F) ->Option<Self::Item>
fnmin_by_key<K, F>(self, f: F) ->Option<Self::Item>
Computes the item that yields the minimum value for the given function. If the iterator is empty, `None` is returned; otherwise, `Some(item)` is returned.

Note that the order in which the items will be reduced is not specified, so if the `Ord` impl is not truly associative, then the results are not deterministic.
§Examples
userayon::prelude::*;leta = [-3_i32,34,2,5, -10, -3, -23];assert_eq!(a.par_iter().min_by_key(|x| x.abs()),Some(&2));
Sourcefnmax(self) ->Option<Self::Item>
fnmax(self) ->Option<Self::Item>
Computes the maximum of all the items in the iterator. If the iterator is empty, `None` is returned; otherwise, `Some(max)` is returned.
Note that the order in which the items will be reduced is not specified, so if the `Ord` impl is not truly associative, then the results are not deterministic.
Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
§Examples
userayon::prelude::*;leta = [45,74,32];assert_eq!(a.par_iter().max(),Some(&74));letb: [i32;0] = [];assert_eq!(b.par_iter().max(),None);
Sourcefnmax_by<F>(self, f: F) ->Option<Self::Item>
fnmax_by<F>(self, f: F) ->Option<Self::Item>
Computes the maximum of all the items in the iterator with respect tothe given comparison function. If the iterator is empty,None
isreturned; otherwise,Some(max)
is returned.
Note that the order in which the items will be reduced is notspecified, so if the comparison function is not associative, thenthe results are not deterministic.
§Examples
userayon::prelude::*;leta = [-3_i32,77,53,240, -1];assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())),Some(&240));
Sourcefnmax_by_key<K, F>(self, f: F) ->Option<Self::Item>
fnmax_by_key<K, F>(self, f: F) ->Option<Self::Item>
Computes the item that yields the maximum value for the givenfunction. If the iterator is empty,None
is returned;otherwise,Some(item)
is returned.
Note that the order in which the items will be reduced is notspecified, so if theOrd
impl is not truly associative, thenthe results are not deterministic.
§Examples
userayon::prelude::*;leta = [-3_i32,34,2,5, -10, -3, -23];assert_eq!(a.par_iter().max_by_key(|x| x.abs()),Some(&34));
Sourcefnchain<C>(self, chain: C) ->Chain<Self, C::Iter>where C:IntoParallelIterator<Item = Self::Item>,
fnchain<C>(self, chain: C) ->Chain<Self, C::Iter>where C:IntoParallelIterator<Item = Self::Item>,
Takes two iterators and creates a new iterator over both.
§Examples
userayon::prelude::*;leta = [0,1,2];letb = [9,8,7];letpar_iter = a.par_iter().chain(b.par_iter());letchained: Vec<_> = par_iter.cloned().collect();assert_eq!(&chained[..],&[0,1,2,9,8,7]);
Sourcefnfind_any<P>(self, predicate: P) ->Option<Self::Item>
fnfind_any<P>(self, predicate: P) ->Option<Self::Item>
Searches for **some** item in the parallel iterator that matches the given predicate and returns it. This operation is similar to `find` on sequential iterators, but the item returned may not be the **first** one in the parallel sequence which matches, since we search the entire sequence in parallel.
Once a match is found, we will attempt to stop processing the rest of the items in the iterator as soon as possible (just as `find` stops iterating once a match is found).
§Examples
userayon::prelude::*;leta = [1,2,3,3];assert_eq!(a.par_iter().find_any(|&&x| x ==3),Some(&3));assert_eq!(a.par_iter().find_any(|&&x| x ==100),None);
Sourcefnfind_first<P>(self, predicate: P) ->Option<Self::Item>
fnfind_first<P>(self, predicate: P) ->Option<Self::Item>
Searches for the sequentiallyfirst item in the parallel iteratorthat matches the given predicate and returns it.
Once a match is found, all attempts to the right of the matchwill be stopped, while attempts to the left must continue in casean earlier match is found.
For added performance, you might consider usingfind_first
in conjunction withby_exponential_blocks()
.
Note that not all parallel iterators have a useful order, much like sequential `HashMap` iteration, so "first" may be nebulous. If you just want the first match discovered anywhere in the iterator, `find_any` is a better choice.
§Examples
userayon::prelude::*;leta = [1,2,3,3];assert_eq!(a.par_iter().find_first(|&&x| x ==3),Some(&3));assert_eq!(a.par_iter().find_first(|&&x| x ==100),None);
Sourcefnfind_last<P>(self, predicate: P) ->Option<Self::Item>
fnfind_last<P>(self, predicate: P) ->Option<Self::Item>
Searches for the sequentiallylast item in the parallel iteratorthat matches the given predicate and returns it.
Once a match is found, all attempts to the left of the matchwill be stopped, while attempts to the right must continue in casea later match is found.
Note that not all parallel iterators have a useful order, much like sequential `HashMap` iteration, so "last" may be nebulous. When the order doesn't actually matter to you, `find_any` is a better choice.
§Examples
userayon::prelude::*;leta = [1,2,3,3];assert_eq!(a.par_iter().find_last(|&&x| x ==3),Some(&3));assert_eq!(a.par_iter().find_last(|&&x| x ==100),None);
Sourcefnfind_map_any<P, R>(self, predicate: P) ->Option<R>
fnfind_map_any<P, R>(self, predicate: P) ->Option<R>
Applies the given predicate to the items in the parallel iterator and returns **any** non-None result of the map operation.
Once a non-None value is produced from the map operation, we will attempt to stop processing the rest of the items in the iterator as soon as possible.
Note that this method only returns **some** item in the parallel iterator that is not None from the map predicate. The item returned may not be the **first** non-None value produced in the parallel sequence, since the entire sequence is mapped over in parallel.
§Examples
userayon::prelude::*;letc = ["lol","NaN","5","5"];letfound_number = c.par_iter().find_map_any(|s| s.parse().ok());assert_eq!(found_number,Some(5));
Sourcefnfind_map_first<P, R>(self, predicate: P) ->Option<R>
fnfind_map_first<P, R>(self, predicate: P) ->Option<R>
Applies the given predicate to the items in the parallel iterator andreturns the sequentiallyfirst non-None result of the map operation.
Once a non-None value is produced from the map operation, all attemptsto the right of the match will be stopped, while attempts to the leftmust continue in case an earlier match is found.
Note that not all parallel iterators have a useful order, much likesequentialHashMap
iteration, so “first” may be nebulous. If youjust want the first non-None value discovered anywhere in the iterator,find_map_any
is a better choice.
§Examples
userayon::prelude::*;letc = ["lol","NaN","2","5"];letfirst_number = c.par_iter().find_map_first(|s| s.parse().ok());assert_eq!(first_number,Some(2));
Sourcefnfind_map_last<P, R>(self, predicate: P) ->Option<R>
fnfind_map_last<P, R>(self, predicate: P) ->Option<R>
Applies the given predicate to the items in the parallel iterator andreturns the sequentiallylast non-None result of the map operation.
Once a non-None value is produced from the map operation, all attemptsto the left of the match will be stopped, while attempts to the rightmust continue in case a later match is found.
Note that not all parallel iterators have a useful order, much like sequential `HashMap` iteration, so "last" may be nebulous. When the order doesn't actually matter to you, `find_map_any` is a better choice.
§Examples
userayon::prelude::*;letc = ["lol","NaN","2","5"];letlast_number = c.par_iter().find_map_last(|s| s.parse().ok());assert_eq!(last_number,Some(5));
Sourcefnany<P>(self, predicate: P) ->bool
fnany<P>(self, predicate: P) ->bool
Searches for **some** item in the parallel iterator that matches the given predicate, and if so returns true. Once a match is found, we'll attempt to stop processing the rest of the items. Proving that there's no match, returning false, does require visiting every item.
§Examples
userayon::prelude::*;leta = [0,12,3,4,0,23,0];letis_valid = a.par_iter().any(|&x| x >10);assert!(is_valid);
Sourcefnall<P>(self, predicate: P) ->bool
fnall<P>(self, predicate: P) ->bool
Tests that every item in the parallel iterator matches the givenpredicate, and if so returns true. If a counter-example is found,we’ll attempt to stop processing more items, then return false.
§Examples
userayon::prelude::*;leta = [0,12,3,4,0,23,0];letis_valid = a.par_iter().all(|&x| x >10);assert!(!is_valid);
Sourcefnwhile_some<T>(self) ->WhileSome<Self>
fnwhile_some<T>(self) ->WhileSome<Self>
Creates an iterator over theSome
items of this iterator, haltingas soon as anyNone
is found.
§Examples
userayon::prelude::*;usestd::sync::atomic::{AtomicUsize, Ordering};letcounter = AtomicUsize::new(0);letvalue = (0_i32..2048) .into_par_iter() .map(|x| { counter.fetch_add(1, Ordering::SeqCst);ifx <1024{Some(x) }else{None} }) .while_some() .max();assert!(value <Some(1024));assert!(counter.load(Ordering::SeqCst) <2048);// should not have visited every single one
Sourcefnpanic_fuse(self) ->PanicFuse<Self>
fnpanic_fuse(self) ->PanicFuse<Self>
Wraps an iterator with a fuse in case of panics, to halt all threadsas soon as possible.
Panics within parallel iterators are always propagated to the caller,but they don’t always halt the rest of the iterator right away, due tothe internal semantics ofjoin
. This adaptor makes a greater effortto stop processing other items sooner, with the cost of additionalsynchronization overhead, which may also inhibit some optimizations.
§Examples
If this code didn’t usepanic_fuse()
, it would continue processingmany more items in other threads (with long sleep delays) before thepanic is finally propagated.
userayon::prelude::*;usestd::{thread, time};(0..1_000_000) .into_par_iter() .panic_fuse() .for_each(|i| {// simulate some workthread::sleep(time::Duration::from_secs(1));assert!(i >0);// oops!});
Sourcefncollect<C>(self) -> Cwhere C:FromParallelIterator<Self::Item>,
fncollect<C>(self) -> Cwhere C:FromParallelIterator<Self::Item>,
Creates a fresh collection containing all the elements producedby this parallel iterator.
You may prefercollect_into_vec()
implemented onIndexedParallelIterator
, if your underlying iterator also implementsit.collect_into_vec()
allocates efficiently with precise knowledgeof how many elements the iterator contains, and even allows you to reusean existing vector’s backing store rather than allocating a fresh vector.
See alsocollect_vec_list()
for collectinginto aLinkedList<Vec<T>>
.
§Examples
userayon::prelude::*;letsync_vec: Vec<_> = (0..100).into_iter().collect();letasync_vec: Vec<_> = (0..100).into_par_iter().collect();assert_eq!(sync_vec, async_vec);
You can collect a pair of collections likeunzip
for paired items:
userayon::prelude::*;leta = [(0,1), (1,2), (2,3), (3,4)];let(first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();assert_eq!(first, [0,1,2,3]);assert_eq!(second, [1,2,3,4]);
Or likepartition_map
forEither
items:
userayon::prelude::*;userayon::iter::Either;let(left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {ifx %2==0{ Either::Left(x *4) }else{ Either::Right(x *3) }}).collect();assert_eq!(left, [0,8,16,24]);assert_eq!(right, [3,9,15,21]);
You can even collect an arbitrarily-nested combination of pairs andEither
:
userayon::prelude::*;userayon::iter::Either;let(first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..8).into_par_iter().map(|x| {ifx %2==0{ (x, Either::Left(x *4)) }else{ (-x, Either::Right(x *3)) } }).collect();assert_eq!(first, [0, -1,2, -3,4, -5,6, -7]);assert_eq!(left, [0,8,16,24]);assert_eq!(right, [3,9,15,21]);
All of that canalso be combined with short-circuiting collection ofResult
orOption
types:
userayon::prelude::*;userayon::iter::Either;letresult:Result<(Vec<_>, (Vec<_>, Vec<_>)),_> = (0..8).into_par_iter().map(|x| {ifx >5{Err(x) }else ifx %2==0{Ok((x, Either::Left(x *4))) }else{Ok((-x, Either::Right(x *3))) } }).collect();leterror = result.unwrap_err();assert!(error ==6|| error ==7);
Sourcefnunzip<A, B, FromA, FromB>(self) ->(FromA, FromB)where Self:ParallelIterator<Item =(A, B)>, FromA:Default +Send +ParallelExtend<A>, FromB:Default +Send +ParallelExtend<B>, A:Send, B:Send,
fnunzip<A, B, FromA, FromB>(self) ->(FromA, FromB)where Self:ParallelIterator<Item =(A, B)>, FromA:Default +Send +ParallelExtend<A>, FromB:Default +Send +ParallelExtend<B>, A:Send, B:Send,
Unzips the items of a parallel iterator into a pair of arbitraryParallelExtend
containers.
You may prefer to useunzip_into_vecs()
, which allocates moreefficiently with precise knowledge of how many elements theiterator contains, and even allows you to reuse existingvectors’ backing stores rather than allocating fresh vectors.
§Examples
userayon::prelude::*;leta = [(0,1), (1,2), (2,3), (3,4)];let(left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();assert_eq!(left, [0,1,2,3]);assert_eq!(right, [1,2,3,4]);
Nested pairs can be unzipped too.
userayon::prelude::*;let(values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter() .map(|i| (i, (i * i, i * i * i))) .unzip();assert_eq!(values, [0,1,2,3]);assert_eq!(squares, [0,1,4,9]);assert_eq!(cubes, [0,1,8,27]);
Sourcefnpartition<A, B, P>(self, predicate: P) ->(A, B)
fnpartition<A, B, P>(self, predicate: P) ->(A, B)
Partitions the items of a parallel iterator into a pair of arbitraryParallelExtend
containers. Items for which thepredicate
returnstrue go into the first container, and the rest go into the second.
Note: unlike the standardIterator::partition
, this allows distinctcollection types for the left and right items. This is more flexible,but may require new type annotations when converting sequential codethat used type inference assuming the two were the same.
§Examples
userayon::prelude::*;let(left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x %2==0);assert_eq!(left, [0,2,4,6]);assert_eq!(right, [1,3,5,7]);
Sourcefnpartition_map<A, B, P, L, R>(self, predicate: P) ->(A, B)
fnpartition_map<A, B, P, L, R>(self, predicate: P) ->(A, B)
Partitions and maps the items of a parallel iterator into a pair ofarbitraryParallelExtend
containers.Either::Left
items go intothe first container, andEither::Right
items go into the second.
§Examples
userayon::prelude::*;userayon::iter::Either;let(left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter() .partition_map(|x| {ifx %2==0{ Either::Left(x *4) }else{ Either::Right(x *3) } });assert_eq!(left, [0,8,16,24]);assert_eq!(right, [3,9,15,21]);
NestedEither
enums can be split as well.
userayon::prelude::*;userayon::iter::Either::*;let((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20) .into_par_iter() .partition_map(|x|match(x %3, x %5) { (0,0) => Left(Left(x)), (0,_) => Left(Right(x)), (_,0) => Right(Left(x)), (_,_) => Right(Right(x)), });assert_eq!(fizzbuzz, [15]);assert_eq!(fizz, [3,6,9,12,18]);assert_eq!(buzz, [5,10]);assert_eq!(other, [1,2,4,7,8,11,13,14,16,17,19]);
Sourcefnintersperse(self, element: Self::Item) ->Intersperse<Self>
fnintersperse(self, element: Self::Item) ->Intersperse<Self>
Intersperses clones of an element between items of this iterator.
§Examples
userayon::prelude::*;letx =vec![1,2,3];letr: Vec<_> = x.into_par_iter().intersperse(-1).collect();assert_eq!(r,vec![1, -1,2, -1,3]);
Sourcefntake_any(self, n:usize) ->TakeAny<Self>
fntake_any(self, n:usize) ->TakeAny<Self>
Creates an iterator that yieldsn
elements fromanywhere in the original iterator.
This is similar toIndexedParallelIterator::take
without beingconstrained to the “first”n
of the original iterator order. Thetaken items will still maintain their relative order where that isvisible incollect
,reduce
, and similar outputs.
§Examples
userayon::prelude::*;letresult: Vec<_> = (0..100) .into_par_iter() .filter(|&x| x %2==0) .take_any(5) .collect();assert_eq!(result.len(),5);assert!(result.windows(2).all(|w| w[0] < w[1]));
Sourcefnskip_any(self, n:usize) ->SkipAny<Self>
fnskip_any(self, n:usize) ->SkipAny<Self>
Creates an iterator that skipsn
elements fromanywhere in the original iterator.
This is similar toIndexedParallelIterator::skip
without beingconstrained to the “first”n
of the original iterator order. Theremaining items will still maintain their relative order where that isvisible incollect
,reduce
, and similar outputs.
§Examples
userayon::prelude::*;letresult: Vec<_> = (0..100) .into_par_iter() .filter(|&x| x %2==0) .skip_any(5) .collect();assert_eq!(result.len(),45);assert!(result.windows(2).all(|w| w[0] < w[1]));
Sourcefntake_any_while<P>(self, predicate: P) ->TakeAnyWhile<Self, P>
fntake_any_while<P>(self, predicate: P) ->TakeAnyWhile<Self, P>
Creates an iterator that takes elements fromanywhere in the original iteratoruntil the givenpredicate
returnsfalse
.
Thepredicate
may be anything – e.g. it could be checking a fact about the item, aglobal condition unrelated to the item itself, or some combination thereof.
If parallel calls to thepredicate
race and give different results, then thetrue
results will still take those particular items, while respecting thefalse
result from elsewhere to skip any further items.
This is similar toIterator::take_while
without being constrained to the originaliterator order. The taken items will still maintain their relative order where that isvisible incollect
,reduce
, and similar outputs.
§Examples
userayon::prelude::*;letresult: Vec<_> = (0..100) .into_par_iter() .take_any_while(|x|*x <50) .collect();assert!(result.len() <=50);assert!(result.windows(2).all(|w| w[0] < w[1]));
userayon::prelude::*;usestd::sync::atomic::AtomicUsize;usestd::sync::atomic::Ordering::Relaxed;// Collect any group of items that sum <= 1000letquota = AtomicUsize::new(1000);letresult: Vec<_> = (0_usize..100) .into_par_iter() .take_any_while(|&x| { quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x)) .is_ok() }) .collect();letsum = result.iter().sum::<usize>();assert!(matches!(sum,902..=1000));
Sourcefnskip_any_while<P>(self, predicate: P) ->SkipAnyWhile<Self, P>
fnskip_any_while<P>(self, predicate: P) ->SkipAnyWhile<Self, P>
Creates an iterator that skips elements fromanywhere in the original iteratoruntil the givenpredicate
returnsfalse
.
Thepredicate
may be anything – e.g. it could be checking a fact about the item, aglobal condition unrelated to the item itself, or some combination thereof.
If parallel calls to thepredicate
race and give different results, then thetrue
results will still skip those particular items, while respecting thefalse
result from elsewhere to skip any further items.
This is similar toIterator::skip_while
without being constrained to the originaliterator order. The remaining items will still maintain their relative order where that isvisible incollect
,reduce
, and similar outputs.
§Examples
userayon::prelude::*;letresult: Vec<_> = (0..100) .into_par_iter() .skip_any_while(|x|*x <50) .collect();assert!(result.len() >=50);assert!(result.windows(2).all(|w| w[0] < w[1]));
Sourcefncollect_vec_list(self) ->LinkedList<Vec<Self::Item>>
fncollect_vec_list(self) ->LinkedList<Vec<Self::Item>>
Collects this iterator into a linked list of vectors.
This is useful when you need to condense a parallel iterator into a collection,but have no specific requirements for what that collection should be. If youplan to store the collection longer-term,Vec<T>
is, as always, likely thebest default choice, despite the overhead that comes from concatenating eachvector. Or, if this is anIndexedParallelIterator
, you should also prefer tojust collect to aVec<T>
.
Internally, mostFromParallelIterator
/ParallelExtend
implementationsuse this strategy; each job collecting their chunk of the iterator to aVec<T>
and those chunks getting merged into aLinkedList
, before then extending thecollection with each vector. This is a very efficient way to collect anunindexed parallel iterator, without much intermediate data movement.
§Examples
userayon::prelude::*;letresult: LinkedList<Vec<_>> = (0..=100) .into_par_iter() .filter(|x| x %2==0) .flat_map(|x|0..x) .collect_vec_list();// `par_iter.collect_vec_list().into_iter().flatten()` turns// a parallel iterator into a serial onelettotal_len = result.into_iter().flatten().count();assert_eq!(total_len,2550);
Sourcefnopt_len(&self) ->Option<usize>
fnopt_len(&self) ->Option<usize>
Internal method used to define the behavior of this paralleliterator. You should not need to call this directly.
Returns the number of items produced by this iterator, if knownstatically. This can be used by consumers to trigger special fastpaths. Therefore, ifSome(_)
is returned, this iterator must onlyuse the (indexed)Consumer
methods when driving a consumer, suchassplit_at()
. CallingUnindexedConsumer::split_off_left()
orotherUnindexedConsumer
methods – or returning an inaccuratevalue – may result in panics.
This method is currently used to optimizecollect
for wantof true Rust specialization; it may be removed whenspecialization is stable.
Dyn Compatibility§
This trait is **not** dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
Implementors§
Source§impl<'a, K:Ord +Sync + 'a, V:Send + 'a>ParallelIterator for rayon::collections::btree_map::IterMut<'a, K, V>
impl<'a, K:Ord +Sync + 'a, V:Send + 'a>ParallelIterator for rayon::collections::btree_map::IterMut<'a, K, V>
Source§impl<'a, K:Ord +Sync + 'a, V:Sync + 'a>ParallelIterator for rayon::collections::btree_map::Iter<'a, K, V>
impl<'a, K:Ord +Sync + 'a, V:Sync + 'a>ParallelIterator for rayon::collections::btree_map::Iter<'a, K, V>
Source§impl<'a, K:Hash +Eq +Sync + 'a, V:Send + 'a>ParallelIterator for rayon::collections::hash_map::IterMut<'a, K, V>
impl<'a, K:Hash +Eq +Sync + 'a, V:Send + 'a>ParallelIterator for rayon::collections::hash_map::IterMut<'a, K, V>
Source§impl<'a, K:Hash +Eq +Sync + 'a, V:Sync + 'a>ParallelIterator for rayon::collections::hash_map::Iter<'a, K, V>
impl<'a, K:Hash +Eq +Sync + 'a, V:Sync + 'a>ParallelIterator for rayon::collections::hash_map::Iter<'a, K, V>
Source§impl<'a, T, I>ParallelIterator forCloned<I>
impl<'a, T, I>ParallelIterator forCloned<I>
Source§impl<'a, T, I>ParallelIterator forCopied<I>
impl<'a, T, I>ParallelIterator forCopied<I>
Source§impl<'a, T:Hash +Eq +Sync + 'a>ParallelIterator for rayon::collections::hash_set::Iter<'a, T>
impl<'a, T:Hash +Eq +Sync + 'a>ParallelIterator for rayon::collections::hash_set::Iter<'a, T>
Source§impl<'a, T:Send + 'a>ParallelIterator for rayon::collections::linked_list::IterMut<'a, T>
impl<'a, T:Send + 'a>ParallelIterator for rayon::collections::linked_list::IterMut<'a, T>
Source§impl<'a, T:Send + 'a>ParallelIterator for rayon::collections::vec_deque::IterMut<'a, T>
impl<'a, T:Send + 'a>ParallelIterator for rayon::collections::vec_deque::IterMut<'a, T>
Source§impl<'a, T:Send + 'a>ParallelIterator for rayon::option::IterMut<'a, T>
impl<'a, T:Send + 'a>ParallelIterator for rayon::option::IterMut<'a, T>
Source§impl<'a, T:Send + 'a>ParallelIterator for rayon::result::IterMut<'a, T>
impl<'a, T:Send + 'a>ParallelIterator for rayon::result::IterMut<'a, T>
Source§impl<'a, T:Send>ParallelIterator for rayon::collections::vec_deque::Drain<'a, T>
impl<'a, T:Send>ParallelIterator for rayon::collections::vec_deque::Drain<'a, T>
Source§impl<'a, T:Sync + 'a>ParallelIterator for rayon::collections::linked_list::Iter<'a, T>
impl<'a, T:Sync + 'a>ParallelIterator for rayon::collections::linked_list::Iter<'a, T>
Source§impl<'a, T:Sync + 'a>ParallelIterator for rayon::collections::vec_deque::Iter<'a, T>
impl<'a, T:Sync + 'a>ParallelIterator for rayon::collections::vec_deque::Iter<'a, T>
Source§impl<'ch>ParallelIterator forCharIndices<'ch>
impl<'ch>ParallelIterator forCharIndices<'ch>
Source§impl<'ch>ParallelIterator forEncodeUtf16<'ch>
impl<'ch>ParallelIterator forEncodeUtf16<'ch>
Source§impl<'ch>ParallelIterator forSplitAsciiWhitespace<'ch>
impl<'ch>ParallelIterator forSplitAsciiWhitespace<'ch>
Source§impl<'ch>ParallelIterator forSplitWhitespace<'ch>
impl<'ch>ParallelIterator forSplitWhitespace<'ch>
Source§impl<'ch, P: Pattern>ParallelIterator forMatchIndices<'ch, P>
impl<'ch, P: Pattern>ParallelIterator forMatchIndices<'ch, P>
Source§impl<'ch, P: Pattern>ParallelIterator for rayon::str::SplitInclusive<'ch, P>
impl<'ch, P: Pattern>ParallelIterator for rayon::str::SplitInclusive<'ch, P>
Source§impl<'ch, P: Pattern>ParallelIterator forSplitTerminator<'ch, P>
impl<'ch, P: Pattern>ParallelIterator forSplitTerminator<'ch, P>
Source§impl<'data, T, P>ParallelIterator forChunkBy<'data, T, P>
impl<'data, T, P>ParallelIterator forChunkBy<'data, T, P>
Source§impl<'data, T, P>ParallelIterator forChunkByMut<'data, T, P>
impl<'data, T, P>ParallelIterator forChunkByMut<'data, T, P>
Source§impl<'data, T, P>ParallelIterator for rayon::slice::Split<'data, T, P>
impl<'data, T, P>ParallelIterator for rayon::slice::Split<'data, T, P>
Source§impl<'data, T, P>ParallelIterator for rayon::slice::SplitInclusive<'data, T, P>
impl<'data, T, P>ParallelIterator for rayon::slice::SplitInclusive<'data, T, P>
Source§impl<'data, T, P>ParallelIterator forSplitInclusiveMut<'data, T, P>
impl<'data, T, P>ParallelIterator forSplitInclusiveMut<'data, T, P>
Source§impl<'data, T, P>ParallelIterator forSplitMut<'data, T, P>
impl<'data, T, P>ParallelIterator forSplitMut<'data, T, P>
Source§impl<'data, T:Send + 'data>ParallelIterator forChunksExactMut<'data, T>
impl<'data, T:Send + 'data>ParallelIterator forChunksExactMut<'data, T>
Source§impl<'data, T:Send + 'data>ParallelIterator forChunksMut<'data, T>
impl<'data, T:Send + 'data>ParallelIterator forChunksMut<'data, T>
Source§impl<'data, T:Send + 'data>ParallelIterator for rayon::slice::IterMut<'data, T>
impl<'data, T:Send + 'data>ParallelIterator for rayon::slice::IterMut<'data, T>
typeItem =&'data mut T
Source§impl<'data, T:Send + 'data>ParallelIterator forRChunksExactMut<'data, T>
impl<'data, T:Send + 'data>ParallelIterator forRChunksExactMut<'data, T>
Source§impl<'data, T:Send + 'data>ParallelIterator forRChunksMut<'data, T>
impl<'data, T:Send + 'data>ParallelIterator forRChunksMut<'data, T>
Source§impl<'data, T:Sync + 'data>ParallelIterator for rayon::slice::Chunks<'data, T>
impl<'data, T:Sync + 'data>ParallelIterator for rayon::slice::Chunks<'data, T>
Source§impl<'data, T:Sync + 'data>ParallelIterator forChunksExact<'data, T>
impl<'data, T:Sync + 'data>ParallelIterator forChunksExact<'data, T>
Source§impl<'data, T:Sync + 'data>ParallelIterator for rayon::slice::Iter<'data, T>
impl<'data, T:Sync + 'data>ParallelIterator for rayon::slice::Iter<'data, T>
Source§impl<'data, T:Sync + 'data>ParallelIterator forRChunksExact<'data, T>
impl<'data, T:Sync + 'data>ParallelIterator forRChunksExact<'data, T>
Source§impl<A>ParallelIterator forMultiZip<(A,)>where A:IndexedParallelIterator,
impl<A>ParallelIterator forMultiZip<(A,)>where A:IndexedParallelIterator,
typeItem = (<A asParallelIterator>::Item,)
Source§impl<A, B>ParallelIterator forChain<A, B>
impl<A, B>ParallelIterator forChain<A, B>
typeItem = <A asParallelIterator>::Item
Source§impl<A, B>ParallelIterator forMultiZip<(A, B)>where A:IndexedParallelIterator, B:IndexedParallelIterator,
impl<A, B>ParallelIterator forMultiZip<(A, B)>where A:IndexedParallelIterator, B:IndexedParallelIterator,
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item)
Source§impl<A, B>ParallelIterator forZip<A, B>where A:IndexedParallelIterator, B:IndexedParallelIterator,
impl<A, B>ParallelIterator forZip<A, B>where A:IndexedParallelIterator, B:IndexedParallelIterator,
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item)
Source§impl<A, B>ParallelIterator forZipEq<A, B>where A:IndexedParallelIterator, B:IndexedParallelIterator,
impl<A, B>ParallelIterator forZipEq<A, B>where A:IndexedParallelIterator, B:IndexedParallelIterator,
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item)
Source§impl<A, B, C>ParallelIterator forMultiZip<(A, B, C)>
impl<A, B, C>ParallelIterator forMultiZip<(A, B, C)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item)
Source§impl<A, B, C, D>ParallelIterator forMultiZip<(A, B, C, D)>
impl<A, B, C, D>ParallelIterator forMultiZip<(A, B, C, D)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item)
Source§impl<A, B, C, D, E>ParallelIterator forMultiZip<(A, B, C, D, E)>
impl<A, B, C, D, E>ParallelIterator forMultiZip<(A, B, C, D, E)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F>ParallelIterator forMultiZip<(A, B, C, D, E, F)>
impl<A, B, C, D, E, F>ParallelIterator forMultiZip<(A, B, C, D, E, F)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F, G>ParallelIterator forMultiZip<(A, B, C, D, E, F, G)>
impl<A, B, C, D, E, F, G>ParallelIterator forMultiZip<(A, B, C, D, E, F, G)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item, <G asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F, G, H>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H)>
impl<A, B, C, D, E, F, G, H>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item, <G asParallelIterator>::Item, <H asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F, G, H, I>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I)>
impl<A, B, C, D, E, F, G, H, I>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item, <G asParallelIterator>::Item, <H asParallelIterator>::Item, <I asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F, G, H, I, J>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I, J)>
impl<A, B, C, D, E, F, G, H, I, J>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I, J)>
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item, <G asParallelIterator>::Item, <H asParallelIterator>::Item, <I asParallelIterator>::Item, <J asParallelIterator>::Item)
Source§impl<A, B, C, D, E, F, G, H, I, J, K>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I, J, K)>where A:IndexedParallelIterator, B:IndexedParallelIterator, C:IndexedParallelIterator, D:IndexedParallelIterator, E:IndexedParallelIterator, F:IndexedParallelIterator, G:IndexedParallelIterator, H:IndexedParallelIterator, I:IndexedParallelIterator, J:IndexedParallelIterator, K:IndexedParallelIterator,
impl<A, B, C, D, E, F, G, H, I, J, K>ParallelIterator forMultiZip<(A, B, C, D, E, F, G, H, I, J, K)>where A:IndexedParallelIterator, B:IndexedParallelIterator, C:IndexedParallelIterator, D:IndexedParallelIterator, E:IndexedParallelIterator, F:IndexedParallelIterator, G:IndexedParallelIterator, H:IndexedParallelIterator, I:IndexedParallelIterator, J:IndexedParallelIterator, K:IndexedParallelIterator,
typeItem = (<A asParallelIterator>::Item, <B asParallelIterator>::Item, <C asParallelIterator>::Item, <D asParallelIterator>::Item, <E asParallelIterator>::Item, <F asParallelIterator>::Item, <G asParallelIterator>::Item, <H asParallelIterator>::Item, <I asParallelIterator>::Item, <J asParallelIterator>::Item, <K asParallelIterator>::Item)
Source§ impl<A, B, C, D, E, F, G, H, I, J, K, L> ParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I, J, K, L)> where A: IndexedParallelIterator, B: IndexedParallelIterator, C: IndexedParallelIterator, D: IndexedParallelIterator, E: IndexedParallelIterator, F: IndexedParallelIterator, G: IndexedParallelIterator, H: IndexedParallelIterator, I: IndexedParallelIterator, J: IndexedParallelIterator, K: IndexedParallelIterator, L: IndexedParallelIterator,
impl<A, B, C, D, E, F, G, H, I, J, K, L> ParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I, J, K, L)> where A: IndexedParallelIterator, B: IndexedParallelIterator, C: IndexedParallelIterator, D: IndexedParallelIterator, E: IndexedParallelIterator, F: IndexedParallelIterator, G: IndexedParallelIterator, H: IndexedParallelIterator, I: IndexedParallelIterator, J: IndexedParallelIterator, K: IndexedParallelIterator, L: IndexedParallelIterator,
type Item = (<A as ParallelIterator>::Item, <B as ParallelIterator>::Item, <C as ParallelIterator>::Item, <D as ParallelIterator>::Item, <E as ParallelIterator>::Item, <F as ParallelIterator>::Item, <G as ParallelIterator>::Item, <H as ParallelIterator>::Item, <I as ParallelIterator>::Item, <J as ParallelIterator>::Item, <K as ParallelIterator>::Item, <L as ParallelIterator>::Item)
Source§ impl<D, S> ParallelIterator for rayon::iter::Split<D, S>
impl<D, S> ParallelIterator for rayon::iter::Split<D, S>
Source§ impl<I> ParallelIterator for rayon::iter::Chunks<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for rayon::iter::Chunks<I> where I: IndexedParallelIterator,
Source§ impl<I> ParallelIterator for Enumerate<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for Enumerate<I> where I: IndexedParallelIterator,
Source§ impl<I> ParallelIterator for ExponentialBlocks<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for ExponentialBlocks<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for Flatten<I>
impl<I> ParallelIterator for Flatten<I>
type Item = <<I as ParallelIterator>::Item as IntoParallelIterator>::Item
Source§ impl<I> ParallelIterator for FlattenIter<I>
impl<I> ParallelIterator for FlattenIter<I>
type Item = <<I as ParallelIterator>::Item as IntoIterator>::Item
Source§ impl<I> ParallelIterator for Intersperse<I>
impl<I> ParallelIterator for Intersperse<I>
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for MaxLen<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for MaxLen<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for MinLen<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for MinLen<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for PanicFuse<I> where I: ParallelIterator,
impl<I> ParallelIterator for PanicFuse<I> where I: ParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for Rev<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for Rev<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for Skip<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for Skip<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for SkipAny<I> where I: ParallelIterator,
impl<I> ParallelIterator for SkipAny<I> where I: ParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for StepBy<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for StepBy<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for Take<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for Take<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for TakeAny<I> where I: ParallelIterator,
impl<I> ParallelIterator for TakeAny<I> where I: ParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I> ParallelIterator for UniformBlocks<I> where I: IndexedParallelIterator,
impl<I> ParallelIterator for UniformBlocks<I> where I: IndexedParallelIterator,
type Item = <I as ParallelIterator>::Item
Source§ impl<I, F> ParallelIterator for Inspect<I, F>
impl<I, F> ParallelIterator for Inspect<I, F>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, F> ParallelIterator for Update<I, F>
impl<I, F> ParallelIterator for Update<I, F>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, F, PI> ParallelIterator for FlatMap<I, F>
impl<I, F, PI> ParallelIterator for FlatMap<I, F>
type Item = <PI as IntoParallelIterator>::Item
Source§ impl<I, F, R> ParallelIterator for Map<I, F>
impl<I, F, R> ParallelIterator for Map<I, F>
Source§ impl<I, F, SI> ParallelIterator for FlatMapIter<I, F>
impl<I, F, SI> ParallelIterator for FlatMapIter<I, F>
type Item = <SI as IntoIterator>::Item
Source§ impl<I, ID, U, F> ParallelIterator for FoldChunks<I, ID, F>
impl<I, ID, U, F> ParallelIterator for FoldChunks<I, ID, F>
Source§ impl<I, INIT, T, F, R> ParallelIterator for MapInit<I, INIT, F>
impl<I, INIT, T, F, R> ParallelIterator for MapInit<I, INIT, F>
Source§ impl<I, J> ParallelIterator for Interleave<I, J>
impl<I, J> ParallelIterator for Interleave<I, J>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, J> ParallelIterator for InterleaveShortest<I, J>
impl<I, J> ParallelIterator for InterleaveShortest<I, J>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, P> ParallelIterator for Filter<I, P>
impl<I, P> ParallelIterator for Filter<I, P>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, P> ParallelIterator for Positions<I, P>
impl<I, P> ParallelIterator for Positions<I, P>
Source§ impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
type Item = <I as ParallelIterator>::Item
Source§ impl<I, P, R> ParallelIterator for FilterMap<I, P>
impl<I, P, R> ParallelIterator for FilterMap<I, P>
Source§ impl<I, T> ParallelIterator for WhileSome<I>
impl<I, T> ParallelIterator for WhileSome<I>
Source§ impl<I, T, F, R> ParallelIterator for MapWith<I, T, F>
impl<I, T, F, R> ParallelIterator for MapWith<I, T, F>
Source§ impl<I, U, F> ParallelIterator for FoldChunksWith<I, U, F>
impl<I, U, F> ParallelIterator for FoldChunksWith<I, U, F>
Source§ impl<Iter: Iterator + Send> ParallelIterator for IterBridge<Iter>
impl<Iter: Iterator + Send> ParallelIterator for IterBridge<Iter>
Source§ impl<K: Hash + Eq + Send, V: Send> ParallelIterator for rayon::collections::hash_map::Drain<'_, K, V>
impl<K: Hash + Eq + Send, V: Send> ParallelIterator for rayon::collections::hash_map::Drain<'_, K, V>
Source§ impl<K: Hash + Eq + Send, V: Send> ParallelIterator for rayon::collections::hash_map::IntoIter<K, V>
impl<K: Hash + Eq + Send, V: Send> ParallelIterator for rayon::collections::hash_map::IntoIter<K, V>
Source§ impl<L, R> ParallelIterator for Either<L, R>
`Either<L, R>` is a parallel iterator if both `L` and `R` are parallel iterators.
impl<L, R> ParallelIterator for Either<L, R>
`Either<L, R>` is a parallel iterator if both `L` and `R` are parallel iterators.