Skip to content
This repository was archived by the owner on Feb 10, 2024. It is now read-only.

Commit 816f5bc

Browse files
bors[bot], RalfJung, Gankra, GuillaumeGomez, and fintelia
committed
Merge #24
24: Merge upstream changes, and some local cleanups r=cuviper a=cuviper Co-authored-by: Ralf Jung <[email protected]> Co-authored-by: Alexis Beingessner <[email protected]> Co-authored-by: Guillaume Gomez <[email protected]> Co-authored-by: Jonathan Behrens <[email protected]> Co-authored-by: Val <[email protected]> Co-authored-by: bors <[email protected]> Co-authored-by: teresy <[email protected]> Co-authored-by: kennytm <[email protected]> Co-authored-by: Josh Stone <[email protected]>
2 parents 85fddaa + 9e81875 commit 816f5bc

File tree

11 files changed

+933
-216
lines changed

11 files changed

+933
-216
lines changed

Cargo.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[package]
22
authors = ["Josh Stone <[email protected]>"]
33
name = "rayon-hash"
4-
version = "0.4.0"
4+
version = "0.4.1"
55
repository = "https://github.com/rayon-rs/rayon-hash"
66
documentation = "https://docs.rs/rayon-hash/"
77
keywords = ["parallel", "iterator", "hash", "map", "set"]
@@ -14,4 +14,5 @@ readme = "README.md"
1414
rayon = "1.0"
1515

1616
[dev-dependencies]
17-
rand = "0.5"
17+
rand = "0.6"
18+
rand_xorshift = "0.1"

README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,10 @@ split into parallel jobs. With the custom types in `rayon-hash`, we can
1313
instead read the raw hash table directly, for much better performance.
1414

1515
```text
16-
test rayon_set_sum_parallel ... bench: 1,077,602 ns/iter (+/- 50,610)
17-
test rayon_set_sum_serial ... bench: 6,363,125 ns/iter (+/- 101,513)
18-
test std_set_sum_parallel ... bench: 8,519,683 ns/iter (+/- 219,785)
19-
test std_set_sum_serial ... bench: 6,295,263 ns/iter (+/- 98,600)
16+
test rayon_set_sum_parallel ... bench: 1,035,111 ns/iter (+/- 57,327)
17+
test rayon_set_sum_serial ... bench: 7,500,179 ns/iter (+/- 96,918)
18+
test std_set_sum_parallel ... bench: 6,799,231 ns/iter (+/- 94,154)
19+
test std_set_sum_serial ... bench: 7,634,174 ns/iter (+/- 84,806)
2020
```
2121

2222
This crate currently requires `rustc 1.28.0` or greater.
@@ -45,7 +45,7 @@ RUSTFLAGS='--cfg rayon_hash_unstable' cargo build
4545
Note that this must not only be done for your crate, but for any crate that
4646
depends on your crate. This infectious nature is intentional, as it serves as
4747
a reminder that you are outside of the normal semver guarantees. These
48-
features also require a nightly Rust compiler.
48+
features may also require a nightly Rust compiler.
4949

5050
When such features are stabilized in the standard library, we will remove the
5151
`rayon_hash_unstable` guard here too.

benches/set_sum.rs

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,27 @@
11
#![feature(test)]
22

3-
extern crate test;
43
extern crate rand;
4+
extern crate rand_xorshift;
55
extern crate rayon;
66
extern crate rayon_hash;
7+
extern crate test;
78

8-
use rand::{Rng, SeedableRng, XorShiftRng};
99
use rand::distributions::Standard;
10-
use std::collections::HashSet as StdHashSet;
10+
use rand::{Rng, SeedableRng};
11+
use rand_xorshift::XorShiftRng;
12+
use rayon::prelude::*;
1113
use rayon_hash::HashSet as RayonHashSet;
14+
use std::collections::HashSet as StdHashSet;
1215
use std::iter::FromIterator;
13-
use rayon::prelude::*;
1416
use test::Bencher;
1517

16-
1718
fn default_set<C: FromIterator<u32>>(n: usize) -> C {
1819
let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
1920
(0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
20-
XorShiftRng::from_seed(seed).sample_iter(&Standard).take(n).collect()
21+
XorShiftRng::from_seed(seed)
22+
.sample_iter(&Standard)
23+
.take(n)
24+
.collect()
2125
}
2226

2327
macro_rules! bench_set_sum {
@@ -32,7 +36,7 @@ macro_rules! bench_set_sum {
3236
assert_eq!(s, sum);
3337
})
3438
}
35-
}
39+
};
3640
}
3741

3842
bench_set_sum!{std_set_sum_serial, StdHashSet<_>, iter}

src/alloc.rs

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ fn layout_err() -> LayoutErr {
3737
/// use specific allocators with looser requirements.)
3838
// #[stable(feature = "alloc_layout", since = "1.28.0")]
3939
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
40+
// #[lang = "alloc_layout"]
4041
pub(crate) struct Layout {
4142
inner: alloc::Layout,
4243
}
@@ -46,6 +47,8 @@ impl Layout {
4647
/// or returns `LayoutErr` if either of the following conditions
4748
/// are not met:
4849
///
50+
/// * `align` must not be zero,
51+
///
4952
/// * `align` must be a power of two,
5053
///
5154
/// * `size`, when rounded up to the nearest multiple of `align`,
@@ -102,7 +105,7 @@ impl Layout {
102105
/// to be less than or equal to the alignment of the starting
103106
/// address for the whole allocated block of memory. One way to
104107
/// satisfy this constraint is to ensure `align <= self.align()`.
105-
// #[unstable(feature = "allocator_api", issue = "32838")]
108+
// #[unstable(feature = "alloc_layout_extra", issue = "55724")]
106109
#[inline]
107110
pub(crate) fn padding_needed_for(&self, align: usize) -> usize {
108111
let len = self.size();
@@ -128,7 +131,7 @@ impl Layout {
128131

129132
let len_rounded_up = len.wrapping_add(align).wrapping_sub(1)
130133
& !align.wrapping_sub(1);
131-
return len_rounded_up.wrapping_sub(len);
134+
len_rounded_up.wrapping_sub(len)
132135
}
133136

134137
/// Creates a layout describing the record for `n` instances of
@@ -139,7 +142,7 @@ impl Layout {
139142
/// of each element in the array.
140143
///
141144
/// On arithmetic overflow, returns `LayoutErr`.
142-
// #[unstable(feature = "allocator_api", issue = "32838")]
145+
// #[unstable(feature = "alloc_layout_extra", issue = "55724")]
143146
#[inline]
144147
pub(crate) fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> {
145148
let padded_size = self.size().checked_add(self.padding_needed_for(self.align()))
@@ -159,13 +162,16 @@ impl Layout {
159162
/// will be properly aligned. Note that the result layout will
160163
/// satisfy the alignment properties of both `self` and `next`.
161164
///
165+
/// The resulting layout will be the same as that of a C struct containing
166+
/// two fields with the layouts of `self` and `next`, in that order.
167+
///
162168
/// Returns `Some((k, offset))`, where `k` is layout of the concatenated
163169
/// record and `offset` is the relative location, in bytes, of the
164170
/// start of the `next` embedded within the concatenated record
165171
/// (assuming that the record itself starts at offset 0).
166172
///
167173
/// On arithmetic overflow, returns `LayoutErr`.
168-
// #[unstable(feature = "allocator_api", issue = "32838")]
174+
// #[unstable(feature = "alloc_layout_extra", issue = "55724")]
169175
#[inline]
170176
pub(crate) fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> {
171177
let new_align = cmp::max(self.align(), next.align());
@@ -183,7 +189,7 @@ impl Layout {
183189
/// Creates a layout describing the record for a `[T; n]`.
184190
///
185191
/// On arithmetic overflow, returns `LayoutErr`.
186-
// #[unstable(feature = "allocator_api", issue = "32838")]
192+
// #[unstable(feature = "alloc_layout_extra", issue = "55724")]
187193
#[inline]
188194
pub(crate) fn array<T>(n: usize) -> Result<Self, LayoutErr> {
189195
Layout::new::<T>()
@@ -210,7 +216,6 @@ impl From<Layout> for alloc::Layout {
210216
}
211217

212218
/// Augments `AllocErr` with a CapacityOverflow variant.
213-
// FIXME: should this be in libcore or liballoc?
214219
#[derive(Clone, PartialEq, Eq, Debug)]
215220
// #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
216221
pub enum CollectionAllocErr {

src/par/map.rs

Lines changed: 44 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,13 @@
11
/// Rayon extensions to `HashMap`
2-
3-
use rayon::iter::{ParallelIterator, IntoParallelIterator, FromParallelIterator, ParallelExtend};
4-
use std::hash::{Hash, BuildHasher};
2+
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
3+
use std::hash::{BuildHasher, Hash};
54

65
use super::table;
76
use HashMap;
87

98
pub use self::table::{ParIntoIter, ParIter, ParIterMut};
109
pub use self::table::{ParKeys, ParValues, ParValuesMut};
1110

12-
1311
impl<K: Sync, V, S> HashMap<K, V, S> {
1412
pub fn par_keys(&self) -> ParKeys<K, V> {
1513
self.table.par_keys()
@@ -29,17 +27,18 @@ impl<K, V: Send, S> HashMap<K, V, S> {
2927
}
3028

3129
impl<K, V, S> HashMap<K, V, S>
32-
where K: Eq + Hash + Sync,
33-
V: PartialEq + Sync,
34-
S: BuildHasher + Sync
30+
where
31+
K: Eq + Hash + Sync,
32+
V: PartialEq + Sync,
33+
S: BuildHasher + Sync,
3534
{
3635
pub fn par_eq(&self, other: &Self) -> bool {
37-
self.len() == other.len() &&
38-
self.into_par_iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
36+
self.len() == other.len() && self
37+
.into_par_iter()
38+
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
3939
}
4040
}
4141

42-
4342
impl<K: Send, V: Send, S> IntoParallelIterator for HashMap<K, V, S> {
4443
type Item = (K, V);
4544
type Iter = ParIntoIter<K, V>;
@@ -67,58 +66,63 @@ impl<'a, K: Sync, V: Send, S> IntoParallelIterator for &'a mut HashMap<K, V, S>
6766
}
6867
}
6968

70-
7169
/// Collect (key, value) pairs from a parallel iterator into a
7270
/// hashmap. If multiple pairs correspond to the same key, then the
7371
/// ones produced earlier in the parallel iterator will be
7472
/// overwritten, just as with a sequential iterator.
7573
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
76-
where K: Eq + Hash + Send,
77-
V: Send,
78-
S: BuildHasher + Default + Send
74+
where
75+
K: Eq + Hash + Send,
76+
V: Send,
77+
S: BuildHasher + Default + Send,
7978
{
8079
fn from_par_iter<P>(par_iter: P) -> Self
81-
where P: IntoParallelIterator<Item = (K, V)>
80+
where
81+
P: IntoParallelIterator<Item = (K, V)>,
8282
{
8383
let mut map = HashMap::default();
8484
map.par_extend(par_iter);
8585
map
8686
}
8787
}
8888

89-
9089
/// Extend a hash map with items from a parallel iterator.
9190
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
92-
where K: Eq + Hash + Send,
93-
V: Send,
94-
S: BuildHasher + Send
91+
where
92+
K: Eq + Hash + Send,
93+
V: Send,
94+
S: BuildHasher + Send,
9595
{
9696
fn par_extend<I>(&mut self, par_iter: I)
97-
where I: IntoParallelIterator<Item = (K, V)>
97+
where
98+
I: IntoParallelIterator<Item = (K, V)>,
9899
{
99100
extend(self, par_iter);
100101
}
101102
}
102103

103104
/// Extend a hash map with copied items from a parallel iterator.
104105
impl<'a, K, V, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
105-
where K: Copy + Eq + Hash + Send + Sync,
106-
V: Copy + Send + Sync,
107-
S: BuildHasher + Send
106+
where
107+
K: Copy + Eq + Hash + Send + Sync,
108+
V: Copy + Send + Sync,
109+
S: BuildHasher + Send,
108110
{
109111
fn par_extend<I>(&mut self, par_iter: I)
110-
where I: IntoParallelIterator<Item = (&'a K, &'a V)>
112+
where
113+
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
111114
{
112115
extend(self, par_iter);
113116
}
114117
}
115118

116119
// This is equal to the normal `HashMap` -- no custom advantage.
117120
fn extend<K, V, S, I>(map: &mut HashMap<K, V, S>, par_iter: I)
118-
where K: Eq + Hash,
119-
S: BuildHasher,
120-
I: IntoParallelIterator,
121-
HashMap<K, V, S>: Extend<I::Item>
121+
where
122+
K: Eq + Hash,
123+
S: BuildHasher,
124+
I: IntoParallelIterator,
125+
HashMap<K, V, S>: Extend<I::Item>,
122126
{
123127
let (list, len) = super::collect(par_iter);
124128

@@ -133,13 +137,12 @@ fn extend<K, V, S, I>(map: &mut HashMap<K, V, S>, par_iter: I)
133137
}
134138
}
135139

136-
137140
#[cfg(test)]
138141
mod test_par_map {
139142
use super::HashMap;
140-
use std::sync::atomic::{AtomicUsize, Ordering};
141-
use std::hash::{Hash, Hasher};
142143
use rayon::prelude::*;
144+
use std::hash::{Hash, Hasher};
145+
use std::sync::atomic::{AtomicUsize, Ordering};
143146

144147
struct Dropable<'a> {
145148
k: usize,
@@ -150,7 +153,10 @@ mod test_par_map {
150153
fn new(k: usize, counter: &AtomicUsize) -> Dropable {
151154
counter.fetch_add(1, Ordering::Relaxed);
152155

153-
Dropable { k: k, counter: counter }
156+
Dropable {
157+
k: k,
158+
counter: counter,
159+
}
154160
}
155161
}
156162

@@ -168,7 +174,8 @@ mod test_par_map {
168174

169175
impl<'a> Hash for Dropable<'a> {
170176
fn hash<H>(&self, state: &mut H)
171-
where H: Hasher
177+
where
178+
H: Hasher,
172179
{
173180
self.k.hash(state)
174181
}
@@ -213,7 +220,8 @@ mod test_par_map {
213220
assert_eq!(value.load(Ordering::Relaxed), 100);
214221

215222
// retain only half
216-
let _v: Vec<_> = hm.into_par_iter()
223+
let _v: Vec<_> = hm
224+
.into_par_iter()
217225
.filter(|&(ref key, _)| key.k < 50)
218226
.collect();
219227

@@ -243,7 +251,7 @@ mod test_par_map {
243251
fn test_iterate() {
244252
let mut m = HashMap::with_capacity(4);
245253
for i in 0..32 {
246-
assert!(m.insert(i, i*2).is_none());
254+
assert!(m.insert(i, i * 2).is_none());
247255
}
248256
assert_eq!(m.len(), 32);
249257

@@ -282,9 +290,7 @@ mod test_par_map {
282290
fn test_values_mut() {
283291
let vec = vec![(1, 1), (2, 2), (3, 3)];
284292
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
285-
map.par_values_mut().for_each(|value| {
286-
*value = (*value) * 2
287-
});
293+
map.par_values_mut().for_each(|value| *value = (*value) * 2);
288294
let values: Vec<_> = map.par_values().cloned().collect();
289295
assert_eq!(values.len(), 3);
290296
assert!(values.contains(&2));

src/par/mod.rs

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,16 @@ mod table;
77

88
/// Helper for collecting parallel iterators to an intermediary
99
fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
10-
let list = iter.into_par_iter()
10+
let list = iter
11+
.into_par_iter()
1112
.fold(Vec::new, |mut vec, elem| {
1213
vec.push(elem);
1314
vec
14-
})
15-
.map(|vec| {
15+
}).map(|vec| {
1616
let mut list = LinkedList::new();
1717
list.push_back(vec);
1818
list
19-
})
20-
.reduce(LinkedList::new, |mut list1, mut list2| {
19+
}).reduce(LinkedList::new, |mut list1, mut list2| {
2120
list1.append(&mut list2);
2221
list1
2322
});

0 commit comments

Comments (0)