diff --git a/benches/bench.rs b/benches/bench.rs
index 771e7169a5..729b53fe52 100644
--- a/benches/bench.rs
+++ b/benches/bench.rs
@@ -10,7 +10,10 @@ use test::{black_box, Bencher};
use hashbrown::hash_map::DefaultHashBuilder;
use hashbrown::HashMap;
-use std::collections::hash_map::RandomState;
+use std::{
+ collections::hash_map::RandomState,
+ sync::atomic::{self, AtomicUsize},
+};
const SIZE: usize = 1000;
@@ -40,6 +43,20 @@ impl Iterator for RandomKeys {
}
}
+// Just an arbitrary side effect to keep the maps from short-circuiting to the non-dropping path
+// when dropping maps/entries (most real-world usages likely have drop glue in the key or value)
+lazy_static::lazy_static! {
+ static ref SIDE_EFFECT: AtomicUsize = AtomicUsize::new(0);
+}
+
+#[derive(Clone)]
+struct DropType(usize);
+impl Drop for DropType {
+ fn drop(&mut self) {
+ SIDE_EFFECT.fetch_add(self.0, atomic::Ordering::SeqCst);
+ }
+}
+
macro_rules! bench_suite {
($bench_macro:ident, $bench_ahash_serial:ident, $bench_std_serial:ident,
$bench_ahash_highbits:ident, $bench_std_highbits:ident,
@@ -69,10 +86,11 @@ macro_rules! bench_insert {
b.iter(|| {
m.clear();
for i in ($keydist).take(SIZE) {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
black_box(&mut m);
- })
+ });
+ eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst));
}
};
}
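
As the comment in the hunk above notes, plain integer keys and values let the map skip its dropping work entirely: the table teardown is gated on mem::needs_drop::<T>(), and (usize, usize) entries have no drop glue, so a benchmark built on them never exercises that path. A standalone sketch of the distinction, not part of the patch (the DropType here mirrors the benchmark's definition):

    use std::mem::needs_drop;

    // Mirrors the benchmark's DropType: a value whose drop glue must run.
    struct DropType(usize);
    impl Drop for DropType {
        fn drop(&mut self) {}
    }

    fn main() {
        // Plain integer entries have no drop glue, so the table can free its
        // allocation without visiting each (key, value) pair.
        assert!(!needs_drop::<(usize, usize)>());
        // With DropType as the value, every entry must be visited on drop.
        assert!(needs_drop::<(usize, DropType)>());
    }
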
@@ -93,7 +111,7 @@ macro_rules! bench_insert_erase {
fn $name(b: &mut Bencher) {
let mut base = $maptype::default();
for i in ($keydist).take(SIZE) {
- base.insert(i, i);
+ base.insert(i, DropType(i));
}
let skip = $keydist.skip(SIZE);
b.iter(|| {
@@ -103,11 +121,12 @@ macro_rules! bench_insert_erase {
// While keeping the size constant,
// replace the first keydist with the second.
for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) {
- m.insert(add, add);
+ m.insert(add, DropType(add));
black_box(m.remove(&remove));
}
black_box(m);
- })
+ });
+ eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst));
}
};
}
@@ -128,14 +147,15 @@ macro_rules! bench_lookup {
fn $name(b: &mut Bencher) {
let mut m = $maptype::default();
for i in $keydist.take(SIZE) {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
for i in $keydist.take(SIZE) {
black_box(m.get(&i));
}
- })
+ });
+ eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst));
}
};
}
@@ -157,7 +177,7 @@ macro_rules! bench_lookup_fail {
let mut m = $maptype::default();
let mut iter = $keydist;
for i in (&mut iter).take(SIZE) {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
@@ -185,7 +205,7 @@ macro_rules! bench_iter {
fn $name(b: &mut Bencher) {
let mut m = $maptype::default();
for i in ($keydist).take(SIZE) {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
@@ -211,7 +231,7 @@ bench_suite!(
fn clone_small(b: &mut Bencher) {
let mut m = HashMap::new();
for i in 0..10 {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
@@ -224,7 +244,7 @@ fn clone_from_small(b: &mut Bencher) {
let mut m = HashMap::new();
let mut m2 = HashMap::new();
for i in 0..10 {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
@@ -237,7 +257,7 @@ fn clone_from_small(b: &mut Bencher) {
fn clone_large(b: &mut Bencher) {
let mut m = HashMap::new();
for i in 0..1000 {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
@@ -250,7 +270,7 @@ fn clone_from_large(b: &mut Bencher) {
let mut m = HashMap::new();
let mut m2 = HashMap::new();
for i in 0..1000 {
- m.insert(i, i);
+ m.insert(i, DropType(i));
}
b.iter(|| {
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index 32fec98476..fac9baa577 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -145,27 +145,22 @@ fn h2(hash: u64) -> u8 {
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
- bucket_mask: usize,
pos: usize,
stride: usize,
}
-impl Iterator for ProbeSeq {
- type Item = usize;
-
+impl ProbeSeq {
#[inline]
- fn next(&mut self) -> Option<usize> {
+ fn move_next(&mut self, bucket_mask: usize) {
// We should have found an empty bucket by now and ended the probe.
debug_assert!(
- self.stride <= self.bucket_mask,
+ self.stride <= bucket_mask,
"Went past end of probe sequence"
);
- let result = self.pos;
self.stride += Group::WIDTH;
self.pos += self.stride;
- self.pos &= self.bucket_mask;
- Some(result)
+ self.pos &= bucket_mask;
}
}
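
The doc comment above ProbeSeq links to a proof that this probe visits every group; move_next keeps the same triangular stride (each step advances by one more group than the last), and for a power-of-two bucket count that cycle covers every group exactly once before repeating. A standalone sketch checking that property with illustrative values; GROUP_WIDTH stands in for Group::WIDTH (whose real value depends on the SIMD backend), and the start position is group-aligned here for simplicity, whereas the real sequence starts at h1(hash) & bucket_mask and merely shifts the same cycle:

    const GROUP_WIDTH: usize = 16; // illustrative stand-in for Group::WIDTH

    fn main() {
        let buckets = 256; // any power of two
        let bucket_mask = buckets - 1;
        let groups = buckets / GROUP_WIDTH;

        let (mut pos, mut stride) = (0usize, 0usize);
        let mut seen = vec![false; groups];
        for _ in 0..groups {
            seen[pos / GROUP_WIDTH] = true;
            // Same update as ProbeSeq::move_next in this patch.
            stride += GROUP_WIDTH;
            pos += stride;
            pos &= bucket_mask;
        }
        // Triangular probing over a power-of-two table hits each group once.
        assert!(seen.iter().all(|&v| v));
    }
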
@@ -578,7 +573,7 @@ impl<T> RawTable<T> {
}
}
- /// Returns an iterator for a probe sequence on the table.
+ /// Returns an iterator-like object for a probe sequence on the table.
///
/// This iterator never terminates, but is guaranteed to visit each bucket
/// group exactly once. The loop using `probe_seq` must terminate upon
@@ -586,7 +581,6 @@ impl<T> RawTable<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn probe_seq(&self, hash: u64) -> ProbeSeq {
ProbeSeq {
- bucket_mask: self.bucket_mask,
pos: h1(hash) & self.bucket_mask,
stride: 0,
}
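
The doc comment on probe_seq spells out the contract that survives this refactor: the sequence itself never terminates, so the loop driving it (now explicitly, via move_next, since ProbeSeq no longer implements Iterator) must stop once it reaches a group containing an empty bucket, exactly as find_insert_slot does in the next hunk; that is also why the trailing unreachable!() can be dropped there. A toy sketch of that caller-side contract, not hashbrown code (has_empty is a hypothetical stand-in for Group::match_empty_or_deleted()):

    fn main() {
        // One bool per group: does the group contain an empty/deleted slot?
        let has_empty = [false, false, true, false]; // toy table of 4 groups
        let groups = has_empty.len(); // power of two, so masking works
        let (mut group_idx, mut stride) = (0usize, 0usize);
        loop {
            if has_empty[group_idx] {
                println!("insert slot found in group {group_idx}");
                break; // the loop terminates, the probe sequence never does
            }
            // move_next, expressed in whole groups rather than buckets.
            stride += 1;
            group_idx = (group_idx + stride) & (groups - 1);
        }
    }
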
@@ -626,11 +620,12 @@ impl<T> RawTable<T> {
/// There must be at least 1 empty bucket in the table.
#[cfg_attr(feature = "inline-more", inline)]
fn find_insert_slot(&self, hash: u64) -> usize {
- for pos in self.probe_seq(hash) {
+ let mut probe_seq = self.probe_seq(hash);
+ loop {
unsafe {
- let group = Group::load(self.ctrl(pos));
+ let group = Group::load(self.ctrl(probe_seq.pos));
if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
- let result = (pos + bit) & self.bucket_mask;
+ let result = (probe_seq.pos + bit) & self.bucket_mask;
// In tables smaller than the group width, trailing control
// bytes outside the range of the table are filled with
@@ -643,7 +638,7 @@ impl<T> RawTable<T> {
// control bytes (containing EMPTY).
if unlikely(is_full(*self.ctrl(result))) {
debug_assert!(self.bucket_mask < Group::WIDTH);
- debug_assert_ne!(pos, 0);
+ debug_assert_ne!(probe_seq.pos, 0);
return Group::load_aligned(self.ctrl(0))
.match_empty_or_deleted()
.lowest_set_bit_nonzero();
@@ -652,10 +647,8 @@ impl<T> RawTable<T> {
}
}
}
+ probe_seq.move_next(self.bucket_mask);
}
-
- // probe_seq never returns.
- unreachable!();
}
/// Marks all table buckets as empty without dropping their contents.
@@ -1872,8 +1865,6 @@ pub struct RawIterHash<'a, T> {
// The sequence of groups to probe in the search.
probe_seq: ProbeSeq,
- // The current group and its position.
- pos: usize,
group: Group,
// The elements within the group with a matching h2-hash.
@@ -1884,16 +1875,14 @@ impl<'a, T> RawIterHash<'a, T> {
fn new(table: &'a RawTable<T>, hash: u64) -> Self {
unsafe {
let h2_hash = h2(hash);
- let mut probe_seq = table.probe_seq(hash);
- let pos = probe_seq.next().unwrap();
- let group = Group::load(table.ctrl(pos));
+ let probe_seq = table.probe_seq(hash);
+ let group = Group::load(table.ctrl(probe_seq.pos));
let bitmask = group.match_byte(h2_hash).into_iter();
RawIterHash {
table,
h2_hash,
probe_seq,
- pos,
group,
bitmask,
}
@@ -1908,15 +1897,15 @@ impl<'a, T> Iterator for RawIterHash<'a, T> {
unsafe {
loop {
if let Some(bit) = self.bitmask.next() {
- let index = (self.pos + bit) & self.table.bucket_mask;
+ let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
let bucket = self.table.bucket(index);
return Some(bucket);
}
if likely(self.group.match_empty().any_bit_set()) {
return None;
}
- self.pos = self.probe_seq.next().unwrap();
- self.group = Group::load(self.table.ctrl(self.pos));
+ self.probe_seq.move_next(self.table.bucket_mask);
+ self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
}
}