Optimize AtomicBool::fetch_nand

Stjepan Glavina 2017-04-07 17:28:55 +02:00
parent 4c59c92bc4
commit 5c5a5182c9
2 changed files with 27 additions and 10 deletions

@@ -539,17 +539,21 @@ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
         // We can't use atomic_nand here because it can result in a bool with
         // an invalid value. This happens because the atomic operation is done
         // with an 8-bit integer internally, which would set the upper 7 bits.
-        // So we just use a compare-exchange loop instead, which is what the
-        // intrinsic actually expands to anyways on many platforms.
-        let mut old = self.load(Relaxed);
-        loop {
-            let new = !(old && val);
-            match self.compare_exchange_weak(old, new, order, Relaxed) {
-                Ok(_) => break,
-                Err(x) => old = x,
+        // So we just use fetch_xor or compare_exchange instead.
+        if val {
+            // !(x & true) == !x
+            // We must invert the bool.
+            self.fetch_xor(true, order)
+        } else {
+            // !(x & false) == true
+            // We must set the bool to true. Instead of delegating to swap or
+            // fetch_or, use compare_exchange in order to avoid unnecessary
+            // writes to memory, which might reduce cache-coherence traffic.
+            match self.compare_exchange(false, true, order, Ordering::Relaxed) {
+                Ok(_) => false,
+                Err(_) => true,
             }
         }
-        old
     }
 
     /// Logical "or" with a boolean value.
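
The hazard the comment describes can be shown directly on an atomic integer. The following sketch is illustrative only and not part of this change (it uses AtomicU8, stabilized later than this commit): a byte-wide NAND on the representation of `true` produces 0xFE, which is not a valid bit pattern for bool.

use std::sync::atomic::{AtomicU8, Ordering};

fn main() {
    // A bool occupies one byte, but only 0x00 and 0x01 are valid patterns.
    let byte = AtomicU8::new(0x01); // the representation of `true`
    // A byte-wide NAND sets the upper 7 bits: !(0x01 & 0x01) == 0xFE.
    byte.fetch_nand(0x01, Ordering::SeqCst);
    assert_eq!(byte.load(Ordering::SeqCst), 0xFE); // invalid as a bool
}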
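For reference, here is the same two-branch strategy written as a standalone function against AtomicBool's public API; this is a minimal sketch assuming only stable std, and the free function name `nand` is illustrative rather than part of the patch.

use std::sync::atomic::{AtomicBool, Ordering};

// Standalone version of the optimized operation: returns the previous value.
fn nand(flag: &AtomicBool, val: bool, order: Ordering) -> bool {
    if val {
        // !(x & true) == !x, and flipping a bit is exactly XOR with true.
        flag.fetch_xor(true, order)
    } else {
        // !(x & false) == true, so a store is only needed for the
        // false -> true transition; compare_exchange skips the write
        // entirely when the flag is already true.
        match flag.compare_exchange(false, true, order, Ordering::Relaxed) {
            Ok(_) => false, // observed false, stored true
            Err(_) => true, // already true, no write performed
        }
    }
}

fn main() {
    let a = AtomicBool::new(true);
    assert_eq!(nand(&a, true, Ordering::SeqCst), true); // !(true & true) == false
    assert_eq!(a.load(Ordering::SeqCst), false);
    assert_eq!(nand(&a, false, Ordering::SeqCst), false); // !(false & false) == true
    assert_eq!(a.load(Ordering::SeqCst), true);
}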

@@ -24,10 +24,23 @@ fn bool_() {
 #[test]
 fn bool_and() {
     let a = AtomicBool::new(true);
-    assert_eq!(a.fetch_and(false, SeqCst),true);
+    assert_eq!(a.fetch_and(false, SeqCst), true);
     assert_eq!(a.load(SeqCst),false);
 }
 
+#[test]
+fn bool_nand() {
+    let a = AtomicBool::new(false);
+    assert_eq!(a.fetch_nand(false, SeqCst), false);
+    assert_eq!(a.load(SeqCst), true);
+    assert_eq!(a.fetch_nand(false, SeqCst), true);
+    assert_eq!(a.load(SeqCst), true);
+    assert_eq!(a.fetch_nand(true, SeqCst), true);
+    assert_eq!(a.load(SeqCst), false);
+    assert_eq!(a.fetch_nand(true, SeqCst), false);
+    assert_eq!(a.load(SeqCst), true);
+}
+
 #[test]
 fn uint_and() {
     let x = AtomicUsize::new(0xf731);