//! Atomic types.
//!
//! * [`AtomicCell`], a thread-safe mutable memory location.
//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
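//!
//! # Examples
//!
//! A minimal usage sketch (illustrative only; see the item-level documentation for the full
//! API and its exact guarantees):
//!
//! ```
//! use crossbeam_utils::atomic::{AtomicCell, AtomicConsume};
//! use std::sync::atomic::AtomicUsize;
//!
//! // `AtomicCell` wraps a value and lets threads load/store/swap it atomically.
//! let cell = AtomicCell::new(7);
//! cell.store(8);
//! assert_eq!(cell.load(), 8);
//! assert_eq!(cell.swap(9), 8);
//!
//! // `AtomicConsume` adds `load_consume` to the standard primitive atomic types.
//! let n = AtomicUsize::new(1);
//! assert_eq!(n.load_consume(), 1);
//! ```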

#[cfg(not(crossbeam_no_atomic_cas))]
#[cfg(not(crossbeam_loom))]
cfg_if::cfg_if! {
    // Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
    // around.
    //
    // We are ignoring too wide architectures (pointer width >= 256), since such a system will not
    // appear in a conceivable future.
    //
    // In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be
    // vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the
    // counter will not be increased that fast.
    if #[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))] {
        mod seq_lock;
    } else {
        #[path = "seq_lock_wide.rs"]
        mod seq_lock;
    }
}
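
// Illustrative arithmetic for the selection above (a sketch, not part of the original module):
// assuming the "wide" variant combines two machine words into a single counter, as the comment
// above implies, the effective counter width is one word on 64/128-bit targets and two words
// otherwise, so it only drops to 32 bits on <= 16-bit hardware. The `_SEQ_COUNTER_BITS`
// constant is hypothetical and exists only to spell out that arithmetic.
#[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))]
const _SEQ_COUNTER_BITS: u32 = usize::BITS; // native counter: a single word, >= 64 bits
#[cfg(not(any(target_pointer_width = "64", target_pointer_width = "128")))]
const _SEQ_COUNTER_BITS: u32 = usize::BITS * 2; // "wide" counter: two words combined
const _: () = assert!(_SEQ_COUNTER_BITS >= 32);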

#[cfg(not(crossbeam_no_atomic_cas))]
mod atomic_cell;
mod consume;

#[cfg(not(crossbeam_no_atomic_cas))]
pub use self::atomic_cell::AtomicCell;
pub use self::consume::AtomicConsume;