rust source #1
Source code
#![no_std]
#![allow(unused, internal_features)]
#![feature(cfg_target_has_atomic, core_intrinsics)]

macro_rules! atomic_update {
    ($t:ident) => {
        mod $t {
            pub(crate) use core::sync::atomic::Ordering;

            #[inline(always)]
            pub(crate) unsafe fn atomic_update<F>(dst: *mut $t, order: Ordering, mut f: F) -> $t
            where
                F: FnMut($t) -> $t,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
                let mut old = core::intrinsics::atomic_load_relaxed(dst);
                loop {
                    let next = f(old);
                    let (x, ok) = match order {
                        Ordering::Relaxed => core::intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, next),
                        Ordering::Acquire => core::intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, next),
                        Ordering::Release => core::intrinsics::atomic_cxchgweak_release_relaxed(dst, old, next),
                        Ordering::AcqRel => core::intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, next),
                        Ordering::SeqCst => core::intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, next),
                        _ => unreachable!(),
                    };
                    if ok {
                        return x;
                    }
                    old = x;
                }
            }
        }
    };
}

// #[cfg(target_has_atomic = "8")]
// atomic_update!(u8);
// #[cfg(target_has_atomic = "16")]
// atomic_update!(u16);
// #[cfg(target_has_atomic = "32")]
// atomic_update!(u32);
// #[cfg(target_has_atomic = "64")]
// atomic_update!(u64);
#[cfg(any(
    target_arch = "aarch64",
    all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
    target_arch = "s390x",
    all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
))]
atomic_update!(u128);

pub mod load {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_load_relaxed(a) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_load_acquire(a) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_load_seqcst(a) }
            }
        };
    }
    // #[cfg(target_has_atomic_load_store = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic_load_store = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic_load_store = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic_load_store = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod store {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { core::intrinsics::atomic_store_relaxed(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) { core::intrinsics::atomic_store_release(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { core::intrinsics::atomic_store_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic_load_store = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic_load_store = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic_load_store = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic_load_store = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod swap {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xchg_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xchg_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xchg_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod compare_exchange {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod compare_exchange_weak {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_add {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xadd_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xadd_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xadd_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_sub {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xsub_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xsub_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xsub_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_and {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_and_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_and_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_and_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_and_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_and_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_nand {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_nand_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_nand_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_nand_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_nand_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_nand_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_or {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_or_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_or_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_or_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_or_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_or_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_xor {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xor_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xor_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xor_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xor_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xor_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_not {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_xor_relaxed(a, T::MAX) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_xor_acquire(a, T::MAX) }
                #[inline(never)] pub unsafe fn release(a: A) -> T { core::intrinsics::atomic_xor_release(a, T::MAX) }
                #[inline(never)] pub unsafe fn acqrel(a: A) -> T { core::intrinsics::atomic_xor_acqrel(a, T::MAX) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_xor_seqcst(a, T::MAX) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

pub mod fetch_neg {
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { atomic_update(a, Ordering::Relaxed, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { atomic_update(a, Ordering::Acquire, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn release(a: A) -> T { atomic_update(a, Ordering::Release, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn acqrel(a: A) -> T { atomic_update(a, Ordering::AcqRel, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { atomic_update(a, Ordering::SeqCst, $t::wrapping_neg) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // u!(u8);
    // #[cfg(target_has_atomic = "16")]
    // u!(u16);
    // #[cfg(target_has_atomic = "32")]
    // u!(u32);
    // #[cfg(target_has_atomic = "64")]
    // u!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    u!(u128);
}

pub mod fetch_max {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_max_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_max_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_max_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_max_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_max_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::max(x as T, val) as U) as T }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(i8);
    // #[cfg(target_has_atomic = "16")]
    // t!(i16);
    // #[cfg(target_has_atomic = "32")]
    // t!(i32);
    // #[cfg(target_has_atomic = "64")]
    // t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

pub mod fetch_umax {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umax_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umax_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umax_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umax_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umax_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::max(x, val)) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}

pub mod fetch_min {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_min_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_min_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_min_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_min_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_min_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::min(x as T, val) as U) as T }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(i8);
    // #[cfg(target_has_atomic = "16")]
    // t!(i16);
    // #[cfg(target_has_atomic = "32")]
    // t!(i32);
    // #[cfg(target_has_atomic = "64")]
    // t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

pub mod fetch_umin {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umin_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umin_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umin_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umin_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umin_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::min(x, val)) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}
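The whole first pane reduces to one pattern: `atomic_update` performs a relaxed initial load and then retries a weak compare-and-swap until it succeeds, applying the caller's ordering only to the successful exchange (the failure ordering stays relaxed because a failed CAS publishes nothing). For comparison, here is a minimal sketch of the same loop written against the stable `core::sync::atomic` API instead of the unstable intrinsics; `AtomicU64` stands in because stable Rust exposes no 128-bit atomics, and the function name is illustrative, not part of the source above:

    use core::sync::atomic::{AtomicU64, Ordering};

    // Same shape as `atomic_update` above: relaxed first load, weak-CAS retry
    // loop, and the requested ordering applied only to the successful exchange.
    fn atomic_update_stable<F: FnMut(u64) -> u64>(a: &AtomicU64, order: Ordering, mut f: F) -> u64 {
        let mut old = a.load(Ordering::Relaxed);
        loop {
            let next = f(old);
            match a.compare_exchange_weak(old, next, order, Ordering::Relaxed) {
                Ok(prev) => return prev, // stored `next`; return the previous value
                Err(prev) => old = prev, // another thread won the race; retry
            }
        }
    }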
rust source #2
Source code
// From https://github.com/taiki-e/portable-atomic
#![no_std]
#![allow(unused)]
#![feature(asm_experimental_arch)]

pub mod load {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_load(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_load(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_load(a, Ordering::SeqCst) }
    }
}

pub mod store {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { crate::atomic_store(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) { crate::atomic_store(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { crate::atomic_store(a, val, Ordering::SeqCst) }
    }
}

pub mod swap {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::SeqCst) }
    }
}

pub mod compare_exchange {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
    }
}

pub mod compare_exchange_weak {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
    }
}

pub mod fetch_add {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_sub {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_and {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_nand {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_or {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_xor {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_not {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_not(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_not(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_not(a, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_not(a, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_not(a, Ordering::SeqCst) }
    }
}

pub mod fetch_neg {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_neg(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_neg(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_neg(a, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_neg(a, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_neg(a, Ordering::SeqCst) }
    }
}

pub mod fetch_max {
    pub mod i128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_umax {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_min {
    pub mod i128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::SeqCst) }
    }
}

pub mod fetch_umin {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::SeqCst) }
    }
}

#[macro_use]
mod utils {
    #[cfg(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64", target_arch = "bpf", target_arch = "loongarch64",
            target_arch = "mips64", target_arch = "mips64r6", target_arch = "nvptx64",
            target_arch = "powerpc64", target_arch = "riscv64", target_arch = "s390x",
            target_arch = "sparc64", target_arch = "wasm64", target_arch = "x86_64",
        ),
    ))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
            #[allow(clippy::ptr_as_ptr)]
            {
                // If we cast to u64 here, the provenance will be lost,
                // so we convert to MaybeUninit<u64> via zero extend helper.
                crate::utils::zero_extend64_ptr($ptr as *mut ())
            }
            #[cfg(portable_atomic_no_asm_maybe_uninit)]
            {
                // Use cast on old rustc because it does not support MaybeUninit
                // registers. This is still permissive-provenance compatible and
                // is sound.
                $ptr as u64
            }
        }};
    }
    #[cfg(not(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64", target_arch = "bpf", target_arch = "loongarch64",
            target_arch = "mips64", target_arch = "mips64r6", target_arch = "nvptx64",
            target_arch = "powerpc64", target_arch = "riscv64", target_arch = "s390x",
            target_arch = "sparc64", target_arch = "wasm64", target_arch = "x86_64",
        ),
    )))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            $ptr // cast is unnecessary here.
        }};
    }

    use core::sync::atomic::Ordering;

    // Stable version of https://doc.rust-lang.org/nightly/std/hint/fn.assert_unchecked.html.
    #[inline(always)]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) unsafe fn assert_unchecked(cond: bool) {
        if !cond {
            if cfg!(debug_assertions) {
                unreachable!()
            } else {
                // SAFETY: the caller promised `cond` is true.
                unsafe { core::hint::unreachable_unchecked() }
            }
        }
    }

    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
    // https://github.com/rust-lang/rust/pull/98383
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
        match (success, failure) {
            (Ordering::Relaxed, Ordering::Acquire) => Ordering::Acquire,
            (Ordering::Release, Ordering::Acquire) => Ordering::AcqRel,
            (_, Ordering::SeqCst) => Ordering::SeqCst,
            _ => success,
        }
    }

    /// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
    /// This is used for 64-bit architecture's 32-bit ABI (e.g., AArch64 ILP32 ABI).
    /// See ptr_reg! macro in src/gen/utils.rs for details.
    #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
    #[cfg(target_pointer_width = "32")]
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
        #[repr(C)]
        struct ZeroExtended {
            #[cfg(target_endian = "big")]
            pad: *mut (),
            v: *mut (),
            #[cfg(target_endian = "little")]
            pad: *mut (),
        }
        // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
        unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
    }

    #[allow(dead_code)]
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "s390x",
        target_arch = "x86_64",
    ))]
    /// A 128-bit value represented as a pair of 64-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U128 {
        pub(crate) whole: u128,
        pub(crate) pair: Pair<u64>,
    }

    #[allow(dead_code)]
    #[cfg(target_arch = "arm")]
    /// A 64-bit value represented as a pair of 32-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U64 {
        pub(crate) whole: u64,
        pub(crate) pair: Pair<u32>,
    }

    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) struct Pair<T: Copy> {
        // little endian order
        #[cfg(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm"))]
        pub(crate) lo: T,
        pub(crate) hi: T,
        // big endian order
        #[cfg(not(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm")))]
        pub(crate) lo: T,
    }
}

macro_rules! atomic_rmw_by_atomic_update {
    (cmp) => {
        #[inline]
        unsafe fn atomic_max(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x as i128, val as i128) as u128) }
        }
        #[inline]
        unsafe fn atomic_umax(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x, val)) }
        }
        #[inline]
        unsafe fn atomic_min(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x as i128, val as i128) as u128) }
        }
        #[inline]
        unsafe fn atomic_umin(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x, val)) }
        }
    };
}

use core::{arch::asm, sync::atomic::Ordering};
use utils::*;

// Asserts that the function is called in the correct context.
macro_rules! debug_assert_cmpxchg16b {
    () => {
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            debug_assert!(detect::detect().has_cmpxchg16b());
        }
    };
}
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
macro_rules! debug_assert_vmovdqa_atomic {
    () => {{
        debug_assert_cmpxchg16b!();
        debug_assert!(detect::detect().has_vmovdqa_atomic());
    }};
}

#[allow(unused_macros)]
#[cfg(target_pointer_width = "32")]
macro_rules! ptr_modifier {
    () => {
        ":e"
    };
}
#[allow(unused_macros)]
#[cfg(target_pointer_width = "64")]
macro_rules! ptr_modifier {
    () => {
        ""
    };
}

// Unlike AArch64 and RISC-V, x86's assembler doesn't check instruction
// requirements for the currently enabled target features. In the first place,
// there is no option in the x86 assembly for such a case, like ARM
// .arch_extension, RISC-V .option arch, PowerPC .machine, etc.
// However, we set target_feature(enable) when available (Rust 1.69+) in case a
// new codegen backend is added that checks for it in the future, or an option
// is added to the assembler to check for it.
#[cfg_attr( not(portable_atomic_no_cmpxchg16b_target_feature), target_feature(enable = "cmpxchg16b") )] #[inline] unsafe fn cmpxchg16b(dst: *mut u128, old: u128, new: u128) -> (u128, bool) { debug_assert!(dst as usize % 16 == 0); debug_assert_cmpxchg16b!(); // SAFETY: the caller must guarantee that `dst` is valid for both writes and // reads, 16-byte aligned (required by CMPXCHG16B), that there are no // concurrent non-atomic operations, and that the CPU supports CMPXCHG16B. // // If the value at `dst` (destination operand) and rdx:rax are equal, the // 128-bit value in rcx:rbx is stored in the `dst`, otherwise the value at // `dst` is loaded to rdx:rax. // // The ZF flag is set if the value at `dst` and rdx:rax are equal, // otherwise it is cleared. Other flags are unaffected. // // Refs: https://www.felixcloutier.com/x86/cmpxchg8b:cmpxchg16b unsafe { // cmpxchg16b is always SeqCst. let r: u8; let old = U128 { whole: old }; let new = U128 { whole: new }; let (prev_lo, prev_hi); macro_rules! cmpxchg16b { ($rdi:tt) => { asm!( "xchg {rbx_tmp}, rbx", // save rbx which is reserved by LLVM concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"), "sete r8b", "mov rbx, {rbx_tmp}", // restore rbx rbx_tmp = inout(reg) new.pair.lo => _, in("rcx") new.pair.hi, inout("rax") old.pair.lo => prev_lo, inout("rdx") old.pair.hi => prev_hi, in($rdi) dst, out("r8b") r, // Do not use `preserves_flags` because CMPXCHG16B modifies the ZF flag. options(nostack), ) }; } #[cfg(target_pointer_width = "32")] cmpxchg16b!("edi"); #[cfg(target_pointer_width = "64")] cmpxchg16b!("rdi"); assert_unchecked(r == 0 || r == 1); // needed to remove extra test (U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole, r != 0) } } // VMOVDQA is atomic on Intel and AMD CPUs with AVX. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688 for details. // // Refs: https://www.felixcloutier.com/x86/movdqa:vmovdqa32:vmovdqa64 // // Use cfg(target_feature = "sse") here -- SSE is included in the x86_64 // baseline and is always available, but the SSE target feature is disabled for // use cases such as kernels and firmware that should not use vector registers. // So, do not use vector registers unless SSE target feature is enabled. // See also https://doc.rust-lang.org/nightly/rustc/platform-support/x86_64-unknown-none.html. #[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))] #[cfg(target_feature = "sse")] #[target_feature(enable = "avx")] #[inline] unsafe fn atomic_load_vmovdqa(src: *mut u128) -> u128 { debug_assert!(src as usize % 16 == 0); debug_assert_vmovdqa_atomic!(); // SAFETY: the caller must uphold the safety contract. // // atomic load by vmovdqa is always SeqCst. unsafe { let out: core::arch::x86_64::__m128; asm!( concat!("vmovdqa {out}, xmmword ptr [{src", ptr_modifier!(), "}]"), src = in(reg) src, out = out(xmm_reg) out, options(nostack, preserves_flags), ); core::mem::transmute(out) } } #[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))] #[cfg(target_feature = "sse")] #[target_feature(enable = "avx")] #[inline] unsafe fn atomic_store_vmovdqa(dst: *mut u128, val: u128, order: Ordering) { debug_assert!(dst as usize % 16 == 0); debug_assert_vmovdqa_atomic!(); // SAFETY: the caller must uphold the safety contract. unsafe { let val: core::arch::x86_64::__m128 = core::mem::transmute(val); match order { // Relaxed and Release stores are equivalent. 
            Ordering::Relaxed | Ordering::Release => {
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    options(nostack, preserves_flags),
                );
            }
            Ordering::SeqCst => {
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    "mfence",
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    options(nostack, preserves_flags),
                );
            }
            _ => unreachable!(),
        }
    }
}

#[cfg(not(all(
    any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
macro_rules! load_store_detect {
    (
        vmovdqa = $vmovdqa:ident
        cmpxchg16b = $cmpxchg16b:ident
        fallback = $fallback:ident
    ) => {{
        let cpuid = detect::detect();
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            // Check CMPXCHG16B first to prevent mixing atomic and non-atomic access.
            if cpuid.has_cmpxchg16b() {
                // We only use VMOVDQA when SSE is enabled. See atomic_load_vmovdqa() for more.
                #[cfg(target_feature = "sse")]
                {
                    if cpuid.has_vmovdqa_atomic() {
                        $vmovdqa
                    } else {
                        $cmpxchg16b
                    }
                }
                #[cfg(not(target_feature = "sse"))]
                {
                    $cmpxchg16b
                }
            } else {
                fallback::$fallback
            }
        }
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        {
            if cpuid.has_vmovdqa_atomic() {
                $vmovdqa
            } else {
                $cmpxchg16b
            }
        }
    }};
}

#[inline]
unsafe fn atomic_load(src: *mut u128, _order: Ordering) -> u128 {
    // We only use VMOVDQA when SSE is enabled. See atomic_load_vmovdqa() for more.
    // SGX doesn't support CPUID.
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    ))]
    // SAFETY: the caller must uphold the safety contract.
    // cfg guarantees that CMPXCHG16B is available at compile-time.
    unsafe {
        // cmpxchg16b is always SeqCst.
        atomic_load_cmpxchg16b(src)
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    )))]
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        ifunc!(unsafe fn(src: *mut u128) -> u128 {
            load_store_detect! {
                vmovdqa = atomic_load_vmovdqa
                cmpxchg16b = atomic_load_cmpxchg16b
                // Use SeqCst because cmpxchg16b and atomic load by vmovdqa are always SeqCst.
                fallback = atomic_load_seqcst
            }
        })
    }
}

// See cmpxchg16b() for target_feature(enable).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_load_cmpxchg16b(src: *mut u128) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_cmpxchg16b!();

    // SAFETY: the caller must guarantee that `src` is valid for both writes and
    // reads, 16-byte aligned, and that there are no concurrent non-atomic operations.
    // cfg guarantees that the CPU supports CMPXCHG16B.
    //
    // See the cmpxchg16b function for more.
    //
    // We could use a CAS loop via atomic_compare_exchange here, but using inline assembly allows
    // omitting the storing of condition flags and avoids the use of xchg to handle rbx.
    unsafe {
        // cmpxchg16b is always SeqCst.
        let (out_lo, out_hi);
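        // The 16-byte load is expressed as CMPXCHG16B with old = new = 0: if `*src`
        // is 0 the CAS succeeds and stores 0 back (no visible change to the value);
        // otherwise it fails and loads the current value into rdx:rax. Either way,
        // rdx:rax holds the loaded value afterwards. This is also why `src` must be
        // valid for writes.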
        macro_rules! cmpxchg16b {
            ($rdi:tt) => {
                asm!(
                    "mov {rbx_tmp}, rbx", // save rbx which is reserved by LLVM
                    "xor rbx, rbx", // zero rbx
                    concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                    "mov rbx, {rbx_tmp}", // restore rbx
                    // set old/new args of cmpxchg16b to 0 (rbx is zeroed after being saved to rbx_tmp, to avoid xchg)
                    rbx_tmp = out(reg) _,
                    in("rcx") 0_u64,
                    inout("rax") 0_u64 => out_lo,
                    inout("rdx") 0_u64 => out_hi,
                    in($rdi) src,
                    // Do not use `preserves_flags` because CMPXCHG16B modifies the ZF flag.
                    options(nostack),
                )
            };
        }
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi");
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi");
        U128 { pair: Pair { lo: out_lo, hi: out_hi } }.whole
    }
}

#[inline]
unsafe fn atomic_store(dst: *mut u128, val: u128, order: Ordering) {
    // We only use VMOVDQA when SSE is enabled. See atomic_load_vmovdqa() for more.
    // SGX doesn't support CPUID.
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    ))]
    // SAFETY: the caller must uphold the safety contract.
    // cfg guarantees that CMPXCHG16B is available at compile-time.
    unsafe {
        // cmpxchg16b is always SeqCst.
        let _ = order;
        atomic_store_cmpxchg16b(dst, val);
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    )))]
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        #[cfg(target_feature = "sse")]
        fn_alias! {
            #[target_feature(enable = "avx")]
            unsafe fn(dst: *mut u128, val: u128);
            // atomic store by vmovdqa has at least release semantics.
            atomic_store_vmovdqa_non_seqcst = atomic_store_vmovdqa(Ordering::Release);
            atomic_store_vmovdqa_seqcst = atomic_store_vmovdqa(Ordering::SeqCst);
        }
        match order {
            // Relaxed and Release stores are equivalent in all implementations
            // that may be called here (vmovdqa, asm-based cmpxchg16b, and fallback).
            // core::arch's cmpxchg16b will never be called here.
            Ordering::Relaxed | Ordering::Release => {
                ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                    load_store_detect! {
                        vmovdqa = atomic_store_vmovdqa_non_seqcst
                        cmpxchg16b = atomic_store_cmpxchg16b
                        fallback = atomic_store_non_seqcst
                    }
                });
            }
            Ordering::SeqCst => {
                ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                    load_store_detect! {
                        vmovdqa = atomic_store_vmovdqa_seqcst
                        cmpxchg16b = atomic_store_cmpxchg16b
                        fallback = atomic_store_seqcst
                    }
                });
            }
            _ => unreachable!(),
        }
    }
}

// See cmpxchg16b() for target_feature(enable).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_store_cmpxchg16b(dst: *mut u128, val: u128) {
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        // cmpxchg16b is always SeqCst.
        atomic_swap_cmpxchg16b(dst, val, Ordering::SeqCst);
    }
}

#[inline]
unsafe fn atomic_compare_exchange(
    dst: *mut u128,
    old: u128,
    new: u128,
    _success: Ordering,
    _failure: Ordering,
) -> Result<u128, u128> {
    #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
    // SAFETY: the caller must guarantee that `dst` is valid for both writes and
    // reads, 16-byte aligned, that there are no concurrent non-atomic operations,
    // and cfg guarantees that CMPXCHG16B is available at compile-time.
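    // The `_success`/`_failure` orderings are ignored: CMPXCHG16B is a locked
    // instruction and therefore always SeqCst, which is at least as strong as any
    // ordering the caller can request.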
    let (prev, ok) = unsafe { cmpxchg16b(dst, old, new) };
    #[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
    // SAFETY: the caller must guarantee that `dst` is valid for both writes and
    // reads, 16-byte aligned, and that there are no different kinds of concurrent accesses.
    let (prev, ok) = unsafe {
        ifunc!(unsafe fn(dst: *mut u128, old: u128, new: u128) -> (u128, bool) {
            if detect::detect().has_cmpxchg16b() {
                cmpxchg16b
            } else {
                // Use SeqCst because cmpxchg16b is always SeqCst.
                fallback::atomic_compare_exchange_seqcst
            }
        })
    };
    if ok {
        Ok(prev)
    } else {
        Err(prev)
    }
}

// cmpxchg16b is always strong.
use atomic_compare_exchange as atomic_compare_exchange_weak;

#[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
use atomic_swap_cmpxchg16b as atomic_swap;
// See cmpxchg16b() for target_feature(enable).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_swap_cmpxchg16b(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b!();

    // SAFETY: the caller must guarantee that `dst` is valid for both writes and
    // reads, 16-byte aligned, and that there are no concurrent non-atomic operations.
    // cfg guarantees that the CPU supports CMPXCHG16B.
    //
    // See the cmpxchg16b function for more.
    //
    // We could use a CAS loop via atomic_compare_exchange here, but using inline assembly allows
    // omitting the storing/comparing of condition flags and reduces the uses of xchg/mov to handle rbx.
    //
    // Do not use atomic_rmw_cas_3 because it needs an extra MOV to implement swap.
    unsafe {
        // cmpxchg16b is always SeqCst.
        let val = U128 { whole: val };
        let (mut prev_lo, mut prev_hi);
        macro_rules! cmpxchg16b {
            ($rdi:tt) => {
                asm!(
                    "xchg {rbx_tmp}, rbx", // save rbx which is reserved by LLVM
                    // This is not a single-copy atomic read, but that is fine because the
                    // subsequent CAS will check for consistency.
                    //
                    // This is based on the code generated for the first load in DW RMWs by LLVM.
                    //
                    // Note that the C++20 memory model does not allow mixed-sized atomic access,
                    // so we must use inline assembly to implement this.
                    // (i.e., byte-wise atomic access based on the standard library's atomic types
                    // cannot be used here).
                    concat!("mov rax, qword ptr [", $rdi, "]"),
                    concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                    "2:",
                    concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                    "jne 2b",
                    "mov rbx, {rbx_tmp}", // restore rbx
                    rbx_tmp = inout(reg) val.pair.lo => _,
                    in("rcx") val.pair.hi,
                    out("rax") prev_lo,
                    out("rdx") prev_hi,
                    in($rdi) dst,
                    // Do not use `preserves_flags` because CMPXCHG16B modifies the ZF flag.
                    options(nostack),
                )
            };
        }
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi");
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi");
        U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
    }
}

/// Atomic RMW by CAS loop (3 arguments)
/// `unsafe fn(dst: *mut u128, val: u128, order: Ordering) -> u128;`
///
/// `$op` can use the following registers:
/// - rsi/r8 pair: val argument (read-only for `$op`)
/// - rax/rdx pair: previous value loaded (read-only for `$op`)
/// - rbx/rcx pair: new value that will be stored
// We could use a CAS loop via atomic_compare_exchange here, but using inline assembly allows
// omitting the storing/comparing of condition flags and reduces the uses of xchg/mov to handle rbx.
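// For example, for atomic_add the `$op` sequence computes rcx:rbx = rdx:rax + r8:rsi
// with `add`/`adc`, so the carry propagates from the low half into the high half
// (see the invocations below).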
macro_rules! atomic_rmw_cas_3 {
    ($name:ident as $reexport_name:ident, $($op:tt)*) => {
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        use $name as $reexport_name;
        // See cmpxchg16b() for target_feature(enable).
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();

            // SAFETY: the caller must guarantee that `dst` is valid for both writes and
            // reads, 16-byte aligned, and that there are no concurrent non-atomic operations.
            // cfg guarantees that the CPU supports CMPXCHG16B.
            //
            // See the cmpxchg16b function for more.
            unsafe {
                // cmpxchg16b is always SeqCst.
                let val = U128 { whole: val };
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($rdi:tt) => {
                        asm!(
                            "mov {rbx_tmp}, rbx", // save rbx which is reserved by LLVM
                            // This is not a single-copy atomic read, but that is fine because the
                            // subsequent CAS will check for consistency.
                            //
                            // This is based on the code generated for the first load in DW RMWs by LLVM.
                            //
                            // Note that the C++20 memory model does not allow mixed-sized atomic access,
                            // so we must use inline assembly to implement this.
                            // (i.e., byte-wise atomic access based on the standard library's atomic types
                            // cannot be used here).
                            concat!("mov rax, qword ptr [", $rdi, "]"),
                            concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                            "2:",
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                            "jne 2b",
                            "mov rbx, {rbx_tmp}", // restore rbx
                            rbx_tmp = out(reg) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($rdi) dst,
                            in("rsi") val.pair.lo,
                            in("r8") val.pair.hi,
                            // Do not use `preserves_flags` because CMPXCHG16B modifies the ZF flag.
                            options(nostack),
                        )
                    };
                }
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi");
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}
/// Atomic RMW by CAS loop (2 arguments)
/// `unsafe fn(dst: *mut u128, order: Ordering) -> u128;`
///
/// `$op` can use the following registers:
/// - rax/rdx pair: previous value loaded (read-only for `$op`)
/// - rbx/rcx pair: new value that will be stored
// We could use a CAS loop via atomic_compare_exchange here, but using inline assembly allows
// omitting the storing of condition flags and avoids the use of xchg to handle rbx.
macro_rules! atomic_rmw_cas_2 {
    ($name:ident as $reexport_name:ident, $($op:tt)*) => {
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        use $name as $reexport_name;
        // See cmpxchg16b() for target_feature(enable).
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();

            // SAFETY: the caller must guarantee that `dst` is valid for both writes and
            // reads, 16-byte aligned, and that there are no concurrent non-atomic operations.
            // cfg guarantees that the CPU supports CMPXCHG16B.
            //
            // See the cmpxchg16b function for more.
            unsafe {
                // cmpxchg16b is always SeqCst.
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($rdi:tt) => {
                        asm!(
                            "mov {rbx_tmp}, rbx", // save rbx which is reserved by LLVM
                            // This is not a single-copy atomic read, but that is fine because the
                            // subsequent CAS will check for consistency.
                            //
                            // This is based on the code generated for the first load in DW RMWs by LLVM.
                            //
                            // Note that the C++20 memory model does not allow mixed-sized atomic access,
                            // so we must use inline assembly to implement this.
                            // (i.e., byte-wise atomic access based on the standard library's atomic types
                            // cannot be used here).
                            concat!("mov rax, qword ptr [", $rdi, "]"),
                            concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                            "2:",
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                            "jne 2b",
                            "mov rbx, {rbx_tmp}", // restore rbx
                            rbx_tmp = out(reg) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($rdi) dst,
                            // Do not use `preserves_flags` because CMPXCHG16B modifies the ZF flag.
                            options(nostack),
                        )
                    };
                }
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi");
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}

atomic_rmw_cas_3! {
    atomic_add_cmpxchg16b as atomic_add,
    "mov rbx, rax",
    "add rbx, rsi",
    "mov rcx, rdx",
    "adc rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_sub_cmpxchg16b as atomic_sub,
    "mov rbx, rax",
    "sub rbx, rsi",
    "mov rcx, rdx",
    "sbb rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_and_cmpxchg16b as atomic_and,
    "mov rbx, rax",
    "and rbx, rsi",
    "mov rcx, rdx",
    "and rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_nand_cmpxchg16b as atomic_nand,
    "mov rbx, rax",
    "and rbx, rsi",
    "not rbx",
    "mov rcx, rdx",
    "and rcx, r8",
    "not rcx",
}
atomic_rmw_cas_3! {
    atomic_or_cmpxchg16b as atomic_or,
    "mov rbx, rax",
    "or rbx, rsi",
    "mov rcx, rdx",
    "or rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_xor_cmpxchg16b as atomic_xor,
    "mov rbx, rax",
    "xor rbx, rsi",
    "mov rcx, rdx",
    "xor rcx, r8",
}
atomic_rmw_cas_2! {
    atomic_not_cmpxchg16b as atomic_not,
    "mov rbx, rax",
    "not rbx",
    "mov rcx, rdx",
    "not rcx",
}
atomic_rmw_cas_2! {
    atomic_neg_cmpxchg16b as atomic_neg,
    "mov rbx, rax",
    "neg rbx",
    "mov rcx, 0",
    "sbb rcx, rdx",
}
// The max/min variants do a full 128-bit compare with cmp/sbb (the flags of the
// high-half subtraction decide the result), then select old or val with cmov.
atomic_rmw_cas_3! {
    atomic_max_cmpxchg16b as atomic_max,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovl rcx, rdx",
    "mov rbx, rsi",
    "cmovl rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_umax_cmpxchg16b as atomic_umax,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovb rcx, rdx",
    "mov rbx, rsi",
    "cmovb rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_min_cmpxchg16b as atomic_min,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovge rcx, rdx",
    "mov rbx, rsi",
    "cmovge rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_umin_cmpxchg16b as atomic_umin,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovae rcx, rdx",
    "mov rbx, rsi",
    "cmovae rbx, rax",
}

macro_rules! atomic_rmw_with_ifunc {
    (
        unsafe fn $name:ident($($arg:tt)*) $(-> $ret_ty:ty)?;
        cmpxchg16b = $cmpxchg16b_fn:ident;
        fallback = $seqcst_fallback_fn:ident;
    ) => {
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        #[inline]
        unsafe fn $name($($arg)*, _order: Ordering) $(-> $ret_ty)? {
            fn_alias! {
                // See cmpxchg16b() for target_feature(enable).
                #[cfg_attr(
                    not(portable_atomic_no_cmpxchg16b_target_feature),
                    target_feature(enable = "cmpxchg16b")
                )]
                unsafe fn($($arg)*) $(-> $ret_ty)?;
                // cmpxchg16b is always SeqCst.
                cmpxchg16b_seqcst_fn = $cmpxchg16b_fn(Ordering::SeqCst);
            }
            // SAFETY: the caller must uphold the safety contract.
            // We only call cmpxchg16b_fn if CMPXCHG16B is available.
            unsafe {
                ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                    if detect::detect().has_cmpxchg16b() {
                        cmpxchg16b_seqcst_fn
                    } else {
                        // Use SeqCst because cmpxchg16b is always SeqCst.
                        fallback::$seqcst_fallback_fn
                    }
                })
            }
        }
    };
}
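// When CMPXCHG16B availability is not known at compile time, each RMW below is
// dispatched through ifunc!: the first call runs CPUID detection and selects
// either the cmpxchg16b implementation or the SeqCst fallback, and the selected
// function pointer is then reused for later calls (see the ifunc! definition
// elsewhere in the crate).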
atomic_rmw_with_ifunc! {
    unsafe fn atomic_swap(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_swap_cmpxchg16b;
    fallback = atomic_swap_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_add(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_add_cmpxchg16b;
    fallback = atomic_add_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_sub(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_sub_cmpxchg16b;
    fallback = atomic_sub_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_and(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_and_cmpxchg16b;
    fallback = atomic_and_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_nand(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_nand_cmpxchg16b;
    fallback = atomic_nand_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_or(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_or_cmpxchg16b;
    fallback = atomic_or_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_xor(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_xor_cmpxchg16b;
    fallback = atomic_xor_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_max(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_max_cmpxchg16b;
    fallback = atomic_max_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_umax(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umax_cmpxchg16b;
    fallback = atomic_umax_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_min(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_min_cmpxchg16b;
    fallback = atomic_min_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_umin(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umin_cmpxchg16b;
    fallback = atomic_umin_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_not(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_not_cmpxchg16b;
    fallback = atomic_not_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_neg(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_neg_cmpxchg16b;
    fallback = atomic_neg_seqcst;
}
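// --- Illustrative sketch only; not part of the original source. ---
// A minimal example of how a public 128-bit atomic type could be layered on the
// primitives above. `Atomic128` and its methods are hypothetical names; the real
// crate wires these functions up through additional macros and cfg handling not
// shown in this listing.
#[repr(C, align(16))] // CMPXCHG16B and VMOVDQA both require 16-byte alignment.
pub struct Atomic128 {
    v: core::cell::UnsafeCell<u128>,
}

// SAFETY: all shared access to `v` goes through the atomic operations above.
unsafe impl Sync for Atomic128 {}

impl Atomic128 {
    pub const fn new(v: u128) -> Self {
        Self { v: core::cell::UnsafeCell::new(v) }
    }

    pub fn load(&self, order: Ordering) -> u128 {
        // SAFETY: `self.v` is 16-byte aligned, valid for reads and writes, and
        // never accessed non-atomically while shared.
        unsafe { atomic_load(self.v.get(), order) }
    }

    pub fn store(&self, val: u128, order: Ordering) {
        // SAFETY: as in `load`.
        unsafe { atomic_store(self.v.get(), val, order) }
    }

    pub fn fetch_add(&self, val: u128, order: Ordering) -> u128 {
        // SAFETY: as in `load`.
        unsafe { atomic_add(self.v.get(), val, order) }
    }
}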