Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Algol68
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C with Coccinelle
C++ with Coccinelle
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
GLSL
Go
Haskell
HLSL
Hook
Hylo
IL
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Mojo
Nim
Numba
Nix
Objective-C
Objective-C++
OCaml
Odin
OpenCL C
Pascal
Pony
PTX
Python
Racket
Raku
Ruby
Rust
Sail
Snowball
Scala
Slang
Solidity
Spice
SPIR-V
Swift
LLVM TableGen
Toit
Triton
TypeScript Native
V
Vala
Visual Basic
Vyper
WASM
Zig
JavaScript
GIMPLE
Ygen
sway
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc 1.90.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
// Codegen inspection crate for 128-bit atomics, calling the unstable
// `core::intrinsics` atomic intrinsics directly (nightly-only: see
// `#![feature(core_intrinsics)]`). One module per atomic operation; inside each,
// a `t!`/`u!` macro stamps out one `#[inline(never)]` function per memory
// ordering so every ordering gets its own symbol in the generated assembly.
// `u128`/`i128` instantiations are gated to targets with native 128-bit atomics
// (aarch64, powerpc64 with quadword-atomics, s390x, x86_64 with cmpxchg16b);
// the 8/16/32/64-bit instantiations are commented out.
//
// NOTE(review): this text is a whitespace-mangled paste — the original line
// breaks were lost, so the `//` comments inside `atomic_update` below now
// swallow the remainder of their joined line. Restore the original formatting
// before attempting to compile.
#![no_std] #![allow(unused, internal_features)] #![feature(cfg_target_has_atomic, core_intrinsics)] macro_rules! atomic_update { ($t:ident) => { mod $t { pub(crate) use core::sync::atomic::Ordering; #[inline(always)] pub(crate) unsafe fn atomic_update<F>(dst: *mut $t, order: Ordering, mut f: F) -> $t where F: FnMut($t) -> $t, { // This is a private function and all instances of `f` only operate on the value // loaded, so there is no need to synchronize the first load/failed CAS. let mut old = core::intrinsics::atomic_load_relaxed(dst); loop { let next = f(old); let (x, ok) = match order { Ordering::Relaxed => core::intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, next), Ordering::Acquire => core::intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, next), Ordering::Release => core::intrinsics::atomic_cxchgweak_release_relaxed(dst, old, next), Ordering::AcqRel => core::intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, next), Ordering::SeqCst => core::intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, next), _ => unreachable!(), }; if ok { return x; } old = x; } } } }; } // #[cfg(target_has_atomic = "8")] // atomic_update!(u8); // #[cfg(target_has_atomic = "16")] // atomic_update!(u16); // #[cfg(target_has_atomic = "32")] // atomic_update!(u32); // #[cfg(target_has_atomic = "64")] // atomic_update!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] atomic_update!(u128); pub mod load { macro_rules! 
// `load`: wrappers over `atomic_load_{relaxed,acquire,seqcst}` (the three
// orderings valid for a load). `#[inline(never)]` presumably keeps each as a
// distinct symbol for disassembly. Then `store`: `atomic_store_*` with the
// store-valid orderings (relaxed/release/seqcst), and the start of `swap`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_load_relaxed(a) } #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_load_acquire(a) } #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_load_seqcst(a) } } }; } // #[cfg(target_has_atomic_load_store = "8")] // t!(u8); // #[cfg(target_has_atomic_load_store = "16")] // t!(u16); // #[cfg(target_has_atomic_load_store = "32")] // t!(u32); // #[cfg(target_has_atomic_load_store = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod store { macro_rules! t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { core::intrinsics::atomic_store_relaxed(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) { core::intrinsics::atomic_store_release(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { core::intrinsics::atomic_store_seqcst(a, val) } } } } // #[cfg(target_has_atomic_load_store = "8")] // t!(u8); // #[cfg(target_has_atomic_load_store = "16")] // t!(u16); // #[cfg(target_has_atomic_load_store = "32")] // t!(u32); // #[cfg(target_has_atomic_load_store = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod swap { macro_rules! 
// `swap` body: `atomic_xchg_*` for all five orderings
// (relaxed/acquire/release/acqrel/seqcst); then the start of
// `compare_exchange` (strong CAS).
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xchg_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xchg_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xchg_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod compare_exchange { macro_rules! 
// `compare_exchange` body: `atomic_cxchg_<success>_<failure>` for every
// success/failure ordering pair; each wrapper converts the intrinsic's
// `(value, ok)` tuple into `Result<T, T>` (Ok = swapped, Err = observed value).
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = 
// (continuation of `compare_exchange`: acqrel_* and seqcst_* pairs;
// then the start of `compare_exchange_weak`.)
core::intrinsics::atomic_cxchg_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod compare_exchange_weak { macro_rules! 
// `compare_exchange_weak` body: same shape as `compare_exchange` but using the
// `atomic_cxchgweak_*` intrinsics. Note the `#[inline(never)]` attributes are
// commented out in this module only — presumably deliberate, to compare
// inlined vs. non-inlined codegen; verify against the original source.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; // #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: 
// (continuation of `compare_exchange_weak`: acqrel_* and seqcst_* pairs;
// then the start of `fetch_add`.)
T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } // #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_add { macro_rules! 
// `fetch_add` body: `atomic_xadd_*`, all five orderings; then start of `fetch_sub`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xadd_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xadd_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xadd_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_sub { macro_rules! 
// `fetch_sub` body: `atomic_xsub_*`; then start of `fetch_and`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xsub_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xsub_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xsub_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_and { macro_rules! 
// `fetch_and` body: `atomic_and_*`; then start of `fetch_nand`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_and_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_and_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_and_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_and_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_and_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_nand { macro_rules! 
// `fetch_nand` body: `atomic_nand_*`; then start of `fetch_or`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_nand_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_nand_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_nand_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_nand_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_nand_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_or { macro_rules! 
// `fetch_or` body: `atomic_or_*`; then start of `fetch_xor`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_or_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_or_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_or_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_or_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_or_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_xor { macro_rules! 
// `fetch_xor` body: `atomic_xor_*`; then start of `fetch_not`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xor_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xor_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xor_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xor_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xor_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_not { macro_rules! 
// `fetch_not` body: bitwise NOT implemented as `atomic_xor_*(a, T::MAX)`
// (XOR with all-ones flips every bit); then start of `fetch_neg`.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_xor_relaxed(a, T::MAX) } #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_xor_acquire(a, T::MAX) } #[inline(never)] pub unsafe fn release(a: A) -> T { core::intrinsics::atomic_xor_release(a, T::MAX) } #[inline(never)] pub unsafe fn acqrel(a: A) -> T { core::intrinsics::atomic_xor_acqrel(a, T::MAX) } #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_xor_seqcst(a, T::MAX) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod fetch_neg { macro_rules! 
// `fetch_neg` body: no negation intrinsic exists, so `u!` goes through the
// `atomic_update` weak-CAS loop with `wrapping_neg`. Then `fetch_max`: native
// `atomic_max_*` (signed max) via `t!`, plus the start of a `u!` fallback.
u { ($t:ident) => { pub mod $t { use crate::$t::*; type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A) -> T { atomic_update(a, Ordering::Relaxed, $t::wrapping_neg) } #[inline(never)] pub unsafe fn acquire(a: A) -> T { atomic_update(a, Ordering::Acquire, $t::wrapping_neg) } #[inline(never)] pub unsafe fn release(a: A) -> T { atomic_update(a, Ordering::Release, $t::wrapping_neg) } #[inline(never)] pub unsafe fn acqrel(a: A) -> T { atomic_update(a, Ordering::AcqRel, $t::wrapping_neg) } #[inline(never)] pub unsafe fn seqcst(a: A) -> T { atomic_update(a, Ordering::SeqCst, $t::wrapping_neg) } } } } // #[cfg(target_has_atomic = "8")] // u!(u8); // #[cfg(target_has_atomic = "16")] // u!(u16); // #[cfg(target_has_atomic = "32")] // u!(u32); // #[cfg(target_has_atomic = "64")] // u!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] u!(u128); } pub mod fetch_max { macro_rules! t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_max_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_max_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_max_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_max_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_max_seqcst(a, val) } } } } macro_rules! 
// `fetch_max`'s `u!($t, $u)` fallback: a CAS-loop max over the unsigned carrier
// type `$u`, casting through `as T`/`as U` so the *signed* comparison is used.
// Instantiated only for powerpc64 i128 (note the native-intrinsic cfg below
// comments out the powerpc64 arm), presumably because that target lacks the
// native 128-bit signed max — TODO confirm. Then the start of `fetch_umax`.
u { ($t:ident, $u:ident) => { pub mod $t { use crate::$u::*; type T = $t; type U = $u; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::max(x as T, val) as U) as T } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::max(x as T, val) as U) as T } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::max(x as T, val) as U) as T } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::max(x as T, val) as U) as T } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::max(x as T, val) as U) as T } } } } // #[cfg(target_has_atomic = "8")] // t!(i8); // #[cfg(target_has_atomic = "16")] // t!(i16); // #[cfg(target_has_atomic = "32")] // t!(i32); // #[cfg(target_has_atomic = "64")] // t!(i64); #[cfg(any( target_arch = "aarch64", // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(i128); #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))] u!(i128, u128); } pub mod fetch_umax { macro_rules! 
// `fetch_umax` body: native `atomic_umax_*` (unsigned max) via `t!`, with a
// same-type `u!` CAS-loop fallback used only on powerpc64. Then `fetch_min` begins.
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umax_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umax_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umax_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umax_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umax_seqcst(a, val) } } } } macro_rules! u { ($t:ident) => { pub mod $t { use crate::$t::*; type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::max(x, val)) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::max(x, val)) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::max(x, val)) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::max(x, val)) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::max(x, val)) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))] u!(u128); } pub mod fetch_min { 
// `fetch_min` body: native `atomic_min_*` (signed min) via `t!`, and the
// `u!($t, $u)` cast-through-unsigned CAS-loop fallback, mirroring `fetch_max`.
macro_rules! t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_min_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_min_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_min_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_min_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_min_seqcst(a, val) } } } } macro_rules! u { ($t:ident, $u:ident) => { pub mod $t { use crate::$u::*; type T = $t; type U = $u; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::min(x as T, val) as U) as T } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::min(x as T, val) as U) as T } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::min(x as T, val) as U) as T } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::min(x as T, val) as U) as T } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::min(x as T, val) as U) as T } } } } // #[cfg(target_has_atomic = "8")] // t!(i8); // #[cfg(target_has_atomic = "16")] // t!(i16); // #[cfg(target_has_atomic = "32")] // t!(i32); // #[cfg(target_has_atomic = "64")] // t!(i64); #[cfg(any( target_arch = "aarch64", // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(i128); #[cfg(all(target_arch = 
// (continuation of `fetch_min`'s cfg for the powerpc64 `u!` fallback;
// then `fetch_umin`: native `atomic_umin_*` plus same-type CAS-loop fallback.)
"powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))] u!(i128, u128); } pub mod fetch_umin { macro_rules! t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umin_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umin_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umin_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umin_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umin_seqcst(a, val) } } } } macro_rules! u { ($t:ident) => { pub mod $t { use crate::$t::*; type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::min(x, val)) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::min(x, val)) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::min(x, val)) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::min(x, val)) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::min(x, val)) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); 
// (final line: powerpc64 `u!` fallback instantiation closing `fetch_umin`.)
#[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))] u!(u128); }
rust source #2
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc 1.90.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
// From https://github.com/taiki-e/portable-atomic
// Codegen test fixture: each public module below exposes #[inline(never)]
// wrappers around one 128-bit atomic operation, one wrapper per memory
// ordering, so the machine code emitted for every ordering can be inspected
// in isolation. The wrapped crate::atomic_* implementations appear later in
// this file.
#![no_std]
#![allow(unused)]
#![feature(asm_experimental_arch)]
// 128-bit atomic load, one wrapper per valid load ordering.
pub mod load {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_load(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_load(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_load(a, Ordering::SeqCst) }
    }
}
// 128-bit atomic store, one wrapper per valid store ordering.
pub mod store {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { crate::atomic_store(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) { crate::atomic_store(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { crate::atomic_store(a, val, Ordering::SeqCst) }
    }
}
// 128-bit atomic swap, one wrapper per RMW ordering.
pub mod swap {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::SeqCst) }
    }
}
// 128-bit strong compare-exchange: one wrapper per (success, failure)
// ordering pair that callers may legally request.
pub mod compare_exchange {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
    }
}
// 128-bit weak compare-exchange (spurious failure permitted); same ordering grid.
pub mod compare_exchange_weak {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> {
            crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
        #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
    }
}
// 128-bit fetch_add (returns the previous value), one wrapper per RMW ordering.
pub mod fetch_add {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::SeqCst) }
    }
}
// 128-bit fetch_sub.
pub mod fetch_sub {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::SeqCst) }
    }
}
// 128-bit bitwise fetch_and.
pub mod fetch_and {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::SeqCst) }
    }
}
// 128-bit bitwise fetch_nand.
pub mod fetch_nand {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::SeqCst) }
    }
}
// 128-bit bitwise fetch_or.
pub mod fetch_or {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::SeqCst) }
    }
}
// 128-bit bitwise fetch_xor.
pub mod fetch_xor {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::SeqCst) }
    }
}
// 128-bit fetch_not (bitwise complement; takes no value operand).
pub mod fetch_not {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_not(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_not(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_not(a, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_not(a, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_not(a, Ordering::SeqCst) }
    }
}
// 128-bit fetch_neg (two's-complement negation; takes no value operand).
pub mod fetch_neg {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_neg(a, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_neg(a, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_neg(a, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_neg(a, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_neg(a, Ordering::SeqCst) }
    }
}
// Signed 128-bit fetch_max. The module is named `i128` for the signed flavor,
// but values are transported as raw u128 bits (atomic_max compares as i128).
pub mod fetch_max {
    pub mod i128 {
        use core::sync::atomic::Ordering;
        // NOTE(review): T is u128 even in this `i128`-named module — the signed
        // comparison happens inside atomic_max; the value itself is raw bits.
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::SeqCst) }
    }
}
// Unsigned 128-bit fetch_max.
pub mod fetch_umax {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::SeqCst) }
    }
}
// Signed 128-bit fetch_min (same u128-bits convention as fetch_max above).
pub mod fetch_min {
    pub mod i128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::SeqCst) }
    }
}
// Unsigned 128-bit fetch_min.
pub mod fetch_umin {
    pub mod u128 {
        use core::sync::atomic::Ordering;
        type A = *mut T;
        type T = u128;
        #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Relaxed) }
        #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Acquire) }
        #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Release) }
        #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::AcqRel) }
        #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::SeqCst) }
    }
}
// Shared helpers used by the asm implementations below.
#[macro_use]
mod utils {
    // ptr_reg! converts a pointer into a form suitable for a 64-bit asm register
    // operand. On 64-bit architectures with a 32-bit ABI (ILP32), a raw pointer is
    // only 32 bits wide, so it must be zero-extended to 64 bits first.
    #[cfg(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64",
            target_arch = "bpf",
            target_arch = "loongarch64",
            target_arch = "mips64",
            target_arch = "mips64r6",
            target_arch = "nvptx64",
            target_arch = "powerpc64",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "sparc64",
            target_arch = "wasm64",
            target_arch = "x86_64",
        ),
    ))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
            #[allow(clippy::ptr_as_ptr)]
            {
                // If we cast to u64 here, the provenance will be lost,
                // so we convert to MaybeUninit<u64> via zero extend helper.
                crate::utils::zero_extend64_ptr($ptr as *mut ())
            }
            #[cfg(portable_atomic_no_asm_maybe_uninit)]
            {
                // Use cast on old rustc because it does not support MaybeUninit
                // registers. This is still permissive-provenance compatible and
                // is sound.
                $ptr as u64
            }
        }};
    }
    // On targets whose pointer width already matches the register width,
    // the pointer can be passed to asm! as-is.
    #[cfg(not(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64",
            target_arch = "bpf",
            target_arch = "loongarch64",
            target_arch = "mips64",
            target_arch = "mips64r6",
            target_arch = "nvptx64",
            target_arch = "powerpc64",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "sparc64",
            target_arch = "wasm64",
            target_arch = "x86_64",
        ),
    )))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            $ptr // cast is unnecessary here.
        }};
    }
    use core::sync::atomic::Ordering;
    // Stable version of https://doc.rust-lang.org/nightly/std/hint/fn.assert_unchecked.html.
    // In debug builds a false condition panics; in release builds it is UB
    // (the caller guarantees `cond` holds).
    #[inline(always)]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) unsafe fn assert_unchecked(cond: bool) {
        if !cond {
            if cfg!(debug_assertions) {
                unreachable!()
            } else {
                // SAFETY: the caller promised `cond` is true.
                unsafe { core::hint::unreachable_unchecked() }
            }
        }
    }
    // Strengthen the success ordering so that it is at least as strong as the
    // requested failure ordering (pre-C++17 / pre-rust#98383 rule).
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
    // https://github.com/rust-lang/rust/pull/98383
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
        match (success, failure) {
            (Ordering::Relaxed, Ordering::Acquire) => Ordering::Acquire,
            (Ordering::Release, Ordering::Acquire) => Ordering::AcqRel,
            (_, Ordering::SeqCst) => Ordering::SeqCst,
            _ => success,
        }
    }
    /// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
    /// This is used for 64-bit architecture's 32-bit ABI (e.g., AArch64 ILP32 ABI).
    /// See ptr_reg! macro in src/gen/utils.rs for details.
    #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
    #[cfg(target_pointer_width = "32")]
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
        // Layout trick: place the pointer in the low half and a null pad in the
        // high half (order chosen by target endianness), then reinterpret the
        // 64-bit whole. This keeps provenance, unlike a plain integer cast.
        #[repr(C)]
        struct ZeroExtended {
            #[cfg(target_endian = "big")]
            pad: *mut (),
            v: *mut (),
            #[cfg(target_endian = "little")]
            pad: *mut (),
        }
        // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
        unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
    }
    #[allow(dead_code)]
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "s390x",
        target_arch = "x86_64",
    ))]
    /// A 128-bit value represented as a pair of 64-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U128 {
        pub(crate) whole: u128,
        pub(crate) pair: Pair<u64>,
    }
    #[allow(dead_code)]
    #[cfg(target_arch = "arm")]
    /// A 64-bit value represented as a pair of 32-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U64 {
        pub(crate) whole: u64,
        pub(crate) pair: Pair<u32>,
    }
    // Hi/lo halves of a double-width value; field order follows the memory
    // layout of the target (AArch64/Arm pair registers are always lo-first).
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) struct Pair<T: Copy> {
        // little endian order
        #[cfg(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm"))]
        pub(crate) lo: T,
        pub(crate) hi: T,
        // big endian order
        #[cfg(not(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm")))]
        pub(crate) lo: T,
    }
}
// Fallback definitions of the comparison RMW ops in terms of a generic
// CAS-loop `atomic_update` (expected to be in scope at the expansion site).
macro_rules! atomic_rmw_by_atomic_update {
    (cmp) => {
        #[inline]
        unsafe fn atomic_max(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x as i128, val as i128) as u128) }
        }
        #[inline]
        unsafe fn atomic_umax(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x, val)) }
        }
        #[inline]
        unsafe fn atomic_min(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x as i128, val as i128) as u128) }
        }
        #[inline]
        unsafe fn atomic_umin(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x, val)) }
        }
    };
}
use core::{arch::asm, sync::atomic::Ordering};
use utils::*;
// In outline-atomics builds (quadword-atomics not enabled at compile-time),
// assert at run-time that the pwr8 path is only reached when the CPU supports it.
macro_rules! debug_assert_pwr8 {
    () => {
        #[cfg(not(any(
            target_feature = "quadword-atomics",
            portable_atomic_target_feature = "quadword-atomics",
        )))]
        {
            debug_assert!(detect::detect().has_quadword_atomics());
        }
    };
}
// Refs: https://www.ibm.com/docs/en/aix/7.3?topic=ops-machine-pseudo-op
//
// This is similar to #[target_feature(enable = "quadword-atomics")], except that there are
// no compiler guarantees regarding (un)inlining, and the scope is within an asm
// block rather than a function. We use this directive because #[target_feature(enable = "quadword-atomics")]
// is not supported as of Rust 1.70-nightly.
//
// start_pwr8 and end_pwr8 must be used in pairs.
//
// Note: If power8 instructions are not available at compile-time, we must guarantee that
// the function that uses it is not inlined into a function where it is not
// clear whether power8 instructions are available. Otherwise, (even if we checked whether
// power8 instructions are available at run-time) optimizations that reorder its
// instructions across the if condition might introduce undefined behavior.
// (see also https://rust-lang.github.io/rfcs/2045-target-feature.html#safely-inlining-target_feature-functions-on-more-contexts)
// However, our code uses the ifunc helper macro that works with function pointers,
// so we don't have to worry about this unless calling without helper macro.
macro_rules! start_pwr8 {
    () => {
        ".machine push\n.machine power8"
    };
}
macro_rules! end_pwr8 {
    () => {
        ".machine pop"
    };
}
// Expand an RMW asm op with the acquire/release barrier pair required by the
// given ordering ($op takes the acquire barrier then the release barrier).
macro_rules! atomic_rmw {
    ($op:ident, $order:ident) => {
        match $order {
            Ordering::Relaxed => $op!("", ""),
            Ordering::Acquire => $op!("lwsync", ""),
            Ordering::Release => $op!("", "lwsync"),
            Ordering::AcqRel => $op!("lwsync", "lwsync"),
            Ordering::SeqCst => $op!("lwsync", "sync"),
            _ => unreachable!(),
        }
    };
}
// Extracts and checks the EQ bit of cr0.
// `mfcr` places the full 32-bit CR in the low word; bit 0x20000000 is the EQ
// bit of CR field 0 (set by a successful stqcx.).
#[inline]
fn extract_cr0(r: u64) -> bool {
    r & 0x20000000 != 0
}
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
use atomic_load_pwr8 as atomic_load;
// Outline-atomics path: dispatch at run-time between the pwr8 implementation
// and the fallback, via the ifunc helper.
#[cfg(not(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
)))]
#[inline]
unsafe fn atomic_load(src: *mut u128, order: Ordering) -> u128 {
    fn_alias! {
        // inline(never) is just a hint and also not strictly necessary
        // because we use ifunc helper macro, but used for clarity.
        #[inline(never)]
        unsafe fn(src: *mut u128) -> u128;
        atomic_load_pwr8_relaxed = atomic_load_pwr8(Ordering::Relaxed);
        atomic_load_pwr8_acquire = atomic_load_pwr8(Ordering::Acquire);
        atomic_load_pwr8_seqcst = atomic_load_pwr8(Ordering::SeqCst);
    }
    // SAFETY: the caller must uphold the safety contract.
    // we only call atomic_load_pwr8 if quadword-atomics is available.
    unsafe {
        match order {
            Ordering::Relaxed => {
                ifunc!(unsafe fn(src: *mut u128) -> u128 {
                    if detect::detect().has_quadword_atomics() {
                        atomic_load_pwr8_relaxed
                    } else {
                        fallback::atomic_load_non_seqcst
                    }
                })
            }
            Ordering::Acquire => {
                ifunc!(unsafe fn(src: *mut u128) -> u128 {
                    if detect::detect().has_quadword_atomics() {
                        atomic_load_pwr8_acquire
                    } else {
                        fallback::atomic_load_non_seqcst
                    }
                })
            }
            Ordering::SeqCst => {
                ifunc!(unsafe fn(src: *mut u128) -> u128 {
                    if detect::detect().has_quadword_atomics() {
                        atomic_load_pwr8_seqcst
                    } else {
                        fallback::atomic_load_seqcst
                    }
                })
            }
            _ => unreachable!(),
        }
    }
}
// 128-bit atomic load via the POWER8 `lq` instruction.
#[inline]
unsafe fn atomic_load_pwr8(src: *mut u128, order: Ordering) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_pwr8!();
    // SAFETY: the caller must uphold the safety contract.
    //
    // Refs: "3.3.4 Fixed Point Load and Store Quadword Instructions" of Power ISA
    unsafe {
        let (out_hi, out_lo);
        macro_rules! atomic_load_acquire {
            ($release:tt) => {
                asm!(
                    start_pwr8!(),
                    $release,
                    "lq %r4, 0({src})",
                    // Lightweight acquire sync
                    // Refs: https://github.com/boostorg/atomic/blob/boost-1.79.0/include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp#L47-L62
                    "cmpd %cr7, %r4, %r4",
                    "bne- %cr7, 2f",
                    "2:",
                    "isync",
                    end_pwr8!(),
                    src = in(reg_nonzero) ptr_reg!(src),
                    // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                    // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                    out("r4") out_hi,
                    out("r5") out_lo,
                    out("cr7") _,
                    options(nostack, preserves_flags),
                )
            };
        }
        match order {
            Ordering::Relaxed => {
                asm!(
                    start_pwr8!(),
                    "lq %r4, 0({src})",
                    end_pwr8!(),
                    src = in(reg_nonzero) ptr_reg!(src),
                    // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                    // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                    out("r4") out_hi,
                    out("r5") out_lo,
                    options(nostack, preserves_flags, readonly),
                );
            }
            Ordering::Acquire => atomic_load_acquire!(""),
            Ordering::SeqCst => atomic_load_acquire!("sync"),
            _ => unreachable!(),
        }
        U128 { pair: Pair { hi: out_hi, lo: out_lo } }.whole
    }
}
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
use atomic_store_pwr8 as atomic_store;
// Outline-atomics path for store, mirroring atomic_load above.
#[cfg(not(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
)))]
#[inline]
unsafe fn atomic_store(dst: *mut u128, val: u128, order: Ordering) {
    fn_alias! {
        // inline(never) is just a hint and also not strictly necessary
        // because we use ifunc helper macro, but used for clarity.
        #[inline(never)]
        unsafe fn(dst: *mut u128, val: u128);
        atomic_store_pwr8_relaxed = atomic_store_pwr8(Ordering::Relaxed);
        atomic_store_pwr8_release = atomic_store_pwr8(Ordering::Release);
        atomic_store_pwr8_seqcst = atomic_store_pwr8(Ordering::SeqCst);
    }
    // SAFETY: the caller must uphold the safety contract.
// we only calls atomic_store_pwr8 if quadword-atomics is available. unsafe { match order { Ordering::Relaxed => { ifunc!(unsafe fn(dst: *mut u128, val: u128) { if detect::detect().has_quadword_atomics() { atomic_store_pwr8_relaxed } else { fallback::atomic_store_non_seqcst } }); } Ordering::Release => { ifunc!(unsafe fn(dst: *mut u128, val: u128) { if detect::detect().has_quadword_atomics() { atomic_store_pwr8_release } else { fallback::atomic_store_non_seqcst } }); } Ordering::SeqCst => { ifunc!(unsafe fn(dst: *mut u128, val: u128) { if detect::detect().has_quadword_atomics() { atomic_store_pwr8_seqcst } else { fallback::atomic_store_seqcst } }); } _ => unreachable!(), } } } #[inline] unsafe fn atomic_store_pwr8(dst: *mut u128, val: u128, order: Ordering) { debug_assert!(dst as usize % 16 == 0); debug_assert_pwr8!(); // SAFETY: the caller must uphold the safety contract. // // Refs: "3.3.4 Fixed Point Load and Store Quadword Instructions" of Power ISA unsafe { let val = U128 { whole: val }; macro_rules! atomic_store { ($release:tt) => { asm!( start_pwr8!(), $release, "stq %r4, 0({dst})", end_pwr8!(), dst = in(reg_nonzero) ptr_reg!(dst), // Quadword atomic instructions work with even/odd pair of specified register and subsequent register. // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or grater. in("r4") val.pair.hi, in("r5") val.pair.lo, options(nostack, preserves_flags), ) }; } match order { Ordering::Relaxed => atomic_store!(""), Ordering::Release => atomic_store!("lwsync"), Ordering::SeqCst => atomic_store!("sync"), _ => unreachable!(), } } } #[inline] unsafe fn atomic_compare_exchange( dst: *mut u128, old: u128, new: u128, success: Ordering, failure: Ordering, ) -> Result<u128, u128> { let success = crate::utils::upgrade_success_ordering(success, failure); #[cfg(any( target_feature = "quadword-atomics", portable_atomic_target_feature = "quadword-atomics", ))] // SAFETY: the caller must uphold the safety contract. 
// cfg guarantees that quadword atomics instructions are available at compile-time. let (prev, ok) = unsafe { atomic_compare_exchange_pwr8(dst, old, new, success) }; #[cfg(not(any( target_feature = "quadword-atomics", portable_atomic_target_feature = "quadword-atomics", )))] // SAFETY: the caller must uphold the safety contract. let (prev, ok) = unsafe { atomic_compare_exchange_ifunc(dst, old, new, success) }; if ok { Ok(prev) } else { Err(prev) } } #[inline] unsafe fn atomic_compare_exchange_pwr8( dst: *mut u128, old: u128, new: u128, order: Ordering, ) -> (u128, bool) { debug_assert!(dst as usize % 16 == 0); debug_assert_pwr8!(); // SAFETY: the caller must uphold the safety contract. // // Refs: "4.6.2.2 128-bit Load And Reserve and Store Conditional Instructions" of Power ISA unsafe { let old = U128 { whole: old }; let new = U128 { whole: new }; let (mut prev_hi, mut prev_lo); let mut r; macro_rules! cmpxchg { ($acquire:tt, $release:tt) => { asm!( start_pwr8!(), $release, "2:", "lqarx %r8, 0, {dst}", "xor {tmp_lo}, %r9, {old_lo}", "xor {tmp_hi}, %r8, {old_hi}", "or. {tmp_lo}, {tmp_lo}, {tmp_hi}", "bne %cr0, 3f", // jump if compare failed "stqcx. %r6, 0, {dst}", "bne %cr0, 2b", // continue loop if store failed "3:", // if compare failed EQ bit is cleared, if stqcx succeeds EQ bit is set. "mfcr {tmp_lo}", $acquire, end_pwr8!(), dst = in(reg_nonzero) ptr_reg!(dst), old_hi = in(reg) old.pair.hi, old_lo = in(reg) old.pair.lo, tmp_hi = out(reg) _, tmp_lo = out(reg) r, // Quadword atomic instructions work with even/odd pair of specified register and subsequent register. // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or grater. in("r6") new.pair.hi, in("r7") new.pair.lo, out("r8") prev_hi, out("r9") prev_lo, out("cr0") _, options(nostack, preserves_flags), ) }; } atomic_rmw!(cmpxchg, order); (U128 { pair: Pair { hi: prev_hi, lo: prev_lo } }.whole, extract_cr0(r)) } } // Always use strong CAS for outline-atomics. 
#[cfg(not(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
)))]
use atomic_compare_exchange as atomic_compare_exchange_weak;
// Weak 128-bit compare-exchange: like the strong version but the underlying
// stqcx. is not retried, so it may fail spuriously.
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
#[inline]
unsafe fn atomic_compare_exchange_weak(
    dst: *mut u128,
    old: u128,
    new: u128,
    success: Ordering,
    failure: Ordering,
) -> Result<u128, u128> {
    let success = crate::utils::upgrade_success_ordering(success, failure);
    // SAFETY: the caller must uphold the safety contract.
    // cfg guarantees that quadword atomics instructions are available at compile-time.
    let (prev, ok) = unsafe { atomic_compare_exchange_weak_pwr8(dst, old, new, success) };
    if ok {
        Ok(prev)
    } else {
        Err(prev)
    }
}
// Weak CAS: single lqarx/stqcx. attempt, no retry loop. Returns (previous value, success).
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
#[inline]
unsafe fn atomic_compare_exchange_weak_pwr8(
    dst: *mut u128,
    old: u128,
    new: u128,
    order: Ordering,
) -> (u128, bool) {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_pwr8!();
    // SAFETY: the caller must uphold the safety contract.
    //
    // Refs: "4.6.2.2 128-bit Load And Reserve and Store Conditional Instructions" of Power ISA
    unsafe {
        let old = U128 { whole: old };
        let new = U128 { whole: new };
        let (mut prev_hi, mut prev_lo);
        let mut r;
        macro_rules! cmpxchg_weak {
            ($acquire:tt, $release:tt) => {
                asm!(
                    start_pwr8!(),
                    $release,
                    "lqarx %r8, 0, {dst}",
                    "xor {tmp_lo}, %r9, {old_lo}",
                    "xor {tmp_hi}, %r8, {old_hi}",
                    "or. {tmp_lo}, {tmp_lo}, {tmp_hi}",
                    "bne %cr0, 3f", // jump if compare failed
                    "stqcx. %r6, 0, {dst}",
                    "3:",
                    // if compare or stqcx failed EQ bit is cleared, if stqcx succeeds EQ bit is set.
                    "mfcr {tmp_lo}",
                    $acquire,
                    end_pwr8!(),
                    dst = in(reg_nonzero) ptr_reg!(dst),
                    old_hi = in(reg) old.pair.hi,
                    old_lo = in(reg) old.pair.lo,
                    tmp_hi = out(reg) _,
                    tmp_lo = out(reg) r,
                    // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                    // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                    in("r6") new.pair.hi,
                    in("r7") new.pair.lo,
                    out("r8") prev_hi,
                    out("r9") prev_lo,
                    out("cr0") _,
                    options(nostack, preserves_flags),
                )
            };
        }
        atomic_rmw!(cmpxchg_weak, order);
        (U128 { pair: Pair { hi: prev_hi, lo: prev_lo } }.whole, extract_cr0(r))
    }
}
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
use atomic_swap_pwr8 as atomic_swap;
// Do not use atomic_rmw_ll_sc_3 because it needs extra MR to implement swap.
#[inline]
unsafe fn atomic_swap_pwr8(dst: *mut u128, val: u128, order: Ordering) -> u128 {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_pwr8!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let val = U128 { whole: val };
        let (mut prev_hi, mut prev_lo);
        macro_rules! swap {
            ($acquire:tt, $release:tt) => {
                asm!(
                    start_pwr8!(),
                    $release,
                    "2:",
                    "lqarx %r6, 0, {dst}",
                    "stqcx. %r8, 0, {dst}",
                    "bne %cr0, 2b",
                    $acquire,
                    end_pwr8!(),
                    dst = in(reg_nonzero) ptr_reg!(dst),
                    // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                    // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                    out("r6") prev_hi,
                    out("r7") prev_lo,
                    in("r8") val.pair.hi,
                    in("r9") val.pair.lo,
                    out("cr0") _,
                    options(nostack, preserves_flags),
                )
            };
        }
        atomic_rmw!(swap, order);
        U128 { pair: Pair { hi: prev_hi, lo: prev_lo } }.whole
    }
}
/// Atomic RMW by LL/SC loop (3 arguments)
/// `unsafe fn(dst: *mut u128, val: u128, order: Ordering) -> u128;`
///
/// $op can use the following registers:
/// - val_hi/val_lo pair: val argument (read-only for `$op`)
/// - r6/r7 pair: previous value loaded by ll (read-only for `$op`)
/// - r8/r9 pair: new value that will be stored by sc
macro_rules!
atomic_rmw_ll_sc_3 {
    ($name:ident as $reexport_name:ident, [$($reg:tt)*], $($op:tt)*) => {
        #[cfg(any(
            target_feature = "quadword-atomics",
            portable_atomic_target_feature = "quadword-atomics",
        ))]
        use $name as $reexport_name;
        #[inline]
        unsafe fn $name(dst: *mut u128, val: u128, order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_pwr8!();
            // SAFETY: the caller must uphold the safety contract.
            unsafe {
                let val = U128 { whole: val };
                let (mut prev_hi, mut prev_lo);
                macro_rules! op {
                    ($acquire:tt, $release:tt) => {
                        asm!(
                            start_pwr8!(),
                            $release,
                            "2:",
                                "lqarx %r6, 0, {dst}",
                                // caller-supplied op: computes r8/r9 from r6/r7 and val_hi/val_lo
                                $($op)*
                                "stqcx. %r8, 0, {dst}",
                                "bne %cr0, 2b",
                            $acquire,
                            end_pwr8!(),
                            dst = in(reg_nonzero) ptr_reg!(dst),
                            val_hi = in(reg) val.pair.hi,
                            val_lo = in(reg) val.pair.lo,
                            // caller-supplied extra register operands/clobbers
                            $($reg)*
                            // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                            // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                            out("r6") prev_hi,
                            out("r7") prev_lo,
                            out("r8") _, // new (hi)
                            out("r9") _, // new (lo)
                            out("cr0") _,
                            options(nostack, preserves_flags),
                        )
                    };
                }
                atomic_rmw!(op, order);
                U128 { pair: Pair { hi: prev_hi, lo: prev_lo } }.whole
            }
        }
    };
}
/// Atomic RMW by LL/SC loop (2 arguments)
/// `unsafe fn(dst: *mut u128, order: Ordering) -> u128;`
///
/// $op can use the following registers:
/// - r6/r7 pair: previous value loaded by ll (read-only for `$op`)
/// - r8/r9 pair: new value that will be stored by sc
macro_rules! atomic_rmw_ll_sc_2 {
    ($name:ident as $reexport_name:ident, [$($reg:tt)*], $($op:tt)*) => {
        #[cfg(any(
            target_feature = "quadword-atomics",
            portable_atomic_target_feature = "quadword-atomics",
        ))]
        use $name as $reexport_name;
        #[inline]
        unsafe fn $name(dst: *mut u128, order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_pwr8!();
            // SAFETY: the caller must uphold the safety contract.
            unsafe {
                let (mut prev_hi, mut prev_lo);
                macro_rules!
op {
                    ($acquire:tt, $release:tt) => {
                        asm!(
                            start_pwr8!(),
                            $release,
                            "2:",
                                "lqarx %r6, 0, {dst}",
                                // caller-supplied op: computes r8/r9 from r6/r7
                                $($op)*
                                "stqcx. %r8, 0, {dst}",
                                "bne %cr0, 2b",
                            $acquire,
                            end_pwr8!(),
                            dst = in(reg_nonzero) ptr_reg!(dst),
                            // caller-supplied extra register operands/clobbers
                            $($reg)*
                            // Quadword atomic instructions work with even/odd pair of specified register and subsequent register.
                            // We cannot use r1 (sp) and r2 (system reserved), so start with r4 or greater.
                            out("r6") prev_hi,
                            out("r7") prev_lo,
                            out("r8") _, // new (hi)
                            out("r9") _, // new (lo)
                            out("cr0") _,
                            options(nostack, preserves_flags),
                        )
                    };
                }
                atomic_rmw!(op, order);
                U128 { pair: Pair { hi: prev_hi, lo: prev_lo } }.whole
            }
        }
    };
}
// 128-bit add/sub use carry-propagating pairs (addc/adde, subc/subfe), so xer (carry) is clobbered.
atomic_rmw_ll_sc_3! {
    atomic_add_pwr8 as atomic_add,
    [out("xer") _,],
    "addc %r9, {val_lo}, %r7",
    "adde %r8, {val_hi}, %r6",
}
atomic_rmw_ll_sc_3! {
    atomic_sub_pwr8 as atomic_sub,
    [out("xer") _,],
    "subc %r9, %r7, {val_lo}",
    "subfe %r8, {val_hi}, %r6",
}
atomic_rmw_ll_sc_3! {
    atomic_and_pwr8 as atomic_and,
    [],
    "and %r9, {val_lo}, %r7",
    "and %r8, {val_hi}, %r6",
}
atomic_rmw_ll_sc_3! {
    atomic_nand_pwr8 as atomic_nand,
    [],
    "nand %r9, {val_lo}, %r7",
    "nand %r8, {val_hi}, %r6",
}
atomic_rmw_ll_sc_3! {
    atomic_or_pwr8 as atomic_or,
    [],
    "or %r9, {val_lo}, %r7",
    "or %r8, {val_hi}, %r6",
}
atomic_rmw_ll_sc_3! {
    atomic_xor_pwr8 as atomic_xor,
    [],
    "xor %r9, {val_lo}, %r7",
    "xor %r8, {val_hi}, %r6",
}
// Signed max: hi halves compared signed (cmpd into cr1), lo halves unsigned (cmpld into cr0);
// when hi halves are equal, the lo comparison decides.
atomic_rmw_ll_sc_3! {
    atomic_max_pwr8 as atomic_max,
    [out("cr1") _,],
    "cmpld %r7, {val_lo}", // (unsigned) compare lo 64-bit, store result to cr0
    "iselgt %r9, %r7, {val_lo}", // select lo 64-bit based on GT bit in cr0
    "cmpd %cr1, %r6, {val_hi}", // (signed) compare hi 64-bit, store result to cr1
    "isel %r8, %r7, {val_lo}, 5", // select lo 64-bit based on GT bit in cr1
    "cmpld %r6, {val_hi}", // (unsigned) compare hi 64-bit, store result to cr0
    "iseleq %r9, %r9, %r8", // select lo 64-bit based on EQ bit in cr0
    "isel %r8, %r6, {val_hi}, 5", // select hi 64-bit based on GT bit in cr1
}
atomic_rmw_ll_sc_3!
{
    atomic_umax_pwr8 as atomic_umax,
    [],
    "cmpld %r7, {val_lo}", // compare lo 64-bit, store result to cr0
    "iselgt %r9, %r7, {val_lo}", // select lo 64-bit based on GT bit in cr0
    "cmpld %r6, {val_hi}", // compare hi 64-bit, store result to cr0
    "iselgt %r8, %r7, {val_lo}", // select lo 64-bit based on GT bit in cr0
    "iseleq %r9, %r9, %r8", // select lo 64-bit based on EQ bit in cr0
    "iselgt %r8, %r6, {val_hi}", // select hi 64-bit based on GT bit in cr0
}
// Signed min: mirrors atomic_max_pwr8 but selects on the LT bit instead of GT.
atomic_rmw_ll_sc_3! {
    atomic_min_pwr8 as atomic_min,
    [out("cr1") _,],
    "cmpld %r7, {val_lo}", // (unsigned) compare lo 64-bit, store result to cr0
    "isellt %r9, %r7, {val_lo}", // select lo 64-bit based on LT bit in cr0
    "cmpd %cr1, %r6, {val_hi}", // (signed) compare hi 64-bit, store result to cr1
    "isel %r8, %r7, {val_lo}, 4", // select lo 64-bit based on LT bit in cr1
    "cmpld %r6, {val_hi}", // (unsigned) compare hi 64-bit, store result to cr0
    "iseleq %r9, %r9, %r8", // select lo 64-bit based on EQ bit in cr0
    "isel %r8, %r6, {val_hi}, 4", // select hi 64-bit based on LT bit in cr1
}
atomic_rmw_ll_sc_3! {
    atomic_umin_pwr8 as atomic_umin,
    [],
    "cmpld %r7, {val_lo}", // compare lo 64-bit, store result to cr0
    "isellt %r9, %r7, {val_lo}", // select lo 64-bit based on LT bit in cr0
    "cmpld %r6, {val_hi}", // compare hi 64-bit, store result to cr0
    "isellt %r8, %r7, {val_lo}", // select lo 64-bit based on LT bit in cr0
    "iseleq %r9, %r9, %r8", // select lo 64-bit based on EQ bit in cr0
    "isellt %r8, %r6, {val_hi}", // select hi 64-bit based on LT bit in cr0
}
#[cfg(any(
    target_feature = "quadword-atomics",
    portable_atomic_target_feature = "quadword-atomics",
))]
use atomic_not_pwr8 as atomic_not;
// Bitwise NOT implemented as XOR with all-ones.
#[inline]
unsafe fn atomic_not_pwr8(dst: *mut u128, order: Ordering) -> u128 {
    // SAFETY: the caller must uphold the safety contract.
    unsafe { atomic_xor_pwr8(dst, !0, order) }
}
#[cfg(portable_atomic_llvm_16)]
atomic_rmw_ll_sc_2!
{
    atomic_neg_pwr8 as atomic_neg,
    [out("xer") _,],
    "subfic %r9, %r7, 0",
    "subfze %r8, %r6",
}
// LLVM 15 miscompiles subfic.
#[cfg(not(portable_atomic_llvm_16))]
atomic_rmw_ll_sc_2! {
    atomic_neg_pwr8 as atomic_neg,
    [zero = in(reg) 0_u64, out("xer") _,],
    "subc %r9, {zero}, %r7",
    "subfze %r8, %r6",
}
// Generates an outline-atomics wrapper: for each memory ordering, dispatch at runtime
// (via the ifunc helper) to the pwr8 implementation when quadword atomics are detected,
// or to the seqcst/non-seqcst fallback otherwise.
macro_rules! atomic_rmw_with_ifunc {
    (
        unsafe fn $name:ident($($arg:tt)*) $(-> $ret_ty:ty)?;
        pwr8 = $pwr8_fn:ident;
        non_seqcst_fallback = $non_seqcst_fallback_fn:ident;
        seqcst_fallback = $seqcst_fallback_fn:ident;
    ) => {
        #[cfg(not(any(
            target_feature = "quadword-atomics",
            portable_atomic_target_feature = "quadword-atomics",
        )))]
        #[inline]
        unsafe fn $name($($arg)*, order: Ordering) $(-> $ret_ty)? {
            fn_alias! {
                // inline(never) is just a hint and also not strictly necessary
                // because we use ifunc helper macro, but used for clarity.
                #[inline(never)]
                unsafe fn($($arg)*) $(-> $ret_ty)?;
                pwr8_relaxed_fn = $pwr8_fn(Ordering::Relaxed);
                pwr8_acquire_fn = $pwr8_fn(Ordering::Acquire);
                pwr8_release_fn = $pwr8_fn(Ordering::Release);
                pwr8_acqrel_fn = $pwr8_fn(Ordering::AcqRel);
                pwr8_seqcst_fn = $pwr8_fn(Ordering::SeqCst);
            }
            // SAFETY: the caller must uphold the safety contract.
            // we only call pwr8_fn if quadword-atomics is available.
            unsafe {
                match order {
                    Ordering::Relaxed => {
                        ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                            if detect::detect().has_quadword_atomics() {
                                pwr8_relaxed_fn
                            } else {
                                fallback::$non_seqcst_fallback_fn
                            }
                        })
                    }
                    Ordering::Acquire => {
                        ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                            if detect::detect().has_quadword_atomics() {
                                pwr8_acquire_fn
                            } else {
                                fallback::$non_seqcst_fallback_fn
                            }
                        })
                    }
                    Ordering::Release => {
                        ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                            if detect::detect().has_quadword_atomics() {
                                pwr8_release_fn
                            } else {
                                fallback::$non_seqcst_fallback_fn
                            }
                        })
                    }
                    Ordering::AcqRel => {
                        ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)?
{
                            if detect::detect().has_quadword_atomics() {
                                pwr8_acqrel_fn
                            } else {
                                fallback::$non_seqcst_fallback_fn
                            }
                        })
                    }
                    Ordering::SeqCst => {
                        ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                            if detect::detect().has_quadword_atomics() {
                                pwr8_seqcst_fn
                            } else {
                                fallback::$seqcst_fallback_fn
                            }
                        })
                    }
                    _ => unreachable!(),
                }
            }
        }
    };
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_compare_exchange_ifunc(dst: *mut u128, old: u128, new: u128) -> (u128, bool);
    pwr8 = atomic_compare_exchange_pwr8;
    non_seqcst_fallback = atomic_compare_exchange_non_seqcst;
    seqcst_fallback = atomic_compare_exchange_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_swap(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_swap_pwr8;
    non_seqcst_fallback = atomic_swap_non_seqcst;
    seqcst_fallback = atomic_swap_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_add(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_add_pwr8;
    non_seqcst_fallback = atomic_add_non_seqcst;
    seqcst_fallback = atomic_add_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_sub(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_sub_pwr8;
    non_seqcst_fallback = atomic_sub_non_seqcst;
    seqcst_fallback = atomic_sub_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_and(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_and_pwr8;
    non_seqcst_fallback = atomic_and_non_seqcst;
    seqcst_fallback = atomic_and_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_nand(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_nand_pwr8;
    non_seqcst_fallback = atomic_nand_non_seqcst;
    seqcst_fallback = atomic_nand_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_or(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_or_pwr8;
    non_seqcst_fallback = atomic_or_non_seqcst;
    seqcst_fallback = atomic_or_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_xor(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_xor_pwr8;
    non_seqcst_fallback = atomic_xor_non_seqcst;
    seqcst_fallback = atomic_xor_seqcst;
}
atomic_rmw_with_ifunc!
{
    unsafe fn atomic_max(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_max_pwr8;
    non_seqcst_fallback = atomic_max_non_seqcst;
    seqcst_fallback = atomic_max_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_umax(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_umax_pwr8;
    non_seqcst_fallback = atomic_umax_non_seqcst;
    seqcst_fallback = atomic_umax_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_min(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_min_pwr8;
    non_seqcst_fallback = atomic_min_non_seqcst;
    seqcst_fallback = atomic_min_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_umin(dst: *mut u128, val: u128) -> u128;
    pwr8 = atomic_umin_pwr8;
    non_seqcst_fallback = atomic_umin_non_seqcst;
    seqcst_fallback = atomic_umin_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_not(dst: *mut u128) -> u128;
    pwr8 = atomic_not_pwr8;
    non_seqcst_fallback = atomic_not_non_seqcst;
    seqcst_fallback = atomic_not_seqcst;
}
atomic_rmw_with_ifunc! {
    unsafe fn atomic_neg(dst: *mut u128) -> u128;
    pwr8 = atomic_neg_pwr8;
    non_seqcst_fallback = atomic_neg_non_seqcst;
    seqcst_fallback = atomic_neg_seqcst;
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Compiler Explorer Shop
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
CE on Bluesky
Statistics
Changelog
Version tree