Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Algol68
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C with Coccinelle
C++ with Coccinelle
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
GLSL
Go
Haskell
HLSL
Hook
Hylo
IL
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Mojo
Nim
Numba
Nix
Objective-C
Objective-C++
OCaml
Odin
OpenCL C
Pascal
Pony
PTX
Python
Racket
Raku
Ruby
Rust
Sail
Snowball
Scala
Slang
Solidity
Spice
SPIR-V
Swift
LLVM TableGen
Toit
Triton
TypeScript Native
V
Vala
Visual Basic
Vyper
WASM
Zig
JavaScript
GIMPLE
Ygen
sway
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc 1.90.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
// Compiler Explorer scratch crate: exercises Rust's unstable atomic intrinsics
// on 128-bit integers, for targets that have native 128-bit atomics.
// NOTE(review): the dump collapsed the original newlines; restored here so the
// `//` line comments no longer swallow the code that followed them.
#![no_std]
#![allow(unused, internal_features)]
#![feature(cfg_target_has_atomic, core_intrinsics)]

// Generates `mod $t` containing a CAS-loop helper: repeatedly applies `f` to
// the currently observed value until a weak compare-exchange succeeds, then
// returns the value that was replaced (fetch_update-style semantics).
macro_rules! atomic_update {
    ($t:ident) => {
        mod $t {
            pub(crate) use core::sync::atomic::Ordering;

            #[inline(always)]
            pub(crate) unsafe fn atomic_update<F>(dst: *mut $t, order: Ordering, mut f: F) -> $t
            where
                F: FnMut($t) -> $t,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
                let mut old = core::intrinsics::atomic_load_relaxed(dst);
                loop {
                    let next = f(old);
                    // Success ordering comes from the caller; failure ordering is
                    // always relaxed (see the comment above).
                    let (x, ok) = match order {
                        Ordering::Relaxed => core::intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, next),
                        Ordering::Acquire => core::intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, next),
                        Ordering::Release => core::intrinsics::atomic_cxchgweak_release_relaxed(dst, old, next),
                        Ordering::AcqRel => core::intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, next),
                        Ordering::SeqCst => core::intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, next),
                        // `Ordering` is #[non_exhaustive]; no other variants exist today.
                        _ => unreachable!(),
                    };
                    if ok {
                        return x;
                    }
                    old = x;
                }
            }
        }
    };
}

// Smaller widths kept for reference; only u128 is instantiated below.
// #[cfg(target_has_atomic = "8")]
// atomic_update!(u8);
// #[cfg(target_has_atomic = "16")]
// atomic_update!(u16);
// #[cfg(target_has_atomic = "32")]
// atomic_update!(u32);
// #[cfg(target_has_atomic = "64")]
// atomic_update!(u64);

// Targets that provide (or can be configured for) native 128-bit atomics.
#[cfg(any(
    target_arch = "aarch64",
    all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
    all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
    target_arch = "s390x",
    all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
))]
atomic_update!(u128);

pub mod load {
    macro_rules!
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_load_relaxed(a) } #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_load_acquire(a) } #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_load_seqcst(a) } } }; } // #[cfg(target_has_atomic_load_store = "8")] // t!(u8); // #[cfg(target_has_atomic_load_store = "16")] // t!(u16); // #[cfg(target_has_atomic_load_store = "32")] // t!(u32); // #[cfg(target_has_atomic_load_store = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod store { macro_rules! 
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { core::intrinsics::atomic_store_relaxed(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) { core::intrinsics::atomic_store_release(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { core::intrinsics::atomic_store_seqcst(a, val) } } } } // #[cfg(target_has_atomic_load_store = "8")] // t!(u8); // #[cfg(target_has_atomic_load_store = "16")] // t!(u16); // #[cfg(target_has_atomic_load_store = "32")] // t!(u32); // #[cfg(target_has_atomic_load_store = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod swap { macro_rules! 
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xchg_relaxed(a, val) } #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acquire(a, val) } #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xchg_release(a, val) } #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acqrel(a, val) } #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xchg_seqcst(a, val) } } } } // #[cfg(target_has_atomic = "8")] // t!(u8); // #[cfg(target_has_atomic = "16")] // t!(u16); // #[cfg(target_has_atomic = "32")] // t!(u32); // #[cfg(target_has_atomic = "64")] // t!(u64); #[cfg(any( target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b"), ))] t!(u128); } pub mod compare_exchange { macro_rules! 
t { ($t:ident) => { pub mod $t { type T = $t; type A = *mut T; #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } } #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = 
// NOTE(review): this first statement completes `compare_exchange::…::acqrel_relaxed`,
// whose `let (val, ok) =` opener lies on an earlier (unseen) line.
core::intrinsics::atomic_cxchg_acqrel_relaxed(a, old, new);
                if ok { Ok(val) } else { Err(val) }
            }

            #[inline(never)]
            pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> {
                match core::intrinsics::atomic_cxchg_acqrel_acquire(a, old, new) {
                    (v, true) => Ok(v),
                    (v, false) => Err(v),
                }
            }
            #[inline(never)]
            pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                match core::intrinsics::atomic_cxchg_acqrel_seqcst(a, old, new) {
                    (v, true) => Ok(v),
                    (v, false) => Err(v),
                }
            }
            #[inline(never)]
            pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                match core::intrinsics::atomic_cxchg_seqcst_relaxed(a, old, new) {
                    (v, true) => Ok(v),
                    (v, false) => Err(v),
                }
            }
            #[inline(never)]
            pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> {
                match core::intrinsics::atomic_cxchg_seqcst_acquire(a, old, new) {
                    (v, true) => Ok(v),
                    (v, false) => Err(v),
                }
            }
            #[inline(never)]
            pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                match core::intrinsics::atomic_cxchg_seqcst_seqcst(a, old, new) {
                    (v, true) => Ok(v),
                    (v, false) => Err(v),
                }
            }
        }
    }
}
// #[cfg(target_has_atomic = "8")]
// t!(u8);
// #[cfg(target_has_atomic = "16")]
// t!(u16);
// #[cfg(target_has_atomic = "32")]
// t!(u32);
// #[cfg(target_has_atomic = "64")]
// t!(u64);
#[cfg(any(
    target_arch = "aarch64",
    all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
    all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
    target_arch = "s390x",
    all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
))]
t!(u128);
}

/// Weak compare-exchange at every legal success/failure ordering pair.
/// Each function is `#[inline(never)]` so its generated code can be
/// inspected in isolation.
pub mod compare_exchange_weak {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_relaxed_relaxed(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_relaxed_acquire(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_relaxed_seqcst(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acquire_relaxed(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acquire_acquire(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acquire_seqcst(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_release_relaxed(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_release_acquire(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_release_seqcst(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acqrel_relaxed(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acqrel_acquire(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_acqrel_seqcst(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_seqcst_relaxed(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_seqcst_acquire(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
                #[inline(never)]
                pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> {
                    match core::intrinsics::atomic_cxchgweak_seqcst_seqcst(a, old, new) {
                        (v, true) => Ok(v),
                        (v, false) => Err(v),
                    }
                }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-add at every RMW ordering.
pub mod fetch_add {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xadd_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xadd_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xadd_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-sub at every RMW ordering.
pub mod fetch_sub {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xsub_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xsub_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xsub_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-and at every RMW ordering.
pub mod fetch_and {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_and_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_and_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_and_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_and_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_and_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-nand at every RMW ordering.
pub mod fetch_nand {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_nand_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_nand_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_nand_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_nand_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_nand_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-or at every RMW ordering.
pub mod fetch_or {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_or_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_or_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_or_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_or_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_or_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-xor at every RMW ordering.
pub mod fetch_xor {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xor_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xor_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xor_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xor_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xor_seqcst(a, val) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-not, expressed as `fetch_xor(T::MAX)` since there is no
/// dedicated "not" intrinsic.
pub mod fetch_not {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_xor_relaxed(a, T::MAX) }
                #[inline(never)]
                pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_xor_acquire(a, T::MAX) }
                #[inline(never)]
                pub unsafe fn release(a: A) -> T { core::intrinsics::atomic_xor_release(a, T::MAX) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A) -> T { core::intrinsics::atomic_xor_acqrel(a, T::MAX) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_xor_seqcst(a, T::MAX) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
}

/// Atomic fetch-negate. There is no negate intrinsic, so this routes
/// through the crate-level CAS helper `atomic_update` (brought in via
/// `use crate::$t::*;`, which also supplies `Ordering`) with `wrapping_neg`.
pub mod fetch_neg {
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A) -> T { atomic_update(a, Ordering::Relaxed, $t::wrapping_neg) }
                #[inline(never)]
                pub unsafe fn acquire(a: A) -> T { atomic_update(a, Ordering::Acquire, $t::wrapping_neg) }
                #[inline(never)]
                pub unsafe fn release(a: A) -> T { atomic_update(a, Ordering::Release, $t::wrapping_neg) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A) -> T { atomic_update(a, Ordering::AcqRel, $t::wrapping_neg) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A) -> T { atomic_update(a, Ordering::SeqCst, $t::wrapping_neg) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // u!(u8);
    // #[cfg(target_has_atomic = "16")]
    // u!(u16);
    // #[cfg(target_has_atomic = "32")]
    // u!(u32);
    // #[cfg(target_has_atomic = "64")]
    // u!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    u!(u128);
}

/// Signed atomic fetch-max. `t!` uses the native `atomic_max` intrinsic;
/// `u!` is a fallback (used for powerpc64 quadword targets) that goes
/// through the unsigned twin type's `atomic_update` CAS helper.
pub mod fetch_max {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_max_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_max_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_max_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_max_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_max_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::max(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::max(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::max(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::max(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::max(x as T, val) as U) as T
                }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(i8);
    // #[cfg(target_has_atomic = "16")]
    // t!(i16);
    // #[cfg(target_has_atomic = "32")]
    // t!(i32);
    // #[cfg(target_has_atomic = "64")]
    // t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

/// Unsigned atomic fetch-max. `t!` uses the native `atomic_umax`
/// intrinsic; `u!` is the powerpc64 quadword fallback via `atomic_update`.
pub mod fetch_umax {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umax_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umax_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umax_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umax_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umax_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::max(x, val)) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::max(x, val)) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::max(x, val)) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::max(x, val)) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::max(x, val)) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}

/// Signed atomic fetch-min. `t!` uses the native `atomic_min` intrinsic;
/// `u!` is the powerpc64 quadword fallback via the unsigned twin type.
pub mod fetch_min {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_min_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_min_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_min_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_min_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_min_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::min(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::min(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::min(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::min(x as T, val) as U) as T
                }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T {
                    atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::min(x as T, val) as U) as T
                }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(i8);
    // #[cfg(target_has_atomic = "16")]
    // t!(i16);
    // #[cfg(target_has_atomic = "32")]
    // t!(i32);
    // #[cfg(target_has_atomic = "64")]
    // t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

/// Unsigned atomic fetch-min. `t!` uses the native `atomic_umin`
/// intrinsic; `u!` is the powerpc64 quadword fallback via `atomic_update`.
pub mod fetch_umin {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umin_relaxed(a, val) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umin_acquire(a, val) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umin_release(a, val) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umin_acqrel(a, val) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umin_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)]
                pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::min(x, val)) }
                #[inline(never)]
                pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::min(x, val)) }
                #[inline(never)]
                pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::min(x, val)) }
                #[inline(never)]
                pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::min(x, val)) }
                #[inline(never)]
                pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::min(x, val)) }
            }
        };
    }
    // #[cfg(target_has_atomic = "8")]
    // t!(u8);
    // #[cfg(target_has_atomic = "16")]
    // t!(u16);
    // #[cfg(target_has_atomic = "32")]
    // t!(u32);
    // #[cfg(target_has_atomic = "64")]
    // t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        all(target_arch = "riscv64", any(portable_atomic_target_feature = "experimental-zacas", atomic_maybe_uninit_target_feature = "experimental-zacas")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}
rust source #2
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc 1.90.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
// From https://github.com/taiki-e/portable-atomic
//
// 64-bit atomic operations for riscv32 using the experimental Zacas
// (atomic compare-and-swap, `amocas.d`) extension.
//
// NOTE(review): every public wrapper below is `#[inline(never)]` and exists once
// per (operation, memory ordering) pair — this looks like a codegen-inspection
// harness (e.g. for Compiler Explorer) rather than a library API; confirm.
#![no_std]
#![allow(unused)]
#![feature(asm_experimental_arch)]

// ---------------------------------------------------------------------------
// Per-ordering wrappers around the `atomic_*` helpers defined at the bottom of
// this file. Each module exposes one `#[inline(never)]` function per ordering
// so the generated machine code for each ordering can be inspected separately.
// ---------------------------------------------------------------------------

/// Atomic load wrappers (valid load orderings only: Relaxed/Acquire/SeqCst).
pub mod load { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_load(a, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_load(a, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_load(a, Ordering::SeqCst) }
} }

/// Atomic store wrappers (valid store orderings only: Relaxed/Release/SeqCst).
pub mod store { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { crate::atomic_store(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) { crate::atomic_store(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { crate::atomic_store(a, val, Ordering::SeqCst) }
} }

/// Atomic swap wrappers; return the previous value.
pub mod swap { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_swap(a, val, Ordering::SeqCst) }
} }

/// Strong compare-exchange wrappers, one per (success, failure) ordering pair.
/// Failure orderings are restricted to Relaxed/Acquire/SeqCst, matching the
/// `core::sync::atomic` API contract.
pub mod compare_exchange { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::Release, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
} }

/// Weak compare-exchange wrappers. Note: at the bottom of this file, the weak
/// variant is aliased to the strong one, because `amocas` never fails spuriously.
pub mod compare_exchange_weak { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Relaxed, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Acquire, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::Release, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::AcqRel, Ordering::SeqCst) }
    #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { crate::atomic_compare_exchange_weak(a, old, new, Ordering::SeqCst, Ordering::SeqCst) }
} }

/// Fetch-add wrappers (wrapping addition; see `atomic_add` below).
pub mod fetch_add { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_add(a, val, Ordering::SeqCst) }
} }

/// Fetch-sub wrappers (wrapping subtraction).
pub mod fetch_sub { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_sub(a, val, Ordering::SeqCst) }
} }

/// Fetch-AND wrappers.
pub mod fetch_and { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_and(a, val, Ordering::SeqCst) }
} }

/// Fetch-NAND wrappers (`!(x & val)`).
pub mod fetch_nand { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_nand(a, val, Ordering::SeqCst) }
} }

/// Fetch-OR wrappers.
pub mod fetch_or { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_or(a, val, Ordering::SeqCst) }
} }

/// Fetch-XOR wrappers.
pub mod fetch_xor { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_xor(a, val, Ordering::SeqCst) }
} }

/// Fetch-NOT wrappers (bitwise complement; no value argument).
pub mod fetch_not { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_not(a, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_not(a, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_not(a, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_not(a, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_not(a, Ordering::SeqCst) }
} }

/// Fetch-negate wrappers (two's-complement wrapping negation).
pub mod fetch_neg { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A) -> T { crate::atomic_neg(a, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A) -> T { crate::atomic_neg(a, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A) -> T { crate::atomic_neg(a, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A) -> T { crate::atomic_neg(a, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A) -> T { crate::atomic_neg(a, Ordering::SeqCst) }
} }

/// Signed fetch-max wrappers. The inner module is labeled `i64` but `T` is
/// `u64`: `atomic_max` (below) reinterprets the bits as `i64` for the
/// comparison. NOTE(review): confirm this label/type pairing is intentional.
pub mod fetch_max { pub mod i64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_max(a, val, Ordering::SeqCst) }
} }

/// Unsigned fetch-max wrappers.
pub mod fetch_umax { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umax(a, val, Ordering::SeqCst) }
} }

/// Signed fetch-min wrappers (same `i64`-label / `u64`-storage convention as
/// `fetch_max` above; `atomic_min` does the signed reinterpretation).
pub mod fetch_min { pub mod i64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_min(a, val, Ordering::SeqCst) }
} }

/// Unsigned fetch-min wrappers.
pub mod fetch_umin { pub mod u64 { use core::sync::atomic::Ordering; type A = *mut T; type T = u64;
    #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Relaxed) }
    #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Acquire) }
    #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::Release) }
    #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::AcqRel) }
    #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { crate::atomic_umin(a, val, Ordering::SeqCst) }
} }

/// Shared helpers: pointer-register massaging for inline asm, ordering
/// upgrade logic, and the `U64`/`U128`/`Pair` punning types used to move
/// wide values through register pairs.
#[macro_use]
mod utils {
    // On 64-bit architectures running a 32-bit ABI (ILP32 on a 64-bit ISA),
    // a pointer operand must be zero-extended to fill a 64-bit register.
    #[cfg(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64",
            target_arch = "bpf",
            target_arch = "loongarch64",
            target_arch = "mips64",
            target_arch = "mips64r6",
            target_arch = "nvptx64",
            target_arch = "powerpc64",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "sparc64",
            target_arch = "wasm64",
            target_arch = "x86_64",
        ),
    ))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
            #[allow(clippy::ptr_as_ptr)]
            {
                // If we cast to u64 here, the provenance will be lost,
                // so we convert to MaybeUninit<u64> via zero extend helper.
                crate::utils::zero_extend64_ptr($ptr as *mut ())
            }
            #[cfg(portable_atomic_no_asm_maybe_uninit)]
            {
                // Use cast on old rustc because it does not support MaybeUninit
                // registers. This is still permissive-provenance compatible and
                // is sound.
                $ptr as u64
            }
        }};
    }
    // All other targets: the pointer already fills its register.
    #[cfg(not(all(
        target_pointer_width = "32",
        any(
            target_arch = "aarch64",
            target_arch = "bpf",
            target_arch = "loongarch64",
            target_arch = "mips64",
            target_arch = "mips64r6",
            target_arch = "nvptx64",
            target_arch = "powerpc64",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "sparc64",
            target_arch = "wasm64",
            target_arch = "x86_64",
        ),
    )))]
    macro_rules! ptr_reg {
        ($ptr:ident) => {{
            let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
            $ptr // cast is unnecessary here.
        }};
    }
    use core::sync::atomic::Ordering;
    // Stable version of https://doc.rust-lang.org/nightly/std/hint/fn.assert_unchecked.html.
    // In debug builds a false condition panics; in release it is UB (the caller
    // must guarantee `cond`).
    #[inline(always)]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) unsafe fn assert_unchecked(cond: bool) {
        if !cond {
            if cfg!(debug_assertions) {
                unreachable!()
            } else {
                // SAFETY: the caller promised `cond` is true.
                unsafe { core::hint::unreachable_unchecked() }
            }
        }
    }
    // Combines a compare-exchange (success, failure) ordering pair into the
    // single ordering the hardware operation must provide, per P0418R2:
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
    // https://github.com/rust-lang/rust/pull/98383
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
        match (success, failure) {
            (Ordering::Relaxed, Ordering::Acquire) => Ordering::Acquire,
            (Ordering::Release, Ordering::Acquire) => Ordering::AcqRel,
            (_, Ordering::SeqCst) => Ordering::SeqCst,
            _ => success,
        }
    }
    /// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
    /// This is used for 64-bit architecture's 32-bit ABI (e.g., AArch64 ILP32 ABI).
    /// See ptr_reg! macro in src/gen/utils.rs for details.
    #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
    #[cfg(target_pointer_width = "32")]
    #[allow(dead_code)]
    #[inline]
    pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
        // Field order places the pointer in the low half and a null pad in the
        // high half, for either endianness, so the transmuted u64 equals the
        // zero-extended pointer.
        #[repr(C)]
        struct ZeroExtended {
            #[cfg(target_endian = "big")]
            pad: *mut (),
            v: *mut (),
            #[cfg(target_endian = "little")]
            pad: *mut (),
        }
        // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
        unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
    }
    #[allow(dead_code)]
    /// A 128-bit value represented as a pair of 64-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U128 {
        pub(crate) whole: u128,
        pub(crate) pair: Pair<u64>,
    }
    #[allow(dead_code)]
    #[cfg(any(target_arch = "arm", target_arch = "riscv32"))]
    /// A 64-bit value represented as a pair of 32-bit values.
    ///
    /// This type is `#[repr(C)]`, both fields have the same in-memory representation
    /// and are plain old data types, so access to the fields is always safe.
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) union U64 {
        pub(crate) whole: u64,
        pub(crate) pair: Pair<u32>,
    }
    /// Lo/hi halves of a wide value, field order chosen per target so the
    /// union punning above matches the in-memory layout of `whole`.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    #[repr(C)]
    pub(crate) struct Pair<T: Copy> {
        // little endian order
        #[cfg(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm"))]
        pub(crate) lo: T,
        pub(crate) hi: T,
        // big endian order
        #[cfg(not(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm")))]
        pub(crate) lo: T,
    }
}

// Generates all RMW operations (swap/add/sub/bitwise/neg and, via the `cmp`
// arm, min/max variants) in terms of the `atomic_update` CAS loop below.
macro_rules! atomic_rmw_by_atomic_update {
    () => {
        #[inline]
        unsafe fn atomic_swap(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |_| val) }
        }
        #[inline]
        unsafe fn atomic_add(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| x.wrapping_add(val)) }
        }
        #[inline]
        unsafe fn atomic_sub(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| x.wrapping_sub(val)) }
        }
        #[inline]
        unsafe fn atomic_and(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| x & val) }
        }
        #[inline]
        unsafe fn atomic_nand(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| !(x & val)) }
        }
        #[inline]
        unsafe fn atomic_or(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| x | val) }
        }
        #[inline]
        unsafe fn atomic_xor(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| x ^ val) }
        }
        #[inline]
        unsafe fn atomic_not(dst: *mut u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| !x) }
        }
        #[inline]
        unsafe fn atomic_neg(dst: *mut u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, u64::wrapping_neg) }
        }
        atomic_rmw_by_atomic_update!(cmp);
    };
    (cmp) => {
        // Signed max: compare the u64 bits reinterpreted as i64.
        #[inline]
        unsafe fn atomic_max(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x as i64, val as i64) as u64) }
        }
        #[inline]
        unsafe fn atomic_umax(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::max(x, val)) }
        }
        // Signed min: compare the u64 bits reinterpreted as i64.
        #[inline]
        unsafe fn atomic_min(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x as i64, val as i64) as u64) }
        }
        #[inline]
        unsafe fn atomic_umin(dst: *mut u64, val: u64, order: Ordering) -> u64 {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { atomic_update(dst, order, |x| core::cmp::min(x, val)) }
        }
    };
}

use core::{arch::asm, sync::atomic::Ordering};
use utils::*;

// When the Zacas target feature is not enabled at compile time, wrap the asm
// in `.option push / .option arch, +experimental-zacas / .option pop` so the
// assembler accepts the amocas mnemonics.
// https://github.com/riscv-non-isa/riscv-asm-manual/blob/ad0de8c004e29c9a7ac33cfd054f4d4f9392f2fb/src/asm-manual.adoc#arch
#[cfg(any(
    target_feature = "experimental-zacas",
    portable_atomic_target_feature = "experimental-zacas"
))]
macro_rules! start_zacas {
    () => {
        // zacas available, no-op
        ""
    };
}
#[cfg(any(
    target_feature = "experimental-zacas",
    portable_atomic_target_feature = "experimental-zacas"
))]
macro_rules! end_zacas {
    () => {
        // zacas available, no-op
        ""
    };
}
#[cfg(not(any(
    target_feature = "experimental-zacas",
    portable_atomic_target_feature = "experimental-zacas"
)))]
macro_rules! start_zacas {
    () => {
        ".option push\n.option arch, +experimental-zacas"
    };
}
#[cfg(not(any(
    target_feature = "experimental-zacas",
    portable_atomic_target_feature = "experimental-zacas"
)))]
macro_rules! end_zacas {
    () => {
        ".option pop"
    };
}

// Expands `$op` with (leading fence, amocas ordering suffix) selected from the
// requested ordering. SeqCst success with SeqCst failure additionally emits a
// leading `fence rw,rw`.
macro_rules! atomic_rmw_amocas_order {
    ($op:ident, $order:ident) => {
        atomic_rmw_amocas_order!($op, $order, failure = $order)
    };
    ($op:ident, $order:ident, failure = $failure:ident) => {
        match $order {
            Ordering::Relaxed => $op!("", ""),
            Ordering::Acquire => $op!("", ".aq"),
            Ordering::Release => $op!("", ".rl"),
            Ordering::AcqRel => $op!("", ".aqrl"),
            Ordering::SeqCst if $failure == Ordering::SeqCst => $op!("fence rw,rw", ".aqrl"),
            Ordering::SeqCst => $op!("", ".aqrl"),
            _ => unreachable!(),
        }
    };
}

// 64-bit atomic load, implemented as amocas.d with expected == desired == 0:
// CAS(0, 0) either stores 0 over 0 (no visible change) or fails, and in both
// cases leaves the current value in the a2/a3 register pair.
#[inline]
unsafe fn atomic_load(src: *mut u64, order: Ordering) -> u64 {
    debug_assert!(src as usize % 8 == 0);
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let (out_lo, out_hi);
        macro_rules! load {
            ($fence:tt, $asm_order:tt) => {
                asm!(
                    start_zacas!(),
                    $fence,
                    concat!("amocas.d", $asm_order, " a2, a2, 0({src})"),
                    end_zacas!(),
                    src = in(reg) ptr_reg!(src),
                    inout("a2") 0_u32 => out_lo,
                    inout("a3") 0_u32 => out_hi,
                    options(nostack, preserves_flags),
                )
            };
        }
        atomic_rmw_amocas_order!(load, order);
        U64 { pair: Pair { lo: out_lo, hi: out_hi } }.whole
    }
}

// 64-bit atomic store, implemented as a swap whose previous value is discarded.
#[inline]
unsafe fn atomic_store(dst: *mut u64, val: u64, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        atomic_swap(dst, val, order);
    }
}

// 64-bit strong compare-exchange via amocas.d. Returns Ok(prev) on success,
// Err(prev) on failure, matching core::sync::atomic::compare_exchange.
#[inline]
unsafe fn atomic_compare_exchange(
    dst: *mut u64,
    old: u64,
    new: u64,
    success: Ordering,
    failure: Ordering,
) -> Result<u64, u64> {
    debug_assert!(dst as usize % 8 == 0);
    let order = crate::utils::upgrade_success_ordering(success, failure);
    // SAFETY: the caller must uphold the safety contract.
    let prev = unsafe {
        let old = U64 { whole: old };
        let new = U64 { whole: new };
        let (prev_lo, prev_hi);
        macro_rules! cmpxchg {
            ($fence:tt, $asm_order:tt) => {
                asm!(
                    start_zacas!(),
                    $fence,
                    // amocas.d rd, rs2, (rs1): rd pair holds expected on entry
                    // and previous value on exit; rs2 pair holds the new value.
                    concat!("amocas.d", $asm_order, " a4, a2, 0({dst})"),
                    end_zacas!(),
                    dst = in(reg) ptr_reg!(dst),
                    // must be allocated to even/odd register pair
                    inout("a4") old.pair.lo => prev_lo,
                    inout("a5") old.pair.hi => prev_hi,
                    // must be allocated to even/odd register pair
                    in("a2") new.pair.lo,
                    in("a3") new.pair.hi,
                    options(nostack, preserves_flags),
                )
            };
        }
        atomic_rmw_amocas_order!(cmpxchg, order, failure = failure);
        U64 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
    };
    if prev == old { Ok(prev) } else { Err(prev) }
}

// amocas is always strong.
use atomic_compare_exchange as atomic_compare_exchange_weak;

// 64-bit load by two 32-bit `lw` loads. Not atomic as a whole; it is only
// used to seed the initial `prev` guess in atomic_update's CAS loop, where a
// torn read merely costs an extra iteration.
#[inline]
unsafe fn byte_wise_atomic_load(src: *const u64) -> u64 {
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let (out_lo, out_hi);
        asm!(
            "lw {out_lo}, ({src})",
            "lw {out_hi}, 4({src})",
            src = in(reg) ptr_reg!(src),
            out_lo = out(reg) out_lo,
            out_hi = out(reg) out_hi,
            options(pure, nostack, preserves_flags, readonly),
        );
        U64 { pair: Pair { lo: out_lo, hi: out_hi } }.whole
    }
}

// Generic read-modify-write loop: read a guess, apply `f`, and CAS until the
// exchange succeeds. Returns the previous value. The failure ordering is
// Relaxed because a failed CAS only restarts the loop.
#[inline(always)]
unsafe fn atomic_update<F>(dst: *mut u64, order: Ordering, mut f: F) -> u64
where
    F: FnMut(u64) -> u64,
{
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let mut prev = byte_wise_atomic_load(dst);
        loop {
            let next = f(prev);
            match atomic_compare_exchange_weak(dst, prev, next, order, Ordering::Relaxed) {
                Ok(x) => return x,
                Err(x) => prev = x,
            }
        }
    }
}
atomic_rmw_by_atomic_update!();
Become a Patron
Sponsor on GitHub
Donate via PayPal
Compiler Explorer Shop
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
CE on Bluesky
Statistics
Changelog
Version tree