rust source #1
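This first pane drives the unstable core::intrinsics atomics directly: for each operation (load, store, swap, fetch_add, and so on) and each supported width it expands one #[inline(never)] wrapper per memory ordering, so every ordering shows up as a separate symbol in the generated assembly. It requires a nightly rustc because of #![feature(core_intrinsics)]. As a point of reference, a minimal sketch of what one of these probes looks like on the stable core::sync::atomic API instead (illustrative name, not part of the pane):

use core::sync::atomic::{AtomicU32, Ordering};

// Stable-API counterpart of the intrinsics-based load::u32::acquire below:
// an acquire load kept out-of-line so its codegen is visible on its own.
#[inline(never)]
pub fn load_u32_acquire(a: &AtomicU32) -> u32 {
    a.load(Ordering::Acquire)
}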
#![no_std]
#![allow(unused, internal_features)]
#![feature(cfg_target_has_atomic, core_intrinsics)]

/*
macro_rules! atomic_update {
    ($t:ident) => {
        mod $t {
            pub(crate) use core::sync::atomic::Ordering;

            #[inline(always)]
            pub(crate) unsafe fn atomic_update<F>(dst: *mut $t, order: Ordering, mut f: F) -> $t
            where
                F: FnMut($t) -> $t,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
                let mut old = core::intrinsics::atomic_load_relaxed(dst);
                loop {
                    let next = f(old);
                    let (x, ok) = match order {
                        Ordering::Relaxed => core::intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, next),
                        Ordering::Acquire => core::intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, next),
                        Ordering::Release => core::intrinsics::atomic_cxchgweak_release_relaxed(dst, old, next),
                        Ordering::AcqRel => core::intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, next),
                        Ordering::SeqCst => core::intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, next),
                        _ => unreachable!(),
                    };
                    if ok {
                        return x;
                    }
                    old = x;
                }
            }
        }
    };
}
#[cfg(target_has_atomic = "8")] atomic_update!(u8);
#[cfg(target_has_atomic = "16")] atomic_update!(u16);
#[cfg(target_has_atomic = "32")] atomic_update!(u32);
#[cfg(target_has_atomic = "64")] atomic_update!(u64);
#[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] atomic_update!(u128);
*/

pub mod load {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_load_relaxed(a) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_load_acquire(a) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_load_seqcst(a) }
            }
        };
    }
    #[cfg(target_has_atomic_load_store = "8")] t!(u8);
    #[cfg(target_has_atomic_load_store = "16")] t!(u16);
    #[cfg(target_has_atomic_load_store = "32")] t!(u32);
    #[cfg(target_has_atomic_load_store = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod store {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) { core::intrinsics::atomic_store_relaxed(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) { core::intrinsics::atomic_store_release(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) { core::intrinsics::atomic_store_seqcst(a, val) }
            }
        };
    }
    #[cfg(target_has_atomic_load_store = "8")] t!(u8);
    #[cfg(target_has_atomic_load_store = "16")] t!(u16);
    #[cfg(target_has_atomic_load_store = "32")] t!(u32);
    #[cfg(target_has_atomic_load_store = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod swap {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xchg_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xchg_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xchg_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xchg_seqcst(a, val) }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

/*
pub mod compare_exchange {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                #[inline(never)] pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchg_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod compare_exchange_weak {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                // #[inline(never)]
                pub unsafe fn relaxed_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn relaxed_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn relaxed_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_relaxed_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acquire_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acquire_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acquire_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acquire_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn release_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn release_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn release_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_release_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acqrel_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acqrel_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn acqrel_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_acqrel_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn seqcst_relaxed(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_relaxed(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn seqcst_acquire(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_acquire(a, old, new); if ok { Ok(val) } else { Err(val) } }
                // #[inline(never)]
                pub unsafe fn seqcst_seqcst(a: A, old: T, new: T) -> Result<T, T> { let (val, ok) = core::intrinsics::atomic_cxchgweak_seqcst_seqcst(a, old, new); if ok { Ok(val) } else { Err(val) } }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}
*/

pub mod fetch_add {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xadd_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xadd_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xadd_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xadd_seqcst(a, val) }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod fetch_sub {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xsub_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xsub_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xsub_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xsub_seqcst(a, val) }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod fetch_and {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_and_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_and_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_and_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_and_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_and_seqcst(a, val) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

/*
pub mod fetch_nand {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_nand_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_nand_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_nand_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_nand_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_nand_seqcst(a, val) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}
*/

pub mod fetch_or {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_or_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_or_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_or_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_or_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_or_seqcst(a, val) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod fetch_xor {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_xor_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_xor_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_xor_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_xor_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_xor_seqcst(a, val) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

pub mod fetch_not {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                // `not` is xor with an all-ones mask; `!0` (rather than `T::MAX`) is the
                // all-ones value for both the signed and unsigned instantiations below.
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { core::intrinsics::atomic_xor_relaxed(a, !0) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { core::intrinsics::atomic_xor_acquire(a, !0) }
                #[inline(never)] pub unsafe fn release(a: A) -> T { core::intrinsics::atomic_xor_release(a, !0) }
                #[inline(never)] pub unsafe fn acqrel(a: A) -> T { core::intrinsics::atomic_xor_acqrel(a, !0) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { core::intrinsics::atomic_xor_seqcst(a, !0) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] t!(u128);
}

/*
pub mod fetch_neg {
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A) -> T { atomic_update(a, Ordering::Relaxed, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn acquire(a: A) -> T { atomic_update(a, Ordering::Acquire, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn release(a: A) -> T { atomic_update(a, Ordering::Release, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn acqrel(a: A) -> T { atomic_update(a, Ordering::AcqRel, $t::wrapping_neg) }
                #[inline(never)] pub unsafe fn seqcst(a: A) -> T { atomic_update(a, Ordering::SeqCst, $t::wrapping_neg) }
            }
        };
    }
    #[cfg(target_has_atomic = "8")] u!(u8);
    #[cfg(target_has_atomic = "16")] u!(u16);
    #[cfg(target_has_atomic = "32")] u!(u32);
    #[cfg(target_has_atomic = "64")] u!(u64);
    #[cfg(any(target_arch = "aarch64", all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")), target_arch = "s390x", all(target_arch = "x86_64", target_feature = "cmpxchg16b")))] u!(u128);
}
*/

pub mod fetch_max {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_max_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_max_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_max_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_max_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_max_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::max(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::max(x as T, val) as U) as T }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "32")] t!(i32);
    #[cfg(target_has_atomic = "64")] t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

pub mod fetch_umax {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umax_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umax_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umax_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umax_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umax_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::max(x, val)) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::max(x, val)) }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}

pub mod fetch_min {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_min_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_min_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_min_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_min_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_min_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident, $u:ident) => {
            pub mod $t {
                use crate::$u::*;
                type T = $t;
                type U = $u;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Relaxed, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Acquire, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::Release, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::AcqRel, |x| core::cmp::min(x as T, val) as U) as T }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a.cast::<U>(), Ordering::SeqCst, |x| core::cmp::min(x as T, val) as U) as T }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(i8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(i16);
    #[cfg(target_has_atomic = "32")] t!(i32);
    #[cfg(target_has_atomic = "64")] t!(i64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(i128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(i128, u128);
}

pub mod fetch_umin {
    macro_rules! t {
        ($t:ident) => {
            pub mod $t {
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { core::intrinsics::atomic_umin_relaxed(a, val) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { core::intrinsics::atomic_umin_acquire(a, val) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { core::intrinsics::atomic_umin_release(a, val) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { core::intrinsics::atomic_umin_acqrel(a, val) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { core::intrinsics::atomic_umin_seqcst(a, val) }
            }
        };
    }
    macro_rules! u {
        ($t:ident) => {
            pub mod $t {
                use crate::$t::*;
                type T = $t;
                type A = *mut T;
                #[inline(never)] pub unsafe fn relaxed(a: A, val: T) -> T { atomic_update(a, Ordering::Relaxed, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn acquire(a: A, val: T) -> T { atomic_update(a, Ordering::Acquire, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn release(a: A, val: T) -> T { atomic_update(a, Ordering::Release, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn acqrel(a: A, val: T) -> T { atomic_update(a, Ordering::AcqRel, |x| core::cmp::min(x, val)) }
                #[inline(never)] pub unsafe fn seqcst(a: A, val: T) -> T { atomic_update(a, Ordering::SeqCst, |x| core::cmp::min(x, val)) }
            }
        };
    }
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "8")] t!(u8);
    #[cfg(portable_atomic_target_feature = "zabha")] #[cfg(target_has_atomic = "16")] t!(u16);
    #[cfg(target_has_atomic = "32")] t!(u32);
    #[cfg(target_has_atomic = "64")] t!(u64);
    #[cfg(any(
        target_arch = "aarch64",
        // all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")),
        target_arch = "s390x",
        all(target_arch = "x86_64", target_feature = "cmpxchg16b"),
    ))]
    t!(u128);
    #[cfg(all(target_arch = "powerpc64", any(portable_atomic_target_feature = "quadword-atomics", atomic_maybe_uninit_target_feature = "quadword-atomics")))]
    u!(u128);
}
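The commented-out atomic_update helper at the top of this pane is a standard weak-CAS retry loop: the first load and every failed exchange stay relaxed, and only the successful exchange carries the requested ordering. A self-contained sketch of the same loop on the stable API, assuming AtomicU32 for concreteness:

use core::sync::atomic::{AtomicU32, Ordering};

// Load once relaxed, then retry compare_exchange_weak until it sticks.
// Failures use Relaxed: `f` only operates on the loaded value, so failed
// attempts need no synchronization (as the original comment notes).
fn atomic_update(a: &AtomicU32, order: Ordering, mut f: impl FnMut(u32) -> u32) -> u32 {
    let mut old = a.load(Ordering::Relaxed);
    loop {
        match a.compare_exchange_weak(old, f(old), order, Ordering::Relaxed) {
            Ok(prev) => return prev,
            Err(prev) => old = prev,
        }
    }
}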
rust source #2
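This second pane, taken from portable-atomic, exercises the same probe pattern through hand-written RISC-V atomic types instead of intrinsics: loads and stores are plain l/s instructions bracketed by explicit fences, and read-modify-writes lower to single amo* instructions via inline asm (Zaamo, with Zabha gating the byte/halfword forms). A hypothetical driver for the AtomicU32 the pane defines, shown only to illustrate the intended call shape:

use core::sync::atomic::Ordering;

// Hypothetical caller of the crate-local AtomicU32 below: the backend emits
// `fence rw, w` + `sw` for the release store, and a single `amoadd.w.aqrl`
// for the AcqRel fetch_add.
fn demo(a: &AtomicU32) -> u32 {
    a.store(1, Ordering::Release);
    a.fetch_add(41, Ordering::AcqRel)
}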
// From https://github.com/taiki-e/portable-atomic #![no_std] #![allow(unused)] pub mod load { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(src: *const T) -> T { (*(src as A)).load(Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(src: *const T) -> T { (*(src as A)).load(Ordering::Acquire) } #[inline(never)] pub unsafe fn seqcst(src: *const T) -> T { (*(src as A)).load(Ordering::SeqCst) } } }; } t!(u8, AtomicU8); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod store { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) { (*(dst as A)).store(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) { (*(dst as A)).store(val, Ordering::Release) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) { (*(dst as A)).store(val, Ordering::SeqCst) } } }; } t!(u8, AtomicU8); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod swap { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).swap(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).swap(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).swap(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).swap(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).swap(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(u8, AtomicU8); #[cfg(portable_atomic_target_feature = "zabha")] t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_add { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_add(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_add(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_add(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_add(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_add(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(u8, AtomicU8); #[cfg(portable_atomic_target_feature = "zabha")] t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_sub { macro_rules! 
t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_sub(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_sub(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_sub(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_sub(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_sub(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(u8, AtomicU8); #[cfg(portable_atomic_target_feature = "zabha")] t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_and { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_and(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_and(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_and(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_and(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_and(val, Ordering::SeqCst) } } }; } t!(i8, AtomicI8); t!(u8, AtomicU8); t!(i16, AtomicI16); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_or { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_or(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_or(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_or(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_or(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_or(val, Ordering::SeqCst) } } }; } t!(i8, AtomicI8); t!(u8, AtomicU8); t!(i16, AtomicI16); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_xor { macro_rules! 
t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_xor(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_xor(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_xor(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_xor(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_xor(val, Ordering::SeqCst) } } }; } t!(i8, AtomicI8); t!(u8, AtomicU8); t!(i16, AtomicI16); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_not { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T) -> T { (*(dst as A)).fetch_not(Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T) -> T { (*(dst as A)).fetch_not(Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T) -> T { (*(dst as A)).fetch_not(Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T) -> T { (*(dst as A)).fetch_not(Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T) -> T { (*(dst as A)).fetch_not(Ordering::SeqCst) } } }; } t!(i8, AtomicI8); t!(u8, AtomicU8); t!(i16, AtomicI16); t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_max { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(i8, AtomicI8); #[cfg(portable_atomic_target_feature = "zabha")] t!(i16, AtomicI16); t!(i32, AtomicI32); #[cfg(target_arch = "riscv64")] t!(i64, AtomicI64); } pub mod fetch_umax { macro_rules! 
t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_max(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(u8, AtomicU8); #[cfg(portable_atomic_target_feature = "zabha")] t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } pub mod fetch_min { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(i8, AtomicI8); #[cfg(portable_atomic_target_feature = "zabha")] t!(i16, AtomicI16); t!(i32, AtomicI32); #[cfg(target_arch = "riscv64")] t!(i64, AtomicI64); } pub mod fetch_umin { macro_rules! t { ($t:ident, $a:ident) => { pub mod $t { use core::sync::atomic::Ordering; type T = $t; type A = *const crate::$a; #[inline(never)] pub unsafe fn relaxed(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Relaxed) } #[inline(never)] pub unsafe fn acquire(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Acquire) } #[inline(never)] pub unsafe fn release(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::Release) } #[inline(never)] pub unsafe fn acqrel(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::AcqRel) } #[inline(never)] pub unsafe fn seqcst(dst: *mut T, val: T) -> T { (*(dst as A)).fetch_min(val, Ordering::SeqCst) } } }; } #[cfg(portable_atomic_target_feature = "zabha")] t!(u8, AtomicU8); #[cfg(portable_atomic_target_feature = "zabha")] t!(u16, AtomicU16); t!(u32, AtomicU32); #[cfg(target_arch = "riscv64")] t!(u64, AtomicU64); } #[macro_use] mod utils { #[cfg(all( target_pointer_width = "32", any( target_arch = "aarch64", target_arch = "bpf", target_arch = "loongarch64", target_arch = "mips64", target_arch = "mips64r6", target_arch = "nvptx64", target_arch = "powerpc64", target_arch = "riscv64", target_arch = "s390x", target_arch = "sparc64", target_arch = "wasm64", target_arch = "x86_64", ), ))] macro_rules! ptr_reg { ($ptr:ident) => {{ let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _) #[cfg(not(portable_atomic_no_asm_maybe_uninit))] #[allow(clippy::ptr_as_ptr)] { // If we cast to u64 here, the provenance will be lost, // so we convert to MaybeUninit<u64> via zero extend helper. 
crate::utils::zero_extend64_ptr($ptr as *mut ()) } #[cfg(portable_atomic_no_asm_maybe_uninit)] { // Use cast on old rustc because it does not support MaybeUninit // registers. This is still permissive-provenance compatible and // is sound. $ptr as u64 } }}; } #[cfg(not(all( target_pointer_width = "32", any( target_arch = "aarch64", target_arch = "bpf", target_arch = "loongarch64", target_arch = "mips64", target_arch = "mips64r6", target_arch = "nvptx64", target_arch = "powerpc64", target_arch = "riscv64", target_arch = "s390x", target_arch = "sparc64", target_arch = "wasm64", target_arch = "x86_64", ), )))] macro_rules! ptr_reg { ($ptr:ident) => {{ let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _) $ptr // cast is unnecessary here. }}; } use core::sync::atomic::Ordering; #[inline] pub(crate) fn assert_load_ordering(order: Ordering) { match order { Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {} Ordering::Release => panic!("there is no such thing as a release load"), Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"), _ => unreachable!("{:?}", order), } } #[inline] pub(crate) fn assert_store_ordering(order: Ordering) { match order { Ordering::Release | Ordering::Relaxed | Ordering::SeqCst => {} Ordering::Acquire => panic!("there is no such thing as an acquire store"), Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"), _ => unreachable!("{:?}", order), } } #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] type MinWord = u32; #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] type RetInt = u32; // Adapted from https://github.com/taiki-e/atomic-maybe-uninit/blob/v0.3.0/src/utils.rs#L210. // Helper for implementing sub-word atomic operations using word-sized LL/SC loop or CAS loop. // // Refs: https://github.com/llvm/llvm-project/blob/llvmorg-18.1.2/llvm/lib/CodeGen/AtomicExpandPass.cpp#L691 // (aligned_ptr, shift, mask) #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] #[allow(dead_code)] #[inline] pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) { use core::mem; const SHIFT_MASK: bool = !cfg!(any( target_arch = "riscv32", target_arch = "riscv64", target_arch = "loongarch64", target_arch = "s390x", )); let ptr_mask = mem::size_of::<MinWord>() - 1; let aligned_ptr = strict::with_addr(ptr, ptr as usize & !ptr_mask) as *mut MinWord; let ptr_lsb = if SHIFT_MASK { ptr as usize & ptr_mask } else { // We use 32-bit wrapping shift instructions in asm on these platforms. ptr as usize }; let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) { ptr_lsb.wrapping_mul(8) } else { (ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8) }; let mut mask: RetInt = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RetInt if SHIFT_MASK { mask <<= shift; } (aligned_ptr, shift as RetInt, mask) } /// Emulate strict provenance. /// /// Once strict_provenance is stable, migrate to the standard library's APIs. #[cfg(any(miri, target_arch = "riscv32", target_arch = "riscv64"))] #[allow(dead_code)] #[allow(clippy::cast_possible_wrap)] pub(crate) mod strict { /// Replace the address portion of this pointer with a new address. #[inline] #[must_use] pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T { // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. 
// // In the mean-time, this operation is defined to be "as if" it was // a wrapping_offset, so we can emulate it as such. This should properly // restore pointer provenance even under today's compiler. let self_addr = ptr as usize as isize; let dest_addr = addr as isize; let offset = dest_addr.wrapping_sub(self_addr); // This is the canonical desugaring of this operation. (ptr as *mut u8).wrapping_offset(offset) as *mut T } /// Run an operation of some kind on a pointer. #[inline] #[must_use] pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T { with_addr(ptr, f(ptr as usize)) } } } use core::arch::asm; use core::{cell::UnsafeCell, sync::atomic::Ordering}; #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))] #[cfg(target_arch = "riscv32")] macro_rules! w { () => { "" }; } #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))] #[cfg(target_arch = "riscv64")] macro_rules! w { () => { "w" }; } #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] macro_rules! atomic_rmw_amo_ext { ("w") => { "+a" }; ("d") => { "+a" }; ("b") => { "+a,+zabha" }; ("h") => { "+a,+zabha" }; } #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] macro_rules! atomic_rmw_amo { ($op:ident, $dst:ident, $val:ident, $order:ident, $asm_suffix:tt) => {{ let out; macro_rules! op { ($asm_order:tt) => { // SAFETY: The user guaranteed that the AMO instruction is available in this // system by setting the portable_atomic_force_amo/target_feature and // portable_atomic_unsafe_assume_single_core. // The caller of this macro must guarantee the validity of the pointer. asm!( ".option push", // https://github.com/riscv-non-isa/riscv-asm-manual/blob/ad0de8c004e29c9a7ac33cfd054f4d4f9392f2fb/src/asm-manual.adoc#arch // Note that .insn <value> requires LLVM 19 https://github.com/llvm/llvm-project/commit/2a086dce691e3cc34a2fc27f4fb255bb2cbbfac9 concat!(".option arch, ", atomic_rmw_amo_ext!($asm_suffix)), concat!("amo", stringify!($op), ".", $asm_suffix, $asm_order, " {out}, {val}, 0({dst})"), ".option pop", dst = in(reg) ptr_reg!($dst), val = in(reg) $val, out = lateout(reg) out, options(nostack, preserves_flags), ) }; } match $order { Ordering::Relaxed => op!(""), Ordering::Acquire => op!(".aq"), Ordering::Release => op!(".rl"), // AcqRel and SeqCst RMWs are equivalent. Ordering::AcqRel | Ordering::SeqCst => op!(".aqrl"), _ => unreachable!(), } out }}; } // 32-bit val.wrapping_shl(shift) but no extra `& (u32::BITS - 1)` #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))] #[inline(always)] fn sllw(val: u32, shift: u32) -> u32 { // SAFETY: Calling sll{,w} is safe. 
unsafe { let out; asm!( concat!("sll", w!(), " {out}, {val}, {shift}"), out = lateout(reg) out, val = in(reg) val, shift = in(reg) shift, options(pure, nomem, nostack, preserves_flags), ); out } } // 32-bit val.wrapping_shr(shift) but no extra `& (u32::BITS - 1)` #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))] macro_rules! srlw { ($val:expr, $shift:expr) => { // SAFETY: Calling srl{,w} is safe. unsafe { let val: u32 = $val; let shift: u32 = $shift; let out; asm!( concat!("srl", w!(), " {out}, {val}, {shift}"), out = lateout(reg) out, val = in(reg) val, shift = in(reg) shift, options(pure, nomem, nostack, preserves_flags), ); out } }; } macro_rules! atomic_load_store { ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => { #[repr(transparent)] pub(crate) struct $atomic_type $(<$($generics)*>)? { v: UnsafeCell<$value_type>, } // Send is implicitly implemented for atomic integers, but not for atomic pointers. // SAFETY: any data races are prevented by atomic operations. unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {} // SAFETY: any data races are prevented by atomic operations. unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {} #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))] impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? { #[inline] pub(crate) const fn new(v: $value_type) -> Self { Self { v: UnsafeCell::new(v) } } #[inline] pub(crate) fn is_lock_free() -> bool { Self::IS_ALWAYS_LOCK_FREE } pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true; #[inline] pub(crate) fn get_mut(&mut self) -> &mut $value_type { // SAFETY: the mutable reference guarantees unique ownership. // (UnsafeCell::get_mut requires Rust 1.50) unsafe { &mut *self.v.get() } } #[inline] pub(crate) const fn as_ptr(&self) -> *mut $value_type { self.v.get() } } impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? { #[inline] #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] pub(crate) fn load(&self, order: Ordering) -> $value_type { crate::utils::assert_load_ordering(order); let src = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { let out; macro_rules! atomic_load { ($acquire:tt, $release:tt) => { asm!( $release, concat!("l", $asm_suffix, " {out}, 0({src})"), $acquire, src = in(reg) ptr_reg!(src), out = lateout(reg) out, options(nostack, preserves_flags), ) }; } match order { Ordering::Relaxed => atomic_load!("", ""), Ordering::Acquire => atomic_load!("fence r, rw", ""), Ordering::SeqCst => atomic_load!("fence r, rw", "fence rw, rw"), _ => unreachable!(), } out } } #[inline] #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] pub(crate) fn store(&self, val: $value_type, order: Ordering) { crate::utils::assert_store_ordering(order); let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { macro_rules! 
atomic_store { ($acquire:tt, $release:tt) => { asm!( $release, concat!("s", $asm_suffix, " {val}, 0({dst})"), $acquire, dst = in(reg) ptr_reg!(dst), val = in(reg) val, options(nostack, preserves_flags), ) }; } match order { Ordering::Relaxed => atomic_store!("", ""), Ordering::Release => atomic_store!("", "fence rw, w"), // https://github.com/llvm/llvm-project/commit/3ea8f2526541884e03d5bd4f4e46f4eb190990b6 Ordering::SeqCst => atomic_store!("fence rw, rw", "fence rw, w"), _ => unreachable!(), } } } } }; } macro_rules! atomic_ptr { ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => { atomic_load_store!($([$($generics)*])? $atomic_type, $value_type, $asm_suffix); #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? { #[inline] pub(crate) fn swap(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_rmw_amo!(swap, dst, val, order, $asm_suffix) } } } }; } macro_rules! atomic { ($atomic_type:ident, $value_type:ty, $asm_suffix:tt, $max:tt, $min:tt) => { atomic_load_store!($atomic_type, $value_type, $asm_suffix); // There is no amo{sub,nand,neg}. #[cfg(any( test, portable_atomic_force_amo, target_feature = "zaamo", portable_atomic_target_feature = "zaamo", ))] impl $atomic_type { #[inline] pub(crate) fn swap(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_rmw_amo!(swap, dst, val, order, $asm_suffix) } } #[inline] pub(crate) fn fetch_add(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_rmw_amo!(add, dst, val, order, $asm_suffix) } } #[inline] pub(crate) fn fetch_sub(&self, val: $value_type, order: Ordering) -> $value_type { self.fetch_add(val.wrapping_neg(), order) } #[inline] pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_rmw_amo!(and, dst, val, order, $asm_suffix) } } #[inline] pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_rmw_amo!(or, dst, val, order, $asm_suffix) } } #[inline] pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type { let dst = self.v.get(); // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. 
macro_rules! atomic {
    ($atomic_type:ident, $value_type:ty, $asm_suffix:tt, $max:tt, $min:tt) => {
        atomic_load_store!($atomic_type, $value_type, $asm_suffix);

        // There is no amo{sub,nand,neg}.
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn swap(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(swap, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(add, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_sub(&self, val: $value_type, order: Ordering) -> $value_type {
                self.fetch_add(val.wrapping_neg(), order)
            }

            #[inline]
            pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(and, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(or, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(xor, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $value_type {
                self.fetch_xor(!0, order)
            }

            #[inline]
            pub(crate) fn fetch_max(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!($max, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_min(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!($min, dst, val, order, $asm_suffix) }
            }
        }
    };
}

#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
trait ZeroExtend: Copy {
    /// Zero-extends `self` to `u32` if it is smaller than 32-bit.
    fn zero_extend(self) -> u32;
}
macro_rules! zero_extend {
    ($int:ident, $uint:ident) => {
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl ZeroExtend for $uint {
            #[inline(always)]
            fn zero_extend(self) -> u32 {
                self as u32
            }
        }
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl ZeroExtend for $int {
            #[allow(clippy::cast_sign_loss)]
            #[inline(always)]
            fn zero_extend(self) -> u32 {
                self as $uint as u32
            }
        }
    };
}
zero_extend!(i8, u8);
zero_extend!(i16, u16);

macro_rules! atomic_sub_word {
    ($atomic_type:ident, $value_type:ty, $asm_suffix:tt, $max:tt, $min:tt) => {
        #[cfg(any(target_feature = "zabha", portable_atomic_target_feature = "zabha"))]
        atomic!($atomic_type, $value_type, $asm_suffix, $max, $min);
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        atomic_load_store!($atomic_type, $value_type, $asm_suffix);

        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, mut mask) = crate::utils::create_sub_word_mask_values(dst);
                mask = !sllw(mask, shift);
                let mut val = sllw(ZeroExtend::zero_extend(val), shift);
                val |= mask;
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(and, dst, val, order, "w") };
                srlw!(out, shift)
            }
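
            // Note (added): `create_sub_word_mask_values` returns the aligned 32-bit
            // word containing the sub-word value, the value's bit offset (`shift`)
            // within that word, and a mask of the value's bits. For `fetch_and` above,
            // the operand is shifted into its lane and every bit outside the lane is
            // set to 1 (`val |= !sllw(mask, shift)`), so a word-sized atomic AND
            // (`atomic_rmw_amo!(and, ..., "w")`) leaves the neighboring bytes unchanged.
            // For `fetch_or`/`fetch_xor` below, bits outside the lane stay 0, which is
            // already the identity for those operations. `srlw!` then shifts the
            // returned old word back down to recover the old sub-word value.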
            #[inline]
            pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
                let val = sllw(ZeroExtend::zero_extend(val), shift);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(or, dst, val, order, "w") };
                srlw!(out, shift)
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
                let val = sllw(ZeroExtend::zero_extend(val), shift);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(xor, dst, val, order, "w") };
                srlw!(out, shift)
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $value_type {
                self.fetch_xor(!0, order)
            }
        }
    };
}

atomic_sub_word!(AtomicI8, i8, "b", max, min);
atomic_sub_word!(AtomicU8, u8, "b", maxu, minu);
atomic_sub_word!(AtomicI16, i16, "h", max, min);
atomic_sub_word!(AtomicU16, u16, "h", maxu, minu);
atomic!(AtomicI32, i32, "w", max, min);
atomic!(AtomicU32, u32, "w", maxu, minu);
#[cfg(target_arch = "riscv64")]
atomic!(AtomicI64, i64, "d", max, min);
#[cfg(target_arch = "riscv64")]
atomic!(AtomicU64, u64, "d", maxu, minu);
#[cfg(target_pointer_width = "32")]
atomic!(AtomicIsize, isize, "w", max, min);
#[cfg(target_pointer_width = "32")]
atomic!(AtomicUsize, usize, "w", maxu, minu);
#[cfg(target_pointer_width = "32")]
atomic_ptr!([T] AtomicPtr, *mut T, "w");
#[cfg(target_pointer_width = "64")]
atomic!(AtomicIsize, isize, "d", max, min);
#[cfg(target_pointer_width = "64")]
atomic!(AtomicUsize, usize, "d", maxu, minu);
#[cfg(target_pointer_width = "64")]
atomic_ptr!([T] AtomicPtr, *mut T, "d");
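
The macro invocations above instantiate the crate-internal atomic types (AtomicI8/AtomicU8 through AtomicIsize/AtomicUsize and AtomicPtr<T>). A minimal usage sketch, assuming a caller inside the same crate (the methods are `pub(crate)`) and a target where the Zaamo gates above are satisfied so the RMW methods exist; the `demo` function is hypothetical:

use core::sync::atomic::Ordering;

fn demo() {
    // AtomicU32 is generated by `atomic!(AtomicU32, u32, "w", maxu, minu)` above.
    let a = AtomicU32::new(5);
    a.store(1, Ordering::Release); // `sw` preceded by `fence rw, w`
    // RMW methods go through `atomic_rmw_amo!` and return the previous value.
    let prev = a.fetch_add(41, Ordering::AcqRel);
    assert_eq!(prev, 1);
    assert_eq!(a.load(Ordering::Acquire), 42); // `lw` followed by `fence r, rw`
}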