Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Algol68
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C with Coccinelle
C++ with Coccinelle
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
GLSL
Go
Haskell
HLSL
Hook
Hylo
IL
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Mojo
Nim
Numba
Nix
Objective-C
Objective-C++
OCaml
Odin
OpenCL C
Pascal
Pony
PTX
Python
Racket
Raku
Ruby
Rust
Sail
Snowball
Scala
Slang
Solidity
Spice
SPIR-V
Swift
LLVM TableGen
Toit
Triton
TypeScript Native
V
Vala
Visual Basic
Vyper
WASM
Zig
JavaScript
GIMPLE
Ygen
sway
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
// Experimental byte-wise swap routines comparing two code-generation
// strategies for swapping two values of arbitrary size in
// SIMD-register-sized chunks:
//   * `swap_chunked`     - reads each chunk into scalar u64 temporaries
//   * `swap_with_arrays` - stages each chunk through a local byte array
//
// NOTE(review): the original enabled #![feature(inline_const)],
// #![feature(const_mut_refs)] and #![feature(const_ptr_write)]; all three
// are stable as of Rust 1.83, so the feature gates have been removed and
// this file now builds on stable.
use std::mem::*;
use std::ptr;

/// Newtype wrapper giving the exported symbols a concrete,
/// non-trivially-sized type (24 bytes on 64-bit targets) to operate on.
pub struct Msg(String);

/// Swaps two `Msg` values using the scalar-temporary strategy.
#[no_mangle]
pub fn swap_with_sep(a: &mut Msg, b: &mut Msg) {
    swap_chunked(a, b);
}

/// Swaps two `Msg` values using the byte-array staging strategy.
#[no_mangle]
pub fn swap_with_arrs(a: &mut Msg, b: &mut Msg) {
    swap_with_arrays(a, b);
}

/// Swaps `*x` and `*y` in place: the bulk of the value is processed in
/// 32-byte (YMM-sized) chunks, then at most one 16-byte (XMM-sized)
/// chunk, then a scalar tail of fewer than 16 bytes.
#[inline]
const fn swap_chunked<T: Sized>(x: &mut T, y: &mut T) {
    /// Width of an XMM (128-bit) register in bytes.
    const XMM_BYTES: usize = 128 / 8;
    /// Width of a YMM (256-bit) register in bytes.
    const YMM_BYTES: usize = 256 / 8;

    /// Swaps `SIZE_BYTES` bytes at `x` and `y` via scalar `u64`
    /// temporaries (the hope being that the backend fuses the reads and
    /// writes into SIMD loads/stores).
    ///
    /// # Safety
    /// Both pointers must be valid for reads and writes of `SIZE_BYTES`
    /// bytes, and the two ranges must not overlap.
    #[inline]
    const unsafe fn swap_simd<const SIZE_BYTES: usize>(
        x: *mut MaybeUninit<u64>,
        y: *mut MaybeUninit<u64>,
    ) {
        const {
            assert!(
                SIZE_BYTES == XMM_BYTES || SIZE_BYTES == YMM_BYTES,
                "Must have valid SIMD register size",
            );
        }
        unsafe {
            const {
                assert!(XMM_BYTES == size_of::<u64>() * 2, "Check number of temporaries below.");
                assert!(YMM_BYTES == size_of::<u64>() * 4, "Check number of temporaries below.");
            }
            // Save the whole chunk from `x` before it is overwritten below.
            let x0: MaybeUninit<u64> = ptr::read_unaligned(x);
            let x1: MaybeUninit<u64> = ptr::read_unaligned(x.add(1));
            let x2: MaybeUninit<u64>;
            let x3: MaybeUninit<u64>;
            if const { SIZE_BYTES == YMM_BYTES } {
                x2 = ptr::read_unaligned(x.add(2));
                x3 = ptr::read_unaligned(x.add(3));
            } else {
                x2 = MaybeUninit::uninit();
                x3 = MaybeUninit::uninit();
            }
            // `y` -> `x` as one bulk copy, then the saved temporaries -> `y`.
            ptr::copy_nonoverlapping::<MaybeUninit<u8>>(y.cast(), x.cast(), SIZE_BYTES);
            ptr::write_unaligned(y, x0);
            ptr::write_unaligned(y.add(1), x1);
            if const { SIZE_BYTES == YMM_BYTES } {
                ptr::write_unaligned(y.add(2), x2);
                ptr::write_unaligned(y.add(3), x3);
            }
        }
    }

    /// Swaps one `ChunkType`-sized piece of the tail of `T`, but only if
    /// the tail still holds at least `size_of::<ChunkType>()` bytes not
    /// covered by the larger chunk sizes processed before this one.
    ///
    /// # Safety
    /// `x` and `y` must point to the starts of two valid, non-overlapping
    /// values of type `T`.
    #[inline]
    const unsafe fn swap_tail<ChunkType: Copy, T>(
        x: *mut MaybeUninit<u8>,
        y: *mut MaybeUninit<u8>,
    ) {
        // Bytes of T left over once everything divisible by
        // `2 * ChunkType` has been handled by larger chunk sizes.
        let remaining_bytes = const { size_of::<T>() % (2 * size_of::<ChunkType>()) };
        if remaining_bytes < const { size_of::<ChunkType>() } {
            return;
        }
        let offset_before_tail = size_of::<T>() - remaining_bytes;
        unsafe {
            let x = x.add(offset_before_tail);
            let y = y.add(offset_before_tail);
            let tmp_x: MaybeUninit<ChunkType> = ptr::read_unaligned(x.cast());
            let tmp_y: MaybeUninit<ChunkType> = ptr::read_unaligned(y.cast());
            ptr::write_unaligned(x.cast(), tmp_y);
            ptr::write_unaligned(y.cast(), tmp_x);
        }
    }

    const {
        assert!(size_of::<T>() <= usize::MAX / 2, "We assume that overflows cannot happen.");
    }
    let x: *mut MaybeUninit<u8> = (x as *mut T).cast();
    let y: *mut MaybeUninit<u8> = (y as *mut T).cast();
    let mut byte_offset = 0;
    // All whole 32-byte chunks.
    while byte_offset + YMM_BYTES <= const { size_of::<T>() } {
        unsafe {
            let x = x.add(byte_offset);
            let y = y.add(byte_offset);
            swap_simd::<YMM_BYTES>(x.cast(), y.cast());
            byte_offset += YMM_BYTES;
        }
    }
    // At most one 16-byte chunk can remain after the loop above.
    if byte_offset + XMM_BYTES <= const { size_of::<T>() } {
        unsafe {
            let x = x.add(byte_offset);
            let y = y.add(byte_offset);
            swap_simd::<XMM_BYTES>(x.cast(), y.cast());
            byte_offset += XMM_BYTES;
        }
    }
    debug_assert!(byte_offset + XMM_BYTES > size_of::<T>());
    // Scalar tail: fewer than 16 bytes remain, so each chunk size below
    // needs at most one swap.
    unsafe {
        const {
            assert!(size_of::<T>() % XMM_BYTES < 2 * size_of::<u64>());
        }
        swap_tail::<u64, T>(x, y);
        swap_tail::<u32, T>(x, y);
        swap_tail::<u16, T>(x, y);
        swap_tail::<u8, T>(x, y);
    }
}

/// Swaps `*x` and `*y` in place using the same chunking scheme as
/// [`swap_chunked`], but each chunk is staged through local byte arrays
/// instead of scalar temporaries.
#[inline]
const fn swap_with_arrays<T: Sized>(x: &mut T, y: &mut T) {
    /// Width of an XMM (128-bit) register in bytes.
    const XMM_BYTES: usize = 128 / 8;
    /// Width of a YMM (256-bit) register in bytes.
    const YMM_BYTES: usize = 256 / 8;

    /// Swaps `SIZE_BYTES` bytes at `x` and `y` by copying both chunks
    /// into local byte arrays and copying them back crosswise.
    ///
    /// # Safety
    /// Both pointers must be valid for reads and writes of `SIZE_BYTES`
    /// bytes, and the two ranges must not overlap.
    #[inline]
    const unsafe fn swap_simd<const SIZE_BYTES: usize>(
        x: *mut MaybeUninit<u64>,
        y: *mut MaybeUninit<u64>,
    ) {
        const {
            assert!(
                SIZE_BYTES == XMM_BYTES || SIZE_BYTES == YMM_BYTES,
                "Must have valid SIMD register size",
            );
        }
        unsafe {
            // BUG FIX: the temporaries were hard-coded as 16-byte arrays
            // (`[MaybeUninit<u8>; 16]`), so the YMM instantiation
            // (SIZE_BYTES == 32) overflowed each buffer by 16 bytes —
            // an out-of-bounds stack write for any T of 32+ bytes.
            // They are now sized by the SIZE_BYTES const parameter.
            let mut tmp_x: [MaybeUninit<u8>; SIZE_BYTES] = [MaybeUninit::uninit(); SIZE_BYTES];
            let mut tmp_y: [MaybeUninit<u8>; SIZE_BYTES] = [MaybeUninit::uninit(); SIZE_BYTES];
            let tmp_x = tmp_x.as_mut_ptr();
            let tmp_y = tmp_y.as_mut_ptr();
            ptr::copy_nonoverlapping(x.cast(), tmp_x, SIZE_BYTES);
            ptr::copy_nonoverlapping(y.cast(), tmp_y, SIZE_BYTES);
            ptr::copy_nonoverlapping(tmp_x, y.cast(), SIZE_BYTES);
            ptr::copy_nonoverlapping(tmp_y, x.cast(), SIZE_BYTES);
        }
    }

    /// Swaps one `ChunkType`-sized piece of the tail of `T`, but only if
    /// the tail still holds at least `size_of::<ChunkType>()` bytes not
    /// covered by the larger chunk sizes processed before this one.
    ///
    /// # Safety
    /// `x` and `y` must point to the starts of two valid, non-overlapping
    /// values of type `T`.
    #[inline]
    const unsafe fn swap_tail<ChunkType: Copy, T>(
        x: *mut MaybeUninit<u8>,
        y: *mut MaybeUninit<u8>,
    ) {
        // Bytes of T left over once everything divisible by
        // `2 * ChunkType` has been handled by larger chunk sizes.
        let remaining_bytes = const { size_of::<T>() % (2 * size_of::<ChunkType>()) };
        if remaining_bytes < const { size_of::<ChunkType>() } {
            return;
        }
        let offset_before_tail = size_of::<T>() - remaining_bytes;
        unsafe {
            let x = x.add(offset_before_tail);
            let y = y.add(offset_before_tail);
            let tmp_x: MaybeUninit<ChunkType> = ptr::read_unaligned(x.cast());
            let tmp_y: MaybeUninit<ChunkType> = ptr::read_unaligned(y.cast());
            ptr::write_unaligned(x.cast(), tmp_y);
            ptr::write_unaligned(y.cast(), tmp_x);
        }
    }

    const {
        assert!(size_of::<T>() <= usize::MAX / 2, "We assume that overflows cannot happen.");
    }
    let x: *mut MaybeUninit<u8> = (x as *mut T).cast();
    let y: *mut MaybeUninit<u8> = (y as *mut T).cast();
    let mut byte_offset = 0;
    // All whole 32-byte chunks.
    while byte_offset + YMM_BYTES <= const { size_of::<T>() } {
        unsafe {
            let x = x.add(byte_offset);
            let y = y.add(byte_offset);
            swap_simd::<YMM_BYTES>(x.cast(), y.cast());
            byte_offset += YMM_BYTES;
        }
    }
    // At most one 16-byte chunk can remain after the loop above.
    if byte_offset + XMM_BYTES <= const { size_of::<T>() } {
        unsafe {
            let x = x.add(byte_offset);
            let y = y.add(byte_offset);
            swap_simd::<XMM_BYTES>(x.cast(), y.cast());
            byte_offset += XMM_BYTES;
        }
    }
    debug_assert!(byte_offset + XMM_BYTES > size_of::<T>());
    // Scalar tail: fewer than 16 bytes remain, so each chunk size below
    // needs at most one swap.
    unsafe {
        const {
            assert!(size_of::<T>() % XMM_BYTES < 2 * size_of::<u64>());
        }
        swap_tail::<u64, T>(x, y);
        swap_tail::<u32, T>(x, y);
        swap_tail::<u16, T>(x, y);
        swap_tail::<u8, T>(x, y);
    }
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Compiler Explorer Shop
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
CE on Bluesky
Statistics
Changelog
Version tree