Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
Go
Haskell
HLSL
Hook
Hylo
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Nim
Objective-C
Objective-C++
OCaml
OpenCL C
Pascal
Pony
Python
Racket
Ruby
Rust
Snowball
Scala
Solidity
Spice
Swift
LLVM TableGen
Toit
TypeScript Native
V
Vala
Visual Basic
Zig
JavaScript
GIMPLE
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
BPF gcc (trunk)
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.8.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
Options
Source code
// Experiments in getting rustc/LLVM to emit good code for `mem::swap`-style
// operations on fixed-size arrays and a packed struct.

#[no_mangle]
pub fn swap_2_arrays_2_u32(a: &mut [u32; 2], b: &mut [u32; 2]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_3_u32(a: &mut [u32; 3], b: &mut [u32; 3]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_4_32(a: &mut [u32; 4], b: &mut [u32; 4]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_3_64(a: &mut [u64; 3], b: &mut [u64; 3]) {
    swap_single(a, b);
}

/// Alignment-1 struct: its `usize` field sits at an unaligned offset.
#[repr(C, packed)]
pub struct Packed {
    pub v: bool,
    pub c: usize,
}

pub fn swap_packed(a: &mut Packed, b: &mut Packed) {
    swap_single(a, b);
}

/* Swap internals */
use core::mem::{align_of, size_of, MaybeUninit};
use core::ptr::{read, write};

/// Swaps `*a` and `*b`, picking a strategy from `T`'s size and alignment.
/// The associated consts make both conditions compile-time constants for
/// each monomorphization, so the unused strategy folds away entirely.
fn swap_single<T>(a: &mut T, b: &mut T) {
    trait ConstifyConditions {
        // If LLVM manages this (e.g. primitive integers)
        const IS_PROBABLY_SIMD: bool;
        const IS_OPTIMIZABLE_SIMPLE: bool;
    }
    impl<T: Sized> ConstifyConditions for T {
        const IS_PROBABLY_SIMD: bool = align_of::<T>() > align_of::<usize>();
        const IS_OPTIMIZABLE_SIMPLE: bool = size_of::<T>() <= 2 * align_of::<T>();
    }

    // Both fast-path conditions lead to the same simple swap.
    if <T as ConstifyConditions>::IS_PROBABLY_SIMD
        || <T as ConstifyConditions>::IS_OPTIMIZABLE_SIMPLE
    {
        swap_simple(a, b);
    } else {
        swap_chunks(a, b);
    }
}

/// Classic three-move swap through temporaries, expressed as raw
/// `MaybeUninit` reads/writes so no `T` value is ever "observed".
fn swap_simple<T>(a: &mut T, b: &mut T) {
    let pa: *mut MaybeUninit<T> = (a as *mut T).cast();
    let pb: *mut MaybeUninit<T> = (b as *mut T).cast();
    // SAFETY: `pa`/`pb` derive from valid, aligned, non-overlapping `&mut T`,
    // and `MaybeUninit<T>` has the same layout as `T`.
    unsafe {
        let tmp_a = read(pa);
        let tmp_b = read(pb);
        write(pa, tmp_b);
        write(pb, tmp_a);
    }
}

// This version tries chunks of only one size.
fn swap_chunks<T>(a: &mut T, b: &mut T) {
    // Expands to: if `T` is an exact multiple of suitably-aligned
    // `$ChunkTy` chunks, swap chunk-by-chunk and return.
    macro_rules! attempt_chunks {
        ($ChunkTy:ty) => {
            if size_of::<$ChunkTy>() <= size_of::<usize>()
                && size_of::<T>() % size_of::<$ChunkTy>() == 0
                && align_of::<T>() >= align_of::<$ChunkTy>()
            {
                let count = size_of::<T>() / size_of::<$ChunkTy>();
                let pa: *mut MaybeUninit<$ChunkTy> = (a as *mut T).cast();
                let pb: *mut MaybeUninit<$ChunkTy> = (b as *mut T).cast();
                for i in 0..count {
                    // SAFETY: `i < count` keeps both pointers inside their
                    // allocations; the alignment check above guarantees the
                    // chunk accesses are aligned; `&mut T` cannot overlap.
                    unsafe {
                        swap_simple(&mut *pa.add(i), &mut *pb.add(i));
                    }
                }
                return;
            }
        };
    }
    attempt_chunks!(u64);
    attempt_chunks!(u32);
    attempt_chunks!(u16);
    attempt_chunks!(u8);
    // `u8` chunks (size 1, align 1) always qualify, so this is dead code.
    unreachable!();
}
rust source #2
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
BPF gcc (trunk)
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.8.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
Options
Source code
// Version 2 of the swap experiment: `swap_chunks` splits `T` into a prefix
// of u64 chunks followed by u32/u16/u8 remainders, with every offset and
// count computed at compile time, and uses unaligned accesses so no
// per-chunk alignment check is needed.

#[no_mangle]
pub fn swap_2_arrays_2_u32(a: &mut [u32; 2], b: &mut [u32; 2]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_3_u32(a: &mut [u32; 3], b: &mut [u32; 3]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_4_32(a: &mut [u32; 4], b: &mut [u32; 4]) {
    swap_single(a, b);
}

#[no_mangle]
pub fn swap_2_arrays_3_64(a: &mut [u64; 3], b: &mut [u64; 3]) {
    swap_single(a, b);
}

/// Alignment-1 struct: its `usize` field sits at an unaligned offset.
#[repr(C, packed)]
pub struct Packed {
    pub v: bool,
    pub c: usize,
}

pub fn swap_packed(a: &mut Packed, b: &mut Packed) {
    swap_single(a, b);
}

/* Swap internals */
use core::mem::{align_of, size_of, MaybeUninit};
use core::ptr::{read, read_unaligned, write, write_unaligned};

/// Swaps `*a` and `*b`; the strategy is chosen per-`T` at compile time via
/// associated consts, so the unused branch folds away.
fn swap_single<T>(a: &mut T, b: &mut T) {
    trait ConstifyConditions {
        const IS_PROBABLY_SIMD: bool; // If LLVM manages this (e.g. primitive integers)
        const IS_OPTIMIZABLE_SIMPLE: bool;
    }
    impl<T: Sized> ConstifyConditions for T {
        const IS_PROBABLY_SIMD: bool = align_of::<T>() > align_of::<usize>();
        const IS_OPTIMIZABLE_SIMPLE: bool = size_of::<T>() <= 2 * align_of::<T>();
    }
    if <T as ConstifyConditions>::IS_PROBABLY_SIMD {
        return swap_simple(a, b);
    }
    if <T as ConstifyConditions>::IS_OPTIMIZABLE_SIMPLE {
        return swap_simple(a, b);
    }
    swap_chunks(a, b);
}

/// Plain read/read/write/write swap through `MaybeUninit` temporaries.
fn swap_simple<T>(a: &mut T, b: &mut T) {
    // SAFETY: both pointers derive from valid, aligned, non-overlapping
    // `&mut T`, and `MaybeUninit<T>` has the same layout as `T`.
    unsafe {
        let tmp_a: MaybeUninit<T> = read((a as *mut T).cast());
        let tmp_b: MaybeUninit<T> = read((b as *mut T).cast());
        write((a as *mut T).cast(), tmp_b);
        write((b as *mut T).cast(), tmp_a);
    }
}

/// Swaps `*a`/`*b` as a u64 prefix followed by u32, u16, and u8 leftovers.
fn swap_chunks<T>(a: &mut T, b: &mut T) {
    // Per-chunk-size byte offset and chunk count; all compile-time consts.
    trait ConstifyOffsets {
        const OFF_U64: usize;
        const COUNT_U64: usize;
        const OFF_U32: usize;
        const COUNT_U32: usize;
        const OFF_U16: usize;
        const COUNT_U16: usize;
        const OFF_U8: usize;
        const COUNT_U8: usize;
    }
    // How many `$ChunkTy` chunks fit in the bytes of `T` past `$offset`.
    macro_rules! gen_count {
        ($ChunkTy:ty, $offset:expr) => {
            if size_of::<$ChunkTy>() <= size_of::<usize>() {
                (size_of::<T>() - $offset) / size_of::<$ChunkTy>()
            } else {
                // Don't try to split by chunks larger than a pointer.
                0
            }
        };
    }
    impl<T: Sized> ConstifyOffsets for T {
        const OFF_U64: usize = 0;
        const COUNT_U64: usize = gen_count!(u64, Self::OFF_U64);
        const OFF_U32: usize = Self::OFF_U64 + Self::COUNT_U64 * size_of::<u64>();
        const COUNT_U32: usize = gen_count!(u32, Self::OFF_U32);
        const OFF_U16: usize = Self::OFF_U32 + Self::COUNT_U32 * size_of::<u32>();
        const COUNT_U16: usize = gen_count!(u16, Self::OFF_U16);
        const OFF_U8: usize = Self::OFF_U16 + Self::COUNT_U16 * size_of::<u16>();
        const COUNT_U8: usize = gen_count!(u8, Self::OFF_U8);
    }
    // Sanity check: the u8 pass must exactly cover whatever the larger
    // chunk sizes left behind.
    assert_eq!(
        <T as ConstifyOffsets>::COUNT_U8,
        size_of::<T>() - <T as ConstifyOffsets>::OFF_U8
    );
    let a: *mut MaybeUninit<u8> = (a as *mut T).cast();
    let b: *mut MaybeUninit<u8> = (b as *mut T).cast();
    // Swaps `$count` `$ChunkTy`s starting `$offset` bytes into each value,
    // using unaligned accesses so chunk alignment is never required.
    macro_rules! attempt_chunks {
        ($ChunkTy:ty, $offset:expr, $count:expr) => {
            if $count != 0 {
                // SAFETY: offsets and counts are derived from
                // `size_of::<T>()`, so every access stays inside the two
                // `&mut T` allocations, which cannot overlap.
                unsafe {
                    let a: *mut MaybeUninit<$ChunkTy> = a.add($offset).cast();
                    let b: *mut MaybeUninit<$ChunkTy> = b.add($offset).cast();
                    let mut i = 0;
                    while i < $count {
                        let a = a.add(i);
                        let b = b.add(i);
                        let tmp_a = read_unaligned(a);
                        let tmp_b = read_unaligned(b);
                        write_unaligned(a, tmp_b);
                        write_unaligned(b, tmp_a);
                        i += 1;
                    }
                }
            }
        };
    }
    attempt_chunks!(
        u64,
        <T as ConstifyOffsets>::OFF_U64,
        <T as ConstifyOffsets>::COUNT_U64
    );
    attempt_chunks!(
        u32,
        <T as ConstifyOffsets>::OFF_U32,
        <T as ConstifyOffsets>::COUNT_U32
    );
    attempt_chunks!(
        u16,
        <T as ConstifyOffsets>::OFF_U16,
        <T as ConstifyOffsets>::COUNT_U16
    );
    attempt_chunks!(
        u8,
        <T as ConstifyOffsets>::OFF_U8,
        <T as ConstifyOffsets>::COUNT_U8
    );
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
About the author
Statistics
Changelog
Version tree