Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
GLSL
Go
Haskell
HLSL
Hook
Hylo
IL
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Nim
Objective-C
Objective-C++
OCaml
OpenCL C
Pascal
Pony
Python
Racket
Ruby
Rust
Snowball
Scala
Solidity
Spice
SPIR-V
Swift
LLVM TableGen
Toit
TypeScript Native
V
Vala
Visual Basic
WASM
Zig
Javascript
GIMPLE
Ygen
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC)
Options
Source code
//! Argon2 memory block functions

// `no_std` for normal builds; test builds need `std` for the test harness.
#![cfg_attr(not(test), no_std)]

use core::{
    convert::{AsMut, AsRef},
    num::Wrapping,
    ops::{BitXor, BitXorAssign},
};

#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

/// Mask selecting the low 32 bits of a 64-bit word (Argon2's `trunc`).
const TRUNC: u64 = u32::MAX as u64;

/// One G function of Argon2's modified BLAKE2b round:
/// `a = a + b + 2 * trunc(a) * trunc(b)` (wrapping), interleaved with the
/// usual BLAKE2b xor/rotate mixing of `d`, `c`, and `b`.
///
/// `trunc(a) * trunc(b)` cannot overflow `u64` (both factors are < 2^32),
/// so only the additions and the doubling need `Wrapping`.
#[rustfmt::skip]
macro_rules! permute_step {
    ($a:expr, $b:expr, $c:expr, $d:expr) => {
        $a = (Wrapping($a) + Wrapping($b) + (Wrapping(2) * Wrapping(($a & TRUNC) * ($b & TRUNC)))).0;
        $d = ($d ^ $a).rotate_right(32);
        $c = (Wrapping($c) + Wrapping($d) + (Wrapping(2) * Wrapping(($c & TRUNC) * ($d & TRUNC)))).0;
        $b = ($b ^ $c).rotate_right(24);
        $a = (Wrapping($a) + Wrapping($b) + (Wrapping(2) * Wrapping(($a & TRUNC) * ($b & TRUNC)))).0;
        $d = ($d ^ $a).rotate_right(16);
        $c = (Wrapping($c) + Wrapping($d) + (Wrapping(2) * Wrapping(($c & TRUNC) * ($d & TRUNC)))).0;
        $b = ($b ^ $c).rotate_right(63);
    };
}

/// One full permutation round over 16 words: four column steps followed by
/// four diagonal steps, as in BLAKE2b.
macro_rules! permute {
    (
        $v0:expr, $v1:expr, $v2:expr, $v3:expr,
        $v4:expr, $v5:expr, $v6:expr, $v7:expr,
        $v8:expr, $v9:expr, $v10:expr, $v11:expr,
        $v12:expr, $v13:expr, $v14:expr, $v15:expr,
    ) => {
        // Columns
        permute_step!($v0, $v4, $v8, $v12);
        permute_step!($v1, $v5, $v9, $v13);
        permute_step!($v2, $v6, $v10, $v14);
        permute_step!($v3, $v7, $v11, $v15);
        // Diagonals
        permute_step!($v0, $v5, $v10, $v15);
        permute_step!($v1, $v6, $v11, $v12);
        permute_step!($v2, $v7, $v8, $v13);
        permute_step!($v3, $v4, $v9, $v14);
    };
}

/// Equivalent of C's `_MM_SHUFFLE(z, y, x, w)` immediate builder.
const fn _mm_shuffle2(z: i32, y: i32, x: i32, w: i32) -> i32 {
    (z << 6) | (y << 4) | (x << 2) | w
}

/// Rotate each 64-bit lane right by 32 (swap the two 32-bit halves).
macro_rules! rotr32 {
    ($x:expr) => {
        _mm256_shuffle_epi32($x, _mm_shuffle2(2, 3, 0, 1))
    };
}

/// Rotate each 64-bit lane right by 24 via a byte shuffle.
macro_rules! rotr24 {
    ($x:expr) => {
        _mm256_shuffle_epi8(
            $x,
            _mm256_setr_epi8(
                3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10,
                3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10,
            ),
        )
    };
}

/// Rotate each 64-bit lane right by 16 via a byte shuffle.
macro_rules! rotr16 {
    ($x:expr) => {
        _mm256_shuffle_epi8(
            $x,
            _mm256_setr_epi8(
                2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9,
                2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9,
            ),
        )
    };
}

/// Rotate each 64-bit lane right by 63, i.e. left by 1: `(x >> 63) ^ (x + x)`.
macro_rules! rotr63 {
    ($x:expr) => {
        _mm256_xor_si256(_mm256_srli_epi64($x, 63), _mm256_add_epi64($x, $x))
    };
}

/// First half-round of the vectorized BLAKE2b round (rotations by 32 and 24).
/// `_mm256_mul_epu32` multiplies the low 32 bits of each 64-bit lane,
/// implementing the `2 * trunc(a) * trunc(b)` term of Argon2's G.
macro_rules! G1_AVX2 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        let ml = _mm256_mul_epu32($A0, $B0);
        let ml = _mm256_add_epi64(ml, ml);
        $A0 = _mm256_add_epi64($A0, _mm256_add_epi64($B0, ml));
        $D0 = _mm256_xor_si256($D0, $A0);
        $D0 = rotr32!($D0);

        let ml = _mm256_mul_epu32($C0, $D0);
        let ml = _mm256_add_epi64(ml, ml);
        $C0 = _mm256_add_epi64($C0, _mm256_add_epi64($D0, ml));
        $B0 = _mm256_xor_si256($B0, $C0);
        $B0 = rotr24!($B0);

        let ml = _mm256_mul_epu32($A1, $B1);
        let ml = _mm256_add_epi64(ml, ml);
        $A1 = _mm256_add_epi64($A1, _mm256_add_epi64($B1, ml));
        $D1 = _mm256_xor_si256($D1, $A1);
        $D1 = rotr32!($D1);

        let ml = _mm256_mul_epu32($C1, $D1);
        let ml = _mm256_add_epi64(ml, ml);
        $C1 = _mm256_add_epi64($C1, _mm256_add_epi64($D1, ml));
        $B1 = _mm256_xor_si256($B1, $C1);
        $B1 = rotr24!($B1);
    }};
}

/// Second half-round of the vectorized BLAKE2b round (rotations by 16 and 63).
macro_rules! G2_AVX2 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        let ml = _mm256_mul_epu32($A0, $B0);
        let ml = _mm256_add_epi64(ml, ml);
        $A0 = _mm256_add_epi64($A0, _mm256_add_epi64($B0, ml));
        $D0 = _mm256_xor_si256($D0, $A0);
        $D0 = rotr16!($D0);

        let ml = _mm256_mul_epu32($C0, $D0);
        let ml = _mm256_add_epi64(ml, ml);
        $C0 = _mm256_add_epi64($C0, _mm256_add_epi64($D0, ml));
        $B0 = _mm256_xor_si256($B0, $C0);
        $B0 = rotr63!($B0);

        let ml = _mm256_mul_epu32($A1, $B1);
        let ml = _mm256_add_epi64(ml, ml);
        $A1 = _mm256_add_epi64($A1, _mm256_add_epi64($B1, ml));
        $D1 = _mm256_xor_si256($D1, $A1);
        $D1 = rotr16!($D1);

        let ml = _mm256_mul_epu32($C1, $D1);
        let ml = _mm256_add_epi64(ml, ml);
        $C1 = _mm256_add_epi64($C1, _mm256_add_epi64($D1, ml));
        $B1 = _mm256_xor_si256($B1, $C1);
        $B1 = rotr63!($B1);
    }};
}

/// Rotate rows within each register so the diagonal step can reuse the
/// column-step G functions (variant used by the row-wise rounds).
macro_rules! DIAGONALIZE_1 {
    ($A0:expr, $B0:expr, $C0:expr, $D0:expr, $A1:expr, $B1:expr, $C1:expr, $D1:expr) => {{
        $B0 = _mm256_permute4x64_epi64($B0, _mm_shuffle2(0, 3, 2, 1));
        $C0 = _mm256_permute4x64_epi64($C0, _mm_shuffle2(1, 0, 3, 2));
        $D0 = _mm256_permute4x64_epi64($D0, _mm_shuffle2(2, 1, 0, 3));

        $B1 = _mm256_permute4x64_epi64($B1, _mm_shuffle2(0, 3, 2, 1));
        $C1 = _mm256_permute4x64_epi64($C1, _mm_shuffle2(1, 0, 3, 2));
        $D1 = _mm256_permute4x64_epi64($D1, _mm_shuffle2(2, 1, 0, 3));
    }};
}

/// Diagonalization across register pairs (variant used by the column-wise
/// rounds): blends lanes between the paired registers instead of rotating
/// within one register.
macro_rules! DIAGONALIZE_2 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        let tmp1 = _mm256_blend_epi32($B0, $B1, 0xCC);
        let tmp2 = _mm256_blend_epi32($B0, $B1, 0x33);
        $B1 = _mm256_permute4x64_epi64(tmp1, _mm_shuffle2(2, 3, 0, 1));
        $B0 = _mm256_permute4x64_epi64(tmp2, _mm_shuffle2(2, 3, 0, 1));

        // Swap the C registers (cannot use `mem::swap` inside the macro:
        // both operands may index the same array).
        let tmp1 = $C0;
        $C0 = $C1;
        $C1 = tmp1;

        let tmp1 = _mm256_blend_epi32($D0, $D1, 0xCC);
        let tmp2 = _mm256_blend_epi32($D0, $D1, 0x33);
        $D0 = _mm256_permute4x64_epi64(tmp1, _mm_shuffle2(2, 3, 0, 1));
        $D1 = _mm256_permute4x64_epi64(tmp2, _mm_shuffle2(2, 3, 0, 1));
    }};
}

/// Inverse of [`DIAGONALIZE_1!`].
macro_rules! UNDIAGONALIZE_1 {
    ($A0:expr, $B0:expr, $C0:expr, $D0:expr, $A1:expr, $B1:expr, $C1:expr, $D1:expr) => {{
        $B0 = _mm256_permute4x64_epi64($B0, _mm_shuffle2(2, 1, 0, 3));
        $C0 = _mm256_permute4x64_epi64($C0, _mm_shuffle2(1, 0, 3, 2));
        $D0 = _mm256_permute4x64_epi64($D0, _mm_shuffle2(0, 3, 2, 1));

        $B1 = _mm256_permute4x64_epi64($B1, _mm_shuffle2(2, 1, 0, 3));
        $C1 = _mm256_permute4x64_epi64($C1, _mm_shuffle2(1, 0, 3, 2));
        $D1 = _mm256_permute4x64_epi64($D1, _mm_shuffle2(0, 3, 2, 1));
    }};
}

/// Inverse of [`DIAGONALIZE_2!`] (note the swapped blend masks for D).
macro_rules! UNDIAGONALIZE_2 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        let tmp1 = _mm256_blend_epi32($B0, $B1, 0xCC);
        let tmp2 = _mm256_blend_epi32($B0, $B1, 0x33);
        $B0 = _mm256_permute4x64_epi64(tmp1, _mm_shuffle2(2, 3, 0, 1));
        $B1 = _mm256_permute4x64_epi64(tmp2, _mm_shuffle2(2, 3, 0, 1));

        let tmp1 = $C0;
        $C0 = $C1;
        $C1 = tmp1;

        let tmp1 = _mm256_blend_epi32($D0, $D1, 0x33);
        let tmp2 = _mm256_blend_epi32($D0, $D1, 0xCC);
        $D0 = _mm256_permute4x64_epi64(tmp1, _mm_shuffle2(2, 3, 0, 1));
        $D1 = _mm256_permute4x64_epi64(tmp2, _mm_shuffle2(2, 3, 0, 1));
    }};
}

/// Full vectorized BLAKE2b round over two register quadruples, row layout.
macro_rules! BLAKE2_ROUND_1 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        G1_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);
        G2_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);

        DIAGONALIZE_1!($A0, $B0, $C0, $D0, $A1, $B1, $C1, $D1);

        G1_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);
        G2_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);

        UNDIAGONALIZE_1!($A0, $B0, $C0, $D0, $A1, $B1, $C1, $D1);
    }};
}

/// Full vectorized BLAKE2b round over two register quadruples, column layout.
macro_rules! BLAKE2_ROUND_2 {
    ($A0:expr, $A1:expr, $B0:expr, $B1:expr, $C0:expr, $C1:expr, $D0:expr, $D1:expr) => {{
        G1_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);
        G2_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);

        DIAGONALIZE_2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);

        G1_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);
        G2_AVX2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);

        UNDIAGONALIZE_2!($A0, $A1, $B0, $B1, $C0, $C1, $D0, $D1);
    }};
}

/// Structure for the (1 KiB) memory block implemented as 128 64-bit words.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(test, derive(PartialEq))]
#[repr(align(64))]
pub struct Block([u64; Self::SIZE / 8]);

impl Block {
    /// Memory block size in bytes
    pub const SIZE: usize = 1024;

    /// Returns a Block initialized with zeros.
    pub const fn new() -> Self {
        Self([0u64; Self::SIZE / 8])
    }

    /// Views the block as a flat little-endian byte array.
    pub(crate) fn as_bytes(&self) -> &[u8; Self::SIZE] {
        // SAFETY: `Block` is exactly `SIZE` bytes, with alignment >= 1, and
        // every bit pattern is valid for `u8`.
        unsafe { &*(self.0.as_ptr() as *const [u8; Self::SIZE]) }
    }

    /// Views the block as a mutable flat byte array.
    pub(crate) fn as_mut_bytes(&mut self) -> &mut [u8; Self::SIZE] {
        // SAFETY: same layout argument as `as_bytes`; the exclusive borrow
        // of `self` guarantees the cast reference is unique.
        unsafe { &mut *(self.0.as_mut_ptr() as *mut [u8; Self::SIZE]) }
    }

    /// Argon2 compression function G: computes `P(rhs ^ lhs) ^ (rhs ^ lhs)`
    /// where `P` is the BLAKE2b-based permutation above.
    pub fn compress(rhs: &Self, lhs: &Self) -> Self {
        // Only take the SIMD path when AVX2 is statically enabled for this
        // build (e.g. `-C target-feature=+avx2`). Executing AVX2 intrinsics
        // on a CPU without the feature is undefined behavior, and in a
        // `no_std` build there is no portable runtime detection; a
        // `cpufeatures`-based runtime check could relax this gate later.
        #[cfg(all(target_arch = "x86_64", target_feature = "avx2"))]
        // SAFETY: the cfg above guarantees AVX2 is available.
        return unsafe { Self::compress_avx2(rhs, lhs) };

        #[allow(unreachable_code)]
        Self::compress_safe(rhs, lhs)
    }

    /// Portable scalar implementation of the compression function.
    fn compress_safe(rhs: &Self, lhs: &Self) -> Self {
        let r = *rhs ^ lhs;

        // Apply permutations rowwise
        let mut q = r;
        for chunk in q.0.chunks_exact_mut(16) {
            #[rustfmt::skip]
            permute!(
                chunk[0], chunk[1], chunk[2], chunk[3],
                chunk[4], chunk[5], chunk[6], chunk[7],
                chunk[8], chunk[9], chunk[10], chunk[11],
                chunk[12], chunk[13], chunk[14], chunk[15],
            );
        }

        // Apply permutations columnwise
        for i in 0..8 {
            let b = i * 2;

            #[rustfmt::skip]
            permute!(
                q.0[b], q.0[b + 1],
                q.0[b + 16], q.0[b + 17],
                q.0[b + 32], q.0[b + 33],
                q.0[b + 48], q.0[b + 49],
                q.0[b + 64], q.0[b + 65],
                q.0[b + 80], q.0[b + 81],
                q.0[b + 96], q.0[b + 97],
                q.0[b + 112], q.0[b + 113],
            );
        }

        // Feedback xor: out = P(r) ^ r
        q ^= &r;
        q
    }

    /// AVX2 implementation of [`Self::compress_safe`].
    ///
    /// # Safety
    ///
    /// The caller must ensure the running CPU supports AVX2.
    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx2")]
    unsafe fn compress_avx2(rhs: &Self, lhs: &Self) -> Self {
        use core::arch::x86_64::*;

        // A block is 128 u64s = 32 lanes of 256 bits (4 u64s each).
        const LANES: usize = Block::SIZE / 32;

        // state = rhs ^ lhs, loaded into registers lane by lane.
        let mut state = [_mm256_setzero_si256(); LANES];
        for (i, lane) in state.iter_mut().enumerate() {
            *lane = _mm256_xor_si256(
                _mm256_loadu_si256(rhs.0.as_ptr().add(i * 4) as *const __m256i),
                _mm256_loadu_si256(lhs.0.as_ptr().add(i * 4) as *const __m256i),
            );
        }

        // Keep a copy of r = rhs ^ lhs for the final feedback xor, so that
        // out = P(r) ^ r, matching `compress_safe`. (A previous version
        // xor'ed only `lhs` back in, which produced a different result.)
        let r = state;

        // Row-wise rounds
        for i in 0..4 {
            #[rustfmt::skip]
            BLAKE2_ROUND_1!(
                state[8 * i], state[8 * i + 4],
                state[8 * i + 1], state[8 * i + 5],
                state[8 * i + 2], state[8 * i + 6],
                state[8 * i + 3], state[8 * i + 7]
            );
        }

        // Column-wise rounds
        for i in 0..4 {
            #[rustfmt::skip]
            BLAKE2_ROUND_2!(
                state[i], state[4 + i],
                state[8 + i], state[12 + i],
                state[16 + i], state[20 + i],
                state[24 + i], state[28 + i]
            );
        }

        // out = P(r) ^ r
        let mut out = Self::new();
        for i in 0..LANES {
            _mm256_storeu_si256(
                out.0.as_mut_ptr().add(i * 4) as *mut __m256i,
                _mm256_xor_si256(state[i], r[i]),
            );
        }
        out
    }
}

impl Default for Block {
    fn default() -> Self {
        Self::new()
    }
}

impl AsRef<[u64]> for Block {
    fn as_ref(&self) -> &[u64] {
        &self.0
    }
}

impl AsMut<[u64]> for Block {
    fn as_mut(&mut self) -> &mut [u64] {
        &mut self.0
    }
}

impl BitXor<&Block> for Block {
    type Output = Block;

    fn bitxor(mut self, rhs: &Block) -> Self::Output {
        self ^= rhs;
        self
    }
}

impl BitXorAssign<&Block> for Block {
    fn bitxor_assign(&mut self, rhs: &Block) {
        for (dst, src) in self.0.iter_mut().zip(rhs.0.iter()) {
            *dst ^= src;
        }
    }
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
About the author
Statistics
Changelog
Version tree