llvm source #1
define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.vec1, <32 x i16>* %out.vec2, <32 x i16>* %out.vec3, <32 x i16>* %out.vec4, <32 x i16>* %out.vec5) nounwind {
  %wide.vec = load <192 x i16>, <192 x i16>* %in.vec, align 32
  tail call void asm sideeffect "# LLVM-MCA-BEGIN", "~{dirflag},~{fpsr},~{flags}"()
  %strided.vec0 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186>
  %strided.vec1 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187>
  %strided.vec2 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188>
  %strided.vec3 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189>
  %strided.vec4 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190>
  %strided.vec5 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191>
  tail call void asm sideeffect "# LLVM-MCA-END", "~{dirflag},~{fpsr},~{flags}"()
  store <32 x i16> %strided.vec0, <32 x i16>* %out.vec0, align 32
  store <32 x i16> %strided.vec1, <32 x i16>* %out.vec1, align 32
  store <32 x i16> %strided.vec2, <32 x i16>* %out.vec2, align 32
  store <32 x i16> %strided.vec3, <32 x i16>* %out.vec3, align 32
  store <32 x i16> %strided.vec4, <32 x i16>* %out.vec4, align 32
  store <32 x i16> %strided.vec5, <32 x i16>* %out.vec5, align 32
  ret void
}
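This Compiler Explorer session pairs the LLVM IR pane above with the analysis pane below. The IR loads one <192 x i16> block and deinterleaves it into six stride-6 slices of <32 x i16> each (slice k gathers elements k, k+6, k+12, ...), and the two inline-asm calls plant "# LLVM-MCA-BEGIN" / "# LLVM-MCA-END" comments in the generated assembly to mark the region of interest for the analyzer. As a guide to the intended semantics only, here is a minimal scalar C sketch; the function name and signature are illustrative, not part of the original source:

#include <stdint.h>

/* Scalar model of @vf32: deinterleave a 192-element block of u16
   into six stride-6 slices of 32 elements each. Name and signature
   are hypothetical. */
void vf32_scalar(const uint16_t in[192], uint16_t *out[6]) {
    for (int k = 0; k < 6; ++k)        /* which slice (%strided.vecK) */
        for (int i = 0; i < 32; ++i)   /* element within the slice */
            out[k][i] = in[6 * i + k];
}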
analysis source #2
vperm2i128 ymm12, ymm3, ymm2, 49 # ymm12 = ymm3[2,3],ymm2[2,3]
vpshufd ymm4, ymm12, 104 # ymm4 = ymm12[0,2,2,1,4,6,6,5]
vpshufhw ymm4, ymm4, 170 # ymm4 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
vperm2i128 ymm2, ymm3, ymm2, 32 # ymm2 = ymm3[0,1],ymm2[0,1]
vpshufd ymm3, ymm2, 236 # ymm3 = ymm2[0,3,2,3,4,7,6,7]
vpshuflw ymm2, ymm3, 232 # ymm2 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
vpshufd ymm2, ymm2, 164 # ymm2 = ymm2[0,1,2,2,4,5,6,6]
vpblendw ymm2, ymm2, ymm4, 132 # ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5,6],ymm4[7],ymm2[8,9],ymm4[10],ymm2[11,12,13,14],ymm4[15]
vperm2i128 ymm9, ymm0, ymm1, 49 # ymm9 = ymm0[2,3],ymm1[2,3]
vperm2i128 ymm7, ymm0, ymm1, 32 # ymm7 = ymm0[0,1],ymm1[0,1]
vpblendd ymm1, ymm6, ymm5, 36 # ymm1 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
vmovdqa xmm4, xmmword ptr [rip + .LCPI0_6] # xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
vpshufb xmm0, xmm1, xmm4
vextracti128 xmm2, ymm1, 1
vpshuflw xmm3, xmm2, 170 # xmm3 = xmm2[2,2,2,2,4,5,6,7]
vpblendw xmm3, xmm0, xmm3, 18 # xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
vpshufd ymm10, ymm7, 102 # ymm10 = ymm7[2,1,2,1,6,5,6,5]
vpshufhw ymm0, ymm10, 170 # ymm0 = ymm10[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
vpshufd ymm13, ymm9, 196 # ymm13 = ymm9[0,1,0,3,4,5,4,7]
vpshuflw ymm11, ymm13, 170 # ymm11 = ymm13[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
vpshufhw ymm11, ymm11, 132 # ymm11 = ymm11[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
vpblendw ymm11, ymm11, ymm0, 33 # ymm11 = ymm0[0],ymm11[1,2,3,4],ymm0[5],ymm11[6,7],ymm0[8],ymm11[9,10,11,12],ymm0[13],ymm11[14,15]
vmovdqa xmm5, xmmword ptr [rip + .LCPI0_7] # xmm5 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
vpblendvb ymm0, ymm11, ymm3, ymm5
vmovdqa ymm0, ymm0
vmovdqa ymm6, ymm6
vpblendd ymm3, ymm0, ymm6, 36 # ymm3 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7]
vmovdqa ymm9, ymm6
vmovdqa ymm8, ymm0
vpshufb xmm7, xmm3, xmm4
vextracti128 xmm4, ymm3, 1
vpshuflw xmm6, xmm4, 170 # xmm6 = xmm4[2,2,2,2,4,5,6,7]
vpblendw xmm6, xmm7, xmm6, 18 # xmm6 = xmm7[0],xmm6[1],xmm7[2,3],xmm6[4],xmm7[5,6,7]
vpshufd ymm14, ymm12, 196 # ymm14 = ymm12[0,1,0,3,4,5,4,7]
vpshuflw ymm7, ymm14, 170 # ymm7 = ymm14[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
vpshufhw ymm7, ymm7, 132 # ymm7 = ymm7[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
vpshufd ymm0, ymm0, 102 # 32-byte Folded Reload # ymm0 = mem[2,1,2,1,6,5,6,5]
vpshufhw ymm15, ymm0, 170 # ymm15 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
vpblendw ymm7, ymm7, ymm15, 33 # ymm7 = ymm15[0],ymm7[1,2,3,4],ymm15[5],ymm7[6,7],ymm15[8],ymm7[9,10,11,12],ymm15[13],ymm7[14,15]
vpblendvb ymm6, ymm7, ymm6, ymm5
vpshuflw ymm6, ymm10, 85 # ymm6 = ymm10[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
vpshufd ymm6, ymm6, 244 # ymm6 = ymm6[0,1,3,3,4,5,7,7]
vpshuflw ymm7, ymm13, 255 # ymm7 = ymm13[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
vpshufhw ymm7, ymm7, 212 # ymm7 = ymm7[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
vpblendw ymm6, ymm7, ymm6, 33 # ymm6 = ymm6[0],ymm7[1,2,3,4],ymm6[5],ymm7[6,7],ymm6[8],ymm7[9,10,11,12],ymm6[13],ymm7[14,15]
vmovdqa xmm7, xmmword ptr [rip + .LCPI0_9] # xmm7 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
vpshufb xmm1, xmm1, xmm7
vpshufd xmm2, xmm2, 229 # xmm2 = xmm2[1,1,2,3]
vpshufhw xmm2, xmm2, 85 # xmm2 = xmm2[0,1,2,3,5,5,5,5]
vpblendw xmm1, xmm1, xmm2, 18 # xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
vpblendvb ymm1, ymm6, ymm1, ymm5
vpshuflw ymm0, ymm0, 85 # ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
vpshufd ymm0, ymm0, 244 # ymm0 = ymm0[0,1,3,3,4,5,7,7]
vpshuflw ymm1, ymm14, 255 # ymm1 = ymm14[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
vpshufhw ymm1, ymm1, 212 # ymm1 = ymm1[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
vpblendw ymm0, ymm1, ymm0, 33 # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
vpshufb xmm1, xmm3, xmm7
vpshufd xmm2, xmm4, 229 # xmm2 = xmm4[1,1,2,3]
vpshufhw xmm2, xmm2, 85 # xmm2 = xmm2[0,1,2,3,5,5,5,5]
vpblendw xmm1, xmm1, xmm2, 18 # xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
vpblendvb ymm0, ymm0, ymm1, ymm5
vpblendd ymm2, ymm9, ymm8, 146 # ymm2 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
vextracti128 xmm9, ymm2, 1
vpshufd xmm0, xmm9, 200 # xmm0 = xmm9[0,2,0,3]
vpshufhw xmm0, xmm0, 232 # xmm0 = xmm0[0,1,2,3,4,6,6,7]
vmovdqa xmm10, xmmword ptr [rip + .LCPI0_0] # xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
vpshufb xmm1, xmm2, xmm10
vpblendw xmm0, xmm1, xmm0, 52 # xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
vpblendd ymm0, ymm0, ymm0, 56 # 32-byte Folded Reload # ymm0 = ymm0[0,1,2],mem[3,4,5],ymm0[6,7]
vmovdqa ymm12, ymm12
vmovdqa ymm3, ymm3
vpblendd ymm5, ymm3, ymm12, 36 # ymm5 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7]
vpshuflw xmm1, xmm5, 170 # xmm1 = xmm5[2,2,2,2,4,5,6,7]
vpshufd xmm1, xmm1, 164 # xmm1 = xmm1[0,1,2,2]
vmovdqa xmm6, xmmword ptr [rip + .LCPI0_1] # xmm6 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
vextracti128 xmm4, ymm5, 1
vpshufb xmm7, xmm4, xmm6
vpblendw xmm1, xmm7, xmm1, 72 # xmm1 = xmm7[0,1,2],xmm1[3],xmm7[4,5],xmm1[6],xmm7[7]
vinserti128 ymm1, ymm0, xmm1, 1
vpblendw ymm1, ymm0, ymm1, 248 # ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
vpblendd ymm0, ymm0, ymm1, 240 # ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
vmovdqa ymm0, ymm0
vmovdqa ymm1, ymm1
vpblendd ymm7, ymm1, ymm0, 36 # ymm7 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
vextracti128 xmm3, ymm7, 1
vpshufb xmm6, xmm3, xmm6
vpshuflw xmm13, xmm7, 170 # xmm13 = xmm7[2,2,2,2,4,5,6,7]
vpshufd xmm1, xmm13, 164 # xmm1 = xmm13[0,1,2,2]
vpblendw xmm8, xmm6, xmm1, 72 # xmm8 = xmm6[0,1,2],xmm1[3],xmm6[4,5],xmm1[6],xmm6[7]
vpshufd ymm6, ymm15, 104 # ymm6 = ymm15[0,2,2,1,4,6,6,5]
vpshufhw ymm6, ymm6, 170 # ymm6 = ymm6[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
vpshufd ymm13, ymm13, 236 # 32-byte Folded Reload # ymm13 = mem[0,3,2,3,4,7,6,7]
vpshuflw ymm11, ymm13, 232 # ymm11 = ymm13[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
vpshufd ymm11, ymm11, 164 # ymm11 = ymm11[0,1,2,2,4,5,6,6]
vpblendw ymm6, ymm11, ymm6, 132 # ymm6 = ymm11[0,1],ymm6[2],ymm11[3,4,5,6],ymm6[7],ymm11[8,9],ymm6[10],ymm11[11,12,13,14],ymm6[15]
vpblendd ymm11, ymm0, ymm0, 146 # 32-byte Folded Reload # ymm11 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
vpshufb xmm10, xmm11, xmm10
vextracti128 xmm0, ymm11, 1
vpshufd xmm14, xmm0, 200 # xmm14 = xmm0[0,2,0,3]
vpshufhw xmm1, xmm14, 232 # xmm1 = xmm14[0,1,2,3,4,6,6,7]
vpblendw xmm1, xmm10, xmm1, 52 # xmm1 = xmm10[0,1],xmm1[2],xmm10[3],xmm1[4,5],xmm10[6,7]
vpblendd ymm1, ymm1, ymm6, 56 # ymm1 = ymm1[0,1,2],ymm6[3,4,5],ymm1[6,7]
vinserti128 ymm6, ymm0, xmm8, 1
vpblendw ymm6, ymm1, ymm6, 248 # ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
vpblendd ymm1, ymm1, ymm6, 240 # ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
vpshufd ymm1, ymm10, 102 # ymm1 = ymm10[2,1,2,1,6,5,6,5]
vpshuflw ymm1, ymm1, 85 # ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
vpshuflw ymm8, ymm8, 237 # 32-byte Folded Reload # ymm8 = mem[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
vpshufhw ymm8, ymm8, 85 # ymm8 = ymm8[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
vpblendw ymm1, ymm8, ymm1, 132 # ymm1 = ymm8[0,1],ymm1[2],ymm8[3,4,5,6],ymm1[7],ymm8[8,9],ymm1[10],ymm8[11,12,13,14],ymm1[15]
vmovdqa xmm8, xmmword ptr [rip + .LCPI0_2] # xmm8 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
vpshufb xmm6, xmm9, xmm8
vmovdqa xmm9, xmmword ptr [rip + .LCPI0_3] # xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
vpshufb xmm2, xmm2, xmm9
vpblendw xmm2, xmm2, xmm6, 52 # xmm2 = xmm2[0,1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6,7]
vpblendd ymm1, ymm2, ymm1, 56 # ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
vmovdqa xmm2, xmmword ptr [rip + .LCPI0_4] # xmm2 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
vpshufb xmm4, xmm4, xmm2
vpshufhw xmm5, xmm5, 85 # xmm5 = xmm5[0,1,2,3,5,5,5,5]
vpblendw xmm4, xmm4, xmm5, 72 # xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
vinserti128 ymm4, ymm0, xmm4, 1
vpblendw ymm4, ymm1, ymm4, 248 # ymm4 = ymm1[0,1,2],ymm4[3,4,5,6,7],ymm1[8,9,10],ymm4[11,12,13,14,15]
vpblendd ymm1, ymm1, ymm4, 240 # ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
vpshufb xmm2, xmm3, xmm2
vpshufhw xmm3, xmm7, 85 # xmm3 = xmm7[0,1,2,3,5,5,5,5]
vpblendw xmm2, xmm2, xmm3, 72 # xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
vpshufb xmm0, xmm0, xmm8
vpshufb xmm3, xmm11, xmm9
vpblendw xmm0, xmm3, xmm0, 52 # xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
vpshufd ymm3, ymm15, 102 # ymm3 = ymm15[2,1,2,1,6,5,6,5]
vmovdqa ymm1, ymm15
vpshuflw ymm3, ymm3, 85 # ymm3 = ymm3[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
vpshuflw ymm4, ymm13, 237 # ymm4 = ymm13[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
vpshufhw ymm4, ymm4, 85 # ymm4 = ymm4[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
vpblendw ymm3, ymm4, ymm3, 132 # ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4,5,6],ymm3[7],ymm4[8,9],ymm3[10],ymm4[11,12,13,14],ymm3[15]
vpblendd ymm0, ymm0, ymm3, 56 # ymm0 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
vinserti128 ymm2, ymm0, xmm2, 1
vpblendw ymm2, ymm0, ymm2, 248 # ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
vpblendd ymm0, ymm0, ymm2, 240 # ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
vpshufd ymm0, ymm0, 197 # 32-byte Folded Reload # ymm0 = mem[1,1,0,3,5,5,4,7]
vpshufhw ymm0, ymm0, 224 # ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
vpshufd ymm15, ymm10, 236 # ymm15 = ymm10[0,3,2,3,4,7,6,7]
vpshuflw ymm4, ymm15, 224 # ymm4 = ymm15[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
vpshufhw ymm4, ymm4, 0 # ymm4 = ymm4[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
vpblendw ymm0, ymm4, ymm0, 97 # ymm0 = ymm0[0],ymm4[1,2,3,4],ymm0[5,6],ymm4[7],ymm0[8],ymm4[9,10,11,12],ymm0[13,14],ymm4[15]
vpblendd ymm4, ymm12, ymm12, 36 # 32-byte Folded Reload # ymm4 = ymm12[0,1],mem[2],ymm12[3,4],mem[5],ymm12[6,7]
vextracti128 xmm5, ymm4, 1
vpshufd xmm12, xmm5, 100 # xmm12 = xmm5[0,1,2,1]
vpshufd xmm14, xmm4, 198 # xmm14 = xmm4[2,1,0,3]
vpshuflw xmm5, xmm14, 0 # xmm5 = xmm14[0,0,0,0,4,5,6,7]
vpshufhw xmm5, xmm5, 224 # xmm5 = xmm5[0,1,2,3,4,4,6,7]
vpshufhw xmm7, xmm12, 38 # xmm7 = xmm12[0,1,2,3,6,5,6,4]
vpblendw xmm5, xmm5, xmm7, 144 # xmm5 = xmm5[0,1,2,3],xmm7[4],xmm5[5,6],xmm7[7]
vpblendd ymm7, ymm2, ymm2, 36 # 32-byte Folded Reload # ymm7 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
vpshufd xmm11, xmm7, 230 # xmm11 = xmm7[2,1,2,3]
vextracti128 xmm7, ymm7, 1
vpshufd xmm7, xmm7, 108 # xmm7 = xmm7[0,3,2,1]
vpshuflw xmm2, xmm7, 224 # xmm2 = xmm7[0,0,2,3,4,5,6,7]
vpshufhw xmm2, xmm2, 170 # xmm2 = xmm2[0,1,2,3,6,6,6,6]
vpshuflw xmm3, xmm11, 38 # xmm3 = xmm11[2,1,2,0,4,5,6,7]
vpblendw xmm2, xmm2, xmm3, 9 # xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
vinserti128 ymm3, ymm0, xmm5, 1
vpblendw ymm3, ymm3, ymm0, 7 # ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
vpblendw xmm0, xmm0, xmm2, 31 # xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
vpblendd ymm0, ymm0, ymm3, 240 # ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
vpblendd ymm2, ymm0, ymm0, 219 # 32-byte Folded Reload # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
vextracti128 xmm3, ymm2, 1
vpshufd xmm3, xmm3, 100 # xmm3 = xmm3[0,1,2,1]
vpshufd xmm2, xmm2, 198 # xmm2 = xmm2[2,1,0,3]
vpshuflw xmm5, xmm2, 0 # xmm5 = xmm2[0,0,0,0,4,5,6,7]
vpshufhw xmm5, xmm5, 224 # xmm5 = xmm5[0,1,2,3,4,4,6,7]
vpshufhw xmm0, xmm3, 38 # xmm0 = xmm3[0,1,2,3,6,5,6,4]
vpblendw xmm9, xmm5, xmm0, 144 # xmm9 = xmm5[0,1,2,3],xmm0[4],xmm5[5,6],xmm0[7]
vpshufd ymm5, ymm8, 197 # ymm5 = ymm8[1,1,0,3,5,5,4,7]
vpshufhw ymm5, ymm5, 224 # ymm5 = ymm5[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
vpshufd ymm13, ymm1, 236 # ymm13 = ymm1[0,3,2,3,4,7,6,7]
vpshuflw ymm10, ymm13, 224 # ymm10 = ymm13[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
vpshufhw ymm10, ymm10, 0 # ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
vpblendw ymm5, ymm10, ymm5, 97 # ymm5 = ymm5[0],ymm10[1,2,3,4],ymm5[5,6],ymm10[7],ymm5[8],ymm10[9,10,11,12],ymm5[13,14],ymm10[15]
vpblendd ymm10, ymm0, ymm0, 36 # 32-byte Folded Reload # ymm10 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
vpshufd xmm0, xmm10, 230 # xmm0 = xmm10[2,1,2,3]
vextracti128 xmm6, ymm10, 1
vpshufd xmm6, xmm6, 108 # xmm6 = xmm6[0,3,2,1]
vpshuflw xmm4, xmm6, 224 # xmm4 = xmm6[0,0,2,3,4,5,6,7]
vpshufhw xmm4, xmm4, 170 # xmm4 = xmm4[0,1,2,3,6,6,6,6]
vpshuflw xmm1, xmm0, 38 # xmm1 = xmm0[2,1,2,0,4,5,6,7]
vpblendw xmm1, xmm4, xmm1, 9 # xmm1 = xmm1[0],xmm4[1,2],xmm1[3],xmm4[4,5,6,7]
vinserti128 ymm4, ymm0, xmm9, 1
vpblendw ymm4, ymm4, ymm5, 7 # ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7],ymm5[8,9,10],ymm4[11,12,13,14,15]
vpblendw xmm1, xmm5, xmm1, 31 # xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
vpblendd ymm1, ymm1, ymm4, 240 # ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
vmovdqa ymm10, ymmword ptr [rip + .LCPI0_5] # ymm10 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
vpshufb ymm4, ymm1, ymm10
vpshuflw ymm9, ymm15, 244 # ymm9 = ymm15[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
vpshufhw ymm9, ymm9, 85 # ymm9 = ymm9[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
vpblendw ymm4, ymm9, ymm4, 97 # ymm4 = ymm4[0],ymm9[1,2,3,4],ymm4[5,6],ymm9[7],ymm4[8],ymm9[9,10,11,12],ymm4[13,14],ymm9[15]
vpshufhw xmm5, xmm12, 103 # xmm5 = xmm12[0,1,2,3,7,5,6,5]
vpshuflw xmm1, xmm14, 85 # xmm1 = xmm14[1,1,1,1,4,5,6,7]
vpshufhw xmm1, xmm1, 244 # xmm1 = xmm1[0,1,2,3,4,5,7,7]
vpblendw xmm1, xmm1, xmm5, 144 # xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5,6],xmm5[7]
vpshuflw xmm5, xmm11, 103 # xmm5 = xmm11[3,1,2,1,4,5,6,7]
vpshuflw xmm7, xmm7, 244 # xmm7 = xmm7[0,1,3,3,4,5,6,7]
vpshufhw xmm7, xmm7, 255 # xmm7 = xmm7[0,1,2,3,7,7,7,7]
vpblendw xmm5, xmm7, xmm5, 9 # xmm5 = xmm5[0],xmm7[1,2],xmm5[3],xmm7[4,5,6,7]
vinserti128 ymm1, ymm0, xmm1, 1
vpblendw ymm1, ymm1, ymm4, 7 # ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7],ymm4[8,9,10],ymm1[11,12,13,14,15]
vpblendw xmm4, xmm4, xmm5, 31 # xmm4 = xmm5[0,1,2,3,4],xmm4[5,6,7]
vpblendd ymm9, ymm4, ymm1, 240 # ymm9 = ymm4[0,1,2,3],ymm1[4,5,6,7]
vpshufhw xmm3, xmm3, 103 # xmm3 = xmm3[0,1,2,3,7,5,6,5]
vpshuflw xmm2, xmm2, 85 # xmm2 = xmm2[1,1,1,1,4,5,6,7]
vpshufhw xmm2, xmm2, 244 # xmm2 = xmm2[0,1,2,3,4,5,7,7]
vpblendw xmm2, xmm2, xmm3, 144 # xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
vpshufb ymm3, ymm8, ymm10
vpshuflw ymm4, ymm13, 244 # ymm4 = ymm13[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
vpshufhw ymm4, ymm4, 85 # ymm4 = ymm4[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
vpblendw ymm3, ymm4, ymm3, 97 # ymm3 = ymm3[0],ymm4[1,2,3,4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9,10,11,12],ymm3[13,14],ymm4[15]
vpshuflw xmm0, xmm0, 103 # xmm0 = xmm0[3,1,2,1,4,5,6,7]
vpshuflw xmm4, xmm6, 244 # xmm4 = xmm6[0,1,3,3,4,5,6,7]
vpshufhw xmm4, xmm4, 255 # xmm4 = xmm4[0,1,2,3,7,7,7,7]
vpblendw xmm0, xmm4, xmm0, 9 # xmm0 = xmm0[0],xmm4[1,2],xmm0[3],xmm4[4,5,6,7]
vinserti128 ymm2, ymm0, xmm2, 1
vpblendw ymm2, ymm2, ymm3, 7 # ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
vpblendw xmm0, xmm3, xmm0, 31 # xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
vpblendd ymm0, ymm0, ymm2, 240 # ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
vpblendd ymm2, ymm1, ymm1, 146 # 32-byte Folded Reload # ymm2 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
vextracti128 xmm3, ymm2, 1
vpshufd xmm2, xmm2, 108 # xmm2 = xmm2[0,3,2,1]
vpshuflw xmm4, xmm2, 132 # xmm4 = xmm2[0,1,0,2,4,5,6,7]
vpshufhw xmm4, xmm4, 170 # xmm4 = xmm4[0,1,2,3,6,6,6,6]
vmovdqa xmm5, xmmword ptr [rip + .LCPI0_8] # xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
vpshufb xmm6, xmm3, xmm5
vpblendw xmm4, xmm4, xmm6, 208 # xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
vinserti128 ymm4, ymm0, xmm4, 1
vpblendd ymm4, ymm4, ymm4, 31 # 32-byte Folded Reload # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
vpblendd ymm6, ymm1, ymm1, 109 # 32-byte Folded Reload # ymm6 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
vextracti128 xmm7, ymm6, 1
vpshufb xmm5, xmm7, xmm5
vpshufd xmm6, xmm6, 108 # xmm6 = xmm6[0,3,2,1]
vpshuflw xmm1, xmm6, 132 # xmm1 = xmm6[0,1,0,2,4,5,6,7]
vpshufhw xmm1, xmm1, 170 # xmm1 = xmm1[0,1,2,3,6,6,6,6]
vpblendw xmm1, xmm1, xmm5, 208 # xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5],xmm5[6,7]
vinserti128 ymm1, ymm0, xmm1, 1
vpblendd ymm1, ymm1, ymm1, 31 # 32-byte Folded Reload # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
vmovdqa xmm5, xmmword ptr [rip + .LCPI0_10] # xmm5 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
vpshufb xmm3, xmm3, xmm5
vpshuflw xmm2, xmm2, 212 # xmm2 = xmm2[0,1,1,3,4,5,6,7]
vpshufd xmm2, xmm2, 244 # xmm2 = xmm2[0,1,3,3]
vpblendw xmm2, xmm2, xmm3, 208 # xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
vinserti128 ymm2, ymm0, xmm2, 1
vpblendd ymm2, ymm2, ymm2, 31 # 32-byte Folded Reload # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
vpshufb xmm3, xmm7, xmm5
vpshuflw xmm5, xmm6, 212 # xmm5 = xmm6[0,1,1,3,4,5,6,7]
vpshufd xmm5, xmm5, 244 # xmm5 = xmm5[0,1,3,3]
vpblendw xmm3, xmm5, xmm3, 208 # xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
vinserti128 ymm3, ymm0, xmm3, 1
vpblendd ymm3, ymm3, ymm3, 31 # 32-byte Folded Reload # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
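The pane above feeds the AVX2 shuffle sequence generated for the marked region into a static performance analyzer; this pane's compiler dropdown offers llvm-mca (trunk and assertions-trunk builds) and OSACA 0.6.1. Because llvm-mca recognizes the # LLVM-MCA-BEGIN / # LLVM-MCA-END comments as region markers, the same analysis can be reproduced offline. A plausible invocation, assuming an AVX2 target such as -mcpu=haswell and illustrative file names (the exact target used in this session is not shown in the capture):

llc -O3 -mcpu=haswell vf32.ll -o vf32.s
llvm-mca -mcpu=haswell vf32.s

llvm-mca then restricts its throughput and resource-pressure estimates to the instructions between the two markers rather than the whole file.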