c++ source #6
Source code
#include <iostream> #include "absl/debugging/stacktrace.h" #include "absl/strings/string_view.h" #include "subspace.h" void PrintStack(absl::string_view caller) { void* result[10]; int sizes[10]; int depth = absl::GetStackFrames(result, sizes, 10, 0); std::cout << caller << " "; for (int i = 0; i < depth; ++i) { std::cout << sizes[i] << " "; } std::cout << '\n'; } __attribute__((noinline)) void Main7() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even); PrintStack("Main7"); } __attribute__((noinline)) void Main6() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even); PrintStack("Main6"); } __attribute__((noinline)) void Main5() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even) // .filter(even) // .filter(even) // .filter(even); PrintStack("Main5"); } __attribute__((noinline)) void Main4() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even) // .filter(even) // .filter(even); PrintStack("Main4"); } __attribute__((noinline)) void Main3() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even) // .filter(even); PrintStack("Main3"); } __attribute__((noinline)) void Main2() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter() .filter(even) // .filter(even); PrintStack("Main2"); } __attribute__((noinline)) void Main1() { auto even = [](const int& i) { return i % 2 == 0; }; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); auto result = r0.iter().filter(even); PrintStack("Main1"); } __attribute__((noinline)) void Main0() { auto even = [](const int& i) { return i % 2 == 0; }; (void)even; auto r0 = sus::Array<int, 11>::with_values(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); PrintStack("Main0"); } int main(int argc, char* argv[]) { Main0(); Main1(); Main2(); Main3(); Main4(); Main5(); Main6(); Main7(); return 0; }
cmake source #1
Source code
project(shapes)
cmake_minimum_required(VERSION 3.5)

add_executable(the_executable main.cpp)
target_link_libraries(the_executable
                      absl_stacktrace
                      absl_debugging_internal
                      absl_raw_logging_internal)

set(CMAKE_CXX_FLAGS_DEBUG "")
set(CMAKE_CXX_FLAGS_RELEASE "")
c++ source #4
Source code
#include <concepts> #include <stddef.h> #include <stdint.h> #include <stdlib.h> #include <bit> #include <math.h> #include <compare> #include <string.h> #include <utility> namespace sus::mem { template <class T> constexpr T&& forward(std::remove_reference_t<T>& t) noexcept { return static_cast<T&&>(t); } template <class T> constexpr T&& forward(std::remove_reference_t<T>&& t) noexcept { static_assert(!std::is_lvalue_reference<T>::value, "Can not forward an rvalue as an lvalue."); return static_cast<T&&>(t); } } // namespace sus::mem namespace sus { using ::sus::mem::forward; } namespace sus::option { template <class T> class Option; } #if _MSC_VER #define sus_if_msvc(x) x #else #define sus_if_msvc(x) #endif #if _MSC_VER #define sus_if_msvc_else(x, y) x #else #define sus_if_msvc_else(x, y) y #endif /// Replace the `inline` keyword on a function declaration with /// `sus_always_inline` to force the compiler to inline it regardless of its /// heuristics. #define sus_always_inline \ sus_if_msvc_else(__forceinline, inline __attribute__((__always_inline__))) namespace sus::mem { /// Verify that an object of type `T`, or referred to by `T` if it's a /// reference, is non-const. template <class T> concept NonConstObject = (!std::is_const_v<std::remove_reference_t<T>>); /// Verify that `T` can be moved with `sus::move()` to construct another `T`. /// /// This is similar to `std::is_move_constructible`, however it requires that /// `T` is non-const. Otherwise, a copy would occur and `sus::move()` will fail /// to compile. template <class T> concept Moveable = (NonConstObject<T> && std::is_move_constructible_v<T>); /// Verify that `T` can be moved with `sus::move()` to assign to another `T`. /// /// This is similar to `std::is_move_assignable`, however it requires that /// `T` is non-const. Otherwise, a copy would occur and `sus::move()` will fail /// to compile. template <class T> concept MoveableForAssign = (NonConstObject<T> && std::is_move_assignable_v<T>); /// Cast `t` to an r-value reference so that it can be used to construct or be /// assigned to another `T`. /// /// `move()` requires that `t` can be moved from, so it requires that `t` is /// non-const. /// /// The `move()` call itself does nothing to `t`, as it is just a cast, similar /// to `std::move()`. It enables an lvalue object to be used as an rvalue. // // TODO: Should this be `as_rvalue()`? Kinda technical. `as_...something...()`? template <NonConstObject T> [[nodiscard]] sus_always_inline constexpr std::remove_reference_t<T>&& move(T&& t) noexcept { return static_cast<typename std::remove_reference_t<T>&&>(t); } } // namespace sus::mem namespace sus { using ::sus::mem::move; } #if !defined(__has_builtin) #define __has_builtin(X) false #endif #if !defined(__has_extension) #define __has_extension(X) false #endif #if !defined(__has_feature) #define __has_feature(X) false #endif namespace sus::marker { class UnsafeFnMarker {}; constexpr inline UnsafeFnMarker unsafe_fn; } // namespace sus::marker // TODO: Provide a way to opt in/out of these in the toplevel namespace. using sus::marker::unsafe_fn; namespace sus::mem { namespace __private { template <class T> struct relocatable_tag final { static constexpr bool value(...) { return false; } static constexpr bool value(int) requires requires { requires(std::same_as<decltype(T::SusUnsafeTrivialRelocate), const bool>); } { return T::SusUnsafeTrivialRelocate; }; }; // Tests if the type T can be relocated with memcpy(). 
Checking for trivially // movable and destructible is not sufficient - this also honors the // [[trivial_abi]] clang attribute, as types annotated with the attribute are // now considered "trivially relocatable" in https://reviews.llvm.org/D114732. // // TODO: @ssbr has pointed out that this must also verify that the type has no // padding at the end which can be used by an outer type. If so, either // - You can't memcpy it, or // - You must memcpy without including the padding bytes. // // If type `T` has padding at its end, such as: // ``` // class T { i64 a; i32 b; }; // ``` // // Then there are two ways for another type to place a field inside the padding // adjacent to `b` and inside area allocated for `sizeof(T)`: // // 1. A subclass of a non-POD type can insert its fields into the padding of the // base class. // // So a subclass of `T` may have its first field inside the padding adjacent to // `b`: // ``` // class S : T { i32 c; }; // ``` // In this example, `sizeof(S) == sizeof(T)` because `c` sits inside the // trailing padding of `T`. // // 2. A class with a `[[no_unique_address]]` field may insert other fields below // it into the padding of the `[[no_unique_address]]` field. // // So a class that contains `T` as a field can insert another field into `T`: // ``` // class S { // [[no_unique_address]] T t; // i32 c; // }; // ``` // In this example, `sizeof(S) == sizeof(T)` because `c` sits inside the // trailing padding of `T`. // // From @ssbr: // // So the dsizeof(T) algorithm [to determine how much to memcpy safely] is // something like: // // - A: find out how many bytes fit into the padding via inheritance (`struct S // : T { bytes }` for all `bytes` until `sizeof(T) != sizeof(S)`). // - B: find out how many bytes fit into the padding via no_unique_address // (struct S { [[no_unique_address]] T x; bytes } for all `bytes` until // `sizeof(T) != sizeof(S)`). // // ``` // return sizeof(T) - max(A, B) // ``` // // And I think on every known platform, A == B. It might even be guaranteed by // the standard, but I wouldn't know how to check // // clang-format off template <class... T> struct relocate_one_by_memcpy_helper final : public std::integral_constant< bool, (... && (relocatable_tag<T>::value(0) #if __has_extension(trivially_relocatable) || __is_trivially_relocatable(T) #else || (std::is_trivially_move_constructible_v<T> && std::is_trivially_destructible_v<T>) #endif ) ) > {}; // clang-format on // Tests if an array of type T[] can be relocated with memcpy(). Checking for // trivially movable and destructible is not sufficient - this also honors the // [[trivial_abi]] clang attribute, as types annotated with the attribute are // now considered "trivially relocatable" in https://reviews.llvm.org/D114732. // // Tests against `std::remove_all_extents_t<T>` so that the same answer is // returned for `T` or `T[]` or `T[][][]` etc. // // Volatile types are excluded, since if we have a range of volatile Foo, then // the user is probably expecting us to follow the abstract machine and copy the // Foo objects one by one, instead of byte-by-byte (possible tearing). 
See: // https://reviews.llvm.org/D61761?id=198907#inline-548830 // // clang-format off template <class T> struct relocate_array_by_memcpy_helper final : public std::integral_constant< bool, (relocatable_tag<T>::value(0) #if __has_extension(trivially_relocatable) || __is_trivially_relocatable(std::remove_all_extents_t<T>) #else || (std::is_trivially_move_constructible_v<std::remove_all_extents_t<T>> && std::is_trivially_destructible_v<std::remove_all_extents_t<T>>) #endif ) && !std::is_volatile_v<std::remove_all_extents_t<T>> > {}; // clang-format on } // namespace __private template <class T> concept relocate_array_by_memcpy = __private::relocate_array_by_memcpy_helper<T>::value; template <class... T> concept relocate_one_by_memcpy = __private::relocate_one_by_memcpy_helper<T...>::value; } // namespace sus::mem namespace sus::mem { namespace __private { template <class T, bool HasField> struct never_value_field_helper; template <class T> struct never_value_field_helper<T, false> { static constexpr bool has_field = false; using OverlayType = char; static constexpr bool is_constructed(const OverlayType&) noexcept { return false; } static constexpr void set_never_value(OverlayType&) noexcept {} }; template <class T> struct never_value_field_helper<T, true> { static constexpr bool has_field = true; using OverlayType = T::SusUnsafeNeverValueOverlay::type; static sus_always_inline constexpr bool is_constructed( const OverlayType& t) noexcept { return t.SusUnsafeNeverValueIsConstructed(); } static sus_always_inline constexpr void set_never_value( OverlayType& t) noexcept { t.SusUnsafeNeverValueSetNeverValue(); } }; template <class NeverType, NeverType never_value, class FieldType, size_t N> struct SusUnsafeNeverValueOverlayImpl; template <class NeverType, NeverType never_value, class FieldType, size_t N> struct SusUnsafeNeverValueOverlayImpl { char padding[N]; FieldType never_value_field; constexpr inline void SusUnsafeNeverValueSetNeverValue() noexcept { never_value_field = never_value; } constexpr inline bool SusUnsafeNeverValueIsConstructed() const noexcept { return never_value_field != never_value; } }; template <class NeverType, NeverType never_value, class FieldType> struct SusUnsafeNeverValueOverlayImpl<NeverType, never_value, FieldType, 0> { FieldType never_value_field; constexpr inline void SusUnsafeNeverValueSetNeverValue() noexcept { never_value_field = never_value; } constexpr inline bool SusUnsafeNeverValueIsConstructed() const noexcept { return never_value_field != never_value; } }; } // namespace __private /// A trait to inspect if a type `T` has a field with a never-value. For such a /// type, it is possible to tell if the type is constructed in a memory /// location, by storing the never-value through `set_never_value()` in the /// memory location before it is constructed and/or after it is destroyed. /// /// This allows a flag to check for a class being constructed without an /// additional boolean flag. template <class T> struct never_value_field { /// Whether the type `T` has a never-value field. static constexpr bool has_field = requires { T::SusUnsafeNeverValueOverlay::exists; }; /// A type with a common initial sequence with `T` up to and including the /// never-value field, so that it can be used to read and write the /// never-value field in a union (though reading an inactive union field is /// invalid in a constant expression in C++20). 
using OverlayType = typename __private::never_value_field_helper<T, has_field>::OverlayType; /// Returns whether there is a type `T` constructed at the memory location /// `t`, where the OverlayType `t` has the same address as a type `T` in a /// union. /// /// # Safety /// This will only produce a correct answer if the memory was previous set to /// the never-value through `set_never_value()` before construction of the /// type `T`. static constexpr sus_always_inline bool is_constructed( ::sus::marker::UnsafeFnMarker, const OverlayType& t) noexcept requires(has_field) { return __private::never_value_field_helper<T, has_field>::is_constructed(t); } /// Sets a field in the memory location `t` to a value that is never set /// during the lifetime of `T`, where the OverlayType `t` has the same address /// as a type `T` in a union. /// /// # Safety /// This must never be called while there is an object of type `T` constructed /// at the given memory location. It must be called only before a constructor /// is run, or after a destructor is run. static constexpr sus_always_inline void set_never_value( ::sus::marker::UnsafeFnMarker, OverlayType& t) noexcept requires(has_field) { return __private::never_value_field_helper<T, has_field>::set_never_value( t); } }; } // namespace sus::mem /// Mark a class field as never being a specific value, often a zero, after a /// constructor has run and bef ore the destructor has completed. This allows /// querying if a class is constructed in a memory location, since the class is /// constructed iff the value of the field is not the never-value. #define sus_class_never_value_field(unsafe_fn, T, field_name, never_value) \ static_assert( \ std::same_as<decltype(unsafe_fn), const ::sus::marker::UnsafeFnMarker>); \ static_assert( \ std::is_assignable_v<decltype(field_name)&, decltype(never_value)>, \ "The `never_value` must be able to be assigned to the named field."); \ template <class> \ friend struct ::sus::mem::never_value_field; \ template <class, bool> \ friend struct ::sus::mem::__private::never_value_field_helper; \ \ public: \ struct SusUnsafeNeverValueOverlay { \ using type = ::sus::mem::__private::SusUnsafeNeverValueOverlayImpl< \ decltype(never_value), never_value, decltype(field_name), \ offsetof(T, field_name)>; \ static constexpr bool exists = true; \ /* For the inclined, this is because otherwise using the Overlay type in \ the Option's internal union, after the destruction of the type T, would \ require placement new of the Overlay type which is not a constant \ expression. */ \ static_assert( \ std::is_trivially_constructible_v<type>, \ "The `never_value` field must be trivially constructible or " \ "else Option<T> couldn't be constexpr."); \ }; \ static_assert(true) #if defined(__clang__) /// An attribute to allow a class to be passed in registers. /// /// This should only be used when the class is also marked as unconditionally /// relocatable with `sus_class_trivial_relocatable()`. /// /// This also enables trivial relocation in libc++ if compiled with clang. #define sus_trivial_abi clang::trivial_abi #else /// An attribute to allow a class to be passed in registers. /// /// This should only be used when the class is also marked as unconditionally /// relocatable with `sus_class_trivial_relocatable()`. /// /// This also enables trivial relocation in libc++ if compiled with clang. #define sus_trivial_abi #endif /// Mark a class as trivially relocatable. 
/// /// To additionally allow the class to be passed in registers, the class can be /// marked with the `sus_trivial_abi` attribute. #define sus_class_trivial_relocatable(unsafe_fn) \ static_assert(std::is_same_v<decltype(unsafe_fn), \ const ::sus::marker::UnsafeFnMarker>); \ template <class SusOuterClassTypeForTriviallyReloc> \ friend struct ::sus::mem::__private::relocatable_tag; \ static constexpr bool SusUnsafeTrivialRelocate = true /// Mark a class as trivially relocatable based on a compile-time condition. #define sus_class_trivial_relocatable_value(unsafe_fn, is_trivially_reloc) \ static_assert(std::is_same_v<decltype(unsafe_fn), \ const ::sus::marker::UnsafeFnMarker>); \ static_assert( \ std::is_same_v<std::remove_cv_t<decltype(is_trivially_reloc)>, bool>); \ template <class SusOuterClassTypeForTriviallyReloc> \ friend struct ::sus::mem::__private::relocatable_tag; \ static constexpr bool SusUnsafeTrivialRelocate = is_trivially_reloc /// Mark a class as trivially relocatable if all of the types passed as /// arguments are also marked as such. #define sus_class_maybe_trivial_relocatable_types(unsafe_fn, ...) \ static_assert(std::is_same_v<decltype(unsafe_fn), \ const ::sus::marker::UnsafeFnMarker>); \ template <class SusOuterClassTypeForTriviallyReloc> \ friend struct ::sus::mem::__private::relocatable_tag; \ static constexpr bool SusUnsafeTrivialRelocate = \ ::sus::mem::relocate_one_by_memcpy<__VA_ARGS__> /// Mark a class as unconditionally trivially relocatable while also asserting /// that all of the types passed as arguments are also marked as such. /// /// To additionally allow the class to be passed in registers, the class can be /// marked with the `sus_trivial_abi` attribute. #define sus_class_assert_trivial_relocatable_types(unsafe_fn, ...) \ sus_class_maybe_trivial_relocatable_types(unsafe_fn, __VA_ARGS__); \ static_assert(SusUnsafeTrivialRelocate, \ "Type is not trivially " \ "relocatable"); namespace sus::iter::__private { struct IteratorEnd {}; } // namespace sus::iter::__private namespace sus::iter::__private { /// An adaptor for range-based for loops. template <class Iterator> class IteratorLoop final { using Item = typename std::remove_reference_t<Iterator>::Item; public: IteratorLoop(Iterator iter) noexcept : iter_(static_cast<Iterator&&>(iter)), item_(iter_.next()) {} inline bool operator==(const __private::IteratorEnd&) const noexcept { return item_.is_nome(); } inline bool operator!=(const __private::IteratorEnd&) const noexcept { return item_.is_some(); } inline void operator++() & noexcept { item_ = iter_.next(); } inline Item operator*() & noexcept { return item_.take().unwrap(); } private: /* TODO: NonNull<IteratorBase<Item>> */ Iterator iter_; ::sus::option::Option<Item> item_; }; // ADL helpers to call T::iter() in a range-based for loop, which will call // `begin(T)`. template <class T> constexpr auto begin(const T& t) noexcept { return IteratorLoop(t.iter()); } template <class T> constexpr auto end(const T&) noexcept { return ::sus::iter::__private::IteratorEnd(); } } // namespace sus::iter::__private // TODO: https://github.com/llvm/llvm-project/issues/56394 #if __clang_major__ <= 16 // TODO: Update when the bug is fixed. #define sus_clang_bug_56394(...) __VA_ARGS__ #define sus_clang_bug_56394_else(...) #else #define sus_clang_bug_56394(...) #define sus_clang_bug_56394_else(...) __VA_ARGS__ #endif // TODO: https://github.com/llvm/llvm-project/issues/58835 #if __clang_major__ <= 16 // TODO: Update when the bug is fixed. #define sus_clang_bug_58835(...) 
__VA_ARGS__ #define sus_clang_bug_58835_else(...) #else #define sus_clang_bug_58835(...) #define sus_clang_bug_58835_else(...) __VA_ARGS__ #endif // TODO: https://github.com/llvm/llvm-project/issues/54040 #if defined(__clang__) && !__has_feature(__cpp_aggregate_paren_init) #define sus_clang_bug_54040(...) __VA_ARGS__ #define sus_clang_bug_54040_else(...) #else #define sus_clang_bug_54040(...) #define sus_clang_bug_54040_else(...) __VA_ARGS__ #endif // TODO: https://github.com/llvm/llvm-project/issues/58836 #if __clang_major__ <= 16 // TODO: Update when the bug is fixed. #define sus_clang_bug_58836(...) __VA_ARGS__ #define sus_clang_bug_58836_else(...) #else #define sus_clang_bug_58836(...) #define sus_clang_bug_58836_else(...) __VA_ARGS__ #endif // TODO: https://github.com/llvm/llvm-project/issues/58837 #if __clang_major__ <= 16 // TODO: Update when the bug is fixed. #define sus_clang_bug_58837(...) __VA_ARGS__ #define sus_clang_bug_58837_else(...) #else #define sus_clang_bug_58837(...) #define sus_clang_bug_58837_else(...) __VA_ARGS__ #endif // TODO: https://github.com/llvm/llvm-project/issues/58859 #if __clang_major__ <= 16 // TODO: Update when the bug is fixed. #define sus_clang_bug_58859(...) __VA_ARGS__ #define sus_clang_bug_58859_else(...) #else #define sus_clang_bug_58859(...) #define sus_clang_bug_58859_else(...) __VA_ARGS__ #endif namespace sus::num::__private { template <unsigned int bytes = sizeof(void*)> struct ptr_type; template <> struct ptr_type<4> { using unsigned_type = uint32_t; using signed_type = int32_t; }; template <> struct ptr_type<8> { using unsigned_type = uint64_t; using signed_type = int64_t; }; } // namespace sus::num::__private /// Replace the `inline` keyword on a function declaration with /// `sus_always_inline` to force the compiler to inline it regardless of its /// heuristics. #define sus_always_inline \ sus_if_msvc_else(__forceinline, inline __attribute__((__always_inline__))) namespace sus { [[noreturn]] sus_always_inline void panic() { ::abort(); } [[noreturn]] void panic_with_message( /* TODO: string view type, or format + args */ const char& msg); } // namespace sus namespace sus::assertions { constexpr sus_always_inline void check(bool cond) { if (!cond) [[unlikely]] ::sus::panic(); } constexpr sus_always_inline void check_with_message( bool cond, /* TODO: string view type, or format + args */ const char& msg) { if (!cond) [[unlikely]] ::sus::panic_with_message(msg); } } // namespace sus::assertions // Promote check() and check_with_message() into the `sus` namespace. namespace sus { using ::sus::assertions::check; using ::sus::assertions::check_with_message; } namespace sus::assertions { constexpr sus_always_inline bool is_big_endian() { #if _MSC_VER #if _M_PPC return true; #else return false; #endif #elif defined(__BYTE_ORDER__) #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return true; #else return false; #endif #else #error "Compiler doesn't specify __BYTE_ORDER__." #endif } constexpr sus_always_inline bool is_little_endian() noexcept { return !is_big_endian(); } } // namespace sus::assertions // Based on https://doc.rust-lang.org/nightly/src/core/num/int_log10.rs.html namespace sus::num::__private::int_log10 { // 0 < val < 100_000 constexpr sus_always_inline uint32_t less_than_5(uint32_t val) { // Similar to u8, when adding one of these constants to val, // we get two possible bit patterns above the low 17 bits, // depending on whether val is below or above the threshold. 
constexpr uint32_t C1 = 0b011'00000000000000000 - 10; // 393206 constexpr uint32_t C2 = 0b100'00000000000000000 - 100; // 524188 constexpr uint32_t C3 = 0b111'00000000000000000 - 1000; // 916504 constexpr uint32_t C4 = 0b100'00000000000000000 - 10000; // 514288 // Value of top bits: // +c1 +c2 1&2 +c3 +c4 3&4 ^ // 0..=9 010 011 010 110 011 010 000 = 0 // 10..=99 011 011 011 110 011 010 001 = 1 // 100..=999 011 100 000 110 011 010 010 = 2 // 1000..=9999 011 100 000 111 011 011 011 = 3 // 10000..=99999 011 100 000 111 100 100 100 = 4 return (((val + C1) & (val + C2)) ^ ((val + C3) & (val + C4))) >> 17; } // 0 < val <= u8::MAX constexpr sus_always_inline uint32_t u8(uint8_t val) { return less_than_5(uint32_t{val}); } // 0 < val <= u16::MAX constexpr sus_always_inline uint32_t u16(uint16_t val) { return less_than_5(uint32_t{val}); } // 0 < val <= u32::MAX constexpr sus_always_inline uint32_t u32(uint32_t val) { auto log = uint32_t{0}; if (val >= 100'000) { val /= 100'000; log += 5; } return log + less_than_5(val); } // 0 < val <= u64::MAX constexpr sus_always_inline uint32_t u64(uint64_t val) { auto log = uint32_t{0}; if (val >= 10'000'000'000) { val /= 10'000'000'000; log += 10; } if (val >= 100'000) { val /= 100'000; log += 5; } return log + less_than_5(static_cast<uint32_t>(val)); } constexpr sus_always_inline uint32_t usize(uint32_t val) { return u32(val); } constexpr sus_always_inline uint32_t usize(uint64_t val) { return u64(val); } constexpr sus_always_inline uint32_t i8(int8_t val) { return u8(static_cast<uint8_t>(val)); } constexpr sus_always_inline uint32_t i16(int16_t val) { return u16(static_cast<uint16_t>(val)); } constexpr sus_always_inline uint32_t i32(int32_t val) { return u32(static_cast<uint32_t>(val)); } constexpr sus_always_inline uint32_t i64(int64_t val) { return u64(static_cast<uint64_t>(val)); } constexpr sus_always_inline uint32_t isize(int32_t val) { return usize(static_cast<uint32_t>(val)); } constexpr sus_always_inline uint32_t isize(int64_t val) { return usize(static_cast<uint64_t>(val)); } } // namespace sus::num::__private::int_log10 namespace sus { [[noreturn]] sus_always_inline void unreachable() { panic(); } [[noreturn]] sus_always_inline void unreachable_unchecked( ::sus::marker::UnsafeFnMarker) { #if __has_builtin(__builtin_unreachable) __builtin_unreachable(); #else __assume(false); #endif } } // namespace sus namespace sus::num { /// A classification of floating point numbers. /// /// This enum is used as the return type for `f32::classify()` and /// `f64::classify()`. See their documentation for more. 
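// # Example
// A small sketch (not from the library source) of what the
// `sus::num::__private::int_log10` helpers above compute: the floor of
// log10(val), i.e. one less than the number of decimal digits, for a non-zero
// input.
//
// ```
// namespace intlog = ::sus::num::__private::int_log10;
// static_assert(intlog::u32(1u) == 0u);        // 1 digit.
// static_assert(intlog::u32(9'999u) == 3u);    // 4 digits.
// static_assert(intlog::u32(100'000u) == 5u);  // 6 digits.
// static_assert(intlog::u64(uint64_t{10'000'000'000u}) == 10u);  // 11 digits.
// ```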
enum class FpCategory { Nan, Infinite, Zero, Subnormal, Normal }; } namespace sus::num::__private { template <class T> struct OverflowOut final { bool overflow; T value; }; template <class T> sus_always_inline constexpr uint32_t unchecked_sizeof() noexcept { static_assert(sizeof(T) <= 0xfffffff); return sizeof(T); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T>) sus_always_inline constexpr T unchecked_neg(T x) noexcept { return static_cast<T>(-x); } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T>) sus_always_inline constexpr T unchecked_not(T x) noexcept { return static_cast<T>(~x); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_add(T x, T y) noexcept { return static_cast<T>(x + y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_sub(T x, T y) noexcept { return static_cast<T>(x - y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_mul(T x, T y) noexcept { return static_cast<T>(x * y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_div(T x, T y) noexcept { return static_cast<T>(x / y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_rem(T x, T y) noexcept { return static_cast<T>(x % y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_and(T x, T y) noexcept { return static_cast<T>(x & y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_or(T x, T y) noexcept { return static_cast<T>(x | y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr T unchecked_xor(T x, T y) noexcept { return static_cast<T>(x ^ y); } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T>) sus_always_inline constexpr T unchecked_shl(T x, uint32_t y) noexcept { return static_cast<T>(x << y); } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T>) sus_always_inline constexpr T unchecked_shr(T x, uint32_t y) noexcept { return static_cast<T>(x >> y); } template <class T> requires(std::is_integral_v<T>) sus_always_inline constexpr uint32_t num_bits() noexcept { return unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}); } template <class T> requires(std::is_integral_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T high_bit() noexcept { if constexpr (sizeof(T) == 1) return static_cast<T>(0x80); else if constexpr (sizeof(T) == 2) return static_cast<T>(0x8000); else if constexpr (sizeof(T) == 4) return static_cast<T>(0x80000000); else return static_cast<T>(0x8000000000000000); } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) == 4) sus_always_inline constexpr uint32_t high_bit() noexcept { return uint32_t{0x80000000}; } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) == 8) sus_always_inline constexpr uint64_t high_bit() noexcept { return uint64_t{0x8000000000000000}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T>) sus_always_inline constexpr T max_value() noexcept { return unchecked_not(T{0}); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T max_value() noexcept { if constexpr (sizeof(T) == 1) return T{0x7f}; else if constexpr (sizeof(T) == 2) return T{0x7fff}; else if constexpr (sizeof(T) == 4) return T{0x7fffffff}; else return 
T{0x7fffffffffffffff}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T>) sus_always_inline constexpr T min_value() noexcept { return T{0}; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T min_value() noexcept { return -max_value<T>() - T{1}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T epsilon() noexcept { if constexpr (sizeof(T) == sizeof(float)) return 1.1920929E-7f; else return 2.2204460492503131E-16; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T max_value() noexcept { if constexpr (sizeof(T) == sizeof(float)) { return 3.40282346639e+38f; } else return 1.7976931348623157E+308; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T min_value() noexcept { if constexpr (sizeof(T) == sizeof(float)) return -3.40282346639e+38f; else return -1.7976931348623157E+308; } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) sus_always_inline constexpr uint32_t count_ones(T value) noexcept { #if _MSC_VER if (std::is_constant_evaluated()) { // Algorithm to count the number of bits in parallel, up to a 128 bit value. // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel value = value - ((value >> 1) & (~T{0} / T{3})); value = (value & (~T{0} / T{15} * T{3})) + ((value >> 2) & (~T{0} / T{15} * T{3})); value = (value + (value >> 4)) & (~T{0} / T{255} * T{15}); auto count = (value * (~T{0} / T{255})) >> (unchecked_sizeof<T>() - uint32_t{1}) * uint32_t{8}; return static_cast<uint32_t>(count); } else if constexpr (sizeof(value) <= 2) { return uint32_t{__popcnt16(uint16_t{value})}; } else if constexpr (sizeof(value) == 8) { return static_cast<uint32_t>(__popcnt64(uint64_t{value})); } else { return uint32_t{__popcnt(uint32_t{value})}; } #else if constexpr (sizeof(value) <= sizeof(unsigned int)) { using U = unsigned int; return static_cast<uint32_t>(__builtin_popcount(U{value})); } else if constexpr (sizeof(value) <= sizeof(unsigned long)) { using U = unsigned long; return static_cast<uint32_t>(__builtin_popcountl(U{value})); } else { using U = unsigned long long; return static_cast<uint32_t>(__builtin_popcountll(U{value})); } #endif } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) constexpr sus_always_inline uint32_t leading_zeros_nonzero(::sus::marker::UnsafeFnMarker, T value) noexcept { if (std::is_constant_evaluated()) { uint32_t count = 0; for (auto i = uint32_t{0}; i < unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}); ++i) { const bool zero = (value & high_bit<T>()) == 0; if (!zero) break; count += 1; value <<= 1; } return count; } #if _MSC_VER if constexpr (sizeof(value) == 8u) { #if 1 unsigned long index; _BitScanReverse64(&index, value); return static_cast<uint32_t>(63ul ^ index); #else // TODO: Enable this when target CPU is appropriate: // - AMD: Advanced Bit Manipulation (ABM) // - Intel: Haswell // TODO: On Arm ARMv5T architecture and later use `_arm_clz` return static_cast<uint32_t>(__lzcnt64(&count, int64_t{value})); #endif } else if constexpr (sizeof(value) == 4u) { #if 1 unsigned long index; _BitScanReverse(&index, uint32_t{value}); return static_cast<uint32_t>(31ul ^ index); #else // TODO: Enable this when target CPU is appropriate: // - AMD: Advanced Bit Manipulation (ABM) // - Intel: Haswell // TODO: On Arm ARMv5T architecture and later use `_arm_clz` return 
__lzcnt(&count, uint32_t{value}); #endif } else { static_assert(sizeof(value) <= 2u); #if 1 unsigned long index; _BitScanReverse(&index, uint32_t{value}); return static_cast<uint32_t>((31ul ^ index) - ((sizeof(unsigned int) - sizeof(value)) * 8u)); #else // TODO: Enable this when target CPU is appropriate: // - AMD: Advanced Bit Manipulation (ABM) // - Intel: Haswell // TODO: On Arm ARMv5T architecture and later use `_arm_clz` return static_cast<uint32_t>(__lzcnt16(&count, uint16_t{value}) - ((sizeof(unsigned int) - sizeof(value)) * 8u)); #endif } #else if constexpr (sizeof(value) <= sizeof(unsigned int)) { using U = unsigned int; return static_cast<uint32_t>(__builtin_clz(U{value}) - ((sizeof(unsigned int) - sizeof(value)) * 8u)); } else if constexpr (sizeof(value) <= sizeof(unsigned long)) { using U = unsigned long; return static_cast<uint32_t>(__builtin_clzl(U{value})); } else { using U = unsigned long long; return static_cast<uint32_t>(__builtin_clzll(U{value})); } #endif } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) constexpr sus_always_inline uint32_t leading_zeros(T value) noexcept { if (value == 0) return unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}); return leading_zeros_nonzero(unsafe_fn, value); } /** Counts the number of trailing zeros in a non-zero input. * * # Safety * This function produces Undefined Behaviour if passed a zero value. */ // TODO: Any way to make it constexpr? template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) constexpr sus_always_inline uint32_t trailing_zeros_nonzero(::sus::marker::UnsafeFnMarker, T value) noexcept { if (std::is_constant_evaluated()) { uint32_t count = 0; for (auto i = uint32_t{0}; i < unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}); ++i) { const bool zero = (value & 1) == 0; if (!zero) break; count += 1; value >>= 1; } return count; } #if _MSC_VER if constexpr (sizeof(value) == 8u) { unsigned long index; _BitScanForward64(&index, value); return static_cast<uint32_t>(index); } else if constexpr (sizeof(value) == 4u) { unsigned long index; _BitScanForward(&index, uint32_t{value}); return static_cast<uint32_t>(index); } else { static_assert(sizeof(value) <= 2u); unsigned long index; _BitScanForward(&index, uint32_t{value}); return static_cast<uint32_t>(index); } #else if constexpr (sizeof(value) <= sizeof(unsigned int)) { using U = unsigned int; return static_cast<uint32_t>(__builtin_ctz(U{value})); } else if constexpr (sizeof(value) <= sizeof(unsigned long)) { using U = unsigned long; return static_cast<uint32_t>(__builtin_ctzl(U{value})); } else { using U = unsigned long long; return static_cast<uint32_t>(__builtin_ctzll(U{value})); } #endif } // TODO: Any way to make it constexpr? 
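// # Example
// A short sketch (not from the library source) of what the bit-counting
// helpers above return, written as if inside this namespace; `unsafe_fn` is
// assumed to be the library's unsafe marker value in scope.
//
// ```
// static_assert(count_ones(uint8_t{0b1011'0001}) == 4u);
// static_assert(leading_zeros(uint16_t{0x00f0}) == 8u);
// static_assert(leading_zeros(uint32_t{0}) == 32u);  // Zero is allowed here.
// // The `_nonzero` variants skip the zero check; calling them with 0 is UB.
// static_assert(trailing_zeros_nonzero(unsafe_fn, uint32_t{0b1000}) == 3u);
// ```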
template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) constexpr sus_always_inline uint32_t trailing_zeros(T value) noexcept { if (value == 0) return static_cast<uint32_t>(sizeof(T) * 8u); return trailing_zeros_nonzero(unsafe_fn, value); } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T reverse_bits(T value) noexcept { #if __clang__ if constexpr (sizeof(T) == 1) { return __builtin_bitreverse8(value); } else if constexpr (sizeof(T) == 2) { return __builtin_bitreverse16(value); } else if constexpr (sizeof(T) == 4) { return __builtin_bitreverse32(value); } else { static_assert(sizeof(T) == 8); return __builtin_bitreverse64(value); } #else // Algorithm from Ken Raeburn: // http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel uint32_t bits = unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}); auto mask = unchecked_not(T(0)); while ((bits >>= 1) > 0) { mask ^= unchecked_shl(mask, bits); value = (unchecked_shr(value, bits) & mask) | (unchecked_shl(value, bits) & ~mask); } return value; #endif } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T>) sus_always_inline constexpr T rotate_left(T value, uint32_t n) noexcept { n %= sizeof(value) * 8; // A shift by the full bit width below would be UB, so return early. if (n == 0) return value; const auto rshift = unchecked_sub(unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}), n); return unchecked_shl(value, n) | unchecked_shr(value, rshift); } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T>) sus_always_inline constexpr T rotate_right(T value, uint32_t n) noexcept { n %= sizeof(value) * 8; // A shift by the full bit width below would be UB, so return early. if (n == 0) return value; const auto lshift = unchecked_sub(unchecked_mul(unchecked_sizeof<T>(), uint32_t{8}), n); return unchecked_shr(value, n) | unchecked_shl(value, lshift); } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T swap_bytes(T value) noexcept { if (std::is_constant_evaluated()) { if constexpr (sizeof(T) == 1) { return value; } else if constexpr (sizeof(T) == 2) { unsigned char a = (value >> 0) & 0xff; unsigned char b = (value >> 8) & 0xff; return (a << 8) | (b << 0); } else if constexpr (sizeof(T) == 4) { unsigned char a = (value >> 0) & 0xff; unsigned char b = (value >> 8) & 0xff; unsigned char c = (value >> 16) & 0xff; unsigned char d = (value >> 24) & 0xff; return (a << 24) | (b << 16) | (c << 8) | (d << 0); } else if constexpr (sizeof(T) == 8) { unsigned char a = (value >> 0) & 0xff; unsigned char b = (value >> 8) & 0xff; unsigned char c = (value >> 16) & 0xff; unsigned char d = (value >> 24) & 0xff; unsigned char e = (value >> 32) & 0xff; unsigned char f = (value >> 40) & 0xff; unsigned char g = (value >> 48) & 0xff; unsigned char h = (value >> 56) & 0xff; return (uint64_t{a} << 56) | (uint64_t{b} << 48) | (uint64_t{c} << 40) | (uint64_t{d} << 32) | (uint64_t{e} << 24) | (uint64_t{f} << 16) | (uint64_t{g} << 8) | (uint64_t{h} << 0); } } #if _MSC_VER if constexpr (sizeof(T) == 1) { return value; } else if constexpr (sizeof(T) == sizeof(unsigned short)) { using U = unsigned short; return _byteswap_ushort(U{value}); } else if constexpr (sizeof(T) == sizeof(unsigned long)) { using U = unsigned long; return _byteswap_ulong(U{value}); } else { static_assert(sizeof(T) == 8); return _byteswap_uint64(value); } #else if constexpr (sizeof(T) == 1) { return value; } else if constexpr (sizeof(T) == 2) { return __builtin_bswap16(uint16_t{value}); } else if constexpr (sizeof(T) == 4) { return __builtin_bswap32(value); } else { static_assert(sizeof(T) == 8); return __builtin_bswap64(value); } #endif } template
<class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr auto into_unsigned(T x) noexcept { if constexpr (sizeof(x) == 1) return static_cast<uint8_t>(x); else if constexpr (sizeof(x) == 2) return static_cast<uint16_t>(x); else if constexpr (sizeof(x) == 4) return static_cast<uint32_t>(x); else return static_cast<uint64_t>(x); } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 4) sus_always_inline constexpr auto into_widened(T x) noexcept { if constexpr (sizeof(x) == 1) return static_cast<uint16_t>(x); else if constexpr (sizeof(x) == 2) return static_cast<uint32_t>(x); else return static_cast<uint64_t>(x); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 4) sus_always_inline constexpr auto into_widened(T x) noexcept { if constexpr (sizeof(x) == 1) return static_cast<int16_t>(x); else if constexpr (sizeof(x) == 2) return static_cast<int32_t>(x); else return static_cast<int64_t>(x); } template <class T> requires(std::is_integral_v<T> && std::is_unsigned_v<T> && sizeof(T) <= 8) sus_always_inline constexpr auto into_signed(T x) noexcept { if constexpr (sizeof(x) == 1) return static_cast<int8_t>(x); else if constexpr (sizeof(x) == 2) return static_cast<int16_t>(x); else if constexpr (sizeof(x) == 4) return static_cast<int32_t>(x); else return static_cast<int64_t>(x); } template <class T> requires(sizeof(T) <= 8) sus_always_inline constexpr bool sign_bit(T x) noexcept { if constexpr (sizeof(x) == 1) return (x & (T(1) << 7)) != 0; else if constexpr (sizeof(x) == 2) return (x & (T(1) << 15)) != 0; else if constexpr (sizeof(x) == 4) return (x & (T(1) << 31)) != 0; else return (x & (T(1) << 63)) != 0; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr OverflowOut<T> add_with_overflow(T x, T y) noexcept { return OverflowOut sus_clang_bug_56394(<T>){ .overflow = x > max_value<T>() - y, .value = unchecked_add(x, y), }; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr OverflowOut<T> add_with_overflow(T x, T y) noexcept { const auto out = into_signed(unchecked_add(into_unsigned(x), into_unsigned(y))); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = y >= 0 != out >= x, .value = out, }; } template <class T, class U = decltype(to_signed(std::declval<T>()))> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8 && sizeof(T) == sizeof(U)) sus_always_inline constexpr OverflowOut<T> add_with_overflow_signed(T x, U y) noexcept { return OverflowOut sus_clang_bug_56394(<T>){ .overflow = (y >= 0 && into_unsigned(y) > max_value<T>() - x) || (y < 0 && into_unsigned(-y) > x), .value = unchecked_add(x, into_unsigned(y)), }; } template <class T, class U = decltype(to_unsigned(std::declval<T>()))> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8 && sizeof(T) == sizeof(U)) sus_always_inline constexpr OverflowOut<T> add_with_overflow_unsigned(T x, U y) noexcept { const auto out = into_signed(unchecked_add(into_unsigned(x), y)); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = static_cast<U>(max_value<T>()) - static_cast<U>(x) < y, .value = out, }; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr OverflowOut<T> sub_with_overflow(T x, T y) noexcept { return OverflowOut sus_clang_bug_56394(<T>){ .overflow = x < 
unchecked_add(min_value<T>(), y), .value = unchecked_sub(x, y), }; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr OverflowOut<T> sub_with_overflow(T x, T y) noexcept { const auto out = into_signed(unchecked_sub(into_unsigned(x), into_unsigned(y))); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = y >= 0 != out <= x, .value = out, }; } template <class T, class U = decltype(to_unsigned(std::declval<T>()))> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8 && sizeof(T) == sizeof(U)) sus_always_inline constexpr OverflowOut<T> sub_with_overflow_unsigned(T x, U y) noexcept { const auto out = into_signed(unchecked_sub(into_unsigned(x), y)); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = static_cast<U>(x) - static_cast<U>(min_value<T>()) < y, .value = out, }; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 4) sus_always_inline constexpr OverflowOut<T> mul_with_overflow(T x, T y) noexcept { // TODO: Can we use compiler intrinsics? auto out = unchecked_mul(into_widened(x), into_widened(y)); using Wide = decltype(out); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = out > Wide{max_value<T>()}, .value = static_cast<T>(out)}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) == 8) sus_always_inline constexpr OverflowOut<T> mul_with_overflow(T x, T y) noexcept { #if _MSC_VER if (std::is_constant_evaluated()) { const bool overflow = x > T{1} && y > T{1} && x > unchecked_div(max_value<T>(), y); return OverflowOut sus_clang_bug_56394(<T>){.overflow = overflow, .value = unchecked_mul(x, y)}; } else { // For MSVC, use _umul128, but what about constexpr?? If we can't do // it then make the whole function non-constexpr? uint64_t highbits; auto out = static_cast<T>(_umul128(x, y, &highbits)); return OverflowOut sus_clang_bug_56394(<T>){.overflow = highbits != 0, .value = out}; } #else auto out = __uint128_t{x} * __uint128_t{y}; return OverflowOut sus_clang_bug_56394(<T>){ .overflow = out > __uint128_t{max_value<T>()}, .value = static_cast<T>(out)}; #endif } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 4) sus_always_inline constexpr OverflowOut<T> mul_with_overflow(T x, T y) noexcept { // TODO: Can we use compiler intrinsics? auto out = into_widened(x) * into_widened(y); using Wide = decltype(out); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = out > Wide{max_value<T>()} || out < Wide{min_value<T>()}, .value = static_cast<T>(out)}; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) == 8) sus_always_inline constexpr OverflowOut<T> mul_with_overflow(T x, T y) noexcept { #if _MSC_VER if (std::is_constant_evaluated()) { if (x == T{0} || y == T{0}) return OverflowOut sus_clang_bug_56394(<T>){.overflow = false, .value = T{0}}; using U = decltype(into_unsigned(x)); const auto absx = x >= T{0} ? into_unsigned(x) : unchecked_add(into_unsigned(unchecked_add(x, T{1})), U{1}); const auto absy = y >= T{0} ? into_unsigned(y) : unchecked_add(into_unsigned(unchecked_add(y, T{1})), U{1}); const bool mul_negative = (x ^ y) < 0; const auto mul_max = unchecked_add(into_unsigned(max_value<T>()), U{mul_negative}); const bool overflow = absx > unchecked_div(mul_max, absy); const auto mul_val = unchecked_mul(absx, absy); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = overflow, .value = mul_negative ? 
unchecked_sub(unchecked_neg(static_cast<T>(mul_val - 1)), T{1}) : static_cast<T>(mul_val)}; } else { // For MSVC, use _mul128, but what about constexpr?? If we can't do // it then make the whole function non-constexpr? int64_t highbits; auto out = static_cast<T>(_mul128(x, y, &highbits)); return OverflowOut sus_clang_bug_56394(<T>){.overflow = highbits != 0, .value = out}; } #else auto out = __int128_t{x} * __int128_t{y}; return OverflowOut sus_clang_bug_56394(<T>){ .overflow = out > __int128_t{max_value<T>()} || out < __int128_t{min_value<T>()}, .value = static_cast<T>(out)}; #endif } template <class T> requires(std::is_integral_v<T> && sizeof(T) <= 8) sus_always_inline constexpr OverflowOut<T> pow_with_overflow(T base, uint32_t exp) noexcept { if (exp == 0) return OverflowOut sus_clang_bug_56394(<T>){.overflow = false, .value = T{1}}; auto acc = T{1}; bool overflow = false; while (exp > 1) { if (exp & 1) { auto r = mul_with_overflow(acc, base); overflow |= r.overflow; acc = r.value; } exp /= 2; auto r = mul_with_overflow(base, base); overflow |= r.overflow; base = r.value; } auto r = mul_with_overflow(acc, base); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = overflow || r.overflow, .value = r.value}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8)) sus_always_inline constexpr OverflowOut<T> shl_with_overflow(T x, uint32_t shift) noexcept { // Using `num_bits<T>() - 1` as a mask only works if num_bits<T>() is a power // of two, so we verify that sizeof(T) is a power of 2, which implies the // number of bits is as well (since each byte is 2^3 bits). const bool overflow = shift >= num_bits<T>(); if (overflow) [[unlikely]] shift = shift & (unchecked_sub(num_bits<T>(), uint32_t{1})); return OverflowOut sus_clang_bug_56394(<T>){.overflow = overflow, .value = unchecked_shl(x, shift)}; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8)) sus_always_inline constexpr OverflowOut<T> shl_with_overflow(T x, uint32_t shift) noexcept { // Using `num_bits<T>() - 1` as a mask only works if num_bits<T>() is a power // of two, so we verify that sizeof(T) is a power of 2, which implies the // number of bits is as well (since each byte is 2^3 bits). const bool overflow = shift >= num_bits<T>(); if (overflow) [[unlikely]] shift = shift & (unchecked_sub(num_bits<T>(), uint32_t{1})); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = overflow, .value = into_signed(unchecked_shl(into_unsigned(x), shift))}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8)) sus_always_inline constexpr OverflowOut<T> shr_with_overflow(T x, uint32_t shift) noexcept { // Using `num_bits<T>() - 1` as a mask only works if num_bits<T>() is a power // of two, so we verify that sizeof(T) is a power of 2, which implies the // number of bits is as well (since each byte is 2^3 bits). 
const bool overflow = shift >= num_bits<T>(); if (overflow) [[unlikely]] shift = shift & (unchecked_sub(num_bits<T>(), uint32_t{1})); return OverflowOut sus_clang_bug_56394(<T>){.overflow = overflow, .value = unchecked_shr(x, shift)}; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8)) sus_always_inline constexpr OverflowOut<T> shr_with_overflow(T x, uint32_t shift) noexcept { // Using `num_bits<T>() - 1` as a mask only works if num_bits<T>() is a power // of two, so we verify that sizeof(T) is a power of 2, which implies the // number of bits is as well (since each byte is 2^3 bits). const bool overflow = shift >= num_bits<T>(); if (overflow) [[unlikely]] shift = shift & (unchecked_sub(num_bits<T>(), uint32_t{1})); return OverflowOut sus_clang_bug_56394(<T>){ .overflow = overflow, .value = into_signed(unchecked_shr(into_unsigned(x), shift))}; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_add(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? const auto out = add_with_overflow(x, y); if (!out.overflow) [[likely]] return out.value; else return max_value<T>(); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_add(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? if (y >= 0) { if (x <= max_value<T>() - y) [[likely]] return x + y; else return max_value<T>(); } else { if (x >= min_value<T>() - y) [[likely]] return x + y; else return min_value<T>(); } } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_sub(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? const auto out = sub_with_overflow(x, y); if (!out.overflow) [[likely]] return out.value; else return min_value<T>(); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_sub(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? if (y <= 0) { if (x <= max_value<T>() + y) [[likely]] return x - y; else return max_value<T>(); } else { if (x >= min_value<T>() + y) [[likely]] return x - y; else return min_value<T>(); } } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_mul(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? const auto out = mul_with_overflow(x, y); if (!out.overflow) [[likely]] return out.value; else return max_value<T>(); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T saturating_mul(T x, T y) noexcept { // TODO: Optimize this? Use intrinsics? const auto out = mul_with_overflow(x, y); if (!out.overflow) [[likely]] return out.value; else if (x > 0 == y > 0) return max_value<T>(); else return min_value<T>(); } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_add(T x, T y) noexcept { return x + y; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_add(T x, T y) noexcept { // TODO: Are there cheaper intrinsics? 
return add_with_overflow(x, y).value; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_sub(T x, T y) noexcept { return unchecked_sub(x, y); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_sub(T x, T y) noexcept { // TODO: Are there cheaper intrinsics? return sub_with_overflow(x, y).value; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_mul(T x, T y) noexcept { return x * y; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_mul(T x, T y) noexcept { // TODO: Are there cheaper intrinsics? return mul_with_overflow(x, y).value; } template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_pow(T base, uint32_t exp) noexcept { // TODO: Don't need to track overflow and unsigned wraps by default, so this // can be cheaper. return pow_with_overflow(base, exp).value; } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T wrapping_pow(T base, uint32_t exp) noexcept { // TODO: Are there cheaper intrinsics? return pow_with_overflow(base, exp).value; } // Returns one less than next power of two. // (For 8u8 next power of two is 8u8 and for 6u8 it is 8u8) // // 8u8.one_less_than_next_power_of_two() == 7 // 6u8.one_less_than_next_power_of_two() == 7 // // This method cannot overflow, as in the `next_power_of_two` // overflow cases it instead ends up returning the maximum value // of the type, and can return 0 for 0. template <class T> requires(std::is_integral_v<T> && !std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr T one_less_than_next_power_of_two(T x) noexcept { if (x <= 1u) { return 0u; } else { const auto p = unchecked_sub(x, T{1}); // SAFETY: Because `p > 0`, it cannot consist entirely of leading zeros. // That means the shift is always in-bounds, and some processors (such as // intel pre-haswell) have more efficient ctlz intrinsics when the argument // is non-zero. const auto z = leading_zeros_nonzero(unsafe_fn, p); return unchecked_shr(max_value<T>(), z); } } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr bool div_overflows(T x, T y) noexcept { // Using `&` helps LLVM see that it is the same check made in division. return y == T{0} || ((x == min_value<T>()) & (y == T{-1})); } template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) sus_always_inline constexpr bool div_overflows_nonzero(::sus::marker::UnsafeFnMarker, T x, T y) noexcept { // Using `&` helps LLVM see that it is the same check made in division. return ((x == min_value<T>()) & (y == T{-1})); } // SAFETY: Requires that !div_overflows(x, y) or Undefined Behaviour results. template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) constexpr T div_euclid(::sus::marker::UnsafeFnMarker, T x, T y) noexcept { const auto q = unchecked_div(x, y); if (x % y >= 0) return q; else if (y > 0) return unchecked_sub(q, T{1}); else return unchecked_add(q, T{1}); } // SAFETY: Requires that !div_overflows(x, y) or Undefined Behaviour results. 
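// # Example
// A sketch (not from the library source) of the arithmetic helpers above,
// written as if inside this namespace; `unsafe_fn` is assumed to be the
// library's unsafe marker value in scope.
//
// ```
// static_assert(saturating_add(uint8_t{250}, uint8_t{10}) == 255u);
// static_assert(wrapping_sub(uint8_t{0}, uint8_t{1}) == 255u);
// // Euclidean division rounds the quotient so the remainder is never
// // negative. The caller must first rule out overflow, hence the marker.
// static_assert(div_euclid(unsafe_fn, -7, 3) == -3);  // -7 == 3 * -3 + 2
// static_assert(div_euclid(unsafe_fn, 7, -3) == -2);  //  7 == -3 * -2 + 1
// static_assert(div_overflows(min_value<int32_t>(), int32_t{-1}));
// ```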
template <class T> requires(std::is_integral_v<T> && std::is_signed_v<T> && sizeof(T) <= 8) constexpr T rem_euclid(::sus::marker::UnsafeFnMarker, T x, T y) noexcept { const auto r = unchecked_rem(x, y); if (r < 0) { if (y < 0) return unchecked_sub(r, y); else return unchecked_add(r, y); } else { return r; } } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr auto into_unsigned_integer(T x) noexcept { if constexpr (sizeof(T) == sizeof(float)) return std::bit_cast<uint32_t>(x); else return std::bit_cast<uint64_t>(x); } // Prefer the non-constexpr `into_float()` to avoid problems where you get a // different value in a constexpr context from a runtime context. It's safe to // call this function if the argument is not a NaN. Otherwise, you must ensure // the NaN is exactly in the form that would be produced in a constexpr context // in order to avoid problems. template <class T> requires(std::is_integral_v<T> && sizeof(T) <= 8) constexpr auto into_float_constexpr(::sus::marker::UnsafeFnMarker, T x) noexcept { if constexpr (sizeof(T) == sizeof(float)) return std::bit_cast<float>(x); else return std::bit_cast<double>(x); } // This is NOT constexpr because it can produce different results in a constexpr // context than in a runtime one. For example. // // ``` // constexpr float x = into_float(uint32_t{0x7f800001}); // const float y = into_float(uint32_t{0x7f800001}); // ``` // In this case `x` is `7fc00001` (the quiet bit became set), but `y` is // `0x7f800001`. template <class T> requires(std::is_integral_v<T> && sizeof(T) <= 8) inline auto into_float(T x) noexcept { // SAFETY: Since this isn't a constexpr context, we're okay. return into_float_constexpr(unsafe_fn, x); } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T min_positive_value() noexcept { if constexpr (sizeof(T) == sizeof(float)) return 1.17549435E-38f; else return 0.22250738585072014E-307; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr int32_t min_exp() noexcept { if constexpr (sizeof(T) == sizeof(float)) return int32_t{-125}; else return int32_t{-1021}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr int32_t max_exp() noexcept { if constexpr (sizeof(T) == sizeof(float)) return int32_t{128}; else return int32_t{1024}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr int32_t min_10_exp() noexcept { if constexpr (sizeof(T) == sizeof(float)) return int32_t{-37}; else return int32_t{-307}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr int32_t max_10_exp() noexcept { if constexpr (sizeof(T) == sizeof(float)) return int32_t{38}; else return int32_t{308}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr uint32_t radix() noexcept { return 2; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr uint32_t num_mantissa_digits() noexcept { if constexpr (sizeof(T) == sizeof(float)) return uint32_t{24}; else return uint32_t{53}; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr uint32_t num_digits() noexcept { if constexpr (sizeof(T) == sizeof(float)) return 6; else return 15; } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T nan() noexcept { // SAFETY: We must take care that the value returned here is the same in both // a constexpr and non-constexpr context. 
The quiet bit is always set in a // constexpr context, so we return a quiet bit here. if constexpr (sizeof(T) == sizeof(float)) return into_float_constexpr(unsafe_fn, uint32_t{0x7fc00000}); else return into_float_constexpr(unsafe_fn, uint64_t{0x7ff8000000000000}); } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T infinity() noexcept { // SAFETY: The value being constructed is not a NaN so we can do this in a // constexpr way. if constexpr (sizeof(T) == sizeof(float)) return into_float_constexpr(unsafe_fn, uint32_t{0x7f800000}); else return into_float_constexpr(unsafe_fn, uint64_t{0x7ff0000000000000}); } template <class T> requires(std::is_floating_point_v<T>) sus_always_inline constexpr T negative_infinity() noexcept { // SAFETY: The value being constructed is not a NaN so we can do this in a // constexpr way. if constexpr (sizeof(T) == sizeof(float)) return into_float_constexpr(unsafe_fn, uint32_t{0xff800000}); else return into_float_constexpr(unsafe_fn, uint64_t{0xfff0000000000000}); } constexpr inline int32_t exponent_bits(float x) noexcept { constexpr uint32_t mask = 0b01111111100000000000000000000000; return static_cast<int32_t>( unchecked_shr(into_unsigned_integer(x) & mask, 23)); } constexpr inline int32_t exponent_bits(double x) noexcept { constexpr uint64_t mask = 0b0111111111110000000000000000000000000000000000000000000000000000; return static_cast<int32_t>( unchecked_shr(into_unsigned_integer(x) & mask, 52)); } constexpr inline int32_t exponent_value(float x) noexcept { return exponent_bits(x) - int32_t{127}; } constexpr inline int32_t exponent_value(double x) noexcept { return exponent_bits(x) - int32_t{1023}; } constexpr inline uint32_t mantissa(float x) noexcept { constexpr uint32_t mask = 0b00000000011111111111111111111111; return into_unsigned_integer(x) & mask; } constexpr inline uint64_t mantissa(double x) noexcept { constexpr uint64_t mask = 0b0000000000001111111111111111111111111111111111111111111111111111; return into_unsigned_integer(x) & mask; } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr inline bool float_is_zero(T x) noexcept { return (into_unsigned_integer(x) & ~high_bit<T>()) == 0; } constexpr inline bool float_is_inf(float x) noexcept { #if __has_builtin(__builtin_isinf) return __builtin_isinf(x); #else constexpr auto inf = uint32_t{0x7f800000}; constexpr auto mask = uint32_t{0x7fffffff}; const auto y = into_unsigned_integer(x); return (y & mask) == inf; #endif } constexpr inline bool float_is_inf(double x) noexcept { #if __has_builtin(__builtin_isinf) return __builtin_isinf(x); #else constexpr auto inf = uint64_t{0x7ff0000000000000}; constexpr auto mask = uint64_t{0x7fffffffffffffff}; return (into_unsigned_integer(x) & mask) == inf; #endif } constexpr inline bool float_is_inf_or_nan(float x) noexcept { constexpr auto mask = uint32_t{0x7f800000}; return (into_unsigned_integer(x) & mask) == mask; } constexpr inline bool float_is_inf_or_nan(double x) noexcept { constexpr auto mask = uint64_t{0x7ff0000000000000}; return (into_unsigned_integer(x) & mask) == mask; } constexpr inline bool float_is_nan(float x) noexcept { #if __has_builtin(__builtin_isnan) return __builtin_isnan(x); #else constexpr auto inf_mask = uint32_t{0x7f800000}; constexpr auto nan_mask = uint32_t{0x7fffffff}; return (into_unsigned_integer(x) & nan_mask) > inf_mask; #endif } constexpr inline bool float_is_nan(double x) noexcept { #if __has_builtin(__builtin_isnan) return __builtin_isnan(x); #else constexpr auto
inf_mask = uint64_t{0x7ff0000000000000}; constexpr auto nan_mask = uint64_t{0x7fffffffffffffff}; return (into_unsigned_integer(x) & nan_mask) > inf_mask; #endif } // Assumes that x is a NaN. constexpr inline bool float_is_nan_quiet(float x) noexcept { // The quiet bit is the highest bit in the mantissa. constexpr auto quiet_mask = uint32_t{uint32_t{1} << (23 - 1)}; return (into_unsigned_integer(x) & quiet_mask) != 0; } // Assumes that x is a NaN. constexpr inline bool float_is_nan_quiet(double x) noexcept { // The quiet bit is the highest bit in the mantissa. constexpr auto quiet_mask = uint64_t{uint64_t{1} << (52 - 1)}; return (into_unsigned_integer(x) & quiet_mask) != 0; } // This is only valid if the argument is not (positive or negative) zero. template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr inline bool float_nonzero_is_subnormal(T x) noexcept { return exponent_bits(x) == int32_t{0}; } constexpr inline bool float_is_normal(float x) noexcept { // If the exponent is 0, the number is zero or subnormal. If the exponent is // all ones, the number is infinite or NaN. const auto e = exponent_bits(x); return e != 0 && e != int32_t{0b011111111}; } constexpr inline bool float_is_normal(double x) noexcept { // If the exponent is 0, the number is zero or subnormal. If the exponent is // all ones, the number is infinite or NaN. const auto e = exponent_bits(x); return e != 0 && e != int32_t{0b011111111111}; } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr T truncate_float(T x) noexcept { constexpr auto mantissa_width = sizeof(T) == sizeof(float) ? uint32_t{23} : uint32_t{52}; if (float_is_inf_or_nan(x) || float_is_zero(x)) return x; const int32_t exponent = exponent_value(x); // If the exponent is greater than the most negative mantissa // exponent, then x is already an integer. if (exponent >= static_cast<int32_t>(mantissa_width)) return x; // If the exponent is such that abs(x) is less than 1, then return 0. if (exponent <= -1) { if ((into_unsigned_integer(x) & high_bit<T>()) != 0) return T{-0.0}; else return T{0.0}; } const uint32_t trim_bits = mantissa_width - static_cast<uint32_t>(exponent); const auto shr = unchecked_shr(into_unsigned_integer(x), trim_bits); const auto shl = unchecked_shl(shr, trim_bits); // SAFETY: The value here is not a NaN, so will give the same value in // constexpr and non-constexpr contexts. return into_float_constexpr(unsafe_fn, shl); } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr bool float_signbit(T x) noexcept { return unchecked_and(into_unsigned_integer(x), high_bit<T>()) != 0; } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr T float_signum(T x) noexcept { // TODO: Can this be done without a branch? Beware nan values in constexpr // context are rewritten. if (float_is_nan(x)) [[unlikely]] return x; const auto signbit = unchecked_and(into_unsigned_integer(x), high_bit<T>()); // SAFETY: The value passed in is constructed here and is not a NaN. return into_float_constexpr( unsafe_fn, unchecked_add(into_unsigned_integer(T{1}), signbit)); } template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) inline T float_round(T x) noexcept { /* MSVC round(float) is returning a double for some reason. */ const auto out = into_unsigned_integer(static_cast<T>(::round(x))); // `round()` doesn't preserve the sign bit, so we need to restore it, for // (-0.5, -0.0]. 
return into_float((out & ~high_bit<T>()) | (into_unsigned_integer(x) & high_bit<T>())); } #if __has_builtin(__builtin_fpclassify) template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr inline ::sus::num::FpCategory float_category(T x) noexcept { constexpr auto nan = 1; constexpr auto inf = 2; constexpr auto norm = 3; constexpr auto subnorm = 4; constexpr auto zero = 5; switch (__builtin_fpclassify(nan, inf, norm, subnorm, zero, x)) { case nan: return ::sus::num::FpCategory::Nan; case inf: return ::sus::num::FpCategory::Infinite; case norm: return ::sus::num::FpCategory::Normal; case subnorm: return ::sus::num::FpCategory::Subnormal; case zero: return ::sus::num::FpCategory::Zero; default: ::sus::unreachable_unchecked(unsafe_fn); } } #else template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr inline ::sus::num::FpCategory float_category(T x) noexcept { if (std::is_constant_evaluated()) { if (float_is_nan(x)) return ::sus::num::FpCategory::Nan; if (float_is_inf_or_nan(x)) return ::sus::num::FpCategory::Infinite; if (float_is_zero(x)) return ::sus::num::FpCategory::Zero; if (float_nonzero_is_subnormal(x)) return ::sus::num::FpCategory::Subnormal; return ::sus::num::FpCategory::Normal; } else { // C++23 requires a constexpr way to do this. switch (::fpclassify(x)) { case FP_NAN: return ::sus::num::FpCategory::Nan; case FP_INFINITE: return ::sus::num::FpCategory::Infinite; case FP_NORMAL: return ::sus::num::FpCategory::Normal; case FP_SUBNORMAL: return ::sus::num::FpCategory::Subnormal; case FP_ZERO: return ::sus::num::FpCategory::Zero; default: ::sus::unreachable_unchecked(unsafe_fn); } } } #endif // Requires that `min <= max` and that `min` and `max` are not NaN or else this // function produces Undefined Behaviour. template <class T> requires(std::is_floating_point_v<T> && sizeof(T) <= 8) constexpr T float_clamp(marker::UnsafeFnMarker, T x, T min, T max) noexcept { if (float_is_nan(x)) [[unlikely]] return nan<T>(); else if (x < min) return min; else if (x > max) return max; return x; } } // namespace sus::num::__private #if _MSC_VER /// Literal integer value. #define _sus__integer_literal(Name, T) \ /* A `constexpr` workaround for MSVC bug that doesn't constant-evaluate \ * user-defined literals in all cases: \ * https://developercommunity.visualstudio.com/t/User-defined-literals-not-constant-expre/10108165 \ * \ * This obviously adds some runtime + codegen overhead. We could use a \ * "numeric literal operator template" and construct the number from a \ * `char...` template, which is what we used to do before moving to \ * `consteval` (see commit 531ac278f5b96a63b39332a0b87ef207e0d40575). \ * However that triggers a different MSVC bug when used with any \ * unary/binary operator in a templated function: \ * https://developercommunity.visualstudio.com/t/MSVC-Compiler-bug-with:-numeric-literal/10108160 \ */ \ T inline constexpr operator""_##Name(unsigned long long val) noexcept { \ ::sus::check(val <= static_cast<unsigned long long>(T::MAX_PRIMITIVE)); \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } #else /// Literal integer value. #define _sus__integer_literal(Name, T) \ T inline consteval operator""_##Name(unsigned long long val) { \ if (val > static_cast<unsigned long long>(T::MAX_PRIMITIVE)) \ throw "Integer literal out of bounds for ##T##"; \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } #endif #if _MSC_VER /// Literal float value. 
#define _sus__float_literal(Name, T) \ /* A `constexpr` workaround for MSVC bug that doesn't constant-evaluate \ * user-defined literals in all cases: \ * https://developercommunity.visualstudio.com/t/User-defined-literals-not-constant-expre/10108165 \ * \ * This obviously adds some runtime + codegen overhead. We could use a \ * "numeric literal operator template" and construct the number from a \ * `char...` template, which is what we used to do before moving to \ * `consteval` (see commit 531ac278f5b96a63b39332a0b87ef207e0d40575). \ * However that triggers a different MSVC bug when used with any \ * unary/binary operator in a templated function: \ * https://developercommunity.visualstudio.com/t/MSVC-Compiler-bug-with:-numeric-literal/10108160 \ */ \ T inline constexpr operator""_##Name(long double val) noexcept { \ ::sus::check(val <= static_cast<long double>(T::MAX_PRIMITIVE)); \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } \ T inline constexpr operator""_##Name(unsigned long long val) noexcept { \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } #else /// Literal float value. #define _sus__float_literal(Name, T) \ T inline consteval operator""_##Name(long double val) { \ if (val > static_cast<long double>(T::MAX_PRIMITIVE)) \ throw "Float literal out of bounds for ##T##"; \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } \ T inline consteval operator""_##Name(unsigned long long val) { \ return T(static_cast<decltype(T::primitive_value)>(val)); \ } #endif namespace sus::num { struct i8; struct i16; struct i32; struct i64; struct isize; struct u8; struct u16; struct u32; struct u64; struct usize; template <class T> concept Unsigned = std::same_as<u8, std::decay_t<T>> || std::same_as<u16, std::decay_t<T>> || std::same_as<u32, std::decay_t<T>> || std::same_as<u64, std::decay_t<T>> || std::same_as<usize, std::decay_t<T>>; template <class T> concept Signed = std::same_as<i8, std::decay_t<T>> || std::same_as<i16, std::decay_t<T>> || std::same_as<i32, std::decay_t<T>> || std::same_as<i64, std::decay_t<T>> || std::same_as<isize, std::decay_t<T>>; template <class T> concept Integer = Unsigned<T> || Signed<T>; template <class T> concept UnsignedPrimitiveInteger = std::same_as<size_t, T> || (std::is_unsigned_v<char> && std::same_as<char, T>) || std::same_as<unsigned char, T> || std::same_as<unsigned short, T> || std::same_as<unsigned int, T> || std::same_as<unsigned long, T> || std::same_as<unsigned long long, T>; template <class T> concept SignedPrimitiveInteger = (!std::is_unsigned_v<char> && std::same_as<char, T>) || std::same_as<signed char, T> || std::same_as<short, T> || std::same_as<int, T> || std::same_as<long, T> || std::same_as<long long, T>; template <class T> concept PrimitiveInteger = UnsignedPrimitiveInteger<T> || SignedPrimitiveInteger<T>; } // namespace sus::num namespace sus::construct { namespace __private { template <class T> constexpr inline bool has_with_default(...) { return false; } template <class T, class... 
Args> requires(std::same_as<decltype(T::with_default(std::declval<Args>()...)), T>) constexpr inline bool has_with_default(int) { return true; } } // namespace __private // clang-format off template <class T> concept MakeDefault = (std::constructible_from<T> && !__private::has_with_default<T>(0)) || (!std::constructible_from<T> && __private::has_with_default<T>(0)); // clang-format on template <MakeDefault T> inline constexpr T make_default() noexcept { if constexpr (std::constructible_from<T>) return T(); else return T::with_default(); } } // namespace sus::construct /// Defines an attribute to place at the end of a function definition that /// declares all pointer arguments are not null. To actually receive null would /// be UB, as the compiler is free to optimize for them never being null. #define sus_nonnull_fn sus_if_msvc_else(, __attribute__((nonnull))) /// Defines an attribute to place before the type of a pointer-type function /// parameter that declares the pointer is not null. To actually receive null /// would be UB, as the compiler is free to optimize for it never being null. #define sus_nonnull_arg sus_if_msvc(_Notnull_) namespace sus::mem { template <class T> requires(!std::is_reference_v<T> && !std::is_array_v<T>) struct Mref; /// Pass a variable to a function as a mutable reference. template <class T> constexpr inline Mref<T> mref(T& t) { return Mref<T>(Mref<T>::kConstruct, t); } template <class T> constexpr inline Mref<T> mref(const T& t) = delete; /// An Mref can be passed along as an Mref. template <class T> constexpr inline Mref<T> mref(Mref<T>& t) { return Mref<T>(Mref<T>::kConstruct, t.inner()); } /// A mutable reference receiver. /// /// Mref should only be used as a function parameter. It receives a mutable /// (lvalue) reference, and requires the caller to pass it explicitly with /// mref(). /// /// This ensures that passing a variable as mutable is visible at the callsite. /// It generates the same code as a bare reference: /// https://godbolt.org/z/9xPaqhvfq /// /// # Example /// /// ``` /// // Without Mref: /// void receive_ref(int& i) { i++; } /// /// // With Mref: /// void receive_ref(Mref<int> i) { i++; } /// /// int i; /// receive_ref(mref(i)); // Explicitly pass lvalue ref. /// ``` template <class T> requires(!std::is_reference_v<T> && !std::is_array_v<T>) struct [[sus_trivial_abi]] Mref final { /// Mref can be trivially moved, so this is the move constructor. constexpr Mref(Mref&&) noexcept = default; /// Mref can be trivially moved, but is only meant for a function argument, so /// no need for assignment. constexpr Mref& operator=(Mref&&) noexcept = delete; /// Prevent constructing an Mref argument without writing mref(). Mref(T& t) = delete; /// Prevent passing an Mref argument along without writing mref() again. Mref(Mref&) = delete; /// Returns the reference held by the Mref. constexpr inline T& inner() & { return t_; } /// Act like a T&. It can convert to a T&. constexpr inline operator T&() & noexcept { return t_; } /// Act like a T&. It can be assigned a new T. constexpr inline Mref& operator=(const T& t) requires(std::is_copy_assignable_v<T>) { t_ = t; return *this; } /// Act like a T&. It can be assigned a new T. 
constexpr inline Mref& operator=(T&& t) requires(std::is_move_assignable_v<T>) { t_ = static_cast<T&&>(t); return *this; } private: friend constexpr Mref<T> mref<>(T&); friend constexpr Mref<T> mref<>(Mref&); enum Construct { kConstruct }; constexpr inline Mref(Construct, T& reference) noexcept : t_(reference) {} T& t_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(t_)); }; } // namespace sus::mem // Promote Mref into the `sus` namespace. namespace sus { using ::sus::mem::Mref; } // namespace sus // Promote mref() into the top level namespace. // TODO: Provide an option to do this or not. using ::sus::mem::mref; namespace sus::mem { template <class T> requires(std::is_object_v<T>) constexpr T* addressof(T& arg) noexcept { return __builtin_addressof(arg); } template <class T> requires(!std::is_object_v<T>) constexpr T* addressof(T& arg) noexcept { return &arg; } template <class T> T* addressof(T&& arg) = delete; } // namespace sus::mem namespace sus::mem { // It would be nice to have an array overload of replace() but functions can't // return arrays. template <class T> requires(!std::is_array_v<T> && std::is_move_constructible_v<T> && std::is_copy_assignable_v<T>) [[nodiscard]] constexpr T replace(Mref<T> dest_ref, const T& src) noexcept { T& dest = dest_ref; T old(static_cast<T&&>(dest)); // memcpy() is not constexpr so we can't use it in constexpr evaluation. bool can_memcpy = ::sus::mem::relocate_one_by_memcpy<T> && !std::is_constant_evaluated(); if (can_memcpy) { memcpy(::sus::mem::addressof(dest), ::sus::mem::addressof(src), sizeof(T)); } else { dest = src; } return old; } template <class T> requires(!std::is_array_v<T> && std::is_move_constructible_v<T> && std::is_move_assignable_v<T>) [[nodiscard]] constexpr T replace(Mref<T> dest_ref, T&& src) noexcept { T& dest = dest_ref; T old(static_cast<T&&>(dest)); // memcpy() is not constexpr so we can't use it in constexpr evaluation. bool can_memcpy = ::sus::mem::relocate_one_by_memcpy<T> && !std::is_constant_evaluated(); if (can_memcpy) { memcpy(::sus::mem::addressof(dest), ::sus::mem::addressof(src), sizeof(T)); } else { dest = static_cast<T&&>(src); } return old; } template <class T> requires(!std::is_array_v<T> && std::is_copy_assignable_v<T>) void replace_and_discard(Mref<T> dest_ref, const T& src) noexcept { T& dest = dest_ref; // memcpy() is not constexpr so we can't use it in constexpr evaluation. bool can_memcpy = ::sus::mem::relocate_one_by_memcpy<T> && !std::is_constant_evaluated(); if (can_memcpy) { memcpy(::sus::mem::addressof(dest), ::sus::mem::addressof(src), sizeof(T)); } else { dest = src; } } template <class T> requires(!std::is_array_v<T> && std::is_move_assignable_v<T>) void replace_and_discard(Mref<T> dest_ref, T&& src) noexcept { T& dest = dest_ref; // memcpy() is not constexpr so we can't use it in constexpr evaluation. 
bool can_memcpy = ::sus::mem::relocate_one_by_memcpy<T> && !std::is_constant_evaluated(); if (can_memcpy) { memcpy(::sus::mem::addressof(dest), ::sus::mem::addressof(src), sizeof(T)); } else { dest = static_cast<T&&>(src); } } template <class T> [[nodiscard]] constexpr T* replace_ptr(Mref<T*> dest, T* src) noexcept { T* old = dest; dest = src; return old; } template <class T> [[nodiscard]] constexpr const T* replace_ptr(Mref<const T*> dest, const T* src) noexcept { const T* old = dest; dest = src; return old; } template <class T> [[nodiscard]] constexpr T* replace_ptr(Mref<T*> dest, decltype(nullptr)) noexcept { T* old = dest; dest = nullptr; return old; } template <class T> [[nodiscard]] constexpr const T* replace_ptr(Mref<const T*> dest, decltype(nullptr)) noexcept { const T* old = dest; dest = nullptr; return old; } } // namespace sus::mem namespace sus::mem { /// Calling take() on a base class could lead to Undefined Behaviour, unless the /// object was accessed through std::launder thereafter, as replacing a subclass /// with the base class would change the underlying storage. template <class T> requires(std::is_move_constructible_v<T> && std::is_default_constructible_v<T> && std::is_final_v<T>) inline constexpr T take(Mref<T> t_ref) noexcept { T& t = t_ref; T taken(static_cast<T&&>(t)); t.~T(); // TODO: Support classes with a `with_default()` constructor as well. new (&t) T(); return taken; } // SAFETY: This does *not* re-construct the object pointed to by `t`. It must // not be used (or destructed again) afterward. template <class T> requires std::is_move_constructible_v<T> inline constexpr T take_and_destruct(::sus::marker::UnsafeFnMarker, Mref<T> t_ref) noexcept { T& t = t_ref; T taken(static_cast<T&&>(t)); t.~T(); return taken; } template <class T> requires std::is_move_constructible_v<T> inline constexpr T take_and_destruct(::sus::marker::UnsafeFnMarker, T& t) noexcept { T taken(static_cast<T&&>(t)); t.~T(); return taken; } } // namespace sus::mem namespace sus::num { template <class Rhs, class Output = Rhs> concept Neg = requires(Rhs rhs) { { -static_cast<Rhs&&>(rhs) } -> std::same_as<Output>; }; template <class Rhs, class Output = Rhs> concept BitNot = requires(Rhs rhs) { { ~static_cast<Rhs&&>(rhs) } -> std::same_as<Output>; }; template <class Lhs, class Rhs, class Output = Lhs> concept Add = requires(const Lhs& lhs, const Rhs& rhs) { { lhs + rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept AddAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs += rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept Sub = requires(const Lhs& lhs, const Rhs& rhs) { { lhs - rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept SubAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs -= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept Mul = requires(const Lhs& lhs, const Rhs& rhs) { { lhs* rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept MulAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs *= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept Div = requires(const Lhs& lhs, const Rhs& rhs) { { lhs / rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept DivAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs /= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept Rem = requires(const Lhs& lhs, const Rhs& rhs) { { lhs % rhs } -> std::same_as<Output>; }; 
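// An illustrative sketch (not part of the library) of checking the operator concepts above
// against built-in `int`. Note that the compound-assignment concepts require the operation to
// return `void`, which built-in types do not do:
//
// ```
// static_assert(sus::num::Neg<int>);
// static_assert(sus::num::Add<int, int>);
// static_assert(sus::num::Mul<int, int>);
// static_assert(!sus::num::AddAssign<int, int>);  // `int += int` yields `int&`, not `void`.
// ```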
template <class Lhs, class Rhs> concept RemAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs %= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept BitAnd = requires(const Lhs& lhs, const Rhs& rhs) { { lhs & rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept BitAndAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs &= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept BitOr = requires(const Lhs& lhs, const Rhs& rhs) { { lhs | rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept BitOrAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs |= rhs } -> std::same_as<void>; }; template <class Lhs, class Rhs, class Output = Lhs> concept BitXor = requires(const Lhs& lhs, const Rhs& rhs) { { lhs ^ rhs } -> std::same_as<Output>; }; template <class Lhs, class Rhs> concept BitXorAssign = requires(Lhs& lhs, const Rhs& rhs) { { lhs ^= rhs } -> std::same_as<void>; }; template <class Lhs, class Output = Lhs> concept Shl = requires(const Lhs& lhs /* TODO: , u32 rhs */) { { lhs << 2u } -> std::same_as<Output>; }; template <class Lhs> concept ShlAssign = requires(Lhs& lhs /* TODO: , u32 rhs */) { { lhs <<= 2u } -> std::same_as<void>; }; template <class Lhs, class Output = Lhs> concept Shr = requires(const Lhs& lhs /* TODO: , u32 rhs */) { { lhs >> 2u } -> std::same_as<Output>; }; template <class Lhs> concept ShrAssign = requires(Lhs& lhs /* TODO: , u32 rhs */) { { lhs >>= 2u } -> std::same_as<void>; }; } // namespace sus::num namespace sus::ops { // Types `A` and `B` are `Eq<A, B>` if an object of each type can be compared for // equality with the `==` operator. // // TODO: How do we do PartialEq? Can we even? Can we require Ord to be Eq? But // then it depends on ::num? template <class Lhs, class Rhs> concept Eq = requires(const Lhs& lhs, const Rhs& rhs) { { lhs == rhs } -> std::same_as<bool>; }; } // namespace sus::ops namespace sus::ops { /// Determines if the types `Lhs` and `Rhs` have a total ordering (aka /// `std::strong_ordering`). template <class Lhs, class Rhs> concept Ord = requires(const Lhs& lhs, const Rhs& rhs) { { lhs <=> rhs } -> std::same_as<std::strong_ordering>; }; /// Determines if the types `Lhs` and `Rhs` have a weak ordering (aka /// `std::weak_ordering`). /// /// This will be true if the types have a total ordering as well, which is /// stronger than a weak ordering. To determine if a weak ordering is the /// strongest type of ordering between the types, use `ExclusiveWeakOrd`. template <class Lhs, class Rhs> concept WeakOrd = Ord<Lhs, Rhs> || requires(const Lhs& lhs, const Rhs& rhs) { { lhs <=> rhs } -> std::same_as<std::weak_ordering>; }; /// Determines if the types `Lhs` and `Rhs` have a partial ordering (aka /// `std::partial_ordering`). /// /// This will be true if the types have a weak or total ordering as well, which /// is stronger than a partial ordering. To determine if a partial ordering is /// the strongest type of ordering between the types, use `ExclusivePartialOrd`. template <class Lhs, class Rhs> concept PartialOrd = WeakOrd<Lhs, Rhs> || Ord<Lhs, Rhs> || requires(const Lhs& lhs, const Rhs& rhs) { { lhs <=> rhs } -> std::same_as<std::partial_ordering>; }; /// Determines if the types `Lhs` and `Rhs` have a total ordering (aka /// `std::strong_ordering`).
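// An illustrative sketch (not part of the library) of the ordering concepts above, using the
// built-in comparison categories from <compare>:
//
// ```
// static_assert(sus::ops::Eq<int, long>);             // int == long returns bool.
// static_assert(sus::ops::Ord<int, int>);             // int <=> int is std::strong_ordering.
// static_assert(!sus::ops::Ord<float, float>);        // float <=> float is only partial.
// static_assert(sus::ops::PartialOrd<float, float>);
// ```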
template <class Lhs, class Rhs> concept ExclusiveOrd = Ord<Lhs, Rhs>; /// Determines if the types `Lhs` and `Rhs` have a weak ordering (aka /// `std::weak_ordering`), and that this is the strongest ordering that exists /// between the types. template <class Lhs, class Rhs> concept ExclusiveWeakOrd = (!Ord<Lhs, Rhs> && WeakOrd<Lhs, Rhs>); /// Determines if the types `Lhs` and `Rhs` have a partial ordering (aka /// `std::partial_ordering`), and that this is the strongest ordering that /// exists between the types. template <class Lhs, class Rhs> concept ExclusivePartialOrd = (!Ord<Lhs, Rhs> && !WeakOrd<Lhs, Rhs> && PartialOrd<Lhs, Rhs>); } // namespace sus::ops namespace sus::option { template <class T> class Option; } namespace sus::option::__private { template <class U> struct IsOptionType final : std::false_type { using inner_type = void; }; template <class U> struct IsOptionType<Option<U>> final : std::true_type { using inner_type = U; }; } // namespace sus::option::__private namespace sus::option { /// The representation of an Option's state, which can either be #None to /// represent it has no value, or #Some for when it is holding a value. enum class State : bool { /// The Option is not holding any value. None, /// The Option is holding a value. Some, }; } namespace sus::option::__private { using State::None; using State::Some; template <class T> constexpr inline bool UseNeverValueFieldOptimization = std::is_standard_layout_v<T> && sus::mem::never_value_field<T>::has_field; template <class T, bool = UseNeverValueFieldOptimization<T>> struct Storage; // TODO: Determine if we can put the State into the storage of `T`. Probably // though a user-defined trait for `T`? // // TODO: If the compiler provided an extension to get the offset of a reference // or non-null-annotated pointer inside a type, we could use that to determine a // place to "store" the liveness bit inside `T`. When we destroy `T`, we'd write // a `null` to that location, and when `T` is constructed, we know it will write // a non-`null` there. This is a generalization of what we have done for the // `T&` type. Something like `__offset_of_nonnull_field(T)`, which would be // possible to determine at compile time for a fully-defined type `T`. 
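// An illustrative sketch (not part of the library): a hypothetical type whose `operator<=>`
// returns `std::weak_ordering` satisfies `ExclusiveWeakOrd` but not `ExclusiveOrd`, and a
// partially-ordered built-in type satisfies `ExclusivePartialOrd`:
//
// ```
// struct Score {
//   int v;
//   std::weak_ordering operator<=>(const Score& o) const noexcept { return v <=> o.v; }
//   bool operator==(const Score& o) const noexcept { return v == o.v; }
// };
// static_assert(sus::ops::ExclusiveWeakOrd<Score, Score>);
// static_assert(!sus::ops::ExclusiveOrd<Score, Score>);
// static_assert(sus::ops::ExclusivePartialOrd<float, float>);
// ```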
template <class T> struct Storage<T, false> final { constexpr ~Storage() requires(std::is_trivially_destructible_v<T>) = default; constexpr ~Storage() requires(!std::is_trivially_destructible_v<T>) {} constexpr Storage(const Storage&) requires(std::is_trivially_copy_constructible_v<T>) = default; constexpr Storage& operator=(const Storage&) requires(std::is_trivially_copy_assignable_v<T>) = default; constexpr Storage(Storage&&) requires(std::is_trivially_move_constructible_v<T>) = default; constexpr Storage& operator=(Storage&&) requires(std::is_trivially_move_assignable_v<T>) = default; constexpr Storage() {} constexpr Storage(const std::remove_cvref_t<T>& t) : val_(t), state_(Some) {} constexpr Storage(std::remove_cvref_t<T>& t) : val_(t), state_(Some) {} constexpr Storage(std::remove_cvref_t<T>&& t) : val_(static_cast<T&&>(t)), state_(Some) {} union { T val_; }; State state_ = None; [[nodiscard]] constexpr inline State state() const noexcept { return state_; } constexpr inline void construct_from_none(T&& t) noexcept { new (&val_) T(static_cast<T&&>(t)); state_ = Some; } constexpr inline void set_some(T&& t) noexcept { if (state_ == None) construct_from_none(static_cast<T&&>(t)); else ::sus::mem::replace_and_discard(mref(val_), static_cast<T&&>(t)); state_ = Some; } [[nodiscard]] constexpr inline T replace_some(T&& t) noexcept { return ::sus::mem::replace(mref(val_), static_cast<T&&>(t)); } [[nodiscard]] constexpr inline T take_and_set_none() noexcept { state_ = None; return ::sus::mem::take_and_destruct(unsafe_fn, val_); } constexpr inline void set_none() noexcept { state_ = None; val_.~T(); } }; template <class T> struct Storage<T, true> final { constexpr ~Storage() requires(std::is_trivially_destructible_v<T>) = default; constexpr ~Storage() requires(!std::is_trivially_destructible_v<T>) {} constexpr Storage(const Storage&) requires(std::is_trivially_copy_constructible_v<T>) = default; constexpr Storage& operator=(const Storage&) requires(std::is_trivially_copy_assignable_v<T>) = default; constexpr Storage(Storage&&) requires(std::is_trivially_move_constructible_v<T>) = default; constexpr Storage& operator=(Storage&&) requires(std::is_trivially_move_assignable_v<T>) = default; constexpr Storage() : overlay_() { ::sus::mem::never_value_field<T>::set_never_value(unsafe_fn, overlay_); } constexpr Storage(const T& t) : val_(t) {} constexpr Storage(T&& t) : val_(static_cast<T&&>(t)) {} using Overlay = typename ::sus::mem::never_value_field<T>::OverlayType; union { Overlay overlay_; T val_; }; // If both `bytes_` and `val_` are standard layout, and the same size, then we // can access the memory of one through the other in a well-defined way: // https://en.cppreference.com/w/cpp/language/union static_assert(std::is_standard_layout_v<Overlay>); static_assert(std::is_standard_layout_v<T>); // Not constexpr because in a constant-evalation context, the compiler will // produce an error if the Option is #Some, since we're reading the union's // inactive field in that case. When using the never-value field optimization, // it's not possible to query the state of the Option, without already knowing // the correct state and thus the correct union field to read, given the // current limitations of constexpr in C++20. [[nodiscard]] inline State state() const noexcept { return ::sus::mem::never_value_field<T>::is_constructed(unsafe_fn, overlay_) ? 
Some : None; } inline void construct_from_none(T&& t) noexcept { new (&val_) T(static_cast<T&&>(t)); } constexpr inline void set_some(T&& t) noexcept { if (state() == None) construct_from_none(static_cast<T&&>(t)); else ::sus::mem::replace_and_discard(mref(val_), static_cast<T&&>(t)); } [[nodiscard]] constexpr inline T replace_some(T&& t) noexcept { return ::sus::mem::replace(mref(val_), static_cast<T&&>(t)); } [[nodiscard]] constexpr inline T take_and_set_none() noexcept { T t = take_and_destruct(unsafe_fn, mref(val_)); // Make the overlay_ field active. overlay_ = Overlay(); ::sus::mem::never_value_field<T>::set_never_value(unsafe_fn, overlay_); return t; } constexpr inline void set_none() noexcept { val_.~T(); // Make the overlay_ field active. overlay_ = Overlay(); ::sus::mem::never_value_field<T>::set_never_value(unsafe_fn, overlay_); } }; template <class T> struct [[sus_trivial_abi]] StoragePointer { explicit constexpr sus_always_inline sus_nonnull_fn StoragePointer(sus_nonnull_arg T& ref) noexcept : ptr_(::sus::mem::addressof(ref)) {} constexpr sus_always_inline const T& as_ref() const { return *ptr_; } constexpr sus_always_inline T& as_mut() { return *ptr_; } private: T* ptr_; // Pointers are trivially relocatable. sus_class_trivial_relocatable(unsafe_fn); // The pointer is never set to null. sus_class_never_value_field(unsafe_fn, StoragePointer, ptr_, nullptr); }; // This must be true in order for StoragePointer to be useful with the // never-value field optimization. static_assert(std::is_standard_layout_v<StoragePointer<int>>); } // namespace sus::option::__private namespace sus::result { template <class T, class E> class Result; } namespace sus::result::__private { template <class U> struct IsResultType final : std::false_type { using ok_type = void; using err_type = void; }; template <class U, class V> struct IsResultType<Result<U, V>> final : std::true_type { using ok_type = U; using err_type = V; }; } // namespace sus::result::__private namespace sus::iter { template <class Item> class Once; template <class I> class Iterator; namespace __private { template <class T> constexpr auto begin(const T& t) noexcept; template <class T> constexpr auto end(const T& t) noexcept; } // namespace __private } // namespace sus::iter namespace sus::result { template <class T, class E> class Result; } namespace sus::tuple { template <class T, class... Ts> class Tuple; } namespace sus::option { using State::None; using State::Some; using sus::iter::Iterator; using sus::iter::Once; using sus::option::__private::Storage; using sus::option::__private::StoragePointer; /// A type which either holds #Some value of type `T`, or #None. /// /// `Option<const T>` for non-reference-type `T` is disallowed, as the Option /// owns the `T` in that case and it ensures the `Option` and the `T` are both /// accessed with the same const-ness. /// /// If a type provides a never-value field (see mem/never_value.h), then /// Option<T> will have the same size as T. /// /// However the never-value field places some limitations on what can be /// constexpr in the Option type. Because it is not possible to query the state /// of the Option in a constant evaluation context, state-querying methods can /// not be constexpr, nor any method that branches based on the current state, /// such as `unwrap_or()`. /// template <class T> class Option; /// Implementation of Option for a value type (non-reference). 
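// An illustrative sketch (not part of the library) of basic construction and querying of the
// value-type Option implemented below, using `sus::move` as the class documentation does:
//
// ```
// auto x = Option<int>::some(2);
// auto y = Option<int>::none();
// // x.is_some() == true, y.is_none() == true.
// int v = sus::move(x).unwrap();  // v == 2; unwrap() consumes the Option.
// ```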
template <class T> class Option final { static_assert(!std::is_const_v<T>); public: /// Construct an Option that is holding the given value. static inline constexpr Option some(const T& t) noexcept requires(std::is_copy_constructible_v<T>) { return Option(t); } /// Construct an Option that is holding the given value. template <class U> static inline constexpr Option some(Mref<U> t) noexcept requires(std::is_copy_constructible_v<T>) { return Option(t); } /// Construct an Option that is holding the given value. static inline constexpr Option some(T&& t) noexcept requires(std::is_move_constructible_v<T>) { return Option(static_cast<T&&>(t)); } /// Construct an Option that is holding no value. static inline constexpr Option none() noexcept { return Option(); } /// Construct an Option with the default value for the type it contains. /// /// The Option's contained type `T` must be #MakeDefault, and will be /// constructed through that trait. static inline constexpr Option<T> with_default() noexcept requires(::sus::construct::MakeDefault<T>) { return Option<T>(::sus::construct::make_default<T>()); } /// Destructor for the Option. /// /// If T can be trivially destroyed, we don't need to explicitly destroy it, /// so we can use the default destructor, which allows Option<T> to also be /// trivially destroyed. constexpr ~Option() noexcept requires(std::is_trivially_destructible_v<T>) = default; /// Destructor for the Option. /// /// Destroys the value contained within the option, if there is one. inline ~Option() noexcept requires(!std::is_trivially_destructible_v<T>) { if (t_.state() == Some) t_.val_.~T(); } /// If T can be trivially copy-constructed, Option<T> can also be trivially /// copy-constructed. constexpr Option(const Option& o) requires(std::is_trivially_copy_constructible_v<T>) = default; Option(const Option& o) noexcept requires(!std::is_trivially_copy_constructible_v<T> && std::is_copy_constructible_v<T>) { if (o.t_.state() == Some) t_.set_some(o.t_.val_); } constexpr Option(const Option& o) requires(!std::is_copy_constructible_v<T>) = delete; /// If T can be trivially copy-constructed, Option<T> can also be trivially /// move-constructed. constexpr Option(Option&& o) requires(std::is_trivially_move_constructible_v<T>) = default; // TODO: If this could be done in a `constexpr` way, methods that receive an // Option could also be constexpr. Option(Option&& o) noexcept requires(!std::is_trivially_move_constructible_v<T> && std::is_move_constructible_v<T>) { if (o.t_.state() == Some) t_.set_some(o.t_.take_and_set_none()); } constexpr Option(Option&& o) requires(!std::is_move_constructible_v<T>) = delete; /// If T can be trivially copy-assigned, Option<T> can also be trivially /// copy-assigned. constexpr Option& operator=(const Option& o) requires(std::is_trivially_copy_assignable_v<T>) = default; Option& operator=(const Option& o) noexcept requires(!std::is_trivially_copy_assignable_v<T> && std::is_copy_assignable_v<T>) { if (o.t_.state() == Some) t_.set_some(o.t_.val_); else if (t_.state() == Some) t_.set_none(); return *this; } constexpr Option& operator=(const Option& o) requires(!std::is_copy_assignable_v<T>) = delete; /// If T can be trivially move-assigned, we don't need to explicitly construct /// it, so we can use the default destructor, which allows Option<T> to also /// be trivially move-assigned. 
constexpr Option& operator=(Option&& o) requires(std::is_trivially_move_assignable_v<T>) = default; Option& operator=(Option&& o) noexcept requires(!std::is_trivially_move_assignable_v<T> && std::is_move_assignable_v<T>) { if (o.t_.state() == Some) t_.set_some(o.t_.take_and_set_none()); else if (t_.state() == Some) t_.set_none(); return *this; } constexpr Option& operator=(Option&& o) requires(!std::is_move_assignable_v<T>) = delete; /// Drop the current value from the Option, if there is one. /// /// Afterward the option will unconditionally be #None. void clear() & noexcept { if (t_.state() == Some) t_.set_none(); } /// Returns whether the Option currently contains a value. /// /// If there is a value present, it can be extracted with <unwrap>() or /// <expect>(). bool is_some() const noexcept { return t_.state() == Some; } /// Returns whether the Option is currently empty, containing no value. bool is_none() const noexcept { return t_.state() == None; } /// An operator which returns the state of the Option, either #Some or #None. /// /// This supports the use of an Option in a `switch()`, allowing it to act as /// a tagged union between "some value" and "no value". /// /// # Example /// /// ```cpp /// auto x = Option<int>::some(2); /// switch (x) { /// case Some: /// return sus::move(x).unwrap_unchecked(unsafe_fn); /// case None: /// return -1; /// } /// ``` operator State() const& { return t_.state(); } /// Returns the contained value inside the Option. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn T expect( /* TODO: string view type */ sus_nonnull_arg const char* msg) && noexcept { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); } /// Returns a const reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_ref().expect()`. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn const T& expect_ref( /* TODO: string view type */ sus_nonnull_arg const char* msg) const& noexcept { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return t_.val_; } const T& expect_ref() && noexcept = delete; /// Returns a mutable reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_mut().expect()`. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn T& expect_mut( /* TODO: string view type */ sus_nonnull_arg const char* msg) & noexcept { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return t_.val_; } /// Returns the contained value inside the Option. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr T unwrap() && noexcept { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); } /// Returns the contained value inside the Option. /// /// # Safety /// /// It is Undefined Behaviour to call this function when the Option's state is /// `None`. The caller is responsible for ensuring the Option contains a value /// beforehand, and the safer <unwrap>() or <expect>() should almost always be /// preferred. 
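// An illustrative sketch (not part of the library) of the unwrapping accessors documented above:
//
// ```
// auto a = Option<int>::some(5);
// int x = sus::move(a).expect("expected a value");                 // x == 5; panics with the message if None.
// int y = Option<int>::none().unwrap_or(-1);                       // y == -1.
// int z = Option<int>::none().unwrap_or_else([]() { return 7; });  // z == 7.
// ```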
constexpr inline T unwrap_unchecked( ::sus::marker::UnsafeFnMarker) && noexcept { return t_.take_and_set_none(); } /// Returns a const reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_ref().unwrap()`. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr const T& unwrap_ref() const& noexcept { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return t_.val_; } const T& unwrap_ref() && noexcept = delete; /// Returns a mutable reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_mut().unwrap()`. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr T& unwrap_mut() & noexcept { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return t_.val_; } /// Returns the contained value inside the Option, if there is one. Otherwise, /// returns `default_result`. /// /// Note that if it is non-trivial to construct a `default_result`, that /// <unwrap_or_else>() should be used instead, as it will only construct the /// default value if required. T unwrap_or(T default_result) && noexcept { if (t_.state() == Some) { return t_.take_and_set_none(); } else { return default_result; } } /// Returns the contained value inside the Option, if there is one. /// Otherwise, returns the result of the given function. template <class Functor> requires(std::is_same_v<std::invoke_result_t<Functor>, T>) T unwrap_or_else(Functor f) && noexcept { if (t_.state() == Some) { return t_.take_and_set_none(); } else { return f(); } } /// Returns the contained value inside the Option, if there is one. /// Otherwise, constructs a default value for the type and returns that. /// /// The Option's contained type `T` must be #MakeDefault, and will be /// constructed through that trait. T unwrap_or_default() && noexcept requires(::sus::construct::MakeDefault<T>) { if (t_.state() == Some) { return t_.take_and_set_none(); } else { return ::sus::construct::make_default<T>(); } } /// Stores the value `t` inside this Option, replacing any previous value, and /// returns a mutable reference to the new value. constexpr T& insert(T t) & noexcept { t_.set_some(static_cast<T&&>(t)); return t_.val_; } /// If the Option holds a value, returns a mutable reference to it. Otherwise, /// stores `t` inside the Option and returns a mutable reference to the new /// value. /// /// If it is non-trivial to construct `T`, the <get_or_insert_with>() method /// would be preferable, as it only constructs a `T` if needed. T& get_or_insert(T t) & noexcept { if (t_.state() == None) t_.construct_from_none(static_cast<T&&>(t)); return t_.val_; } /// If the Option holds a value, returns a mutable reference to it. Otherwise, /// constructs a default value `T`, stores it inside the Option and returns a /// mutable reference to the new value. /// /// This method differs from <unwrap_or_default>() in that it does not consume /// the Option, and instead it can not be called on rvalues. /// /// This is a shorthand for /// `Option<T>::get_or_insert_default(MakeDefault<T>::make_default)`. /// /// The Option's contained type `T` must be #MakeDefault, and will be /// constructed through that trait. T& get_or_insert_default() & noexcept requires(::sus::construct::MakeDefault<T>) { if (t_.state() == None) t_.construct_from_none(::sus::construct::make_default<T>()); return t_.val_; } /// If the Option holds a value, returns a mutable reference to it. 
Otherwise, /// constructs a `T` by calling `f`, stores it inside the Option and returns a /// mutable reference to the new value. /// /// This method differs from <unwrap_or_else>() in that it does not consume /// the Option, and instead it can not be called on rvalues. template <class WithFn> requires(std::is_same_v<std::invoke_result_t<WithFn>, T>) T& get_or_insert_with(WithFn f) & noexcept { if (t_.state() == None) t_.construct_from_none(f()); return t_.val_; } /// Returns a new Option containing whatever was inside the current Option. /// /// If this Option contains #None then it is left unchanged and returns an /// Option containing #None. If this Option contains #Some with a value, the /// value is moved into the returned Option and this Option will contain #None /// afterward. Option take() & noexcept { if (t_.state() == Some) return Option(t_.take_and_set_none()); else return Option::none(); } /// Maps the Option's value through a function. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` in state #None if the current Option is in state /// #None. template <class MapFn, int&..., class R = std::invoke_result_t<MapFn, T&&>> requires(!std::is_void_v<R>) Option<R> map(MapFn m) && noexcept { if (t_.state() == Some) { return Option<R>(m(t_.take_and_set_none())); } else { return Option<R>::none(); } } /// Maps the Option's value through a function, or returns a default value. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` with the `default_result` as its value if the /// current Option's state is #None. template <class MapFn, class D, int&..., class R = std::invoke_result_t<MapFn, T&&>> requires(!std::is_void_v<R> && std::is_same_v<D, R>) Option<R> map_or(D default_result, MapFn m) && noexcept { if (t_.state() == Some) { return Option<R>(m(t_.take_and_set_none())); } else { return Option<R>(static_cast<R&&>(default_result)); } } /// Maps the Option's value through a function, or returns a default value /// constructed from the default function. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` with the result of calling `default_fn` as its /// value if the current Option's state is #None. template <class DefaultFn, class MapFn, int&..., class D = std::invoke_result_t<DefaultFn>, class R = std::invoke_result_t<MapFn, T&&>> requires(!std::is_void_v<R> && std::is_same_v<D, R>) Option<R> map_or_else(DefaultFn default_fn, MapFn m) && noexcept { if (t_.state() == Some) { return Option<R>(m(t_.take_and_set_none())); } else { return Option<R>(default_fn()); } } /// Consumes the Option and applies a predicate function to the value /// contained in the Option. Returns a new Option with the same value if the /// predicate returns true, otherwise returns an Option with its state set to /// #None. /// /// The predicate function must take `const T&` and return `bool`. template <class Predicate> requires(std::is_same_v<std::invoke_result_t<Predicate, const T&>, bool>) Option<T> filter(Predicate p) && noexcept { if (t_.state() == Some) { if (p(const_cast<const T&>(t_.val_))) { return Option(t_.take_and_set_none()); } else { // The state has to become None, and we must destroy the inner T. 
t_.set_none(); return Option::none(); } } else { return Option::none(); } } /// Consumes this Option and returns an Option with #None if this Option holds /// #None, otherwise returns the given `opt`. template <class U> Option<U> and_opt(Option<U> opt) && noexcept { if (t_.state() == Some) { t_.set_none(); return opt; } else { return Option<U>::none(); } } /// Consumes this Option and returns an Option with #None if this Option holds /// #None, otherwise calls `f` with the contained value and returns an Option /// with the result. /// /// Some languages call this operation flatmap. template < class AndFn, int&..., class R = std::invoke_result_t<AndFn, T&&>, class InnerR = ::sus::option::__private::IsOptionType<R>::inner_type> requires(::sus::option::__private::IsOptionType<R>::value) Option<InnerR> and_then(AndFn f) && noexcept { if (t_.state() == Some) return f(t_.take_and_set_none()); else return Option<InnerR>::none(); } /// Consumes and returns an Option with the same value if this Option contains /// a value, otherwise returns the given `opt`. Option<T> or_opt(Option<T> opt) && noexcept { if (t_.state() == Some) return Option(t_.take_and_set_none()); else return opt; } /// Consumes and returns an Option with the same value if this Option contains /// a value, otherwise returns the Option returned by `f`. template <class ElseFn, int&..., class R = std::invoke_result_t<ElseFn>> requires(std::is_same_v<R, Option<T>>) Option<T> or_else(ElseFn f) && noexcept { if (t_.state() == Some) return Option(t_.take_and_set_none()); else return static_cast<ElseFn&&>(f)(); } /// Consumes this Option and returns an Option, holding the value from either /// this Option `opt`, if exactly one of them holds a value, otherwise returns /// an Option that holds #None. Option<T> xor_opt(Option<T> opt) && noexcept { if (t_.state() == Some) { // If `this` holds Some, we change `this` to hold None. If `opt` is None, // we return what this was holding, otherwise we return None. if (opt.t_.state() == None) { return Option(t_.take_and_set_none()); } else { t_.set_none(); return Option::none(); } } else { // If `this` holds None, we need to do nothing to `this`. If `opt` is Some // we would return its value, and if `opt` is None we should return None. return opt; } } /// Transforms the `Option<T>` into a `Result<T, E>`, mapping `Some(v)` to /// `Ok(v)` and `None` to `Err(e)`. /// /// Arguments passed to #ok_or are eagerly evaluated; if you are passing the /// result of a function call, it is recommended to use ok_or_else, which is /// lazily evaluated. template <class E, int&..., class Result = ::sus::result::Result<T, E>> inline Result ok_or(E e) && noexcept { if (t_.state() == Some) return Result::with(t_.take_and_set_none()); else return Result::with_err(static_cast<E&&>(e)); } /// Transforms the `Option<T>` into a `Result<T, E>`, mapping `Some(v)` to /// `Ok(v)` and `None` to `Err(f())`. template <class ElseFn, int&..., class E = std::invoke_result_t<ElseFn>, class Result = ::sus::result::Result<T, E>> inline Result ok_or_else(ElseFn f) && noexcept { if (t_.state() == Some) return Result::with(t_.take_and_set_none()); else return Result::with_err(static_cast<ElseFn&&>(f)()); } /// Zips self with another Option. /// /// If self is `Some(s)` and other is `Some(o)`, this method returns `Some((s, /// o))`. Otherwise, `None` is returned. 
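// An illustrative sketch (not part of the library) of the mapping and chaining methods
// documented above; the lambdas are hypothetical:
//
// ```
// auto n = Option<int>::some(2).map([](int i) { return i + 1; });            // Some(3).
// auto f = Option<int>::some(2).filter([](const int& i) { return i > 9; });  // None.
// auto half = [](int i) {
//   return i % 2 == 0 ? Option<int>::some(i / 2) : Option<int>::none();
// };
// auto h = Option<int>::some(8).and_then(half);  // Some(4).
// auto r = Option<int>::some(3).ok_or("odd");    // A Result<int, const char*> holding Ok(3).
// ```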
template <class U, int&..., class Tuple = ::sus::tuple::Tuple<T, U>> inline Option<Tuple> zip(Option<U> o) && noexcept { if (o.t_.state() == None) { if (t_.state() == Some) t_.set_none(); return Option<Tuple>::none(); } else if (t_.state() == None) { return Option<Tuple>::none(); } else { return Option<Tuple>::some(Tuple::with( t_.take_and_set_none(), static_cast<Option<U>&&>(o).unwrap())); } } /// Transposes an #Option of a #Result into a #Result of an #Option. /// /// `None` will be mapped to `Ok(None)`. `Some(Ok(_))` and `Some(Err(_))` will /// be mapped to `Ok(Some(_))` and `Err(_)`. template <int&..., class OkType = typename ::sus::result::__private::IsResultType<T>::ok_type, class ErrType = typename ::sus::result::__private::IsResultType<T>::err_type, class Result = ::sus::result::Result<Option<OkType>, ErrType>> requires(::sus::result::__private::IsResultType<T>::value) inline Result transpose() && noexcept { if (t_.state() == None) { return Result::with(Option<OkType>::none()); } else { if (t_.val_.is_ok()) { return Result::with(Option<OkType>::some( t_.take_and_set_none().unwrap_unchecked(unsafe_fn))); } else { return Result::with_err( t_.take_and_set_none().unwrap_err_unchecked(unsafe_fn)); } } } /// Replaces whatever the Option is currently holding with #Some value `t` and /// returns an Option holding what was there previously. Option replace(T t) & noexcept { if (t_.state() == None) { t_.construct_from_none(static_cast<T&&>(t)); return Option::none(); } else { return Option(t_.replace_some(static_cast<T&&>(t))); } } /// Maps an `Option<Option<T>>` to an `Option<T>`. T flatten() && noexcept requires(::sus::option::__private::IsOptionType<T>::value) { if (t_.state() == Some) return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); else return T::none(); } /// Returns an Option<const T&> from this Option<T>, that either holds #None /// or a reference to the value in this Option. Option<const T&> as_ref() const& noexcept { if (t_.state() == None) return Option<const T&>::none(); else return Option<const T&>(t_.val_); } Option<const T&> as_ref() && noexcept = delete; /// Returns an Option<T&> from this Option<T>, that either holds #None or a /// reference to the value in this Option. Option<T&> as_mut() & noexcept { if (t_.state() == None) return Option<T&>::none(); else return Option<T&>(t_.val_); } constexpr Iterator<Once<const T&>> iter() const& noexcept { return Iterator<Once<const T&>>(as_ref()); } Iterator<Once<const T&>> iter() const&& = delete; constexpr Iterator<Once<T&>> iter_mut() & noexcept { return Iterator<Once<T&>>(as_mut()); } constexpr Iterator<Once<T>> into_iter() && noexcept { return Iterator<Once<T>>(take()); } private: template <class U> friend class Option; /// Constructor for #None. constexpr explicit Option() = default; /// Constructor for #Some. constexpr explicit Option(const T& t) : t_(t) {} constexpr explicit Option(T&& t) : t_(static_cast<T&&>(t)) {} Storage<T> t_; sus_class_maybe_trivial_relocatable_types(unsafe_fn, T); }; /// Implementation of Option for a reference type. template <class T> class Option<T&> final { public: /// Construct an Option that is holding the given value. static inline constexpr Option some(T& t) noexcept requires(std::is_const_v<T>) // Require mref() for mutable references. { return Option(t); } /// Construct an Option that is holding the given value. static inline constexpr Option some(Mref<std::remove_const_t<T>> t) noexcept { return Option(t); } /// Construct an Option that is holding no value. 
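// An illustrative sketch (not part of the library) of zip() and replace() on the value-type
// Option above:
//
// ```
// auto t = Option<int>::some(1).zip(Option<int>::some(2));  // Holds a Tuple of (1, 2).
// auto u = Option<int>::some(1).zip(Option<int>::none());   // None.
// auto prev = Option<int>::some(1);
// auto old = prev.replace(9);  // prev now holds 9; old holds 1.
// ```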
static inline constexpr Option none() noexcept { return Option(); } /// Destructor for the Option. /// /// This is a no-op for references. constexpr ~Option() noexcept = default; // References can be trivially copied and moved, so we use the default // constructors and operators. constexpr Option(const Option& o) = default; constexpr Option(Option&& o) = default; constexpr Option& operator=(const Option& o) = default; constexpr Option& operator=(Option&& o) = default; /// Drop the current value from the Option, if there is one. /// /// Afterward the option will unconditionally be #None. void clear() & noexcept { if (t_.state() == Some) t_.set_none(); } /// Returns whether the Option currently contains a value. /// /// If there is a value present, it can be extracted with <unwrap>() or /// <expect>(). bool is_some() const noexcept { return t_.state() == Some; } /// Returns whether the Option is currently empty, containing no value. bool is_none() const noexcept { return t_.state() == None; } /// An operator which returns the state of the Option, either #Some or #None. /// /// This supports the use of an Option in a `switch()`, allowing it to act as /// a tagged union between "some value" and "no value". /// /// # Example /// /// ```cpp /// auto x = Option<int>::some(2); /// switch (x) { /// case Some: /// return sus::move(x).unwrap_unchecked(unsafe_fn); /// case None: /// return -1; /// } /// ``` operator State() const& { return t_.state(); } /// Returns the contained value inside the Option. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn T& expect( /* TODO: string view type */ sus_nonnull_arg const char* msg) && noexcept { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); } /// Returns a const reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_ref().expect()`. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn const T& expect_ref( /* TODO: string view type */ sus_nonnull_arg const char* msg) const& noexcept { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return t_.val_.as_ref(); } /// Returns a mutable reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_mut().expect()`. /// /// The function will panic with the given message if the Option's state is /// currently `None`. constexpr sus_nonnull_fn T& expect_mut( /* TODO: string view type */ sus_nonnull_arg const char* msg) noexcept requires(!std::is_const_v<T>) { if (!std::is_constant_evaluated()) ::sus::check_with_message(t_.state() == Some, *msg); return t_.val_.as_mut(); } /// Returns the contained value inside the Option. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr T& unwrap() && noexcept { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); } /// Returns the contained value inside the Option. /// /// # Safety /// /// It is Undefined Behaviour to call this function when the Option's state is /// `None`. The caller is responsible for ensuring the Option contains a value /// beforehand, and the safer <unwrap>() or <expect>() should almost always be /// preferred. 
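// An illustrative sketch (not part of the library) of the reference specialization below, which
// stores a pointer to the referenced value:
//
// ```
// int i = 5;
// auto r = Option<int&>::some(mref(i));   // Mutable references are passed via mref().
// r.unwrap_mut() += 1;                    // Writes through the reference; i == 6.
// const int c = 7;
// auto cr = Option<const int&>::some(c);  // Const references are passed directly.
// ```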
constexpr inline T& unwrap_unchecked( ::sus::marker::UnsafeFnMarker) && noexcept { return get_ref(t_.take_and_set_none()); } /// Returns a const reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_ref().unwrap()`. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr const T& unwrap_ref() const& noexcept { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return t_.val_.as_ref(); } /// Returns a mutable reference to the contained value inside the Option. /// /// This is a shortcut for `option.as_mut().unwrap()`. /// /// The function will panic without a message if the Option's state is /// currently `None`. constexpr T& unwrap_mut() noexcept requires(!std::is_const_v<T>) { if (!std::is_constant_evaluated()) ::sus::check(t_.state() == Some); return t_.val_.as_mut(); } /// Returns the contained value inside the Option, if there is one. Otherwise, /// returns `default_result`. /// /// Note that if it is non-trivial to construct a `default_result`, that /// <unwrap_or_else>() should be used instead, as it will only construct the /// default value if required. /// /// Not constexpr because it's not possible T& unwrap_or(T& default_result) && noexcept { if (t_.state() == Some) { return get_ref(t_.take_and_set_none()); } else return default_result; } /// Returns the contained value inside the Option, if there is one. /// Otherwise, returns the result of the given function. template <class Functor> requires(std::is_same_v<std::invoke_result_t<Functor>, T&>) T& unwrap_or_else(Functor f) && noexcept { if (t_.state() == Some) return get_ref(t_.take_and_set_none()); else return f(); } /// Stores the value `t` inside this Option, replacing any previous value, and /// returns a mutable reference to the new value. T& insert(T& t) noexcept { t_.set_some(StoragePointer(t)); return t_.val_.as_mut(); } /// If the Option holds a value, returns a mutable reference to it. Otherwise, /// stores `t` inside the Option and returns a mutable reference to the new /// value. T& get_or_insert(T& t) noexcept { if (t_.state() == None) t_.construct_from_none(StoragePointer(t)); return t_.val_.as_mut(); } /// If the Option holds a value, returns a mutable reference to it. Otherwise, /// constructs a `T` by calling `f`, stores it inside the Option and returns a /// mutable reference to the new value. /// /// This method differs from <unwrap_or_else>() in that it does not consume /// the Option, and instead it can not be called on rvalues. template <class WithFn> requires(std::is_same_v<std::invoke_result_t<WithFn>, T&>) T& get_or_insert_with(WithFn f) noexcept { if (t_.state() == None) t_.construct_from_none(StoragePointer(f())); return t_.val_.as_mut(); } /// Returns a new Option containing whatever was inside the current Option. /// /// If this Option contains #None then it is left unchanged and returns an /// Option containing #None. If this Option contains #Some with a value, the /// value is moved into the returned Option and this Option will contain #None /// afterward. Option take() noexcept { if (t_.state() == Some) return Option(get_ref(t_.take_and_set_none())); else return Option::none(); } /// Maps the Option's value through a function. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` in state #None if the current Option is in state /// #None. 
template <class MapFn, int&..., class R = std::invoke_result_t<MapFn, T&>> requires(!std::is_void_v<R>) Option<R> map(MapFn m) && noexcept { if (t_.state() == Some) return Option<R>::some(m(get_ref(t_.take_and_set_none()))); else return Option<R>::none(); } /// Maps the Option's value through a function, or returns a default value. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` with the `default_result` as its value if the /// current Option's state is #None. template <class MapFn, class D, int&..., class R = std::invoke_result_t<MapFn, T&>> requires(!std::is_void_v<R> && std::is_same_v<D, R>) Option<R> map_or(D default_result, MapFn m) && noexcept { if (t_.state() == Some) return Option<R>(m(get_ref(t_.take_and_set_none()))); else return Option<R>(static_cast<R&&>(default_result)); } /// Maps the Option's value through a function, or returns a default value /// constructed from the default function. /// /// Consumes the Option, passing the value through the map function, and /// returning an `Option<R>` where `R` is the return type of the map function. /// /// Returns an `Option<R>` with the result of calling `default_fn` as its /// value if the current Option's state is #None. template <class DefaultFn, class MapFn, int&..., class D = std::invoke_result_t<DefaultFn>, class R = std::invoke_result_t<MapFn, T&>> requires(!std::is_void_v<R> && std::is_same_v<D, R>) Option<R> map_or_else(DefaultFn default_fn, MapFn m) && noexcept { if (t_.state() == Some) return Option<R>(m(get_ref(t_.take_and_set_none()))); else return Option<R>(default_fn()); } /// Consumes the Option and applies a predicate function to the value /// contained in the Option. Returns a new Option with the same value if the /// predicate returns true, otherwise returns an Option with its state set to /// #None. /// /// The predicate function must take `const T&` and return `bool`. template <class Predicate> requires(std::is_same_v<std::invoke_result_t<Predicate, const T&>, bool>) Option<T&> filter(Predicate p) && noexcept { if (t_.state() == Some) { // The state must move to None. StoragePointer ptr = t_.take_and_set_none(); if (p(const_cast<const T&>(ptr.as_ref()))) return Option(get_ref(static_cast<decltype(ptr)&&>(ptr))); else return Option::none(); } else { return Option::none(); } } /// Consumes this Option and returns an Option with #None if this Option holds /// #None, otherwise returns the given `opt`. template <class U> Option<U> and_opt(Option<U> opt) && noexcept { if (t_.state() == Some) { t_.set_none(); return opt; } else { return Option<U>::none(); } } /// Consumes this Option and returns an Option with #None if this Option holds /// #None, otherwise calls `f` with the contained value and returns an Option /// with the result. /// /// Some languages call this operation flatmap. template < class AndFn, int&..., class R = std::invoke_result_t<AndFn, T&>, class InnerR = ::sus::option::__private::IsOptionType<R>::inner_type> requires(::sus::option::__private::IsOptionType<R>::value) Option<InnerR> and_then(AndFn f) && noexcept { if (t_.state() == Some) return f(get_ref(t_.take_and_set_none())); else return Option<InnerR>::none(); } /// Consumes and returns an Option with the same value if this Option contains /// a value, otherwise returns the given `opt`. 
Option<T&> or_opt(Option<T&> opt) && noexcept { if (t_.state() == Some) return Option(get_ref(t_.take_and_set_none())); else return opt; } /// Consumes and returns an Option with the same value if this Option contains /// a value, otherwise returns the Option returned by `f`. template <class ElseFn, int&..., class R = std::invoke_result_t<ElseFn>> requires(std::is_same_v<R, Option<T&>>) Option<T&> or_else(ElseFn f) && noexcept { if (t_.state() == Some) return Option(get_ref(t_.take_and_set_none())); else return f(); } /// Consumes this Option and returns an Option, holding the value from either /// this Option `opt`, if exactly one of them holds a value, otherwise returns /// an Option that holds #None. Option<T&> xor_opt(Option<T&> opt) && noexcept { if (t_.state() == Some) { // If `this` holds Some, we change `this` to hold None. If `opt` is None, // we return what this was holding, otherwise we return None. auto nonnull = t_.take_and_set_none(); if (opt.t_.state() == None) return Option(nonnull.as_mut()); else return Option::none(); } else { // If `this` holds None, we need to do nothing to `this`. If `opt` is Some // we would return its value, and if `opt` is None we should return None. return opt; } } /// Transforms the `Option<T>` into a `Result<T, E>`, mapping `Some(v)` to /// `Ok(v)` and `None` to `Err(e)`. /// /// Arguments passed to #ok_or are eagerly evaluated; if you are passing the /// result of a function call, it is recommended to use ok_or_else, which is /// lazily evaluated. template <class E, int&..., class Result = ::sus::result::Result<T&, E>> inline Result ok_or(E e) && noexcept { if (t_.state() == Some) return Result::with(get_ref(t_.take_and_set_none())); else return Result::with_err(static_cast<E&&>(e)); } /// Transforms the `Option<T>` into a `Result<T, E>`, mapping `Some(v)` to /// `Ok(v)` and `None` to `Err(f())`. template <class ElseFn, int&..., class E = std::invoke_result_t<ElseFn>, class Result = ::sus::result::Result<T&, E>> constexpr inline Result ok_or_else(ElseFn f) && noexcept { if (t_.state() == Some) return Result::with(get_ref(t_.take_and_set_none())); else return Result::with_err(static_cast<ElseFn&&>(f)()); } /// Zips self with another Option. /// /// If self is `Some(s)` and other is `Some(o)`, this method returns `Some((s, /// o))`. Otherwise, `None` is returned. template <class U, int&..., class Tuple = ::sus::tuple::Tuple<T&, U>> inline Option<Tuple> zip(Option<U> o) && noexcept { if (o.t_.state() == None) { t_.set_none(); return Option<Tuple>::none(); } else if (t_.state() == None) { return Option<Tuple>::none(); } else { return Option<Tuple>::some( Tuple::with(get_ref(t_.take_and_set_none()), static_cast<Option<U>&&>(o).unwrap())); } } /// Replaces whatever the Option is currently holding with #Some value `t` and /// returns an Option holding what was there previously. Option replace(T& t) noexcept { if (t_.state() == None) { t_.construct_from_none(StoragePointer(t)); return Option::none(); } else { return Option( ::sus::mem::replace(mref(t_.val_), StoragePointer(t)).as_mut()); } } /// Maps an `Option<T&>` to an `Option<T>` by copying the referenced `T`. Option<std::remove_const_t<T>> copied() && noexcept requires(std::is_nothrow_copy_constructible_v<T>) { if (t_.state() == None) return Option<std::remove_const_t<T>>::none(); else return Option<std::remove_const_t<T>>::some(t_.val_.as_ref()); } /// Maps an `Option<Option<T>>` to an `Option<T>`.
T flatten() && noexcept requires(::sus::option::__private::IsOptionType<T>::value) { if (t_.state() == Some) return static_cast<Option&&>(*this).unwrap_unchecked(unsafe_fn); else return T::none(); } /// Returns an Option<const T&> from this Option<const T&> or Option<T&>, that /// either holds #None or a const reference to the reference in this Option. Option<const T&> as_ref() const& noexcept { if (t_.state() == None) return Option<const T&>::none(); else return Option<const T&>(t_.val_.as_ref()); } /// Returns an Option<T&> that is a copy of this Option<T&>. Option<T&> as_mut() noexcept requires(!std::is_const_v<T>) { if (t_.state() == None) return Option<T&>::none(); else return Option<T&>(t_.val_.as_mut()); } Iterator<Once<const T&>> iter() const& noexcept { return Iterator<Once<const T&>>(as_ref()); } Iterator<Once<T&>> iter_mut() noexcept requires(!std::is_const_v<T>) { return Iterator<Once<T&>>(as_mut()); } Iterator<Once<T&>> into_iter() && noexcept { return Iterator<Once<T&>>(take()); } private: template <class U> friend class Option; /// Constructor for #None. constexpr explicit Option() = default; /// Constructor for #Some. constexpr explicit Option(T& t) : t_(StoragePointer(t)) {} /// Gets a reference with the same const-ness as the `T` in `Option<T&>`. constexpr sus_always_inline static T& get_ref( StoragePointer<T>&& ptr) noexcept { if constexpr (std::is_const_v<T>) return ptr.as_ref(); else return ptr.as_mut(); } Storage<StoragePointer<T>> t_; sus_class_maybe_trivial_relocatable_types(unsafe_fn, T&); }; /// sus::ops::Eq<Option<U>> trait. template <class T, class U> requires(::sus::ops::Eq<T, U>) constexpr inline bool operator==(const Option<T>& l, const Option<U>& r) noexcept { switch (l) { case Some: return r.is_some() && l.unwrap_ref() == r.unwrap_ref(); case None: return r.is_none(); } ::sus::unreachable_unchecked(unsafe_fn); } /// sus::ops::Ord<Option<U>> trait. template <class T, class U> requires(::sus::ops::ExclusiveOrd<T, U>) constexpr inline auto operator<=>(const Option<T>& l, const Option<U>& r) noexcept { switch (l) { case Some: if (r.is_some()) return l.unwrap_ref() <=> r.unwrap_ref(); else return std::strong_ordering::greater; case None: if (r.is_some()) return std::strong_ordering::less; else return std::strong_ordering::equivalent; } ::sus::unreachable_unchecked(unsafe_fn); } /// sus::ops::WeakOrd<Option<U>> trait. template <class T, class U> requires(::sus::ops::ExclusiveWeakOrd<T, U>) constexpr inline auto operator<=>(const Option<T>& l, const Option<U>& r) noexcept { switch (l) { case Some: if (r.is_some()) return l.unwrap_ref() <=> r.unwrap_ref(); else return std::weak_ordering::greater; case None: if (r.is_some()) return std::weak_ordering::less; else return std::weak_ordering::equivalent; } ::sus::unreachable_unchecked(unsafe_fn); } /// sus::ops::PartialOrd<Option<U>> trait. template <class T, class U> requires(::sus::ops::ExclusivePartialOrd<T, U>) constexpr inline auto operator<=>(const Option<T>& l, const Option<U>& r) noexcept { switch (l) { case Some: if (r.is_some()) return l.unwrap_ref() <=> r.unwrap_ref(); else return std::partial_ordering::greater; case None: if (r.is_some()) return std::partial_ordering::less; else return std::partial_ordering::equivalent; } ::sus::unreachable_unchecked(unsafe_fn); } // Implicit for-ranged loop iteration via `Array::iter()`. using sus::iter::__private::begin; using sus::iter::__private::end; } // namespace sus::option // Promote Option and its enum values into the `sus` namespace. 
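// An illustrative sketch (not part of the library) of the comparison operators defined above
// for Option:
//
// ```
// auto a = Option<int>::some(1);
// auto b = Option<int>::some(2);
// // a == b is false; Option<int>::none() == Option<int>::none() is true.
// // (a <=> b) == std::strong_ordering::less, and a Some Option compares greater than None.
// ```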
namespace sus { using ::sus::option::None; using ::sus::option::Option; using ::sus::option::Some; } // namespace sus namespace sus::fn::callable { template <class T> concept FunctionPointer = requires(T t) { { std::is_pointer_v<decltype(+t)> }; }; // clang-format off template <class T, class R, class... Args> concept FunctionPointerReturns = ( FunctionPointer<T> && requires (T t, Args&&... args) { { t(forward<Args>(args)...) } -> std::convertible_to<R>; } ); // clang-format on // clang-format off template <class T, class... Args> concept FunctionPointerWith = ( FunctionPointer<T> && requires (T t, Args&&... args) { t(forward<Args>(args)...); } ); // clang-format on namespace __private { template <class T, class R, class... Args> inline constexpr bool callable_const(R (T::*)(Args...) const) { return true; }; template <class T, class R, class... Args> inline constexpr bool callable_mut(R (T::*)(Args...)) { return true; }; } // namespace __private // clang-format off template <class T, class R, class... Args> concept CallableObjectReturnsConst = ( !FunctionPointer<T> && requires (const T& t, Args&&... args) { { t(forward<Args>(args)...) } -> std::convertible_to<R>; } ); template <class T, class... Args> concept CallableObjectWithConst = ( !FunctionPointer<T> && requires (const T& t, Args&&... args) { t(forward<Args>(args)...); } ); template <class T, class R, class... Args> concept CallableObjectReturnsMut = ( !FunctionPointer<T> && requires (T& t, Args&&... args) { { t(forward<Args>(args)...) } -> std::convertible_to<R>; } ); template <class T, class... Args> concept CallableObjectWithMut = ( !FunctionPointer<T> && requires (T& t, Args&&... args) { t(forward<Args>(args)...); } ); // clang-format on template <class T, class R, class... Args> concept CallableObjectReturns = CallableObjectReturnsConst<T, R, Args...> || CallableObjectReturnsMut<T, R, Args...>; template <class T, class... Args> concept CallableObjectWith = CallableObjectWithConst<T, Args...> || CallableObjectWithMut<T, Args...>; template <class T> concept CallableObjectConst = __private::callable_const(&T::operator()); template <class T> concept CallableObjectMut = CallableObjectConst<T> || __private::callable_mut(&T::operator()); template <class T, class... Args> concept CallableWith = FunctionPointerWith<T, Args...> || CallableObjectWith<T, Args...>; template <class T, class R, class... Args> concept CallableReturns = FunctionPointerReturns<T, R, Args...> || CallableObjectReturns<T, R, Args...>; } // namespace sus::fn::callable /// sus_for_each() will apply `macro` to each argument in the variadic argument /// list, putting the output of `sep()` between each one. /// /// The `sep` should be one of sus_for_each_sep_XYZ() macros, or a function /// macro that returns a separator. #define sus_for_each(macro, sep, ...) \ __VA_OPT__( \ _sus__for_each_expand(_sus__for_each_helper(macro, sep, __VA_ARGS__))) #define sus_for_each_sep_comma() , #define sus_for_each_sep_none() // Private helpers. #define _sus__for_each_helper(macro, sep, a1, ...) \ macro(a1) __VA_OPT__(sep()) __VA_OPT__( \ _sus__for_each_again _sus__for_each_parens(macro, sep, __VA_ARGS__)) #define _sus__for_each_parens () #define _sus__for_each_again() _sus__for_each_helper #define _sus__for_each_expand(...) \ _sus__for_each_expand1(_sus__for_each_expand1( \ _sus__for_each_expand1(_sus__for_each_expand1(__VA_ARGS__)))) #define _sus__for_each_expand1(...) __VA_ARGS__ // TODO: Provide a different for_each version that can handle lots of args if // needed. 
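// An illustrative sketch (not part of the library) of the callable concepts above; the function
// and lambda here are hypothetical:
//
// ```
// int add_one(int i) { return i + 1; }
// static_assert(sus::fn::callable::FunctionPointer<decltype(&add_one)>);
// auto mul = [factor = 2](int i) { return i * factor; };  // Capturing lambda: a callable object.
// static_assert(sus::fn::callable::CallableWith<decltype(mul), int>);
// static_assert(sus::fn::callable::CallableReturns<decltype(mul), int, int>);
// ```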
/*
#define _sus__for_each_expand(...) \
  _sus__for_each_expand4(_sus__for_each_expand4( \
      _sus__for_each_expand4(_sus__for_each_expand4(__VA_ARGS__))))
#define _sus__for_each_expand4(...) \
  _sus__for_each_expand3(_sus__for_each_expand3( \
      _sus__for_each_expand3(_sus__for_each_expand3(__VA_ARGS__))))
#define _sus__for_each_expand3(...) \
  _sus__for_each_expand2(_sus__for_each_expand2( \
      _sus__for_each_expand2(_sus__for_each_expand2(__VA_ARGS__))))
#define _sus__for_each_expand2(...) \
  _sus__for_each_expand1(_sus__for_each_expand1( \
      _sus__for_each_expand1(_sus__for_each_expand1(__VA_ARGS__))))
#define _sus__for_each_expand1(...) __VA_ARGS__
*/

// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

/// Remove parentheses around one or more arguments, if they are present.
///
/// It performs the following transformations.
/// x => x
/// (x) => x
/// (x, y) => x, y
///
/// Based on: https://stackoverflow.com/a/62984543
#define sus_remove_parens(x) _sus__remove_inner_rename(_sus__remove_inner x)

// Step 1: If the input had brackets, now it no longer does. The result will
// always be `_sus__remove_inner x` at the end.
#define _sus__remove_inner(...) _sus__remove_inner __VA_ARGS__
// Step 2: Now that `x` has no parentheses, expand `x` into all of its
// arguments, which we denote `x...`.
#define _sus__remove_inner_rename(...) _sus__remove_inner_rename_(__VA_ARGS__)
// Step 3: Concat to the start of the content, resulting in
// `_sus__remove_outer_sus__remove_inner x...`.
#define _sus__remove_inner_rename_(...) _sus__remove_outer##__VA_ARGS__
// Step 4: Remove the `_sus__remove_outer_sus__remove_inner`, leaving just
// `x...`.
#define _sus__remove_outer_sus__remove_inner

/// Bind a const lambda to storage for its bound arguments. The output can be
/// used to construct a FnOnce, FnMut, or Fn.
///
/// The first argument is a list of variables that will be bound into storage
/// for access from the lambda, wrapped in sus_store(). If there are no
/// variables to mention, sus_store() can be empty, or use the sus_bind0() macro
/// which omits this list.
///
/// The second argument is a lambda, which can include captures. Any captures of
/// variables outside the lambda must be referenced in the sus_store() list.
///
/// Use `sus_take(x)` in the sus_store() list to move `x` into storage instead
/// of copying it.
///
/// Use `sus_unsafe_pointer(x)` to store a pointer named `x`. This is dangerous
/// and discouraged, and using smart pointers is strongly preferred.
///
/// # Example
///
/// This binds a lambda with 3 captures, the first two being variables from the
/// outside scope. The second variable is used as a reference to the storage,
/// rather than copying or moving it into the lambda.
/// ```
/// sus_bind(sus_store(a, b), [a, &b, c = 1]() {})
/// ```
///
/// # Implementation note
/// The lambda may arrive in multiple arguments, if there is a comma in the
/// definition of it. Thus we use variadic arguments to capture all of the
/// lambda.
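///
/// # Usage sketch
///
/// An illustrative example (not from the library's own docs) of feeding the
/// bound result to one of the closure types declared later in this file; the
/// capture `&a` refers to the stored copy of `a` named in sus_store():
/// ```
/// int a = 1;
/// sus::fn::Fn<int(int)> f =
///     sus_bind(sus_store(a), [&a](int b) { return a + b; });
/// ```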
#define sus_bind(names, lambda, ...) \ [&]() { \ [&]() consteval {sus_for_each(_sus__check_storage, sus_for_each_sep_none, \ _sus__unpack names)}(); \ using ::sus::fn::__private::SusBind; \ return SusBind( \ [sus_for_each(_sus__declare_storage, sus_for_each_sep_comma, \ _sus__unpack names)]<class... Args>(Args&&... args) { \ const auto x = lambda __VA_OPT__(, ) __VA_ARGS__; \ const bool is_const = \ ::sus::fn::callable::CallableObjectConst<decltype(x)>; \ if constexpr (!is_const) { \ return ::sus::fn::__private::CheckCallableObjectConst< \ is_const>::template error<void>(); \ } else { \ return x(::sus::forward<Args>(args)...); \ } \ }); \ }() /// A variant of `sus_bind()` which only takes a lambda, omitting the /// `sus_store()` list. The output can be used to construct a FnOnce, FnMut, or /// Fn. /// /// Because there is no `sus_store()` list, the lambda can not capture variables /// from the outside scope, however it can still declare captures contained /// entirely inside the lambda. /// /// # Example /// /// This defines a lambda with a capture `a` of type `int`, and binds it so it /// can be used to construct a FnOnce, FnMut, or Fn. /// ``` /// sus_bind0([a = int(1)](char, int){}) /// ``` #define sus_bind0(lambda, ...) \ sus_bind(sus_store(), lambda __VA_OPT__(, ) __VA_ARGS__) /// Bind a mutable lambda to storage for its bound arguments. The output can be /// used to construct a FnOnce or FnMut. /// /// Because the storage is mutable, the lambda may capture references to the /// storage and mutate it, and the lambda itself may be marked mutable. /// /// The first argument is a list of variables that will be bound into storage /// for access from the lambda, wrapped in sus_store(). If there are no /// variables to mention, sus_store() can be empty, or use the sus_bind0() macro /// which omits this list. /// /// The second argument is a lambda, which can include captures. Any captures of /// variables outside the lambda must be referenced in the sus_store() list. /// /// Use `sus_take(x)` in the sus_store() list to move `x` into storage instead /// of copying it. /// /// Use `sus_unsafe_pointer(x)` to store a pointer named `x`. This is dangerous /// and discouraged, and using smart pointers is strongly preferred. /// /// # Example /// /// This binds a lambda with 3 captures, the first two being variables from the /// outside scope. The second varible is used as a reference to the storage, /// rather that copying or moving it into the lambda. /// ``` /// sus_bind_mut(sus_store(a, b), [a, &b, c = 1]() {})) /// ``` /// /// # Implementation note The lambda may arrive in multiple arguments, if there /// is a comma in the definition of it. Thus we use variadic arguments to /// capture all of the lambda. #define sus_bind_mut(names, lambda, ...) \ [&]() { \ [&]() consteval {sus_for_each(_sus__check_storage, sus_for_each_sep_none, \ _sus__unpack names)}(); \ return ::sus::fn::__private::SusBind( \ [sus_for_each(_sus__declare_storage_mut, sus_for_each_sep_comma, \ _sus__unpack names)]<class... Args>( \ Args&&... args) mutable { \ auto x = lambda __VA_OPT__(, ) __VA_ARGS__; \ return x(::sus::mem::forward<Args>(args)...); \ }); \ }() /// A variant of `sus_bind_mut()` which only takes a lambda, omitting the /// `sus_store()` list. The output can be used to construct a FnOnce or FnMut. /// /// Because there is no `sus_store()` list, the lambda can not capture variables /// from the outside scope, however it can still declare captures contained /// entirely inside the lambda. 
/// /// Can be used with a mutable lambda that can mutate its captures. /// /// # Example /// /// This defines a lambda with a capture `a` of type `int`, and binds it so it /// can be used to construct a FnOnce, FnMut, or Fn. /// ``` /// sus_bind0_mut([a = int(1)](char, int){}) /// ``` #define sus_bind0_mut(lambda, ...) \ sus_bind_mut(sus_store(), lambda __VA_OPT__(, ) __VA_ARGS__) /// Declares the set of captures from the outside scope in `sus_bind()` or /// `sus_bind_mut()`. #define sus_store(...) (__VA_ARGS__) /// Marks a capture in the `sus_store()` list to be moved from the outside scope /// instead of copied. #define sus_take(x) (x, _sus__bind_move) /// Marks a capture in the `sus_store()` list as a pointer which is being /// intentionally and unsafely captured. Otherwise, pointers are not allowed to /// be captured. #define sus_unsafe_pointer(x) (x, _sus__bind_pointer) namespace sus::fn::__private { /// Helper type returned by sus_bind() and used to construct a closure. template <class F> struct SusBind final { sus_clang_bug_54040(constexpr inline SusBind(F&& lambda) : lambda(static_cast<F&&>(lambda)){}) /// The lambda generated by sus_bind() which holds the user-provided lambda /// and any storage required for it. F lambda; }; // The type generated by sus_unsafe_pointer() for storage in sus_bind(). template <class T> struct UnsafePointer; template <class T> struct UnsafePointer<T*> final { sus_clang_bug_54040(constexpr inline UnsafePointer( ::sus::marker::UnsafeFnMarker, T* pointer) : pointer(pointer) {}) [[no_unique_address]] ::sus::marker::UnsafeFnMarker marker; T* pointer; }; template <class T> UnsafePointer(::sus::marker::UnsafeFnMarker, T*) -> UnsafePointer<T*>; template <class T> auto make_storage(T&& t) { return std::decay_t<T>(forward<T>(t)); } template <class T> auto make_storage(T* t) { static_assert(!std::is_pointer_v<T*>, "Can not store a pointer in sus_bind() except through " "sus_unsafe_pointer()."); } template <class T> auto make_storage(UnsafePointer<T*> p) { return static_cast<const T*>(p.pointer); } template <class T> auto make_storage_mut(T&& t) { return std::decay_t<T>(forward<T>(t)); } template <class T> auto make_storage_mut(T* t) { make_storage(t); } template <class T> auto make_storage_mut(UnsafePointer<T*> p) { return p.pointer; } // Verifies the input is an lvalue (a name), so we can bind it to that same // lvalue name in the resuling lambda. template <class T> std::true_type is_lvalue(T&); std::false_type is_lvalue(...); /// Helper used when verifying if a lambda is const. The template parameter /// represents the constness of the lambda. When false, the error() function /// generates a compiler error. template <bool = true> struct CheckCallableObjectConst final { template <class U> static constexpr inline auto error() {} }; template <> struct CheckCallableObjectConst<false> final { template <class U> static consteval inline auto error() { throw "Use sus_bind_mut() to bind a mutable lambda"; } }; } // namespace sus::fn::__private // Private helper. #define _sus__declare_storage(x) \ _sus__macro(_sus__declare_storage_impl, sus_remove_parens(x), _sus__bind_noop) #define _sus__declare_storage_impl(x, modify, ...) \ x = ::sus::fn::__private::make_storage(modify(x)) #define _sus__declare_storage_mut(x) \ _sus__macro(_sus__declare_storage_impl_mut, sus_remove_parens(x), \ _sus__bind_noop) #define _sus__declare_storage_impl_mut(x, modify, ...) \ x = ::sus::fn::__private::make_storage_mut(modify(x)) #define _sus__check_storage(x, ...) 
\ _sus__macro(_sus__check_storage_impl, sus_remove_parens(x), _sus__bind_noop) #define _sus__check_storage_impl(x, modify, ...) \ static_assert(decltype(::sus::fn::__private::is_lvalue(x))::value, \ "sus_bind() can only bind to variable names (lvalues)."); #define _sus__bind_noop(x) x #define _sus__bind_move(x) ::sus::move(x) #define _sus__bind_pointer(x) \ ::sus::fn::__private::UnsafePointer(::sus::marker::unsafe_fn, x) #define _sus__macro(x, ...) x(__VA_ARGS__) // Private helper. #define _sus__unpack sus_bind_stored_argumnts_should_be_wrapped_in_sus_store #define sus_bind_stored_argumnts_should_be_wrapped_in_sus_store(...) __VA_ARGS__ namespace sus::fn { namespace __private { /// The type-erased type (dropping the type of the internal lambda) of the /// closure's heap-allocated storage. struct FnStorageBase; /// Helper type returned by sus_bind() and used to construct a closure. template <class F> struct SusBind; /// Helper to determine which functions need to be instatiated for the closure, /// to be called from FnOnce, FnMut, and/or Fn. /// /// This type indicates the closure can only be called from FnOnce. enum StorageConstructionFnOnceType { StorageConstructionFnOnce }; /// Helper to determine which functions need to be instatiated for the closure, /// to be called from FnOnce, FnMut, and/or Fn. /// /// This type indicates the closure can be called from FnMut or FnOnce. enum StorageConstructionFnMutType { StorageConstructionFnMut }; /// Helper to determine which functions need to be instatiated for the closure, /// to be called from FnOnce, FnMut, and/or Fn. /// /// This type indicates the closure can be called from Fn, FnMut or FnOnce. enum StorageConstructionFnType { StorageConstructionFn }; /// Used to indicate if the closure is holding a function pointer or /// heap-allocated storage. enum FnType { /// Holds a function pointer or captureless lambda. FnPointer = 1, /// Holds the type-erased output of sus_bind() in a heap allocation. Storage = 2, }; } // namespace __private template <class R, class... Args> class FnOnce; template <class R, class... Args> class FnMut; template <class R, class... Args> class Fn; // TODO: Consider generic lambdas, it should be possible to bind them into // FnOnce/FnMut/Fn? // TODO: There's no way to capture an rvalue right now. Need something like // sus_take() but like sus_make(i, x.foo()) to bind `i = x.foo()`. // TODO: There's no way to capture a reference right now, sus_unsafe_ref()? /// A closure that erases the type of the internal callable object (lambda). A /// FnOnce may only be called a single time. /// /// Fn can be used as a FnMut, which can be used as a FnOnce. /// /// Lambdas without captures can be converted into a FnOnce, FnMut, or Fn /// directly. If the lambda has captured, it must be given to one of: /// /// - `sus_bind(sus_store(..captures..), lambda)` to bind a const lambda which /// captures variables from local state. Variables to be captured in the lambda /// must also be named in sus_store(). `sus_bind()` only allows those named /// variables to be captured, and ensures they are stored by value instead of by /// reference. /// /// - `sus_bind0(lambda)` to bind a const lambda which has bound variables that /// don't capture state from outside the lambda, such as `[i = 2]() { return i; /// }`. /// /// - sus_bind_mut(sus_store(...captures...), lambda)` to bind a mutable lambda /// which captures variables from local state. 
/// /// - `sus_bind0_mut(lambda)` to bind a mutable lambda which has bound variables /// that don't capture state from outside the lambda, such as `[i = 2]() { /// return ++i; }`. /// /// Within sus_store(), a variable name can be wrapped with a helper to capture /// in different ways: /// /// - `sus_take(x)` will move `x` into the closure instead of copying it. /// /// - `sus_unsafe_pointer(x)` will allow capturing a pointer. Otherwise it be /// disallowed, and it is strongly discouraged as it requires careful present /// and future understanding of the pointee's lifetime. /// /// # Example /// /// Moves `a` into the closure's storage, and copies b. The lambda then refers /// to the closure's stored values by reference. /// /// ``` /// int a = 1; /// int b = 2; /// FnOnce<void()> f = sus_bind_mut( /// sus_store(sus_take(a), b), [&a, &b]() mutable { a += b; } /// ); /// ``` /// /// Copies `a` into the closure's storage and defines a `b` from an rvalue. /// Since `b` isn't referred to outside the Fn it does not need to be bound. /// /// ``` /// int a = 1; /// FnOnce<void()> f = sus_bind_mut( /// sus_store(a), [&a, b = 2]() mutable { a += b; } /// ); /// ``` /// /// TODO: There's no way to do this currently, since it won't know what to name /// the `x.foo()` value. /// /// ``` /// struct { int foo() { return 2; } } x; /// FnOnce<void()> f = sus_bind_mut( /// sus_store(x.foo()), [&a]() mutable { a += 1; } /// ); /// ``` /// /// # Why can a "const" Fn convert to a mutable FnMut or FnOnce? /// /// A FnMut or FnOnce is _allowed_ to mutate its storage, but a "const" Fn /// closure would just choose not to do so. /// /// However, a `const Fn` requires that the storage is not mutated, so it is not /// useful if converted to a `const FnMut` or `const FnOnce` which are only /// callable as mutable objects. /// /// # Null pointers /// /// A null function pointer is not allowed, constructing a FnOnce from a null /// pointer will panic. template <class R, class... CallArgs> class [[sus_trivial_abi]] FnOnce<R(CallArgs...)> { public: /// Construction from a function pointer or captureless lambda. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> FnOnce(F ptr) noexcept; /// Construction from the output of `sus_bind()`. template <::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> FnOnce(__private::SusBind<F>&& holder) noexcept : FnOnce(__private::StorageConstructionFnOnce, static_cast<F&&>(holder.lambda)) {} ~FnOnce() noexcept; FnOnce(FnOnce&& o) noexcept; FnOnce& operator=(FnOnce&& o) noexcept; FnOnce(const FnOnce&) noexcept = delete; FnOnce& operator=(const FnOnce&) noexcept = delete; /// Runs and consumes the closure. /// /// Arguments passed by value to the underlying callable object are always /// moved. Thus, a const reference, or a mutable lvalue reference will not be /// accepted here, to prevent an implicit copy from occuring. inline R operator()(CallArgs&&... args) && noexcept; /// `sus::construct::From` trait implementation for function pointers or /// lambdas without captures. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> constexpr static auto from(F fn) noexcept { return FnOnce(static_cast<R (*)(CallArgs...)>(fn)); } /// `sus::construct::From` trait implementation for the output of /// `sus_bind()`. 
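///
/// For instance (an illustrative sketch, not from the library's own docs):
/// ```
/// int a = 1;
/// auto once = FnOnce<int()>::from(
///     sus_bind(sus_store(a), [&a]() { return a; }));
/// ```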
template <::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> constexpr static auto from(__private::SusBind<F>&& holder) noexcept { return FnOnce(static_cast<__private::SusBind<F>&&>(holder)); } protected: template <class ConstructionType, ::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> FnOnce(ConstructionType, F&& lambda) noexcept; // Functions to construct and return a pointer to a static vtable object for // the `__private::FnStorage` being stored in `storage_`. // // A FnOnce needs to store only a single pointer, for call_once(). But a Fn // needs to store three, for call(), call_mut() and call_once() since it can // be converted to a FnMut or FnOnce. For that reason we have 3 overloads // where each one instantiates only the functions it requires - to avoid // trying to compile functions that aren't accessible and thus don't need to // be able to compile. template <class FnStorage> static void make_vtable(FnStorage&, __private::StorageConstructionFnOnceType) noexcept; template <class FnStorage> static void make_vtable(FnStorage&, __private::StorageConstructionFnMutType) noexcept; template <class FnStorage> static void make_vtable(FnStorage&, __private::StorageConstructionFnType) noexcept; union { // Used when the closure is a function pointer (or a captureless lambda, // which is converted to a function pointer). R (*fn_ptr_)(CallArgs...); // Used when the closure is a lambda with storage, generated by // `sus_bind()`. This is a type-erased pointer to the heap storage. __private::FnStorageBase* storage_; }; // TODO: Could we query the allocator to see if the pointer here is heap // allocated or not, instead of storing a (pointer-sized, due to alignment) // flag here? __private::FnType type_; private: sus_class_trivial_relocatable(unsafe_fn); sus_class_never_value_field(unsafe_fn, FnOnce, type_, static_cast<__private::FnType>(0)); }; /// A closure that erases the type of the internal callable object (lambda). A /// FnMut may be called a multiple times, and may mutate its storage. /// /// Fn can be used as a FnMut, which can be used as a FnOnce. /// /// Lambdas without captures can be converted into a FnOnce, FnMut, or Fn /// directly. If the lambda has captured, it must be given to one of: /// /// - `sus_bind(sus_store(..captures..), lambda)` to bind a const lambda which /// captures variables from local state. Variables to be captured in the lambda /// must also be named in sus_store(). `sus_bind()` only allows those named /// variables to be captured, and ensures they are stored by value instead of by /// reference. /// /// - `sus_bind0(lambda)` to bind a const lambda which has bound variables that /// don't capture state from outside the lambda, such as `[i = 2]() { return i; /// }`. /// /// - sus_bind_mut(sus_store(...captures...), lambda)` to bind a mutable lambda /// which captures variables from local state. /// /// - `sus_bind0_mut(lambda)` to bind a mutable lambda which has bound variables /// that don't capture state from outside the lambda, such as `[i = 2]() { /// return ++i; }`. /// /// Within sus_store(), a varaible name can be wrapped with a helper to capture /// in different ways: /// /// - `sus_take(x)` will move `x` into the closure instead of copying it. /// /// - `sus_unsafe_pointer(x)` will allow capturing a pointer. Otherwise it be /// disallowed, and it is strongly discouraged as it requires careful present /// and future understanding of the pointee's lifetime. /// /// # Example /// /// Moves `a` into the closure's storage, and copies b. 
The lambda then refers /// to the closure's stored values by reference. /// /// ``` /// int a = 1; /// int b = 2; /// FnMut<void()> f = sus_bind_mut( /// sus_store(sus_take(a), b), [&a, &b]() mutable { a += b; } /// ); /// ``` /// /// # Why can a "const" Fn convert to a mutable FnMut or FnOnce? /// /// A FnMut or FnOnce is _allowed_ to mutate its storage, but a "const" Fn /// closure would just choose not to do so. /// /// However, a `const Fn` requires that the storage is not mutated, so it is not /// useful if converted to a `const FnMut` or `const FnOnce` which are only /// callable as mutable objects. /// /// # Null pointers /// /// A null function pointer is not allowed, constructing a FnMut from a null /// pointer will panic. template <class R, class... CallArgs> class [[sus_trivial_abi]] FnMut<R(CallArgs...)> : public FnOnce<R(CallArgs...)> { public: /// Construction from a function pointer or captureless lambda. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> FnMut(F ptr) noexcept : FnOnce<R(CallArgs...)>(static_cast<F&&>(ptr)) {} /// Construction from the output of `sus_bind()`. template <::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> FnMut(__private::SusBind<F>&& holder) noexcept : FnOnce<R(CallArgs...)>(__private::StorageConstructionFnMut, static_cast<F&&>(holder.lambda)) {} ~FnMut() noexcept = default; FnMut(FnMut&&) noexcept = default; FnMut& operator=(FnMut&&) noexcept = default; FnMut(const FnMut&) noexcept = delete; FnMut& operator=(const FnMut&) noexcept = delete; // Runs the closure. /// /// Arguments passed by value to the underlying callable object are always /// moved. Thus, a const reference, or a mutable lvalue reference will not be /// accepted here, to prevent an implicit copy from occuring. inline R operator()(CallArgs&&... args) & noexcept; // Runs and consumes the closure. /// /// Arguments passed by value to the underlying callable object are always /// moved. Thus, a const reference, or a mutable lvalue reference will not be /// accepted here, to prevent an implicit copy from occuring. inline R operator()(CallArgs&&... args) && noexcept { return static_cast<FnOnce<R(CallArgs...)>&&>(*this)( forward<CallArgs>(args)...); } /// `sus::construct::From` trait implementation for function pointers or /// lambdas without captures. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> constexpr static auto from(F fn) noexcept { return FnMut(static_cast<R (*)(CallArgs...)>(fn)); } /// `sus::construct::From` trait implementation for the output of /// `sus_bind()`. template <::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> constexpr static auto from(__private::SusBind<F>&& holder) noexcept { return FnMut(static_cast<__private::SusBind<F>&&>(holder)); } protected: // This class may only have trivially-destructible storage and must not // do anything in its destructor, as `FnOnce` moves from itself, and it // would slice that off. template <class ConstructionType, ::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> FnMut(ConstructionType c, F&& lambda) noexcept : FnOnce<R(CallArgs...)>(c, static_cast<F&&>(lambda)) {} private: sus_class_trivial_relocatable(unsafe_fn); }; /// A closure that erases the type of the internal callable object (lambda). A /// Fn may be called a multiple times, and will not mutate its storage. /// /// Fn can be used as a FnMut, which can be used as a FnOnce. /// /// Lambdas without captures can be converted into a FnOnce, FnMut, or Fn /// directly. 
If the lambda has captures, it must be given to one of: /// /// - `sus_bind(sus_store(..captures..), lambda)` to bind a const lambda which /// captures variables from local state. Variables to be captured in the lambda /// must also be named in sus_store(). `sus_bind()` only allows those named /// variables to be captured, and ensures they are stored by value instead of by /// reference. /// /// - `sus_bind0(lambda)` to bind a const lambda which has bound variables that /// don't capture state from outside the lambda, such as `[i = 2]() { return i; /// }`. /// /// - `sus_bind_mut(sus_store(...captures...), lambda)` to bind a mutable lambda /// which captures variables from local state. /// /// - `sus_bind0_mut(lambda)` to bind a mutable lambda which has bound variables /// that don't capture state from outside the lambda, such as `[i = 2]() { /// return ++i; }`. /// /// Within sus_store(), a variable name can be wrapped with a helper to capture /// in different ways: /// /// - `sus_take(x)` will move `x` into the closure instead of copying it. /// /// - `sus_unsafe_pointer(x)` will allow capturing a pointer. Otherwise it is /// disallowed, and it is strongly discouraged as it requires careful present /// and future understanding of the pointee's lifetime. /// /// # Example /// /// Moves `a` into the closure's storage, and copies b. The lambda then refers /// to the closure's stored values by reference. /// /// ``` /// int a = 1; /// int b = 2; /// Fn<int()> f = sus_bind( /// sus_store(sus_take(a), b), [&a, &b]() { return a + b; } /// ); /// ``` /// /// # Why can a "const" Fn convert to a mutable FnMut or FnOnce? /// /// A FnMut or FnOnce is _allowed_ to mutate its storage, but a "const" Fn /// closure would just choose not to do so. /// /// However, a `const Fn` requires that the storage is not mutated, so it is not /// useful if converted to a `const FnMut` or `const FnOnce` which are only /// callable as mutable objects. /// /// # Null pointers /// /// A null function pointer is not allowed, constructing a Fn from a null /// pointer will panic. template <class R, class... CallArgs> class [[sus_trivial_abi]] Fn<R(CallArgs...)> final : public FnMut<R(CallArgs...)> { public: /// Construction from a function pointer or captureless lambda. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> Fn(F ptr) noexcept : FnMut<R(CallArgs...)>(static_cast<F&&>(ptr)) {} /// Construction from the output of `sus_bind()`. template <::sus::fn::callable::CallableObjectReturnsConst<R, CallArgs...> F> Fn(__private::SusBind<F>&& holder) noexcept : FnMut<R(CallArgs...)>(__private::StorageConstructionFn, static_cast<F&&>(holder.lambda)) {} ~Fn() noexcept = default; Fn(Fn&&) noexcept = default; Fn& operator=(Fn&&) noexcept = default; Fn(const Fn&) noexcept = delete; Fn& operator=(const Fn&) noexcept = delete; /// Runs the closure. /// /// Arguments passed by value to the underlying callable object are always /// moved. Thus, a const reference, or a mutable lvalue reference will not be /// accepted here, to prevent an implicit copy from occurring. inline R operator()(CallArgs&&... args) const& noexcept; /// Runs and consumes the closure. /// /// Arguments passed by value to the underlying callable object are always /// moved. Thus, a const reference, or a mutable lvalue reference will not be /// accepted here, to prevent an implicit copy from occurring. inline R operator()(CallArgs&&...
args) && noexcept { return static_cast<FnOnce<R(CallArgs...)>&&>(*this)( forward<CallArgs>(args)...); } /// `sus::construct::From` trait implementation for function pointers or /// lambdas without captures. template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> constexpr static auto from(F fn) noexcept { return Fn(static_cast<R (*)(CallArgs...)>(fn)); } /// `sus::construct::From` trait implementation for the output of /// `sus_bind()`. template <::sus::fn::callable::CallableObjectReturnsConst<R, CallArgs...> F> constexpr static auto from(__private::SusBind<F>&& holder) noexcept { return Fn(static_cast<__private::SusBind<F>&&>(holder)); } protected: // This class may only have trivially-destructible storage and must not // do anything in its destructor, as `FnOnce` moves from itself, and it // would slice that off. template <::sus::fn::callable::CallableObjectReturnsConst<R, CallArgs...> F> Fn(__private::StorageConstructionFnType, F&& fn) noexcept; private: sus_class_trivial_relocatable(unsafe_fn); }; } // namespace sus::fn namespace sus::fn::__private { struct FnStorageVtableBase {}; struct FnStorageBase { // Should be to a static lifetime pointee. Option<FnStorageVtableBase&> vtable = Option<FnStorageVtableBase&>::none(); }; template <class R, class... CallArgs> struct FnStorageVtable final : public FnStorageVtableBase { R (*call_once)(__private::FnStorageBase&&, CallArgs...); R (*call_mut)(__private::FnStorageBase&, CallArgs...); R (*call)(const __private::FnStorageBase&, CallArgs...); }; template <class F> class FnStorage final : public FnStorageBase { public: constexpr FnStorage(F&& callable) : callable_(static_cast<F&&>(callable)) {} template <class R, class... CallArgs> static R call(const FnStorageBase& self_base, CallArgs... callargs) { const auto& self = static_cast<const FnStorage&>(self_base); return self.callable_(forward<CallArgs>(callargs)...); } template <class R, class... CallArgs> static R call_mut(FnStorageBase& self_base, CallArgs... callargs) { auto& self = static_cast<FnStorage&>(self_base); return self.callable_(forward<CallArgs>(callargs)...); } template <class R, class... CallArgs> static R call_once(FnStorageBase&& self_base, CallArgs... callargs) { auto&& self = static_cast<FnStorage&&>(self_base); return static_cast<F&&>(self.callable_)(forward<CallArgs>(callargs)...); } F callable_; }; } // namespace sus::fn::__private namespace sus::fn { template <class R, class... CallArgs> template <::sus::fn::callable::FunctionPointerReturns<R, CallArgs...> F> FnOnce<R(CallArgs...)>::FnOnce(F ptr) noexcept : fn_ptr_(ptr), type_(__private::FnPointer) { ::sus::check(ptr != nullptr); } template <class R, class... CallArgs> template <class ConstructionType, ::sus::fn::callable::CallableObjectReturns<R, CallArgs...> F> FnOnce<R(CallArgs...)>::FnOnce(ConstructionType construction, F&& lambda) noexcept : type_(__private::Storage) { using FnStorage = __private::FnStorage<F>; // TODO: Allow overriding the global allocator? Use the allocator in place of // `new` and `delete` directly? auto* s = new FnStorage(static_cast<F&&>(lambda)); make_vtable(*s, construction); storage_ = s; } template <class R, class... 
CallArgs> template <class FnStorage> void FnOnce<R(CallArgs...)>::make_vtable( FnStorage& storage, __private::StorageConstructionFnOnceType) noexcept { static __private::FnStorageVtable<R, CallArgs...> vtable = { .call_once = &FnStorage::template call_once<R, CallArgs...>, .call_mut = nullptr, .call = nullptr, }; storage.vtable.insert(vtable); } template <class R, class... CallArgs> template <class FnStorage> void FnOnce<R(CallArgs...)>::make_vtable( FnStorage& storage, __private::StorageConstructionFnMutType) noexcept { static __private::FnStorageVtable<R, CallArgs...> vtable = { .call_once = &FnStorage::template call_once<R, CallArgs...>, .call_mut = &FnStorage::template call_mut<R, CallArgs...>, .call = nullptr, }; storage.vtable.insert(vtable); } template <class R, class... CallArgs> template <class FnStorage> void FnOnce<R(CallArgs...)>::make_vtable( FnStorage& storage, __private::StorageConstructionFnType) noexcept { static __private::FnStorageVtable<R, CallArgs...> vtable = { .call_once = &FnStorage::template call_once<R, CallArgs...>, .call_mut = &FnStorage::template call_mut<R, CallArgs...>, .call = &FnStorage::template call<R, CallArgs...>, }; storage.vtable.insert(vtable); } template <class R, class... CallArgs> FnOnce<R(CallArgs...)>::~FnOnce() noexcept { switch (type_) { case __private::FnPointer: break; case __private::Storage: { if (auto* s = ::sus::mem::replace_ptr(mref(storage_), nullptr); s) delete s; break; } } } template <class R, class... CallArgs> FnOnce<R(CallArgs...)>::FnOnce(FnOnce&& o) noexcept : type_(o.type_) { switch (type_) { case __private::FnPointer: ::sus::check(o.fn_ptr_); // Catch use-after-move. fn_ptr_ = ::sus::mem::replace_ptr(mref(o.fn_ptr_), nullptr); break; case __private::Storage: ::sus::check(o.storage_); // Catch use-after-move. storage_ = ::sus::mem::replace_ptr(mref(o.storage_), nullptr); break; } } template <class R, class... CallArgs> FnOnce<R(CallArgs...)>& FnOnce<R(CallArgs...)>::operator=(FnOnce&& o) noexcept { switch (type_) { case __private::FnPointer: break; case __private::Storage: if (auto* s = ::sus::mem::replace_ptr(mref(storage_), nullptr); s) delete s; } switch (type_ = o.type_) { case __private::FnPointer: ::sus::check(o.fn_ptr_); // Catch use-after-move. fn_ptr_ = ::sus::mem::replace_ptr(mref(o.fn_ptr_), nullptr); break; case __private::Storage: ::sus::check(o.storage_); // Catch use-after-move. storage_ = ::sus::mem::replace_ptr(mref(o.storage_), nullptr); break; } return *this; } template <class R, class... CallArgs> R FnOnce<R(CallArgs...)>::operator()(CallArgs&&... args) && noexcept { switch (type_) { case __private::FnPointer: { ::sus::check(fn_ptr_); // Catch use-after-move. auto* fn = ::sus::mem::replace_ptr(mref(fn_ptr_), nullptr); return fn(static_cast<CallArgs&&>(args)...); } case __private::Storage: { ::sus::check(storage_); // Catch use-after-move. auto* storage = ::sus::mem::replace_ptr(mref(storage_), nullptr); auto& vtable = static_cast<__private::FnStorageVtable<R, CallArgs...>&>( storage->vtable.unwrap_mut()); // Delete storage, after the call_once() is complete. // // TODO: `storage` and `storage_` should be owning smart pointers. 
struct DeleteStorage final { sus_clang_bug_54040(constexpr inline DeleteStorage(__private::FnStorageBase* storage) : storage(storage) {}) ~DeleteStorage() { delete storage; } __private::FnStorageBase* storage; } deleter(storage); return vtable.call_once(static_cast<__private::FnStorageBase&&>(*storage), forward<CallArgs>(args)...); } } ::sus::unreachable_unchecked(unsafe_fn); } template <class R, class... CallArgs> R FnMut<R(CallArgs...)>::operator()(CallArgs&&... args) & noexcept { using Super = FnOnce<R(CallArgs...)>; switch (Super::type_) { case __private::FnPointer: ::sus::check(Super::fn_ptr_); // Catch use-after-move. return Super::fn_ptr_(static_cast<CallArgs&&>(args)...); case __private::Storage: { ::sus::check(Super::storage_); // Catch use-after-move. auto& vtable = static_cast<__private::FnStorageVtable<R, CallArgs...>&>( Super::storage_->vtable.unwrap_mut()); return vtable.call_mut( static_cast<__private::FnStorageBase&>(*Super::storage_), forward<CallArgs>(args)...); } } ::sus::unreachable_unchecked(unsafe_fn); } template <class R, class... CallArgs> R Fn<R(CallArgs...)>::operator()(CallArgs&&... args) const& noexcept { using Super = FnOnce<R(CallArgs...)>; switch (Super::type_) { case __private::FnPointer: ::sus::check(Super::fn_ptr_); // Catch use-after-move. return Super::fn_ptr_(static_cast<CallArgs&&>(args)...); case __private::Storage: { ::sus::check(Super::storage_); // Catch use-after-move. auto& vtable = static_cast<__private::FnStorageVtable<R, CallArgs...>&>( Super::storage_->vtable.unwrap_mut()); return vtable.call( static_cast<const __private::FnStorageBase&>(*Super::storage_), forward<CallArgs>(args)...); } } ::sus::unreachable_unchecked(unsafe_fn); } } // namespace sus::fn namespace sus::containers { template <class T, size_t N> requires(N <= PTRDIFF_MAX) class Array; } namespace sus::num { struct u8; } namespace sus::tuple { template <class T, class... Ts> class Tuple; } #define _sus__unsigned_impl(T, PrimitiveT, SignedT) \ _sus__unsigned_storage(PrimitiveT); \ _sus__unsigned_constants(T, PrimitiveT); \ _sus__unsigned_construct(T, PrimitiveT); \ _sus__unsigned_from(T, PrimitiveT); \ _sus__unsigned_integer_comparison(T); \ _sus__unsigned_unary_ops(T); \ _sus__unsigned_binary_logic_ops(T); \ _sus__unsigned_binary_bit_ops(T); \ _sus__unsigned_mutable_logic_ops(T); \ _sus__unsigned_mutable_bit_ops(T); \ _sus__unsigned_abs(T); \ _sus__unsigned_add(T, SignedT); \ _sus__unsigned_div(T); \ _sus__unsigned_mul(T); \ _sus__unsigned_neg(T, PrimitiveT); \ _sus__unsigned_rem(T); \ _sus__unsigned_euclid(T); \ _sus__unsigned_shift(T); \ _sus__unsigned_sub(T); \ _sus__unsigned_bits(T); \ _sus__unsigned_pow(T); \ _sus__unsigned_log(T); \ _sus__unsigned_power_of_two(T, PrimitiveT); \ _sus__unsigned_endian(T, PrimitiveT, sizeof(PrimitiveT)) #define _sus__unsigned_storage(PrimitiveT) \ /** The inner primitive value, in case it needs to be unwrapped from the \ * type. Avoid using this member except to convert when a consumer requires \ * it. 
\ */ \ PrimitiveT primitive_value { 0u } #define _sus__unsigned_constants(T, PrimitiveT) \ static constexpr auto MIN_PRIMITIVE = __private::min_value<PrimitiveT>(); \ static constexpr auto MAX_PRIMITIVE = __private::max_value<PrimitiveT>(); \ static constexpr inline T MIN() noexcept { return MIN_PRIMITIVE; } \ static constexpr inline T MAX() noexcept { return MAX_PRIMITIVE; } \ static constexpr inline u32 BITS() noexcept { \ return __private::num_bits<PrimitiveT>(); \ } \ static_assert(true) #define _sus__unsigned_construct(T, PrimitiveT) \ /** Default constructor, which sets the integer to 0. \ * \ * The trivial copy and move constructors are implicitly declared, as is the \ * trivial destructor. \ */ \ constexpr inline T() noexcept = default; \ \ /** Assignment from the underlying primitive type. \ */ \ template <std::same_as<PrimitiveT> P> /* Prevent implicit conversions. */ \ constexpr inline void operator=(P v) noexcept { \ primitive_value = v; \ } \ static_assert(true) #define _sus__unsigned_from(T, PrimitiveT) \ /** Constructs a ##T## from a signed integer type (i8, i16, i32, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <Signed S> \ static constexpr T from(S s) noexcept { \ ::sus::check(s.primitive_value >= 0); \ constexpr auto umax = __private::into_unsigned(S::MAX_PRIMITIVE); \ if constexpr (MAX_PRIMITIVE < umax) \ ::sus::check(__private::into_unsigned(s.primitive_value) <= \ MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(s.primitive_value)); \ } \ \ /** Constructs a ##T## from an unsigned integer type (u8, u16, u32, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <Unsigned U> \ static constexpr T from(U u) noexcept { \ if constexpr (MAX_PRIMITIVE < U::MAX_PRIMITIVE) \ ::sus::check(u.primitive_value <= MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(u.primitive_value)); \ } \ \ /** Constructs a ##T## from a signed primitive integer type (int, long, \ * etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <SignedPrimitiveInteger S> \ static constexpr T from(S s) { \ ::sus::check(s >= 0); \ constexpr auto umax = __private::into_unsigned(__private::max_value<S>()); \ if constexpr (MAX_PRIMITIVE < umax) \ ::sus::check(__private::into_unsigned(s) <= MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(s)); \ } \ \ /** Constructs a ##T## from an unsigned primitive integer type (unsigned \ * int, unsigned long, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <UnsignedPrimitiveInteger U> \ static constexpr T from(U u) { \ if constexpr (MAX_PRIMITIVE < __private::max_value<U>()) \ ::sus::check(u <= MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(u)); \ } \ static_assert(true) #define _sus__unsigned_integer_comparison(T) \ /** sus::concepts::Eq<##T##> trait. */ \ friend constexpr inline bool operator==(const T& l, const T& r) noexcept { \ return (l.primitive_value <=> r.primitive_value) == 0; \ } \ /** sus::concepts::Ord<##T##> trait. */ \ friend constexpr inline auto operator<=>(const T& l, const T& r) noexcept { \ return l.primitive_value <=> r.primitive_value; \ } \ static_assert(true) #define _sus__unsigned_unary_ops(T) \ /** sus::concepts::Neg trait intentionally omitted. */ \ /** sus::concepts::BitNot trait. 
*/ \ constexpr inline T operator~() const& noexcept { \ return __private::unchecked_not(primitive_value); \ } \ static_assert(true) #define _sus__unsigned_binary_logic_ops(T) \ /** sus::concepts::Add<##T##> trait. */ \ friend constexpr inline T operator+(const T& l, const T& r) noexcept { \ const auto out = \ __private::add_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Sub<##T##> trait. */ \ friend constexpr inline T operator-(const T& l, const T& r) noexcept { \ const auto out = \ __private::sub_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Mul<##T##> trait. */ \ friend constexpr inline T operator*(const T& l, const T& r) noexcept { \ const auto out = \ __private::mul_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Div<##T##> trait. */ \ friend constexpr inline T operator/(const T& l, const T& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0u); \ return __private::unchecked_div(l.primitive_value, r.primitive_value); \ } \ /** sus::concepts::Rem<##T##> trait. */ \ friend constexpr inline T operator%(const T& l, const T& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0u); \ return __private::unchecked_rem(l.primitive_value, r.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_binary_bit_ops(T) \ /** sus::concepts::BitAnd<##T##> trait. */ \ friend constexpr inline T operator&(const T& l, const T& r) noexcept { \ return __private::unchecked_and(l.primitive_value, r.primitive_value); \ } \ /** sus::concepts::BitOr<##T##> trait. */ \ friend constexpr inline T operator|(const T& l, const T& r) noexcept { \ return __private::unchecked_or(l.primitive_value, r.primitive_value); \ } \ /** sus::concepts::BitXor<##T##> trait. */ \ friend constexpr inline T operator^(const T& l, const T& r) noexcept { \ return __private::unchecked_xor(l.primitive_value, r.primitive_value); \ } \ /** sus::concepts::Shl trait. */ \ friend constexpr inline T operator<<(const T& l, const u32& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ return __private::unchecked_shl(l.primitive_value, r.primitive_value); \ } \ /** sus::concepts::Shr trait. */ \ friend constexpr inline T operator>>(const T& l, const u32& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ return __private::unchecked_shr(l.primitive_value, r.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_mutable_logic_ops(T) \ /** sus::concepts::AddAssign<##T##> trait. */ \ constexpr inline void operator+=(T r)& noexcept { \ const auto out = \ __private::add_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::SubAssign<##T##> trait. */ \ constexpr inline void operator-=(T r)& noexcept { \ const auto out = \ __private::sub_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? 
*/ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::MulAssign<##T##> trait. */ \ constexpr inline void operator*=(T r)& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::DivAssign<##T##> trait. */ \ constexpr inline void operator/=(T r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0u); \ primitive_value /= r.primitive_value; \ } \ /** sus::concepts::RemAssign<##T##> trait. */ \ constexpr inline void operator%=(T r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0u); \ primitive_value %= r.primitive_value; \ } \ static_assert(true) #define _sus__unsigned_mutable_bit_ops(T) \ /** sus::concepts::BitAndAssign<##T##> trait. */ \ constexpr inline void operator&=(T r)& noexcept { \ primitive_value &= r.primitive_value; \ } \ /** sus::concepts::BitOrAssign<##T##> trait. */ \ constexpr inline void operator|=(T r)& noexcept { \ primitive_value |= r.primitive_value; \ } \ /** sus::concepts::BitXorAssign<##T##> trait. */ \ constexpr inline void operator^=(T r)& noexcept { \ primitive_value ^= r.primitive_value; \ } \ /** sus::concepts::ShlAssign trait. */ \ constexpr inline void operator<<=(const u32& r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ primitive_value <<= r.primitive_value; \ } \ /** sus::concepts::ShrAssign trait. */ \ constexpr inline void operator>>=(const u32& r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ primitive_value >>= r.primitive_value; \ } \ static_assert(true) #define _sus__unsigned_abs(T) \ /** Computes the absolute difference between self and other. \ */ \ constexpr T abs_diff(const T& r) const& noexcept { \ if (primitive_value >= r.primitive_value) \ return __private::unchecked_sub(primitive_value, r.primitive_value); \ else \ return __private::unchecked_sub(r.primitive_value, primitive_value); \ } \ static_assert(true) #define _sus__unsigned_add(T, SignedT) \ /** Checked integer addition. Computes self + rhs, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_add(const T& rhs) const& noexcept { \ const auto out = \ __private::add_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Checked integer addition with an unsigned rhs. Computes self + rhs, \ * returning None if overflow occurred. \ */ \ template <std::same_as<SignedT> S> \ constexpr Option<T> checked_add_signed(const S& rhs) const& noexcept { \ const auto out = __private::add_with_overflow_signed(primitive_value, \ rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates self + rhs \ * \ * Returns a tuple of the addition along with a boolean indicating whether \ * an arithmetic overflow would occur. If an overflow would have occurred \ * then the wrapped value is returned. 
\ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_add(const T& rhs) const& noexcept { \ const auto out = \ __private::add_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Calculates self + rhs with an unsigned rhs \ * \ * Returns a tuple of the addition along with a boolean indicating whether \ * an arithmetic overflow would occur. If an overflow would have occurred \ * then the wrapped value is returned. \ */ \ template <std::same_as<SignedT> S, int&..., \ class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_add_signed(const S& rhs) const& noexcept { \ const auto r = __private::add_with_overflow_signed(primitive_value, \ rhs.primitive_value); \ return Tuple::with(r.value, r.overflow); \ } \ \ /** Saturating integer addition. Computes self + rhs, saturating at the \ * numeric bounds instead of overflowing. \ */ \ constexpr T saturating_add(const T& rhs) const& noexcept { \ return __private::saturating_add(primitive_value, rhs.primitive_value); \ } \ \ /** Saturating integer addition with an unsigned rhs. Computes self + rhs, \ * saturating at the numeric bounds instead of overflowing. \ */ \ template <std::same_as<SignedT> S> \ constexpr T saturating_add_signed(const S& rhs) const& noexcept { \ const auto r = __private::add_with_overflow_signed(primitive_value, \ rhs.primitive_value); \ if (!r.overflow) [[likely]] \ return r.value; \ else { \ /* TODO: Can this be done without a branch? If it's complex or uses \ * compiler stuff, move into intrinsics. */ \ if (rhs.primitive_value >= 0) \ return MAX(); \ else \ return MIN(); \ } \ } \ \ /** Unchecked integer addition. Computes self + rhs, assuming overflow \ * cannot occur. \ * \ * # Safety \ * This function is allowed to result in undefined behavior when `self + rhs \ * > ##T##::MAX` or `self + rhs < ##T##::MIN`, i.e. when `checked_add()` \ * would return None. \ */ \ inline constexpr T unchecked_add(::sus::marker::UnsafeFnMarker, \ const T& rhs) const& noexcept { \ return __private::unchecked_add(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) addition. Computes self + rhs, wrapping around at the \ * boundary of the type. \ */ \ constexpr T wrapping_add(const T& rhs) const& noexcept { \ return __private::wrapping_add(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) addition with an unsigned rhs. Computes self + rhs, \ * wrapping around at the boundary of the type. \ */ \ template <std::same_as<SignedT> S> \ constexpr T wrapping_add_signed(const S& rhs) const& noexcept { \ return __private::add_with_overflow_signed(primitive_value, \ rhs.primitive_value) \ .value; \ } \ static_assert(true) #define _sus__unsigned_div(T) \ /** Checked integer division. Computes self / rhs, returning None if `rhs == \ * 0`. \ */ \ constexpr Option<T> checked_div(const T& rhs) const& noexcept { \ if (rhs.primitive_value != 0u) [[likely]] \ return Option<T>::some( \ __private::unchecked_div(primitive_value, rhs.primitive_value)); \ else \ return Option<T>::none(); \ } \ \ /** Calculates the divisor when self is divided by rhs. \ * \ * Returns a tuple of the divisor along with a boolean indicating whether an \ *arithmetic overflow would occur. Note that for unsigned integers overflow \ *never occurs, so the second value is always false. \ * \ * #Panics \ *This function will panic if rhs is 0. 
\ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return Tuple::with( \ __private::unchecked_div(primitive_value, rhs.primitive_value), \ false); \ } \ \ /** Saturating integer division. Computes self / rhs, saturating at the \ numeric bounds instead of overflowing. \ * \ * #Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T saturating_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) division. Computes self / rhs. Wrapped division on \ * unsigned types is just normal division. There's no way wrapping could \ * ever happen. This function exists, so that all operations are accounted \ * for in the wrapping operations. \ * \ * #Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_mul(T) \ /** Checked integer multiplication. Computes self * rhs, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_mul(const T& rhs) const& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates the multiplication of self and rhs. \ * \ * Returns a tuple of the multiplication along with a boolean indicating \ * whether an arithmetic overflow would occur. If an overflow would have \ * occurred then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_mul(const T& rhs) const& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Saturating integer multiplication. Computes self * rhs, saturating at \ * the numeric bounds instead of overflowing. \ */ \ constexpr T saturating_mul(const T& rhs) const& noexcept { \ return __private::saturating_mul(primitive_value, rhs.primitive_value); \ } \ \ /** Unchecked integer multiplication. Computes self * rhs, assuming overflow \ * cannot occur. \ * \ * # Safety \ * This function is allowed to result in undefined behavior when `self * rhs \ * > ##T##::MAX` or `self * rhs < ##T##::MIN`, i.e. when `checked_mul()` \ * would return None. \ */ \ constexpr inline T unchecked_mul(::sus::marker::UnsafeFnMarker, \ const T& rhs) const& noexcept { \ return __private::unchecked_mul(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) multiplication. Computes self * rhs, wrapping around \ * at the boundary of the type. \ */ \ constexpr T wrapping_mul(const T& rhs) const& noexcept { \ return __private::wrapping_mul(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_neg(T, PrimitiveT) \ /** Checked negation. Computes -self, returning None unless `self == 0`. \ * \ * Note that negating any positive integer will overflow. 
\ */ \ constexpr Option<T> checked_neg() const& noexcept { \ if (primitive_value == 0u) \ return Option<T>::some(T(PrimitiveT{0u})); \ else \ return Option<T>::none(); \ } \ \ /** Negates self in an overflowing fashion. \ * \ * Returns `~self + 1` using wrapping operations to return the value that \ * represents the negation of this unsigned value. Note that for positive \ * unsigned values overflow always occurs, but negating 0 does not overflow. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_neg() const& noexcept { \ return Tuple::with((~(*this)).wrapping_add(T(PrimitiveT{1u})), \ primitive_value != 0u); \ } \ \ /** Wrapping (modular) negation. Computes `-self`, wrapping around at the \ boundary of the type. \ * \ * Since unsigned types do not have negative equivalents all applications of \ * this function will wrap (except for -0). For values smaller than the \ * corresponding signed type's maximum the result is the same as casting \ * the corresponding signed value. Any larger values are equivalent to \ * `MAX + 1 - (val - MAX - 1)` where MAX is the corresponding signed type's \ * maximum. \ */ \ constexpr T wrapping_neg() const& noexcept { \ return (T(PrimitiveT{0u})).wrapping_sub(*this); \ } \ static_assert(true) #define _sus__unsigned_rem(T) \ /** Checked integer remainder. Computes `self % rhs`, returning None if `rhs \ * == 0`. \ */ \ constexpr Option<T> checked_rem(const T& rhs) const& noexcept { \ if (rhs.primitive_value != 0u) [[likely]] \ return Option<T>::some( \ __private::unchecked_rem(primitive_value, rhs.primitive_value)); \ else \ return Option<T>::none(); \ } \ \ /** Calculates the remainder when self is divided by rhs. \ * \ * Returns a tuple of the remainder after dividing along with a boolean \ * indicating whether an arithmetic overflow would occur. Note that for \ * unsigned integers overflow never occurs, so the second value is always \ * false. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_rem(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return Tuple::with( \ __private::unchecked_rem(primitive_value, rhs.primitive_value), \ false); \ } \ \ /** Wrapping (modular) remainder. Computes self % rhs. Wrapped remainder \ * calculation on unsigned types is just the regular remainder calculation. \ * \ * There's no way wrapping could ever happen. This function exists, so that \ * all operations are accounted for in the wrapping operations. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_rem(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_rem(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_euclid(T) \ /** Performs Euclidean division. \ * \ * Since, for the positive integers, all common definitions of division are \ * equal, this is exactly equal to self / rhs. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ \ /** Checked Euclidean division. 
Computes self.div_euclid(rhs), returning \ * None if rhs == 0. \ */ \ constexpr Option<T> checked_div_euclid(const T& rhs) const& noexcept { \ if (rhs.primitive_value == 0u) [[unlikely]] { \ return Option<T>::none(); \ } else { \ return Option<T>::some( \ __private::unchecked_div(primitive_value, rhs.primitive_value)); \ } \ } \ \ /** Calculates the quotient of Euclidean division self.div_euclid(rhs). \ * \ * Returns a tuple of the divisor along with a boolean indicating whether an \ * arithmetic overflow would occur. Note that for unsigned integers overflow \ * never occurs, so the second value is always false. Since, for the \ * positive integers, all common definitions of division are equal, this is \ * exactly equal to self.overflowing_div(rhs). \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return Tuple::with( \ __private::unchecked_div(primitive_value, rhs.primitive_value), \ false); \ } \ \ /** Wrapping Euclidean division. Computes self.div_euclid(rhs). Wrapped \ * division on unsigned types is just normal division. \ * \ * There's no way wrapping could ever happen. This function exists so that \ * all operations are accounted for in the wrapping operations. Since, for \ * the positive integers, all common definitions of division are equal, this \ * is exactly equal to self.wrapping_div(rhs). \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ \ /** Calculates the least remainder of self (mod rhs). \ * \ * Since, for the positive integers, all common definitions of division are \ * equal, this is exactly equal to self % rhs. \ \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_rem(primitive_value, rhs.primitive_value); \ } \ \ /** Checked Euclidean modulo. Computes self.rem_euclid(rhs), returning None \ * if rhs == 0. \ */ \ constexpr Option<T> checked_rem_euclid(const T& rhs) const& noexcept { \ if (rhs.primitive_value == 0u) [[unlikely]] { \ return Option<T>::none(); \ } else { \ return Option<T>::some( \ __private::unchecked_rem(primitive_value, rhs.primitive_value)); \ } \ } \ \ /** Calculates the remainder self.rem_euclid(rhs) as if by Euclidean \ * division. \ * \ * Returns a tuple of the modulo after dividing along with a boolean \ * indicating whether an arithmetic overflow would occur. Note that for \ * unsigned integers overflow never occurs, so the second value is always \ * false. Since, for the positive integers, all common definitions of \ * division are equal, this operation is exactly equal to \ * self.overflowing_rem(rhs). \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? 
*/ \ ::sus::check(rhs.primitive_value != 0u); \ return Tuple::with( \ __private::unchecked_rem(primitive_value, rhs.primitive_value), \ false); \ } \ \ /** Wrapping Euclidean modulo. Computes self.rem_euclid(rhs). Wrapped modulo \ * calculation on unsigned types is just the regular remainder calculation. \ * \ * There’s no way wrapping could ever happen. This function exists, so that \ * all operations are accounted for in the wrapping operations. Since, for \ * the positive integers, all common definitions of division are equal, this \ * is exactly equal to self.wrapping_rem(rhs). \ \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0u); \ return __private::unchecked_rem(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_shift(T) \ /** Checked shift left. Computes `*this << rhs`, returning None if rhs is \ * larger than or equal to the number of bits in self. \ */ \ constexpr Option<T> checked_shl(const u32& rhs) const& noexcept { \ const auto out = \ __private::shl_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Shifts self left by rhs bits. \ * \ * Returns a tuple of the shifted version of self along with a boolean \ * indicating whether the shift value was larger than or equal to the number \ * of bits. If the shift value is too large, then value is masked (N-1) \ * where N is the number of bits, and this value is then used to perform the \ * shift. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_shl(const u32& rhs) const& noexcept { \ const auto out = \ __private::shl_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Panic-free bitwise shift-left; yields `*this << mask(rhs)`, where mask \ * removes any high-order bits of `rhs` that would cause the shift to exceed \ * the bitwidth of the type. \ * \ * Note that this is not the same as a rotate-left; the RHS of a wrapping \ * shift-left is restricted to the range of the type, rather than the bits \ * shifted out of the LHS being returned to the other end. The primitive \ * integer types all implement a rotate_left function, which may be what you \ * want instead. \ */ \ constexpr T wrapping_shl(const u32& rhs) const& noexcept { \ return __private::shl_with_overflow(primitive_value, rhs.primitive_value) \ .value; \ } \ \ /** Checked shift right. Computes `*this >> rhs`, returning None if rhs is \ * larger than or equal to the number of bits in self. \ */ \ constexpr Option<T> checked_shr(const u32& rhs) const& noexcept { \ const auto out = \ __private::shr_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Shifts self right by rhs bits. \ * \ * Returns a tuple of the shifted version of self along with a boolean \ * indicating whether the shift value was larger than or equal to the number \ * of bits. If the shift value is too large, then value is masked (N-1) \ * where N is the number of bits, and this value is then used to perform the \ * shift. 
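// Usage sketch (added for illustration, not from the original source): the
// checked vs. wrapping shift behavior described here, using the u32 type
// defined later in this file. The shift amount is itself a u32.
inline void example_unsigned_shifts() {
  const auto v = u32(uint32_t{1});
  auto a = v.checked_shl(u32(uint32_t{3}));    // Some(8).
  auto b = v.checked_shl(u32(uint32_t{32}));   // None: the shift equals the bit width.
  auto c = v.wrapping_shl(u32(uint32_t{33}));  // Shift masked to 33 % 32 == 1, so 2.
  (void)a; (void)b; (void)c;
}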
\ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_shr(const u32& rhs) const& noexcept { \ const auto out = \ __private::shr_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Panic-free bitwise shift-right; yields `*this >> mask(rhs)`, where mask \ * removes any high-order bits of `rhs` that would cause the shift to exceed \ * the bitwidth of the type. \ * \ * Note that this is not the same as a rotate-right; the RHS of a wrapping \ * shift-right is restricted to the range of the type, rather than the bits \ * shifted out of the LHS being returned to the other end. The primitive \ * integer types all implement a rotate_right function, which may be what \ * you want instead. \ */ \ constexpr T wrapping_shr(const u32& rhs) const& noexcept { \ return __private::shr_with_overflow(primitive_value, rhs.primitive_value) \ .value; \ } \ static_assert(true) #define _sus__unsigned_sub(T) \ /** Checked integer subtraction. Computes self - rhs, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_sub(const T& rhs) const& { \ const auto out = \ __private::sub_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates self - rhs \ * \ * Returns a tuple of the subtraction along with a boolean indicating \ * whether an arithmetic overflow would occur. If an overflow would have \ * occurred then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_sub(const T& rhs) const& noexcept { \ const auto out = \ __private::sub_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Saturating integer subtraction. Computes self - rhs, saturating at the \ * numeric bounds instead of overflowing. \ */ \ constexpr T saturating_sub(const T& rhs) const& { \ return __private::saturating_sub(primitive_value, rhs.primitive_value); \ } \ \ /** Unchecked integer subtraction. Computes self - rhs, assuming overflow \ * cannot occur. \ * \ * # Safety \ * This function is allowed to result in undefined behavior when `self - rhs \ * > ##T##::MAX` or `self - rhs < ##T##::MIN`, i.e. when `checked_sub()` \ * would return None. \ */ \ constexpr T unchecked_sub(::sus::marker::UnsafeFnMarker, const T& rhs) \ const& { \ return __private::unchecked_sub(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) subtraction. Computes self - rhs, wrapping around at \ * the boundary of the type. \ */ \ constexpr T wrapping_sub(const T& rhs) const& { \ return __private::wrapping_sub(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_bits(T) \ /** Returns the number of ones in the binary representation of the current \ * value. \ */ \ constexpr u32 count_ones() const& noexcept { \ return __private::count_ones(primitive_value); \ } \ \ /** Returns the number of zeros in the binary representation of the current \ * value. \ */ \ constexpr u32 count_zeros() const& noexcept { \ return (~(*this)).count_ones(); \ } \ \ /** Returns the number of leading ones in the binary representation of the \ * current value. \ */ \ constexpr u32 leading_ones() const& noexcept { \ return (~(*this)).leading_zeros(); \ } \ \ /** Returns the number of leading zeros in the binary representation of the \ * current value. 
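// Usage sketch (added for illustration, not from the original source): the
// bit-inspection helpers documented here, on the u32 type defined later in
// this file. Expected results are noted in the comments.
inline void example_unsigned_bits() {
  const auto v = u32(uint32_t{0x000000F0});
  auto ones = v.count_ones();                  // 4
  auto zeros = v.count_zeros();                // 28
  auto lead = v.leading_zeros();               // 24
  auto trail = v.trailing_zeros();             // 4
  auto rot = v.rotate_left(u32(uint32_t{8}));  // 0x0000F000
  (void)ones; (void)zeros; (void)lead; (void)trail; (void)rot;
}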
\ */ \ constexpr u32 leading_zeros() const& noexcept { \ return __private::leading_zeros(primitive_value); \ } \ \ /** Returns the number of trailing ones in the binary representation of the \ * current value. \ */ \ constexpr u32 trailing_ones() const& noexcept { \ return (~(*this)).trailing_zeros(); \ } \ \ /** Returns the number of trailing zeros in the binary representation of the \ * current value. \ */ \ constexpr u32 trailing_zeros() const& noexcept { \ return __private::trailing_zeros(primitive_value); \ } \ \ /** Reverses the order of bits in the integer. The least significant bit \ * becomes the most significant bit, second least-significant bit becomes \ * second most-significant bit, etc. \ */ \ constexpr T reverse_bits() const& noexcept { \ return __private::reverse_bits(primitive_value); \ } \ \ /** Shifts the bits to the left by a specified amount, `n`, wrapping the \ * truncated bits to the end of the resulting integer. \ * \ * Please note this isn't the same operation as the `<<` shifting operator! \ */ \ constexpr T rotate_left(const u32& n) const& noexcept { \ return __private::rotate_left(primitive_value, n.primitive_value); \ } \ \ /** Shifts the bits to the right by a specified amount, n, wrapping the \ * truncated bits to the beginning of the resulting integer. \ * \ * Please note this isn't the same operation as the >> shifting operator! \ */ \ constexpr T rotate_right(const u32& n) const& noexcept { \ return __private::rotate_right(primitive_value, n.primitive_value); \ } \ \ /** Reverses the byte order of the integer. \ */ \ constexpr T swap_bytes() const& noexcept { \ return __private::swap_bytes(primitive_value); \ } \ static_assert(true) #define _sus__unsigned_pow(T) \ /** Raises self to the power of `exp`, using exponentiation by squaring. */ \ constexpr inline T pow(const u32& rhs) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, rhs.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ \ /** Checked exponentiation. Computes `##T##::pow(exp)`, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_pow(const u32& rhs) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, rhs.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Raises self to the power of `exp`, using exponentiation by squaring. \ * \ * Returns a tuple of the exponentiation along with a bool indicating \ * whether an overflow happened. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_pow(const u32& exp) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, exp.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Wrapping (modular) exponentiation. Computes self.pow(exp), wrapping \ * around at the boundary of the type. \ */ \ constexpr T wrapping_pow(const u32& exp) const& noexcept { \ return __private::wrapping_pow(primitive_value, exp.primitive_value); \ } \ static_assert(true) #define _sus__unsigned_log(T) \ /** Returns the base 2 logarithm of the number, rounded down. \ * \ * Returns None if the number is zero. 
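// Usage sketch (added for illustration, not from the original source): the
// pow and log helpers documented here, on the u32 type defined later in this
// file. Expected results are noted in the comments.
inline void example_unsigned_pow_log() {
  const auto three = u32(uint32_t{3});
  auto p = three.pow(u32(uint32_t{4}));            // 81; pow() panics on overflow.
  auto cp = three.checked_pow(u32(uint32_t{32}));  // None: 3^32 does not fit in 32 bits.
  auto lg = u32(uint32_t{1000}).checked_log10();   // Some(3).
  auto l2 = u32(uint32_t{8}).log2();               // 3; log2() panics on zero.
  (void)p; (void)cp; (void)lg; (void)l2;
}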
\ */ \ constexpr Option<u32> checked_log2() const& { \ if (primitive_value == 0u) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ uint32_t zeros = \ __private::leading_zeros_nonzero(unsafe_fn, primitive_value); \ return Option<u32>::some(BITS() - u32(1u) - u32(zeros)); \ } \ } \ \ /** Returns the base 2 logarithm of the number, rounded down. \ * \ * # Panics \ * When the number is zero the function will panic. \ \ */ \ constexpr u32 log2() const& { \ /* TODO: Allow opting out of all overflow checks? */ \ return checked_log2().unwrap(); \ } \ \ /** Returns the base 10 logarithm of the number, rounded down. \ * \ * Returns None if the number is zero. \ */ \ constexpr Option<u32> checked_log10() const& { \ if (primitive_value == 0u) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ return Option<u32>::some(__private::int_log10::T(primitive_value)); \ } \ } \ \ /** Returns the base 10 logarithm of the number, rounded down. \ * \ * # Panics \ * When the number is zero the function will panic. \ */ \ constexpr u32 log10() const& { \ /* TODO: Allow opting out of all overflow checks? */ \ return checked_log10().unwrap(); \ } \ \ /** Returns the logarithm of the number with respect to an arbitrary base, \ * rounded down. \ * \ * Returns None if the number is zero, or if the base is not at least 2. \ * \ * This method might not be optimized owing to implementation details; \ * `checked_log2` can produce results more efficiently for base 2, and \ * `checked_log10` can produce results more efficiently for base 10. \ */ \ constexpr Option<u32> checked_log(const T& base) const& noexcept { \ if (primitive_value == 0u || base.primitive_value <= 1u) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ auto n = uint32_t{0u}; \ auto r = primitive_value; \ const auto b = base.primitive_value; \ while (r >= b) { \ r /= b; \ n += 1u; \ } \ return Option<u32>::some(n); \ } \ } \ \ /** Returns the logarithm of the number with respect to an arbitrary base, \ * rounded down. \ * \ * This method might not be optimized owing to implementation details; log2 \ * can produce results more efficiently for base 2, and log10 can produce \ * results more efficiently for base 10. \ * \ * # Panics \ * When the number is zero, or if the base is not at least 2, the function \ * will panic. \ */ \ constexpr u32 log(const T& base) const& noexcept { \ return checked_log(base).unwrap(); \ } \ static_assert(true) #define _sus__unsigned_power_of_two(T, PrimitiveT) \ /** Returns the smallest power of two greater than or equal to self. \ * \ * # Panics \ * The function panics when the return value overflows (i.e., `self > (1 << \ * (N-1))` for type uN). */ \ constexpr T next_power_of_two() noexcept { \ const auto one_less = \ __private::one_less_than_next_power_of_two(primitive_value); \ return T(one_less) + T(PrimitiveT{1u}); \ } \ \ /** Returns the smallest power of two greater than or equal to n. \ * \ * If the next power of two is greater than the type's maximum value, None \ * is returned, otherwise the power of two is wrapped in Some. \ */ \ constexpr Option<T> checked_next_power_of_two() noexcept { \ const auto one_less = \ __private::one_less_than_next_power_of_two(primitive_value); \ return T(one_less).checked_add(T(PrimitiveT{1u})); \ } \ \ /** Returns the smallest power of two greater than or equal to n. \ * \ * If the next power of two is greater than the type's maximum value, the \ * return value is wrapped to 0. 
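// Usage sketch (added for illustration, not from the original source): the
// power-of-two helpers documented here, on the u32 type defined later in this
// file. Expected results are noted in the comments.
inline void example_unsigned_power_of_two() {
  auto a = u32(uint32_t{3}).next_power_of_two();                    // 4
  auto b = u32(uint32_t{8}).next_power_of_two();                    // 8: already a power of two.
  auto c = u32(uint32_t{0xFFFFFFFF}).checked_next_power_of_two();   // None: would overflow.
  auto d = u32(uint32_t{0xFFFFFFFF}).wrapping_next_power_of_two();  // 0: wraps around.
  (void)a; (void)b; (void)c; (void)d;
}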
\ */ \ constexpr T wrapping_next_power_of_two() noexcept { \ const auto one_less = \ __private::one_less_than_next_power_of_two(primitive_value); \ return T(one_less).wrapping_add(T(PrimitiveT{1u})); \ } \ static_assert(true) #define _sus__unsigned_endian(T, PrimitiveT, Bytes) \ /** Converts an integer from big endian to the target's endianness. \ * \ * On big endian this is a no-op. On little endian the bytes are swapped. \ */ \ static constexpr T from_be(const T& x) noexcept { \ if (::sus::assertions::is_big_endian()) \ return x; \ else \ return x.swap_bytes(); \ } \ \ /** Converts an integer from little endian to the target's endianness. \ * \ * On little endian this is a no-op. On big endian the bytes are swapped. \ */ \ static constexpr T from_le(const T& x) noexcept { \ if (::sus::assertions::is_little_endian()) \ return x; \ else \ return x.swap_bytes(); \ } \ \ /** Converts self to big endian from the target's endianness. \ * \ * On big endian this is a no-op. On little endian the bytes are swapped. \ */ \ constexpr T to_be() const& noexcept { \ if (::sus::assertions::is_big_endian()) \ return *this; \ else \ return swap_bytes(); \ } \ \ /** Converts self to little endian from the target's endianness. \ * \ * On little endian this is a no-op. On big endian the bytes are swapped. \ */ \ constexpr T to_le() const& noexcept { \ if (::sus::assertions::is_little_endian()) \ return *this; \ else \ return swap_bytes(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * big-endian (network) byte order. \ */ \ template <sus_clang_bug_58835_else(int&..., ) class Array = \ ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_be_bytes() const& noexcept { \ return to_be().to_ne_bytes sus_clang_bug_58835(<Array>)(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * little-endian byte order. \ */ \ template <sus_clang_bug_58835_else(int&..., ) class Array = \ ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_le_bytes() const& noexcept { \ return to_le().to_ne_bytes sus_clang_bug_58835(<Array>)(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * native byte order. \ * \ * As the target platform's native endianness is used, portable code should \ * use `to_be_bytes()` or `to_le_bytes()`, as appropriate, instead. \ */ \ template <sus_clang_bug_58835_else(int&..., ) class Array = \ ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_ne_bytes() const& noexcept { \ if (std::is_constant_evaluated()) { \ auto bytes = Array::with_value(uint8_t{0}); \ auto uval = primitive_value; \ for (auto i = size_t{0}; i < Bytes; ++i) { \ const auto last_byte = static_cast<uint8_t>(uval & 0xff); \ if (sus::assertions::is_little_endian()) \ bytes[i] = last_byte; \ else \ bytes[Bytes - 1 - i] = last_byte; \ /* If T is one byte, this shift would be UB. But it's also not needed \ since the loop will not run again. */ \ if constexpr (Bytes > 1) uval >>= 8u; \ } \ return bytes; \ } else { \ auto bytes = Array::with_uninitialized(unsafe_fn); \ memcpy(bytes.as_mut_ptr(), &primitive_value, Bytes); \ return bytes; \ } \ } \ \ /** Create an integer value from its representation as a byte array in big \ * endian. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_be_bytes(const Array& bytes) noexcept { \ return from_be(from_ne_bytes(bytes)); \ } \ \ /** Create an integer value from its representation as a byte array in \ * little endian. 
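// Usage sketch (added for illustration, not from the original source):
// round-tripping a u32 through its big-endian byte representation with the
// methods documented here, using the u32 type defined later in this file.
inline void example_unsigned_endian() {
  const auto v = u32(uint32_t{0x12345678});
  auto be = v.to_be_bytes();           // Array<u8, 4> holding { 0x12, 0x34, 0x56, 0x78 }.
  auto back = u32::from_be_bytes(be);  // back.primitive_value == 0x12345678u.
  (void)back;
}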
\ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_le_bytes(const Array& bytes) noexcept { \ return from_le(from_ne_bytes(bytes)); \ } \ \ /** Create an integer value from its memory representation as a byte array \ * in native endianness. \ * \ * As the target platform's native endianness is used, portable code likely \ * wants to use `from_be_bytes()` or `from_le_bytes()`, as appropriate \ * instead. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_ne_bytes(const Array& bytes) noexcept { \ PrimitiveT val; \ if (std::is_constant_evaluated()) { \ val = 0u; \ for (auto i = size_t{0}; i < Bytes; ++i) { \ /* Shift each byte into place by a multiple of 8 bits, respecting the \ native endianness; the cast avoids integer promotion of narrow \ types. */ \ const auto b = static_cast<PrimitiveT>(bytes[i].primitive_value); \ if (sus::assertions::is_little_endian()) \ val |= static_cast<PrimitiveT>(b << (8u * i)); \ else \ val |= static_cast<PrimitiveT>(b << (8u * (Bytes - 1 - i))); \ } \ } else { \ memcpy(&val, bytes.as_ptr(), Bytes); \ } \ return val; \ } \ static_assert(true) namespace sus::num { // TODO: from_str_radix(). Need Result type and Errors. // TODO: Split apart the declarations and the definitions? Then they can be in // u32_defn.h and u32_impl.h, allowing most of the library to just use // u32_defn.h which will keep some headers smaller. But then the combined // headers are larger, is that worse? /// A 32-bit unsigned integer. struct u32 final { _sus__unsigned_impl(u32, /*PrimitiveT=*/uint32_t, /*SignedT=*/i32); /** Construction from the underlying primitive type. */ template <std::same_as<decltype(primitive_value)> P> // Prevent implicit conversions. constexpr inline u32(P val) noexcept : primitive_value(val) {} }; /// An 8-bit unsigned integer. struct u8 final { _sus__unsigned_impl(u8, /*PrimitiveT=*/uint8_t, /*SignedT=*/i8); /** Construction from the underlying primitive type. */ template <std::same_as<decltype(primitive_value)> P> // Prevent implicit conversions. constexpr inline u8(P val) noexcept : primitive_value(val) {} }; /// A 16-bit unsigned integer. struct u16 final { _sus__unsigned_impl(u16, /*PrimitiveT=*/uint16_t, /*SignedT=*/i16); /** Construction from the underlying primitive type. */ template <std::same_as<decltype(primitive_value)> P> // Prevent implicit conversions. constexpr inline u16(P val) noexcept : primitive_value(val) {} }; /// A 64-bit unsigned integer. struct u64 final { _sus__unsigned_impl(u64, /*PrimitiveT=*/uint64_t, /*SignedT=*/i64); /** Construction from the underlying primitive type. */ template <std::same_as<decltype(primitive_value)> P> // Prevent implicit conversions. constexpr inline u64(P val) noexcept : primitive_value(val) {} }; /// A pointer-sized unsigned integer. struct usize final { _sus__unsigned_impl( usize, /*PrimitiveT=*/::sus::num::__private::ptr_type<>::unsigned_type, /*SignedT=*/isize); /** Construction from an unsigned literal. */ constexpr inline usize(uint8_t val) noexcept : primitive_value(static_cast<decltype(primitive_value)>(val)) {} /** Construction from an unsigned literal. */ constexpr inline usize(uint16_t val) noexcept : primitive_value(static_cast<decltype(primitive_value)>(val)) {} /** Construction from an unsigned literal. */ constexpr inline usize(uint32_t val) noexcept : primitive_value(static_cast<decltype(primitive_value)>(val)) {} /** Construction from an unsigned literal. 
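// Usage sketch (added for illustration, not from the original source): the
// constructors above accept only the exact underlying primitive type, so
// implicit integer conversions are rejected at compile time.
inline void example_construction() {
  auto a = u32(uint32_t{42});    // OK: uint32_t is u32's primitive type.
  // u32 b(uint16_t{42});        // Would not compile: the constructor is
  //                             // constrained with std::same_as<uint32_t>.
  auto c = usize(uint16_t{42});  // OK: usize has dedicated constructors for
                                 // uint8_t through uint64_t.
  (void)a; (void)c;
}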
*/ constexpr inline usize(uint64_t val) : primitive_value(static_cast<decltype(primitive_value)>(val)) { if (std::is_constant_evaluated()) { if (val > uint64_t{MAX_PRIMITIVE}) [[unlikely]] throw "usize construction from literal is out of bounds"; } else { check(val <= uint64_t{MAX_PRIMITIVE}); } } /** Converts to its primitive value implicitly, just as it can convert from a * primitive value. */ constexpr operator decltype(primitive_value)() { return primitive_value; } }; } // namespace sus::num _sus__integer_literal(u8, ::sus::num::u8); _sus__integer_literal(u16, ::sus::num::u16); _sus__integer_literal(u32, ::sus::num::u32); _sus__integer_literal(u64, ::sus::num::u64); _sus__integer_literal(usize, ::sus::num::usize); // Promote unsigned integer types into the top-level namespace. using sus::num::u16; using sus::num::u32; using sus::num::u64; using sus::num::u8; using sus::num::usize; namespace sus::containers { template <class T, size_t N> requires(N <= PTRDIFF_MAX) class Array; } namespace sus::tuple { template <class T, class... Ts> class Tuple; } #define _sus__signed_impl(T, PrimitiveT, UnsignedT) \ _sus__signed_storage(PrimitiveT); \ _sus__signed_constants(T, PrimitiveT); \ _sus__signed_construct(T, PrimitiveT); \ _sus__signed_from(T, PrimitiveT); \ _sus__signed_integer_comparison(T, PrimitiveT); \ _sus__signed_unary_ops(T); \ _sus__signed_binary_logic_ops(T, PrimitiveT); \ _sus__signed_binary_bit_ops(T, PrimitiveT); \ _sus__signed_mutable_logic_ops(T); \ _sus__signed_mutable_bit_ops(T); \ _sus__signed_abs(T, PrimitiveT, UnsignedT); \ _sus__signed_add(T, UnsignedT); \ _sus__signed_div(T); \ _sus__signed_mul(T); \ _sus__signed_neg(T); \ _sus__signed_rem(T, PrimitiveT); \ _sus__signed_euclid(T, PrimitiveT); \ _sus__signed_shift(T); \ _sus__signed_sub(T, PrimitiveT, UnsignedT); \ _sus__signed_bits(T); \ _sus__signed_pow(T); \ _sus__signed_log(T); \ _sus__signed_endian(T, UnsignedT, sizeof(PrimitiveT)) #define _sus__signed_storage(PrimitiveT) \ /** The inner primitive value, in case it needs to be unwrapped from the \ * type. Avoid using this member except to convert when a consumer requires \ * it. \ */ \ PrimitiveT primitive_value { 0 } #define _sus__signed_constants(T, PrimitiveT) \ static constexpr auto MIN_PRIMITIVE = __private::min_value<PrimitiveT>(); \ static constexpr auto MAX_PRIMITIVE = __private::max_value<PrimitiveT>(); \ static constexpr inline T MIN() noexcept { return MIN_PRIMITIVE; } \ static constexpr inline T MAX() noexcept { return MAX_PRIMITIVE; } \ static constexpr inline u32 BITS() noexcept { \ return __private::num_bits<PrimitiveT>(); \ } \ static_assert(true) #define _sus__signed_construct(T, PrimitiveT) \ /** Default constructor, which sets the integer to 0. \ * \ * The trivial copy and move constructors are implicitly declared, as is the \ * trivial destructor. \ */ \ constexpr inline T() noexcept = default; \ \ /** Construction from the underlying primitive type. \ */ \ template <std::same_as<PrimitiveT> P> /* Prevent implicit conversions. */ \ constexpr inline T(P val) noexcept : primitive_value(val) {} \ \ /** Assignment from the underlying primitive type. \ */ \ template <std::same_as<PrimitiveT> P> /* Prevent implicit conversions. */ \ constexpr inline void operator=(P v) noexcept { \ primitive_value = v; \ } \ static_assert(true) #define _sus__signed_from(T, PrimitiveT) \ /** Constructs a ##T## from a signed integer type (i8, i16, i32, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. 
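// Usage sketch (added for illustration, not from the original source): the
// checked from() conversions documented here, assuming the i8 and i32 types
// that these signed macros are instantiated for later in this file.
// Out-of-range inputs panic rather than silently truncating.
inline void example_signed_from() {
  auto a = i32::from(42);                // From a signed primitive; in range, so OK.
  auto b = i8::from(i32(int32_t{100}));  // 100 fits in an i8, so OK.
  // i8::from(i32(int32_t{1000}));       // Would panic: 1000 is out of range for i8.
  (void)a; (void)b;
}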
\ */ \ template <Signed S> \ static constexpr T from(S s) noexcept { \ if constexpr (MIN_PRIMITIVE > S::MIN_PRIMITIVE) \ ::sus::check(s.primitive_value >= MIN_PRIMITIVE); \ if constexpr (MAX_PRIMITIVE < S::MAX_PRIMITIVE) \ ::sus::check(s.primitive_value <= MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(s.primitive_value)); \ } \ \ /** Constructs a ##T## from an unsigned integer type (u8, u16, u32, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <Unsigned U> \ static constexpr T from(U u) noexcept { \ constexpr auto umax = __private::into_unsigned(MAX_PRIMITIVE); \ if constexpr (umax < U::MAX_PRIMITIVE) { \ ::sus::check(u.primitive_value <= umax); \ } \ return T(static_cast<PrimitiveT>(u.primitive_value)); \ } \ \ /** Constructs a ##T## from a signed primitive integer type (int, long, \ * etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <SignedPrimitiveInteger S> \ static constexpr T from(S s) { \ if constexpr (MIN_PRIMITIVE > __private::min_value<S>()) \ ::sus::check(s >= MIN_PRIMITIVE); \ if constexpr (MAX_PRIMITIVE < __private::max_value<S>()) \ ::sus::check(s <= MAX_PRIMITIVE); \ return T(static_cast<PrimitiveT>(s)); \ } \ \ /** Constructs a ##T## from an unsigned primitive integer type (unsigned \ * int, unsigned long, etc). \ * \ * # Panics \ * The function will panic if the input value is out of range for ##T##. \ */ \ template <UnsignedPrimitiveInteger U> \ static constexpr T from(U u) { \ constexpr auto umax = __private::into_unsigned(MAX_PRIMITIVE); \ if constexpr (umax < __private::max_value<U>()) { \ ::sus::check(u <= umax); \ } \ return T(static_cast<PrimitiveT>(u)); \ } \ static_assert(true) #define _sus__signed_integer_comparison(T, PrimitiveT) \ /** Returns true if the current value is negative and false if the number is \ * zero or positive. \ */ \ constexpr bool is_negative() const& noexcept { return primitive_value < 0; } \ /** Returns true if the current value is positive and false if the number is \ * zero or negative. \ */ \ constexpr bool is_positive() const& noexcept { return primitive_value > 0; } \ \ /** Returns a number representing the sign of the current value. \ * \ * - 0 if the number is zero \ * - 1 if the number is positive \ * - -1 if the number is negative \ */ \ constexpr T signum() const& noexcept { \ if (primitive_value < 0) \ return PrimitiveT{-1}; \ else \ return PrimitiveT{primitive_value != 0}; \ } \ \ /** sus::concepts::Eq<##T##> trait. */ \ friend constexpr inline bool operator==(const T& l, const T& r) noexcept { \ return (l.primitive_value <=> r.primitive_value) == 0; \ } \ /** sus::concepts::Ord<##T##> trait. */ \ friend constexpr inline auto operator<=>(const T& l, const T& r) noexcept { \ return l.primitive_value <=> r.primitive_value; \ } \ static_assert(true) #define _sus__signed_unary_ops(T) \ /** sus::concepts::Neg trait. */ \ constexpr inline T operator-() const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(primitive_value != MIN_PRIMITIVE); \ return __private::unchecked_neg(primitive_value); \ } \ /** sus::concepts::BitNot trait. */ \ constexpr inline T operator~() const& noexcept { \ return __private::into_signed( \ __private::unchecked_not(__private::into_unsigned(primitive_value))); \ } \ static_assert(true) #define _sus__signed_binary_logic_ops(T, PrimitiveT) \ /** sus::concepts::Add<##T##> trait. 
*/ \ friend constexpr inline T operator+(const T& l, const T& r) noexcept { \ const auto out = \ __private::add_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Sub<##T##> trait. */ \ friend constexpr inline T operator-(const T& l, const T& r) noexcept { \ const auto out = \ __private::sub_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Mul<##T##> trait. */ \ friend constexpr inline T operator*(const T& l, const T& r) noexcept { \ const auto out = \ __private::mul_with_overflow(l.primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ /** sus::concepts::Div<##T##> trait. */ \ friend constexpr inline T operator/(const T& l, const T& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(l.primitive_value != MIN_PRIMITIVE || \ r.primitive_value != -1); \ return static_cast<PrimitiveT>(l.primitive_value / r.primitive_value); \ } \ /** sus::concepts::Rem<##T##> trait. */ \ friend constexpr inline T operator%(const T& l, const T& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(l.primitive_value != MIN_PRIMITIVE || \ r.primitive_value != -1); \ return static_cast<PrimitiveT>(l.primitive_value % r.primitive_value); \ } \ static_assert(true) #define _sus__signed_binary_bit_ops(T, PrimitiveT) \ /** sus::concepts::BitAnd<##T##> trait. */ \ friend constexpr inline T operator&(const T& l, const T& r) noexcept { \ return static_cast<PrimitiveT>(l.primitive_value & r.primitive_value); \ } \ /** sus::concepts::BitOr<##T##> trait. */ \ friend constexpr inline T operator|(const T& l, const T& r) noexcept { \ return static_cast<PrimitiveT>(l.primitive_value | r.primitive_value); \ } \ /** sus::concepts::BitXor<##T##> trait. */ \ friend constexpr inline T operator^(const T& l, const T& r) noexcept { \ return static_cast<PrimitiveT>(l.primitive_value ^ r.primitive_value); \ } \ /** sus::concepts::Shl trait. */ \ friend constexpr inline T operator<<(const T& l, const u32& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ return __private::into_signed(__private::unchecked_shl( \ __private::into_unsigned(l.primitive_value), r.primitive_value)); \ } \ /** sus::concepts::Shr trait. */ \ friend constexpr inline T operator>>(const T& l, const u32& r) noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ return __private::into_signed(__private::unchecked_shr( \ __private::into_unsigned(l.primitive_value), r.primitive_value)); \ } \ static_assert(true) #define _sus__signed_mutable_logic_ops(T) \ /** sus::concepts::AddAssign<##T##> trait. */ \ constexpr inline void operator+=(T r)& noexcept { \ const auto out = \ __private::add_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::SubAssign<##T##> trait. 
*/ \ constexpr inline void operator-=(T r)& noexcept { \ const auto out = \ __private::sub_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::MulAssign<##T##> trait. */ \ constexpr inline void operator*=(T r)& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, r.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ primitive_value = out.value; \ } \ /** sus::concepts::DivAssign<##T##> trait. */ \ constexpr inline void operator/=(T r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(primitive_value != MIN_PRIMITIVE || r.primitive_value != -1); \ primitive_value /= r.primitive_value; \ } \ /** sus::concepts::RemAssign<##T##> trait. */ \ constexpr inline void operator%=(T r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r.primitive_value != 0); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(primitive_value != MIN_PRIMITIVE || r.primitive_value != -1); \ primitive_value %= r.primitive_value; \ } \ static_assert(true) #define _sus__signed_mutable_bit_ops(T) \ /** sus::concepts::BitAndAssign<##T##> trait. */ \ constexpr inline void operator&=(T r)& noexcept { \ primitive_value &= r.primitive_value; \ } \ /** sus::concepts::BitOrAssign<##T##> trait. */ \ constexpr inline void operator|=(T r)& noexcept { \ primitive_value |= r.primitive_value; \ } \ /** sus::concepts::BitXorAssign<##T##> trait. */ \ constexpr inline void operator^=(T r)& noexcept { \ primitive_value ^= r.primitive_value; \ } \ /** sus::concepts::ShlAssign trait. */ \ constexpr inline void operator<<=(const u32& r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ primitive_value = __private::into_signed(__private::unchecked_shl( \ __private::into_unsigned(primitive_value), r.primitive_value)); \ } \ /** sus::concepts::ShrAssign trait. */ \ constexpr inline void operator>>=(const u32& r)& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(r < BITS()); \ primitive_value = __private::into_signed(__private::unchecked_shr( \ __private::into_unsigned(primitive_value), r.primitive_value)); \ } \ static_assert(true) #define _sus__signed_abs(T, PrimitiveT, UnsignedT) \ /** Computes the absolute value of itself. \ * \ * The absolute value of ##T##::MIN() cannot be represented as an ##T##, and \ * attempting to calculate it will panic. \ */ \ constexpr inline T abs() const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(primitive_value != MIN_PRIMITIVE); \ if (primitive_value >= 0) \ return primitive_value; \ else \ return __private::unchecked_neg(primitive_value); \ } \ \ /** Checked absolute value. Computes `abs()`, returning None if the current \ * value is MIN(). \ */ \ constexpr Option<T> checked_abs() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return Option<T>::some(abs()); \ else \ return Option<T>::none(); \ } \ \ /** Computes the absolute value of self. \ * \ * Returns a tuple of the absolute version of self along with a boolean \ * indicating whether an overflow happened. 
If self is the minimum value \ * (e.g., ##T##::MIN for values of type ##T##), then the minimum value will \ * be returned again and true will be returned for an overflow happening. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_abs() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return Tuple::with(abs(), false); \ else \ return Tuple::with(MIN(), true); \ } \ \ /** Saturating absolute value. Computes `abs()`, returning MAX if the \ * current value is MIN() instead of overflowing. \ */ \ constexpr T saturating_abs() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return abs(); \ else \ return MAX(); \ } \ \ /** Computes the absolute value of self without any wrapping or panicking. \ */ \ constexpr UnsignedT unsigned_abs() const& noexcept { \ if (primitive_value >= 0) { \ return __private::into_unsigned(primitive_value); \ } else { \ const auto neg_plus_one = \ __private::unchecked_add(primitive_value, PrimitiveT{1}); \ const auto pos_minus_one = \ __private::into_unsigned(__private::unchecked_neg(neg_plus_one)); \ return __private::unchecked_add(pos_minus_one, \ decltype(pos_minus_one){1}); \ } \ } \ \ /** Wrapping (modular) absolute value. Computes `this->abs()`, wrapping \ * around at the boundary of the type. \ * \ * The only case where such wrapping can occur is when one takes the \ * absolute value of the negative minimal value for the type; this is a \ * positive value that is too large to represent in the type. In such a \ * case, this function returns MIN itself. \ */ \ constexpr T wrapping_abs() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return abs(); \ else \ return MIN(); \ } \ \ /** Computes the absolute difference between self and other. \ * \ * This function always returns the correct answer without overflow or \ * panics by returning an unsigned integer. \ */ \ constexpr UnsignedT abs_diff(const T& r) const& noexcept { \ if (primitive_value >= r.primitive_value) \ return __private::into_unsigned( \ __private::unchecked_sub(primitive_value, r.primitive_value)); \ else \ return __private::into_unsigned( \ __private::unchecked_sub(r.primitive_value, primitive_value)); \ } \ static_assert(true) #define _sus__signed_add(T, UnsignedT) \ /** Checked integer addition. Computes self + rhs, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_add(const T& rhs) const& noexcept { \ const auto out = \ __private::add_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Checked integer addition with an unsigned rhs. Computes self + rhs, \ * returning None if overflow occurred. \ */ \ constexpr Option<T> checked_add_unsigned(const UnsignedT& rhs) \ const& noexcept { \ const auto out = __private::add_with_overflow_unsigned( \ primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates self + rhs \ * \ * Returns a tuple of the addition along with a boolean indicating whether \ * an arithmetic overflow would occur. If an overflow would have occurred \ * then the wrapped value is returned. 
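// Usage sketch (added for illustration, not from the original source): the
// signed absolute-value and addition methods documented here, assuming the
// i32 type these macros are instantiated for later in this file.
inline void example_signed_abs_add() {
  const auto neg = i32(int32_t{-5});
  auto a = neg.abs();                                 // 5; abs() of i32::MIN() would panic.
  auto u = neg.unsigned_abs();                        // u32 holding 5; never panics or wraps.
  auto s = i32::MAX().checked_add(i32(int32_t{1}));   // None: would overflow.
  auto w = i32::MAX().wrapping_add(i32(int32_t{1}));  // i32::MIN(): wraps around.
  (void)a; (void)u; (void)s; (void)w;
}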
\ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_add(const T& rhs) const& noexcept { \ const auto r = \ __private::add_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(r.value, r.overflow); \ } \ \ /** Calculates self + rhs with an unsigned rhs \ * \ * Returns a tuple of the addition along with a boolean indicating whether \ * an arithmetic overflow would occur. If an overflow would have occurred \ * then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_add_unsigned(const UnsignedT& rhs) \ const& noexcept { \ const auto r = __private::add_with_overflow_unsigned(primitive_value, \ rhs.primitive_value); \ return Tuple::with(r.value, r.overflow); \ } \ \ /** Saturating integer addition. Computes self + rhs, saturating at the \ * numeric bounds instead of overflowing. \ */ \ constexpr T saturating_add(const T& rhs) const& noexcept { \ return __private::saturating_add(primitive_value, rhs.primitive_value); \ } \ \ /** Saturating integer addition with an unsigned rhs. Computes self + rhs, \ * saturating at the numeric bounds instead of overflowing. \ */ \ constexpr T saturating_add_unsigned(const UnsignedT& rhs) const& noexcept { \ const auto r = __private::add_with_overflow_unsigned(primitive_value, \ rhs.primitive_value); \ if (!r.overflow) [[likely]] \ return r.value; \ else \ return MAX(); \ } \ \ /** Unchecked integer addition. Computes self + rhs, assuming overflow \ * cannot occur. \ * \ * # Safety \ * This results in undefined behavior when self + rhs > ##T##::MAX() or self \ * + rhs < ##T##::MIN(), i.e. when checked_add() would return None. \ */ \ inline constexpr T unchecked_add(::sus::marker::UnsafeFnMarker, \ const T& rhs) const& noexcept { \ return __private::unchecked_add(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) addition. Computes self + rhs, wrapping around at the \ * boundary of the type. \ */ \ constexpr T wrapping_add(const T& rhs) const& noexcept { \ return __private::wrapping_add(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) addition with an unsigned rhs. Computes self + rhs, \ * wrapping around at the boundary of the type. \ */ \ constexpr T wrapping_add_unsigned(const UnsignedT& rhs) const& noexcept { \ return __private::add_with_overflow_unsigned(primitive_value, \ rhs.primitive_value) \ .value; \ } \ static_assert(true) #define _sus__signed_div(T) \ /** Checked integer division. Computes self / rhs, returning None if rhs == \ * 0 or the division results in overflow. \ */ \ constexpr Option<T> checked_div(const T& rhs) const& noexcept { \ if (__private::div_overflows(primitive_value, rhs.primitive_value)) \ [[unlikely]] \ return Option<T>::none(); \ else \ return Option<T>::some( \ __private::unchecked_div(primitive_value, rhs.primitive_value)); \ } \ \ /** Calculates the divisor when self is divided by rhs. \ * \ * Returns a tuple of the divisor along with a boolean indicating whether an \ * arithmetic overflow would occur. If an overflow would occur then self is \ * returned. \ * \ * #Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? 
*/ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return Tuple::with(MIN(), true); \ } else { \ return Tuple::with( \ __private::unchecked_div(primitive_value, rhs.primitive_value), \ false); \ } \ } \ \ /** Saturating integer division. Computes self / rhs, saturating at the \ * numeric bounds instead of overflowing. \ * \ * #Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T saturating_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ /* Only overflows in the case of -MIN() / -1, which gives MAX() + 1, \ saturated to MAX(). */ \ return MAX(); \ } else { \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ } \ \ /** Wrapping (modular) division. Computes self / rhs, wrapping around at the \ * boundary of the type. \ * \ * The only case where such wrapping can occur is when one divides MIN / -1 \ * on a signed type (where MIN is the negative minimal value for the type); \ * this is equivalent to -MIN, a positive value that is too large to \ * represent in the type. In such a case, this function returns MIN itself. \ * \ * #Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_div(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ /* Only overflows in the case of -MIN() / -1, which gives MAX() + 1, \ that wraps around to MIN(). */ \ return MIN(); \ } else { \ return __private::unchecked_div(primitive_value, rhs.primitive_value); \ } \ } \ static_assert(true) #define _sus__signed_mul(T) \ /** Checked integer multiplication. Computes self * rhs, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_mul(const T& rhs) const& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates the multiplication of self and rhs. \ * \ * Returns a tuple of the multiplication along with a boolean indicating \ * whether an arithmetic overflow would occur. If an overflow would have \ * occurred then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_mul(const T& rhs) const& noexcept { \ const auto out = \ __private::mul_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Saturating integer multiplication. Computes self * rhs, saturating at \ * the numeric bounds instead of overflowing. \ */ \ constexpr T saturating_mul(const T& rhs) const& noexcept { \ return __private::saturating_mul(primitive_value, rhs.primitive_value); \ } \ \ /** Unchecked integer multiplication. Computes self * rhs, assuming overflow \ * cannot occur. \ * \ * # Safety \ * This results in undefined behavior when `self * rhs > ##T##::MAX()` or \ * `self \ * * rhs < ##T##::MIN()`, i.e. when `checked_mul()` would return None. 
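// Usage sketch (added for illustration, not from the original source): the
// single signed division case that can overflow, MIN() / -1, assuming the
// i32 type these macros are instantiated for later in this file.
inline void example_signed_div_overflow() {
  const auto min = i32::MIN();
  const auto neg_one = i32(int32_t{-1});
  auto a = min.checked_div(neg_one);     // None: -MIN() does not fit in i32.
  auto b = min.wrapping_div(neg_one);    // i32::MIN(): wraps back onto itself.
  auto c = min.saturating_div(neg_one);  // i32::MAX(): clamped at the bound.
  (void)a; (void)b; (void)c;
}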
\ */ \ constexpr inline T unchecked_mul(::sus::marker::UnsafeFnMarker, \ const T& rhs) const& noexcept { \ return __private::unchecked_mul(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) multiplication. Computes self * rhs, wrapping around \ * at the boundary of the type. \ */ \ constexpr T wrapping_mul(const T& rhs) const& noexcept { \ return __private::wrapping_mul(primitive_value, rhs.primitive_value); \ } \ static_assert(true) #define _sus__signed_neg(T) \ /** Checked negation. Computes -self, returning None if self == MIN. \ */ \ constexpr Option<T> checked_neg() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return Option<T>::some(__private::unchecked_neg(primitive_value)); \ else \ return Option<T>::none(); \ } \ \ /** Negates self, overflowing if this is equal to the minimum value. \ * \ * Returns a tuple of the negated version of self along with a boolean \ * indicating whether an overflow happened. If self is the minimum value \ * (e.g., ##T##::MIN for values of type ##T##), then the minimum value will \ * be returned again and true will be returned for an overflow happening. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_neg() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return Tuple::with(__private::unchecked_neg(primitive_value), false); \ else \ return Tuple::with(MIN(), true); \ } \ \ /** Saturating integer negation. Computes -self, returning MAX if self == \ * MIN instead of overflowing. \ */ \ constexpr T saturating_neg() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return __private::unchecked_neg(primitive_value); \ else \ return MAX(); \ } \ \ /** Wrapping (modular) negation. Computes -self, wrapping around at the \ * boundary of the type. \ * \ * The only case where such wrapping can occur is when one negates MIN() on \ * a signed type (where MIN() is the negative minimal value for the type); \ * this is a positive value that is too large to represent in the type. In \ * such a case, this function returns MIN() itself. \ */ \ constexpr T wrapping_neg() const& noexcept { \ if (primitive_value != MIN_PRIMITIVE) [[likely]] \ return __private::unchecked_neg(primitive_value); \ else \ return MIN(); \ } \ static_assert(true) #define _sus__signed_rem(T, PrimitiveT) \ /** Checked integer remainder. Computes self % rhs, returning None if rhs == \ * 0 or the division results in overflow. \ */ \ constexpr Option<T> checked_rem(const T& rhs) const& noexcept { \ if (__private::div_overflows(primitive_value, rhs.primitive_value)) \ [[unlikely]] \ return Option<T>::none(); \ else \ return Option<T>::some( \ static_cast<PrimitiveT>(primitive_value % rhs.primitive_value)); \ } \ \ /** Calculates the remainder when self is divided by rhs. \ * \ * Returns a tuple of the remainder after dividing along with a boolean \ * indicating whether an arithmetic overflow would occur. If an overflow \ * would occur then 0 is returned. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_rem(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? 
*/ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return Tuple::with(PrimitiveT{0}, true); \ } else { \ return Tuple::with( \ static_cast<PrimitiveT>(primitive_value % rhs.primitive_value), \ false); \ } \ } \ \ /** Wrapping (modular) remainder. Computes self % rhs, wrapping around at \ * the boundary of the type. \ * \ * Such wrap-around never actually occurs mathematically; implementation \ * artifacts make x % y invalid for MIN() / -1 on a signed type (where MIN() \ * is the negative minimal value). In such a case, this function returns 0. \ */ \ constexpr T wrapping_rem(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[likely]] { \ return PrimitiveT{0}; \ } else { \ return static_cast<PrimitiveT>(primitive_value % rhs.primitive_value); \ } \ } \ static_assert(true) #define _sus__signed_euclid(T, PrimitiveT) \ /** Calculates the quotient of Euclidean division of self by rhs. \ * \ * This computes the integer q such that self = q * rhs + r, with r = \ * self.rem_euclid(rhs) and 0 <= r < abs(rhs). \ * \ * In other words, the result is self / rhs rounded to the integer q such \ * that self >= q * rhs. If self > 0, this is equal to round towards zero \ * (the default in Rust); if self < 0, this is equal to round towards +/- \ * infinity. \ * \ * # Panics \ * This function will panic if rhs is 0 or the division results in overflow. \ */ \ constexpr T div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check( \ !__private::div_overflows(primitive_value, rhs.primitive_value)); \ return __private::div_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value); \ } \ \ /** Checked Euclidean division. Computes self.div_euclid(rhs), returning \ * None if rhs == 0 or the division results in overflow. \ */ \ constexpr Option<T> checked_div_euclid(const T& rhs) const& noexcept { \ if (__private::div_overflows(primitive_value, rhs.primitive_value)) \ [[unlikely]] { \ return Option<T>::none(); \ } else { \ return Option<T>::some(__private::div_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value)); \ } \ } \ \ /** Calculates the quotient of Euclidean division self.div_euclid(rhs). \ * \ * Returns a tuple of the divisor along with a boolean indicating whether an \ * arithmetic overflow would occur. If an overflow would occur then self is \ * returned. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return Tuple::with(MIN(), true); \ } else { \ return Tuple::with(__private::div_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value), \ false); \ } \ } \ \ /** Wrapping Euclidean division. Computes self.div_euclid(rhs), wrapping \ * around at the boundary of the type. \ * \ * Wrapping will only occur in MIN / -1 on a signed type (where MIN is the \ * negative minimal value for the type). This is equivalent to -MIN, a \ * positive value that is too large to represent in the type. 
In this case, \ * this method returns MIN itself. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_div_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return MIN(); \ } else { \ return __private::div_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value); \ } \ } \ \ /** Calculates the least nonnegative remainder of self (mod rhs). \ * \ * This is done as if by the Euclidean division algorithm – given r = \ * self.rem_euclid(rhs), self = rhs * self.div_euclid(rhs) + r, and 0 <= r < \ * abs(rhs). \ * \ * # Panics \ * This function will panic if rhs is 0 or the division results in overflow. \ */ \ constexpr T rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check( \ !__private::div_overflows(primitive_value, rhs.primitive_value)); \ return __private::rem_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value); \ } \ \ /** Checked Euclidean remainder. Computes self.rem_euclid(rhs), returning \ * None if rhs == 0 or the division results in overflow. \ */ \ constexpr Option<T> checked_rem_euclid(const T& rhs) const& noexcept { \ if (__private::div_overflows(primitive_value, rhs.primitive_value)) \ [[unlikely]] { \ return Option<T>::none(); \ } else { \ return Option<T>::some(__private::rem_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value)); \ } \ } \ \ /** Overflowing Euclidean remainder. Calculates self.rem_euclid(rhs). \ * \ * Returns a tuple of the remainder after dividing along with a boolean \ * indicating whether an arithmetic overflow would occur. If an overflow \ * would occur then 0 is returned. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return Tuple::with(PrimitiveT{0}, true); \ } else { \ return Tuple::with(__private::rem_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value), \ false); \ } \ } \ \ /** Wrapping Euclidean remainder. Computes self.rem_euclid(rhs), wrapping \ * around at the boundary of the type. \ * \ * Wrapping will only occur in MIN % -1 on a signed type (where MIN is the \ * negative minimal value for the type). In this case, this method returns \ * 0. \ * \ * # Panics \ * This function will panic if rhs is 0. \ */ \ constexpr T wrapping_rem_euclid(const T& rhs) const& noexcept { \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(rhs.primitive_value != 0); \ if (__private::div_overflows_nonzero(unsafe_fn, primitive_value, \ rhs.primitive_value)) [[unlikely]] { \ return PrimitiveT{0}; \ } else { \ return __private::rem_euclid(unsafe_fn, primitive_value, \ rhs.primitive_value); \ } \ } \ static_assert(true) #define _sus__signed_shift(T) \ /** Checked shift left. Computes `*this << rhs`, returning None if rhs is \ * larger than or equal to the number of bits in self. 
\ */ \ constexpr Option<T> checked_shl(const u32& rhs) const& noexcept { \ const auto out = \ __private::shl_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Shifts self left by rhs bits. \ * \ * Returns a tuple of the shifted version of self along with a boolean \ * indicating whether the shift value was larger than or equal to the number \ * of bits. If the shift value is too large, then value is masked (N-1) \ * where N is the number of bits, and this value is then used to perform the \ * shift. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_shl(const u32& rhs) const& noexcept { \ const auto out = \ __private::shl_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Panic-free bitwise shift-left; yields `*this << mask(rhs)`, where mask \ * removes any high-order bits of `rhs` that would cause the shift to exceed \ * the bitwidth of the type. \ * \ * Note that this is not the same as a rotate-left; the RHS of a wrapping \ * shift-left is restricted to the range of the type, rather than the bits \ * shifted out of the LHS being returned to the other end. The primitive \ * integer types all implement a rotate_left function, which may be what you \ * want instead. \ */ \ constexpr T wrapping_shl(const u32& rhs) const& noexcept { \ return __private::shl_with_overflow(primitive_value, rhs.primitive_value) \ .value; \ } \ \ /** Checked shift right. Computes `*this >> rhs`, returning None if rhs is \ * larger than or equal to the number of bits in self. \ */ \ constexpr Option<T> checked_shr(const u32& rhs) const& noexcept { \ const auto out = \ __private::shr_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Shifts self right by rhs bits. \ * \ * Returns a tuple of the shifted version of self along with a boolean \ * indicating whether the shift value was larger than or equal to the number \ * of bits. If the shift value is too large, then value is masked (N-1) \ * where N is the number of bits, and this value is then used to perform the \ * shift. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_shr(const u32& rhs) const& noexcept { \ const auto out = \ __private::shr_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Panic-free bitwise shift-right; yields `*this >> mask(rhs)`, where mask \ * removes any high-order bits of `rhs` that would cause the shift to exceed \ * the bitwidth of the type. \ * \ * Note that this is not the same as a rotate-right; the RHS of a wrapping \ * shift-right is restricted to the range of the type, rather than the bits \ * shifted out of the LHS being returned to the other end. The primitive \ * integer types all implement a rotate_right function, which may be what \ * you want instead. \ */ \ constexpr T wrapping_shr(const u32& rhs) const& noexcept { \ return __private::shr_with_overflow(primitive_value, rhs.primitive_value) \ .value; \ } \ static_assert(true) #define _sus__signed_sub(T, PrimitiveT, UnsignedT) \ /** Checked integer subtraction. Computes self - rhs, returning None if \ * overflow occurred. 
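The wrapping/overflowing shifts above mask an oversized shift amount down to the bit width instead of invoking undefined behaviour. A plain-C++ sketch of that masking, using the unsigned representation for simplicity (illustrative names, not the library's shl_with_overflow helper):

#include <cstdint>
#include <utility>

// Shift-left that reports overflow rather than invoking UB: a shift amount
// >= the bit width is masked to the low (width - 1) bits and flagged.
std::pair<uint32_t, bool> shl_with_overflow_sketch(uint32_t value, uint32_t shift) {
  constexpr uint32_t kBits = 32u;
  const bool overflow = shift >= kBits;
  return {value << (shift & (kBits - 1u)), overflow};
}
// shl_with_overflow_sketch(1u, 33u) yields {2u, true}: 33 is masked to 1.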
\ */ \ constexpr Option<T> checked_sub(const T& rhs) const& { \ const auto out = \ __private::sub_with_overflow(primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Checked integer subtraction with an unsigned rhs. Computes self - rhs, \ * returning None if overflow occurred. \ */ \ constexpr Option<T> checked_sub_unsigned(const UnsignedT& rhs) const& { \ const auto out = __private::sub_with_overflow_unsigned( \ primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Calculates self - rhs \ * \ * Returns a tuple of the subtraction along with a boolean indicating \ * whether an arithmetic overflow would occur. If an overflow would have \ * occurred then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_sub(const T& rhs) const& noexcept { \ const auto out = \ __private::sub_with_overflow(primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Calculates self - rhs with an unsigned rhs. \ * \ * Returns a tuple of the subtraction along with a boolean indicating \ * whether an arithmetic overflow would occur. If an overflow would have \ * occurred then the wrapped value is returned. \ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_sub_unsigned(const UnsignedT& rhs) \ const& noexcept { \ const auto out = __private::sub_with_overflow_unsigned( \ primitive_value, rhs.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Saturating integer subtraction. Computes self - rhs, saturating at the \ * numeric bounds instead of overflowing. \ */ \ constexpr T saturating_sub(const T& rhs) const& { \ return __private::saturating_sub(primitive_value, rhs.primitive_value); \ } \ \ /** Saturating integer subtraction with an unsigned rhs. Computes self - \ * rhs, saturating at the numeric bounds instead of overflowing. \ */ \ constexpr T saturating_sub_unsigned(const UnsignedT& rhs) const& { \ const auto out = __private::sub_with_overflow_unsigned( \ primitive_value, rhs.primitive_value); \ if (!out.overflow) [[likely]] \ return out.value; \ else \ return MIN(); \ } \ \ /** Unchecked integer subtraction. Computes self - rhs, assuming overflow \ * cannot occur. \ */ \ constexpr T unchecked_sub(::sus::marker::UnsafeFnMarker, const T& rhs) \ const& { \ return static_cast<PrimitiveT>(primitive_value - rhs.primitive_value); \ } \ \ /** Wrapping (modular) subtraction. Computes self - rhs, wrapping around at \ * the boundary of the type. \ */ \ constexpr T wrapping_sub(const T& rhs) const& { \ return __private::wrapping_sub(primitive_value, rhs.primitive_value); \ } \ \ /** Wrapping (modular) subtraction with an unsigned rhs. Computes self - \ * rhs, wrapping around at the boundary of the type. \ */ \ constexpr T wrapping_sub_unsigned(const UnsignedT& rhs) const& { \ return __private::sub_with_overflow_unsigned(primitive_value, \ rhs.primitive_value) \ .value; \ } \ static_assert(true) #define _sus__signed_bits(T) \ /** Returns the number of ones in the binary representation of the current \ * value. \ */ \ constexpr u32 count_ones() const& noexcept { \ return __private::count_ones(__private::into_unsigned(primitive_value)); \ } \ \ /** Returns the number of zeros in the binary representation of the current \ * value. 
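The subtraction family above comes in checked, overflowing, saturating, and wrapping flavours that all derive from one wrapped result plus an overflow test. A plain-C++ sketch of how the flavours relate (illustrative helper names, not the library's implementation):

#include <cstdint>
#include <limits>
#include <optional>

// Wrapping subtraction: compute in the unsigned representation, where
// wrap-around is well defined, then convert back.
int32_t wrapping_sub_sketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) - static_cast<uint32_t>(b));
}

// Checked subtraction: the wrapped result overflowed iff it moved in the
// wrong direction for the sign of b.
std::optional<int32_t> checked_sub_sketch(int32_t a, int32_t b) {
  const int32_t wrapped = wrapping_sub_sketch(a, b);
  const bool overflow = (b < 0) ? (wrapped < a) : (wrapped > a);
  if (overflow) return std::nullopt;
  return wrapped;
}

// Saturating subtraction clamps to the numeric bounds instead of wrapping.
int32_t saturating_sub_sketch(int32_t a, int32_t b) {
  if (auto r = checked_sub_sketch(a, b)) return *r;
  return b < 0 ? std::numeric_limits<int32_t>::max()
               : std::numeric_limits<int32_t>::min();
}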
\ */ \ constexpr u32 count_zeros() const& noexcept { \ return (~(*this)).count_ones(); \ } \ \ /** Returns the number of leading ones in the binary representation of the \ * current value. \ */ \ constexpr u32 leading_ones() const& noexcept { \ return (~(*this)).leading_zeros(); \ } \ \ /** Returns the number of leading zeros in the binary representation of the \ * current value. \ */ \ constexpr u32 leading_zeros() const& noexcept { \ return __private::leading_zeros( \ __private::into_unsigned(primitive_value)); \ } \ \ /** Returns the number of trailing ones in the binary representation of the \ * current value. \ */ \ constexpr u32 trailing_ones() const& noexcept { \ return (~(*this)).trailing_zeros(); \ } \ \ /** Returns the number of trailing zeros in the binary representation of the \ * current value. \ */ \ constexpr u32 trailing_zeros() const& noexcept { \ return __private::trailing_zeros( \ __private::into_unsigned(primitive_value)); \ } \ \ /** Reverses the order of bits in the integer. The least significant bit \ * becomes the most significant bit, second least-significant bit becomes \ * second most-significant bit, etc. \ */ \ constexpr T reverse_bits() const& noexcept { \ return __private::into_signed( \ __private::reverse_bits(__private::into_unsigned(primitive_value))); \ } \ \ /** Shifts the bits to the left by a specified amount, `n`, wrapping the \ * truncated bits to the end of the resulting integer. \ * \ * Please note this isn't the same operation as the `<<` shifting operator! \ */ \ constexpr T rotate_left(const u32& n) const& noexcept { \ return __private::into_signed(__private::rotate_left( \ __private::into_unsigned(primitive_value), n.primitive_value)); \ } \ \ /** Shifts the bits to the right by a specified amount, n, wrapping the \ * truncated bits to the beginning of the resulting integer. \ * \ * Please note this isn't the same operation as the >> shifting operator! \ */ \ constexpr T rotate_right(const u32& n) const& noexcept { \ return __private::into_signed(__private::rotate_right( \ __private::into_unsigned(primitive_value), n.primitive_value)); \ } \ \ /** Reverses the byte order of the integer. \ */ \ constexpr T swap_bytes() const& noexcept { \ return __private::into_signed( \ __private::swap_bytes(__private::into_unsigned(primitive_value))); \ } \ static_assert(true) #define _sus__signed_pow(T) \ /** Raises self to the power of `exp`, using exponentiation by squaring. */ \ constexpr inline T pow(const u32& rhs) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, rhs.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ ::sus::check(!out.overflow); \ return out.value; \ } \ \ /** Checked exponentiation. Computes `##T##::pow(exp)`, returning None if \ * overflow occurred. \ */ \ constexpr Option<T> checked_pow(const u32& rhs) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, rhs.primitive_value); \ /* TODO: Allow opting out of all overflow checks? */ \ if (!out.overflow) [[likely]] \ return Option<T>::some(out.value); \ else \ return Option<T>::none(); \ } \ \ /** Raises self to the power of `exp`, using exponentiation by squaring. \ * \ * Returns a tuple of the exponentiation along with a bool indicating \ * whether an overflow happened. 
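The bit-query functions above all operate on the unsigned two's-complement representation of the value. C++20's <bit> header gives a compact way to illustrate the same queries (a sketch of the semantics, not the library's implementation):

#include <bit>
#include <cstdint>

// Bit queries on a signed value, done via its unsigned representation.
void bit_queries_sketch() {
  const int32_t v = -1;
  const uint32_t u = static_cast<uint32_t>(v);  // 0xFFFFFFFF
  (void)std::popcount(u);     // count_ones()     -> 32
  (void)std::popcount(~u);    // count_zeros()    -> 0
  (void)std::countl_zero(u);  // leading_zeros()  -> 0
  (void)std::countl_one(u);   // leading_ones()   -> 32
  (void)std::countr_zero(u);  // trailing_zeros() -> 0
  (void)std::rotl(u, 8);      // rotate_left(8), not the same as << 8
}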
\ */ \ template <int&..., class Tuple = ::sus::tuple::Tuple<T, bool>> \ constexpr Tuple overflowing_pow(const u32& exp) const& noexcept { \ const auto out = \ __private::pow_with_overflow(primitive_value, exp.primitive_value); \ return Tuple::with(out.value, out.overflow); \ } \ \ /** Wrapping (modular) exponentiation. Computes self.pow(exp), wrapping \ * around at the boundary of the type. \ */ \ constexpr T wrapping_pow(const u32& exp) const& noexcept { \ return __private::wrapping_pow(primitive_value, exp.primitive_value); \ } \ static_assert(true) #define _sus__signed_log(T) \ /** Returns the base 2 logarithm of the number, rounded down. \ * \ * Returns None if the number is negative or zero. \ */ \ constexpr Option<u32> checked_log2() const& { \ if (primitive_value <= 0) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ uint32_t zeros = __private::leading_zeros_nonzero( \ unsafe_fn, __private::into_unsigned(primitive_value)); \ return Option<u32>::some(BITS() - 1_u32 - u32(zeros)); \ } \ } \ \ /** Returns the base 2 logarithm of the number, rounded down. \ * \ * # Panics \ * When the number is zero or negative the function will panic. \ \ * */ \ constexpr u32 log2() const& { \ /* TODO: Allow opting out of all overflow checks? */ \ return checked_log2().unwrap(); \ } \ \ /** Returns the base 10 logarithm of the number, rounded down. \ * \ * Returns None if the number is negative or zero. \ */ \ constexpr Option<u32> checked_log10() const& { \ if (primitive_value <= 0) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ return Option<u32>::some(__private::int_log10::T(primitive_value)); \ } \ } \ \ /** Returns the base 10 logarithm of the number, rounded down. \ * \ * # Panics \ * When the number is zero or negative the function will panic. \ */ \ constexpr u32 log10() const& { \ /* TODO: Allow opting out of all overflow checks? */ \ return checked_log10().unwrap(); \ } \ \ /** Returns the logarithm of the number with respect to an arbitrary base, \ * rounded down. \ * \ * Returns None if the number is negative or zero, or if the base is not at \ * least 2. \ * \ * This method might not be optimized owing to implementation details; \ * `checked_log2` can produce results more efficiently for base 2, and \ * `checked_log10` can produce results more efficiently for base 10. \ */ \ constexpr Option<u32> checked_log(const T& base) const& noexcept { \ if (primitive_value <= 0 || base.primitive_value <= 1) [[unlikely]] { \ return Option<u32>::none(); \ } else { \ auto n = uint32_t{0}; \ auto r = primitive_value; \ const auto b = base.primitive_value; \ while (r >= b) { \ r /= b; \ n += 1u; \ } \ return Option<u32>::some(n); \ } \ } \ \ /** Returns the logarithm of the number with respect to an arbitrary base, \ * rounded down. \ * \ * This method might not be optimized owing to implementation details; log2 \ * can produce results more efficiently for base 2, and log10 can produce \ * results more efficiently for base 10. \ * \ * # Panics \ * When the number is negative, zero, or if the base is not at least 2. \ */ \ constexpr u32 log(const T& base) const& noexcept { \ return checked_log(base).unwrap(); \ } \ static_assert(true) #define _sus__signed_endian(T, UnsignedT, Bytes) \ /** Converts an integer from big endian to the target's endianness. \ * \ * On big endian this is a no-op. On little endian the bytes are swapped. 
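checked_log above computes the floor logarithm in an arbitrary base by repeated division; the same loop, as a standalone plain-C++ sketch (illustrative names):

#include <cstdint>
#include <optional>

// Floor of log_base(value) by repeated division, as in checked_log():
// nullopt for non-positive values or bases below 2.
std::optional<uint32_t> checked_log_sketch(int32_t value, int32_t base) {
  if (value <= 0 || base <= 1) return std::nullopt;
  uint32_t n = 0;
  while (value >= base) {
    value /= base;
    ++n;
  }
  return n;
}
// checked_log_sketch(1000, 10) == 3, checked_log_sketch(999, 10) == 2.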
\ */ \ static constexpr T from_be(const T& x) noexcept { \ if (::sus::assertions::is_big_endian()) \ return x; \ else \ return x.swap_bytes(); \ } \ \ /** Converts an integer from little endian to the target's endianness. \ * \ * On little endian this is a no-op. On big endian the bytes are swapped. \ */ \ static constexpr T from_le(const T& x) noexcept { \ if (::sus::assertions::is_little_endian()) \ return x; \ else \ return x.swap_bytes(); \ } \ \ /** Converts self to big endian from the target's endianness. \ * \ * On big endian this is a no-op. On little endian the bytes are swapped. \ */ \ constexpr T to_be() const& noexcept { \ if (::sus::assertions::is_big_endian()) \ return *this; \ else \ return swap_bytes(); \ } \ \ /** Converts self to little endian from the target's endianness. \ * \ * On little endian this is a no-op. On big endian the bytes are swapped. \ */ \ constexpr T to_le() const& noexcept { \ if (::sus::assertions::is_little_endian()) \ return *this; \ else \ return swap_bytes(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * big-endian (network) byte order. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_be_bytes() const& noexcept { \ return to_be().to_ne_bytes sus_clang_bug_58835(<Array>)(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * little-endian byte order. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_le_bytes() const& noexcept { \ return to_le().to_ne_bytes sus_clang_bug_58835(<Array>)(); \ } \ \ /** Return the memory representation of this integer as a byte array in \ * native byte order. \ * \ * As the target platform's native endianness is used, portable code should \ * use `to_be_bytes()` or `to_le_bytes()`, as appropriate, instead. \ */ \ template <sus_clang_bug_58835_else(int&..., ) class Array = \ ::sus::containers::Array<u8, Bytes>> \ constexpr Array to_ne_bytes() const& noexcept { \ if (std::is_constant_evaluated()) { \ auto bytes = Array::with_value(uint8_t{0}); \ auto uval = __private::into_unsigned(primitive_value); \ for (auto i = size_t{0}; i < Bytes; ++i) { \ const auto last_byte = static_cast<uint8_t>(uval & 0xff); \ if (sus::assertions::is_little_endian()) \ bytes[i] = last_byte; \ else \ bytes[Bytes - 1 - i] = last_byte; \ /* If T is one byte, this shift would be UB. But it's also not needed \ since the loop will not run again. */ \ if constexpr (Bytes > 1) uval >>= 8u; \ } \ return bytes; \ } else { \ auto bytes = Array::with_uninitialized(unsafe_fn); \ memcpy(bytes.as_mut_ptr(), &primitive_value, Bytes); \ return bytes; \ } \ } \ \ /** Create an integer value from its representation as a byte array in big \ * endian. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_be_bytes(const Array& bytes) noexcept { \ return from_be(from_ne_bytes(bytes)); \ } \ \ /** Create an integer value from its representation as a byte array in \ * little endian. \ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_le_bytes(const Array& bytes) noexcept { \ return from_le(from_ne_bytes(bytes)); \ } \ \ /** Create an integer value from its memory representation as a byte array \ * in native endianness. \ * \ * As the target platform's native endianness is used, portable code likely \ * wants to use `from_be_bytes()` or `from_le_bytes()`, as appropriate \ * instead. 
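to_be_bytes above produces the big-endian (network order) representation regardless of host endianness, by swapping only when the host is little endian. A plain-C++ sketch of the same result using explicit shifts, which needs no endianness check at all (illustrative name, not library code):

#include <array>
#include <cstdint>

// Serialize an int32_t to big-endian bytes with shifts; the output is
// identical on little- and big-endian hosts.
std::array<uint8_t, 4> to_be_bytes_sketch(int32_t value) {
  const uint32_t u = static_cast<uint32_t>(value);
  return {static_cast<uint8_t>(u >> 24), static_cast<uint8_t>(u >> 16),
          static_cast<uint8_t>(u >> 8), static_cast<uint8_t>(u)};
}
// to_be_bytes_sketch(0x12345678) == {0x12, 0x34, 0x56, 0x78}.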
\ */ \ template <int&..., class Array = ::sus::containers::Array<u8, Bytes>> \ static constexpr T from_ne_bytes(const Array& bytes) noexcept { \ using U = decltype(__private::into_unsigned(primitive_value)); \ U val; \ if (std::is_constant_evaluated()) { \ val = U{0}; \ for (auto i = size_t{0}; i < Bytes; ++i) { \ val |= bytes[i].primitive_value << (Bytes - size_t{1} - i); \ } \ } else { \ memcpy(&val, bytes.as_ptr(), Bytes); \ } \ return __private::into_signed(val); \ } \ static_assert(true) namespace sus::num { // TODO: from_str_radix(). Need Result type and Errors. // TODO: div_ceil() and div_floor()? Lots of discussion still on // https://github.com/rust-lang/rust/issues/88581 for signed types. // TODO: Split apart the declarations and the definitions? Then they can be in // u32_defn.h and u32_impl.h, allowing most of the library to just use // u32_defn.h which will keep some headers smaller. But then the combined // headers are larger, is that worse? /// A 32-bit signed integer. struct i32 final { _sus__signed_impl(i32, /*PrimitiveT=*/int32_t, /*UnsignedT=*/u32); }; /// An 8-bit signed integer. struct i8 final { _sus__signed_impl(i8, /*PrimitiveT=*/int8_t, /*UnsignedT=*/u8); }; /// A 16-bit signed integer. struct i16 final { _sus__signed_impl(i16, /*PrimitiveT=*/int16_t, /*UnsignedT=*/u16); }; /// A 64-bit signed integer. struct i64 final { _sus__signed_impl(i64, /*PrimitiveT=*/int64_t, /*UnsignedT=*/u64); }; /// A pointer-sized signed integer. struct isize final { _sus__signed_impl( isize, /*PrimitiveT=*/::sus::num::__private::ptr_type<>::signed_type, /*UnsignedT=*/usize); /** Converts to its primitive value implicitly. */ constexpr operator decltype(primitive_value)() { return primitive_value; } }; } // namespace sus::num _sus__integer_literal(i8, ::sus::num::i8); _sus__integer_literal(i16, ::sus::num::i16); _sus__integer_literal(i32, ::sus::num::i32); _sus__integer_literal(i64, ::sus::num::i64); _sus__integer_literal(isize, ::sus::num::isize); // Promote signed integer types into the top-level namespace. using sus::num::i16; using sus::num::i32; using sus::num::i64; using sus::num::i8; using sus::num::isize; namespace sus::iter { using ::sus::option::Option; // TODO: Move forward decls somewhere? template <class Item, size_t InnerIterSize, size_t InnerIterAlign> class Filter; namespace __private { template <class IteratorBase> class IteratorLoop; template <class Iterator> class IteratorImplicitLoop; struct IteratorEnd; template <class T> constexpr auto begin(const T& t) noexcept; template <class T> constexpr auto end(const T& t) noexcept; } // namespace __private // TODO: Do we want to be able to pass IteratorBase& as a "generic" iterator? // Then it needs access to the adapator methods of Iterator<T>, so make them // virtual methods on IteratorBase? // // TODO: Do we want to be able to pass Iterator by value as a "generic" // iterator? Then we need an opaque Iterator type, which can be returned from an // adaptor method (and we can have an explicit operator to convert to it)? // // TODO: We need virtual methods because we erase the type in SizedIterator and // call the virtual methods there. But when the iterator is being used directly, // do we need each call to next() to go through virtual? Could CRTP, so we can // call `Subclass::next()`, with the next() method being marked `final` in the // subclass, bypass the vtable pointer? template <class ItemT> class IteratorBase { public: using Item = ItemT; // Required methods. 
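from_ne_bytes above rebuilds a value from its native-endian byte representation (via memcpy outside constant evaluation). In standalone C++20 the same round trip can be shown with std::bit_cast, which is also usable in constant expressions (a sketch, not the library's code):

#include <array>
#include <bit>
#include <cstdint>

// Reinterpret 4 native-endian bytes as an int32_t; bit_cast is the
// constexpr-friendly equivalent of the runtime memcpy above.
constexpr int32_t from_ne_bytes_sketch(std::array<uint8_t, 4> bytes) {
  return std::bit_cast<int32_t>(bytes);
}
// Round trip holds on any host endianness:
static_assert(std::bit_cast<std::array<uint8_t, 4>>(from_ne_bytes_sketch(
                  {1, 2, 3, 4})) == std::array<uint8_t, 4>{1, 2, 3, 4});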
/// Gets the next element from the iterator, if there is one. Otherwise, it /// returns an Option holding #None. virtual Option<Item> next() noexcept = 0; // Provided methods. /// Tests whether all elements of the iterator match a predicate. /// /// If the predicate returns `true` for all elements in the iterator, this /// functions returns `true`, otherwise `false`. The function is /// short-circuiting; it stops iterating on the first `false` returned from /// the predicate. /// /// Returns `true` if the iterator is empty. virtual bool all(::sus::fn::FnMut<bool(Item)> f) noexcept; /// Tests whether any elements of the iterator match a predicate. /// /// If the predicate returns `true` for any elements in the iterator, this /// functions returns `true`, otherwise `false`. The function is /// short-circuiting; it stops iterating on the first `true` returned from /// the predicate. /// /// Returns `false` if the iterator is empty. virtual bool any(::sus::fn::FnMut<bool(Item)> f) noexcept; /// Consumes the iterator, and returns the number of elements that were in /// it. /// /// The function walks the iterator until it sees an Option holding #None. /// /// # Safety /// /// If the `usize` type does not have trapping arithmetic enabled, and the /// iterator has more than `usize::MAX` elements in it, the value will wrap /// and be incorrect. Otherwise, `usize` will catch overflow and panic. virtual usize count() noexcept; /// Adaptor for use in ranged for loops. __private::IteratorLoop<IteratorBase<Item>&> begin() & noexcept; /// Adaptor for use in ranged for loops. __private::IteratorEnd end() & noexcept; protected: IteratorBase() = default; }; template <class I> class Iterator final : public I { private: using sus_clang_bug_58837(Item =) typename I::Item; template <class T> friend class __private::IteratorLoop; // Can see Item. friend I; // I::foo() can construct Iterator<I>. template <class J> friend class Iterator; // Iterator<J>::foo() can construct Iterator<I>. // Option can't include Iterator, due to a circular dependency between // Option->Iterator->Option. So it forward declares Iterator, and needs // to use the constructor directly. template <class T> friend class sus_clang_bug_58836( ::sus::option::) Option; // Option<T>::foo() can construct Iterator<I>. template <class... Args> Iterator(Args&&... args) : I(static_cast<Args&&>(args)...) { // We want to be able to use Iterator<I> and I interchangably, so that if an // `I` gets stored in SizedIterator, it doesn't misbehave. static_assert(sizeof(I) == sizeof(Iterator<I>), ""); } public: // Adaptor methods. 
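IteratorBase above builds everything on a single next() returning an Option, with all()/any() short-circuiting on the first decisive result. A minimal standalone sketch of that protocol using std::optional (names here are illustrative, not library API):

#include <cstddef>
#include <optional>
#include <vector>

// Minimal next()-based iterator over a vector.
struct IntIterSketch {
  const std::vector<int>& v;
  size_t i = 0;
  std::optional<int> next() {
    if (i == v.size()) return std::nullopt;
    return v[i++];
  }
};

// Short-circuiting all(): stop at the first element that fails the
// predicate; vacuously true when the iterator is empty.
template <class Pred>
bool all_sketch(IntIterSketch it, Pred p) {
  for (auto item = it.next(); item.has_value(); item = it.next())
    if (!p(*item)) return false;
  return true;
}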
// TODO: map() Iterator<Filter<Item, sizeof(I), alignof(I)>> filter( ::sus::fn::FnMut<bool(const std::remove_reference_t<Item>&)> pred) && noexcept; }; } // namespace sus::iter namespace sus::iter { template <class Item, size_t SubclassSize, size_t SubclassAlign> struct SizedIterator final { SizedIterator(void (*destroy)(char& sized)) : destroy(destroy) {} SizedIterator(SizedIterator&& o) : destroy(o.destroy) { o.destroy = nullptr; memcpy(sized, o.sized, SubclassSize); } SizedIterator& operator=(SizedIterator&& o) = delete; ~SizedIterator() { if (destroy) destroy(*sized); } IteratorBase<Item>& iterator_mut() { return *reinterpret_cast<IteratorBase<Item>*>(sized); } alignas(SubclassAlign) char sized[SubclassSize]; void (*destroy)(char& sized); }; template <::sus::mem::Moveable IteratorSubclass, int&..., class SubclassItem = typename IteratorSubclass::Item, class SizedIteratorType = SizedIterator<SubclassItem, sizeof(IteratorSubclass), alignof(IteratorSubclass)>> inline SizedIteratorType make_sized_iterator(IteratorSubclass&& subclass) // TODO: write a sus::is_subclass? requires( std::is_convertible_v<IteratorSubclass&, IteratorBase<SubclassItem>&>) { static_assert(::sus::mem::relocate_one_by_memcpy<IteratorSubclass>); auto it = SizedIteratorType([](char& sized) { reinterpret_cast<IteratorSubclass&>(sized).~IteratorSubclass(); }); new (it.sized) IteratorSubclass(::sus::move(subclass)); return it; } } // namespace sus::iter // IteratorBase provided functions are implemented in this file, so that they // can be easily included by library users, but don't have to be included in // every library header that returns an IteratorBase. namespace sus::iter { template <class Item> __private::IteratorLoop<IteratorBase<Item>&> IteratorBase<Item>::begin() & noexcept { return {*this}; } template <class Item> __private::IteratorEnd IteratorBase<Item>::end() & noexcept { return __private::IteratorEnd(); } template <class Item> bool IteratorBase<Item>::all(::sus::fn::FnMut<bool(Item)> f) noexcept { Option<Item> item = next(); while (item.is_some()) { // Safety: `item` was checked to hold Some already. if (!f(item.take().unwrap_unchecked(unsafe_fn))) return false; item = next(); } return true; } template <class Item> bool IteratorBase<Item>::any(::sus::fn::FnMut<bool(Item)> f) noexcept { Option<Item> item = next(); while (item.is_some()) { // Safety: `item` was checked to hold Some already. if (f(item.take().unwrap_unchecked(unsafe_fn))) return true; item = next(); } return false; } template <class Item> usize IteratorBase<Item>::count() noexcept { auto c = 0_usize; while (next().is_some()) c += 1_usize; return c; } template <class I> Iterator<Filter<typename I::Item, sizeof(I), alignof(I)>> Iterator<I>::filter( ::sus::fn::FnMut<bool(const std::remove_reference_t<typename I::Item>&)> pred) && noexcept { // TODO: make_sized_iterator immediately copies `this` to either the body of // the output iterator or to a heap allocation (if it can't be trivially // relocated). It is plausible to be more lazy here and avoid moving `this` // until it's actually needed, which may not be ever if the resulting iterator // is used before `this` gets destroyed. The problem is `this` could be a // temporary. So to do this, we could build a doubly-linked list along the // chain of iterators. `this` would point to the returned iterator here, and // vice versa. If `this` gets destroyed, then we would have to walk the entire // linked list and move them all up into the outermost iterator immediately. 
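SizedIterator above type-erases an iterator into a fixed-size aligned byte buffer plus a destroy function pointer, so adaptors can hold any inner iterator without a heap allocation. A compact standalone sketch of that storage pattern, simplified and assuming the stored type fits the buffer (illustrative names):

#include <cstddef>
#include <new>
#include <utility>

// Store any T of at most Size bytes in an inline, aligned buffer,
// remembering only how to destroy it -- the essence of SizedIterator above.
template <size_t Size, size_t Align>
struct ErasedBoxSketch {
  alignas(Align) unsigned char bytes[Size];
  void (*destroy)(unsigned char*);

  template <class T>
  explicit ErasedBoxSketch(T value)
      : destroy([](unsigned char* p) { reinterpret_cast<T*>(p)->~T(); }) {
    static_assert(sizeof(T) <= Size && alignof(T) <= Align);
    new (bytes) T(std::move(value));  // placement-construct into the buffer
  }
  ErasedBoxSketch(const ErasedBoxSketch&) = delete;
  ~ErasedBoxSketch() { destroy(bytes); }
};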
// Doing so dynamically would require a (single) heap allocation at that point // always. It would be elided if the iterator was kept on the stack, or used // inside the temporary expression. But it would require one heap allocation // to use any chain of iterators in a for loop, since temporaies get destroyed // after initialing the loop. return {::sus::move(pred), make_sized_iterator(static_cast<I&&>(*this))}; } } // namespace sus::iter namespace sus::mem::__private { template <class T> struct [[sus_trivial_abi]] RelocatableStorage; template <class T> requires(!::sus::mem::relocate_one_by_memcpy<T>) struct [[sus_trivial_abi]] RelocatableStorage<T> final { RelocatableStorage(Option<T>&& t) : heap_(::sus::move(t).and_then([](T&& t) { return Option<T&>::some(mref(*new T(static_cast<T&&>(t)))); })) {} ~RelocatableStorage() { heap_.take().and_then( [](T& t) { // TODO: Use inspect() instead if we add it? delete &t; return Option<T&>::none(); }); } RelocatableStorage(RelocatableStorage&& o) : heap_(o.heap_.take()) {} RelocatableStorage& operator=(RelocatableStorage&& o) { heap_.take().and_then( [](T& t) { // TODO: Use inspect() instead if we add it? delete &t; return Option<T&>::none(); }); heap_ = o.heap_.take(); return *this; } T& storage_mut() & { return heap_.as_mut().unwrap(); } Option<T> take() & { return heap_.take().map([](T& t) { T take = static_cast<T&&>(t); delete &t; return take; }); } Option<T&> heap_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(heap_)); }; template <class T> requires(::sus::mem::relocate_one_by_memcpy<T>) struct [[sus_trivial_abi]] RelocatableStorage<T> final { RelocatableStorage(Option<T>&& t) : stack_(::sus::move(t)) {} // TODO: Do memcpy instead of take() when not in a constexpr context. RelocatableStorage(RelocatableStorage&& o) : stack_(o.stack_.take()) {} RelocatableStorage& operator=(RelocatableStorage&& o) { stack_ = o.stack_.take(); return *this; } T& storage_mut() & { return stack_.as_mut().unwrap(); } Option<T> take() & { return stack_.take(); } // TODO: Remove the Option from here, put the Option at the callers where it's // needed. Option<T> stack_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(stack_)); }; } // namespace sus::mem::__private namespace sus::iter { using ::sus::mem::__private::RelocatableStorage; using ::sus::option::Option; // clang-format off sus_clang_bug_58859( namespace __private { template <class Item> inline Iterator<Once<Item>> once(Option<Item>&& single) noexcept { return Once<Item>::with_option(::sus::move(single)); } // namespace sus::iter } ) // clang-format on /// An IteratorBase implementation that walks over at most a single Item. 
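RelocatableStorage above keeps the value inline when it can be relocated by memcpy and boxes it on the heap otherwise. A simplified standalone sketch of that compile-time switch, using std::is_trivially_copyable as a rough stand-in for the library's relocation trait (illustrative names):

#include <memory>
#include <type_traits>
#include <utility>

// Inline storage for types that are cheap to relocate, heap storage for the
// rest, selected by partial specialization.
template <class T, bool Inline = std::is_trivially_copyable_v<T>>
struct RelocStorageSketch;

template <class T>
struct RelocStorageSketch<T, true> {  // cheap to relocate: keep it inline
  T value;
  T& get() { return value; }
};

template <class T>
struct RelocStorageSketch<T, false> {  // otherwise: pin it on the heap
  std::unique_ptr<T> value;
  explicit RelocStorageSketch(T v) : value(std::make_unique<T>(std::move(v))) {}
  T& get() { return *value; }
};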
template <class Item> class [[sus_trivial_abi]] Once : public IteratorBase<Item> { public: Option<Item> next() noexcept final { return single_.take(); } protected: Once(Option<Item>&& single) : single_(::sus::move(single)) {} private: template <class U> friend inline Iterator<Once<U>> once(Option<U>&& single) noexcept requires(std::is_move_constructible_v<U>); // clang-format off sus_clang_bug_58859( template <class U> friend inline Iterator<Once<U>> __private::once(Option<U>&& single) noexcept ); // clang-format on static Iterator<Once> with_option(Option<Item>&& single) { return Iterator<Once>(static_cast<Option<Item>&&>(single)); } RelocatableStorage<Item> single_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(single_)); }; template <class Item> inline Iterator<Once<Item>> once(Option<Item>&& single) noexcept requires(std::is_move_constructible_v<Item>) { sus_clang_bug_58859(return __private::once(::sus::move(single))); sus_clang_bug_58859_else(return Once<Item>::with_option(::sus::move(single))); } } // namespace sus::iter namespace sus::containers { template <class T> class Slice; template <class Item> struct [[sus_trivial_abi]] SliceIter : public ::sus::iter::IteratorBase<const Item&> { public: static constexpr auto with(const Item* start, usize len) noexcept { return ::sus::iter::Iterator<SliceIter>(start, len); } Option<const Item&> next() noexcept final { if (ptr_ == end_) [[unlikely]] return Option<const Item&>::none(); // SAFETY: Since end_ > ptr_, which is checked in the constructor, ptr_ + 1 // will never be null. return Option<const Item&>::some( *::sus::mem::replace_ptr(mref(ptr_), ptr_ + 1_usize)); } protected: constexpr SliceIter(const Item* start, usize len) noexcept : ptr_(start), end_(start + len) { check(end_ > ptr_ || !end_); // end_ may wrap around to 0, but not past 0. } private: const Item* ptr_; const Item* end_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(ptr_), decltype(end_)); }; template <class Item> struct [[sus_trivial_abi]] SliceIterMut : public ::sus::iter::IteratorBase<Item&> { public: static constexpr auto with(Item* start, usize len) noexcept { return ::sus::iter::Iterator<SliceIterMut>(start, len); } Option<Item&> next() noexcept final { if (ptr_ == end_) [[unlikely]] return Option<Item&>::none(); // SAFETY: Since end_ > ptr_, which is checked in the constructor, ptr_ + 1 // will never be null. return Option<Item&>::some(mref(*::sus::mem::replace_ptr(mref(ptr_), ptr_ + 1_usize))); } protected: constexpr SliceIterMut(Item* start, usize len) noexcept : ptr_(start), end_(start + len) { check(end_ > ptr_ || !end_); // end_ may wrap around to 0, but not past 0. } private: Item* ptr_; Item* end_; sus_class_assert_trivial_relocatable_types(unsafe_fn, decltype(ptr_), decltype(end_)); }; } // namespace sus::containers namespace sus::containers { /// A dynamically-sized view into a contiguous sequence, `[T]`. /// /// Contiguous here means that elements are laid out so that every element is /// the same distance from its neighbors. /// /// Slices are a view into a block of memory represented as a pointer and a /// length. template <class T> class Slice { public: static constexpr inline Slice from_raw_parts(T* data, usize len) noexcept { check(len.primitive_value <= PTRDIFF_MAX); return Slice(data, len); } // sus::construct::Into<Slice<T>, T[]> trait. template <size_t N> requires(N <= PTRDIFF_MAX) static constexpr inline Slice from(T (&data)[N]) { return Slice(data, N); } /// Returns the number of elements in the slice. 
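SliceIter above walks a [ptr_, end_) range and hands out Option<const Item&> until the pointers meet. A minimal standalone sketch of the same pointer-pair protocol, returning a raw pointer (null when exhausted) in place of the library's Option of a reference (illustrative names):

#include <cstddef>

// Walk a contiguous [ptr, end) range; next() returns nullptr once exhausted,
// standing in for Option<const T&>::none() above.
template <class T>
struct RangeIterSketch {
  const T* ptr;
  const T* end;
  const T* next() { return ptr == end ? nullptr : ptr++; }
};
// Usage: RangeIterSketch<int> it{arr, arr + n};
//        while (const int* p = it.next()) { /* use *p */ }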
constexpr inline usize len() const& noexcept { return len_; } /// Returns a const reference to the element at index `i`. constexpr Option<const T&> get(usize i) const& noexcept { if (i < len_) [[likely]] return Option<const T&>::some(data_[i.primitive_value]); else return Option<const T&>::none(); } constexpr Option<const T&> get(usize i) && = delete; /// Returns a mutable reference to the element at index `i`. constexpr Option<T&> get_mut(usize i) & noexcept requires(!std::is_const_v<T>) { if (i < len_) [[likely]] return Option<T&>::some(mref(data_[i.primitive_value])); else return Option<T&>::none(); } /// Returns a const reference to the element at index `i`. /// /// # Safety /// The index `i` must be inside the bounds of the slice or Undefined /// Behaviour results. The size of the slice must therefore also be larger /// than 0. constexpr inline const T& get_unchecked(::sus::marker::UnsafeFnMarker, usize i) const& noexcept { return data_[i.primitive_value]; } constexpr inline const T& get_unchecked(::sus::marker::UnsafeFnMarker, usize i) && = delete; /// Returns a mutable reference to the element at index `i`. /// /// # Safety /// The index `i` must be inside the bounds of the slice or Undefined /// Behaviour results. The size of the slice must therefore also be larger /// than 0. constexpr inline T& get_unchecked_mut(::sus::marker::UnsafeFnMarker, usize i) & noexcept requires(!std::is_const_v<T>) { return data_[i.primitive_value]; } constexpr inline const T& operator[](usize i) const& noexcept { check(i < len_); return data_[i.primitive_value]; } constexpr inline const T& operator[](usize i) && = delete; constexpr T& operator[](usize i) & noexcept requires(!std::is_const_v<T>) { check(i < len_); return data_[i.primitive_value]; } /// Returns a const pointer to the first element in the slice. inline const T* as_ptr() const& noexcept { check(len_ > 0_usize); return data_; } inline const T* as_ptr() && = delete; /// Returns a mutable pointer to the first element in the slice. inline T* as_mut_ptr() & noexcept requires(!std::is_const_v<T>) { check(len_ > 0_usize); return data_; } /// Returns an iterator over all the elements in the slice, visited in the /// same order they appear in the slice. The iterator gives const access to /// each element. constexpr ::sus::iter::Iterator<SliceIter<T>> iter() const& noexcept { return SliceIter<T>::with(data_, len_); } /// Returns an iterator over all the elements in the slice, visited in the /// same order they appear in the slice. The iterator gives mutable access to /// each element. constexpr ::sus::iter::Iterator<SliceIterMut<T>> iter_mut() noexcept requires(!std::is_const_v<T>) { return SliceIterMut<T>::with(data_, len_); } /// Converts the slice into an iterator that consumes the slice and returns /// each element in the same order they appear in the array. /// /// For a Slice<const T> the iterator will return `const T&`. For a Slice<T> /// the iterator will return `T&`. constexpr ::sus::iter::Iterator<SliceIter<T>> into_iter() && noexcept requires(std::is_const_v<T>) { return SliceIter<T>::with(data_, len_); } constexpr ::sus::iter::Iterator<SliceIterMut<T>> into_iter() && noexcept requires(!std::is_const_v<T>) { return SliceIterMut<T>::with(data_, len_); } private: constexpr Slice(T* data, usize len) noexcept : data_(data), len_(len) {} T* data_; usize len_; sus_class_never_value_field(unsafe_fn, Slice, data_, nullptr); }; // Implicit for-ranged loop iteration via `Slice::iter()`. 
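Slice::get above returns an Option instead of asserting, while operator[] checks and panics. A standalone sketch of the Option-style bounds check on a minimal pointer+length view (illustrative names, not library code):

#include <cstddef>

// A minimal pointer+length view whose get() refuses out-of-range indices by
// returning nullptr, mirroring Slice::get() returning Option above.
template <class T>
struct ViewSketch {
  const T* data;
  size_t len;
  const T* get(size_t i) const { return i < len ? data + i : nullptr; }
};
// ViewSketch<int>{arr, 3}.get(7) == nullptr; get(1) points at arr[1].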
using ::sus::iter::__private::begin; using ::sus::iter::__private::end; } // namespace sus::containers // Promote Slice into the `sus` namespace. namespace sus { using ::sus::containers::Slice; } namespace sus::containers { template <class T, size_t N> requires(N <= PTRDIFF_MAX) class Array; template <class Item, size_t N> requires(std::is_move_constructible_v<Item>) struct ArrayIntoIter : public ::sus::iter::IteratorBase<Item> { public: static constexpr auto with(Array<Item, N>&& array) noexcept { return ::sus::iter::Iterator<ArrayIntoIter>(::sus::move(array)); } Option<Item> next() noexcept final { if (next_index_.primitive_value == N) [[unlikely]] return Option<Item>::none(); // SAFETY: The array has a fixed size. The next_index_ is encapsulated and // only changed in this class/method. The next_index_ stops incrementing // when it reaches N and starts at 0, and N >= 0, so when we get here we // know next_index_ is in range of the array. We use get_unchecked_mut() // here because it's difficult for the compiler to make the same // observations we have here, as next_index_ is a field and changes across // multiple method calls. Item& item = array_.get_unchecked_mut( unsafe_fn, ::sus::mem::replace(mref(next_index_), next_index_ + 1_usize)); return Option<Item>::some(move(item)); } protected: ArrayIntoIter(Array<Item, N>&& array) noexcept : array_(::sus::move(array)) {} private: usize next_index_ = 0_usize; Array<Item, N> array_; }; } // namespace sus::containers namespace sus::containers { namespace __private { template <class T, size_t N> struct Storage final { T data_[N]; }; template <class T> struct Storage<T, 0> final {}; } // namespace __private /// A container of objects of type T, with a fixed size N. /// /// An Array can not be larger than PTRDIFF_MAX, as subtracting a pointer at a /// greater distance results in Undefined Behaviour. template <class T, size_t N> requires(N <= PTRDIFF_MAX) class Array final { static_assert(!std::is_const_v<T>); public: constexpr static Array with_default() noexcept requires(::sus::construct::MakeDefault<T>) { auto a = Array(kWithUninitialized); if constexpr (N > 0) { for (size_t i = 0; i < N; ++i) a.storage_.data_[i] = ::sus::construct::make_default<T>(); } return a; } constexpr static Array with_uninitialized( ::sus::marker::UnsafeFnMarker) noexcept { return Array(kWithUninitialized); } template <::sus::fn::callable::CallableReturns<T> InitializerFn> constexpr static Array with_initializer(InitializerFn f) noexcept { return Array(kWithInitializer, move(f), std::make_index_sequence<N>()); } /// Uses convertible_to<T> to accept `sus::into()` values. But doesn't use /// sus::construct::Into<T> to avoid implicit conversions. template <std::convertible_to<T> U> constexpr static Array with_value(const U& t) noexcept { return Array(kWithValue, t, std::make_index_sequence<N>()); } /// Uses convertible_to<T> instead of same_as<T> to accept `sus::into()` /// values. But doesn't use sus::construct::Into<T> to avoid implicit /// conversions. template <std::convertible_to<T>... Ts> requires(sizeof...(Ts) == N) constexpr static Array with_values(Ts... ts) noexcept { auto a = Array(kWithUninitialized); init_values(a.as_mut_ptr(), 0, move(ts)...); return a; } /// Returns the number of elements in the array. constexpr usize len() const& noexcept { return N; } /// Returns a const reference to the element at index `i`. 
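ArrayIntoIter above consumes the array and moves each element out by index until the index reaches N. A standalone sketch of that consuming iteration over std::array (illustrative names):

#include <array>
#include <cstddef>
#include <optional>
#include <string>
#include <utility>

// Consume a std::array element by element, moving each value out, as
// ArrayIntoIter does for the sus Array above.
template <class T, size_t N>
struct IntoIterSketch {
  std::array<T, N> array;
  size_t next_index = 0;
  std::optional<T> next() {
    if (next_index == N) return std::nullopt;
    return std::move(array[next_index++]);
  }
};
// IntoIterSketch<std::string, 2>{{"a", "b"}}.next() moves "a" out.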
constexpr Option<const T&> get(usize i) const& noexcept requires(N > 0) { if (i.primitive_value >= N) [[unlikely]] return Option<const T&>::none(); return Option<const T&>::some(storage_.data_[i.primitive_value]); } constexpr Option<const T&> get(usize i) && = delete; /// Returns a mutable reference to the element at index `i`. constexpr Option<T&> get_mut(usize i) & noexcept requires(N > 0) { if (i.primitive_value >= N) [[unlikely]] return Option<T&>::none(); return Option<T&>::some(mref(storage_.data_[i.primitive_value])); } /// Returns a const reference to the element at index `i`. /// /// # Safety /// The index `i` must be inside the bounds of the array or Undefined /// Behaviour results. constexpr inline const T& get_unchecked(::sus::marker::UnsafeFnMarker, usize i) const& noexcept requires(N > 0) { return storage_.data_[i.primitive_value]; } constexpr inline const T& get_unchecked(::sus::marker::UnsafeFnMarker, usize i) && = delete; /// Returns a mutable reference to the element at index `i`. /// /// # Safety /// The index `i` must be inside the bounds of the array or Undefined /// Behaviour results. constexpr inline T& get_unchecked_mut(::sus::marker::UnsafeFnMarker, usize i) & noexcept requires(N > 0) { return storage_.data_[i.primitive_value]; } constexpr inline const T& operator[](usize i) const& noexcept { check(i.primitive_value < N); return storage_.data_[i.primitive_value]; } constexpr inline const T& operator[](usize i) && = delete; constexpr inline T& operator[](usize i) & noexcept { check(i.primitive_value < N); return storage_.data_[i.primitive_value]; } /// Returns a const pointer to the first element in the array. inline const T* as_ptr() const& noexcept requires(N > 0) { return storage_.data_; } const T* as_ptr() && = delete; /// Returns a mutable pointer to the first element in the array. inline T* as_mut_ptr() & noexcept requires(N > 0) { return storage_.data_; } // Returns a slice that references all the elements of the array as const // references. constexpr Slice<const T> as_ref() const& noexcept { return Slice<const T>::from(storage_.data_); } constexpr Slice<const T> as_ref() && = delete; // Returns a slice that references all the elements of the array as mutable // references. constexpr Slice<T> as_mut() & noexcept { return Slice<T>::from(storage_.data_); } /// Returns an iterator over all the elements in the array, visited in the /// same order they appear in the array. The iterator gives const access to /// each element. constexpr ::sus::iter::Iterator<SliceIter<T>> iter() const& noexcept { return SliceIter<T>::with(storage_.data_, N); } constexpr ::sus::iter::Iterator<SliceIter<T>> iter() && = delete; /// Returns an iterator over all the elements in the array, visited in the /// same order they appear in the array. The iterator gives mutable access to /// each element. constexpr ::sus::iter::Iterator<SliceIterMut<T>> iter_mut() & noexcept { return SliceIterMut<T>::with(storage_.data_, N); } /// Converts the array into an iterator that consumes the array and returns /// each element in the same order they appear in the array. constexpr ::sus::iter::Iterator<ArrayIntoIter<T, N>> into_iter() && noexcept { return ArrayIntoIter<T, N>::with(static_cast<Array&&>(*this)); } /// Consumes the array, and returns a new array, mapping each element of the /// array to a new type with the given function. /// /// To just walk each element and map them, consider using `iter()` and /// `Iterator::map`. This does not require consuming the array. 
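map() documented above consumes the array and builds a new array, possibly of a different element type, from the moved-out elements. A standalone sketch over std::array using the same index_sequence pack-expansion technique the implementation relies on (illustrative names):

#include <array>
#include <cstddef>
#include <string>
#include <utility>

// Map a consumed array into a new array by generating element i from
// std::move(input[i]); the pack expansion constructs elements in order.
template <class R, class T, size_t N, class MapFn>
std::array<R, N> map_array_sketch(std::array<T, N>&& input, MapFn f) {
  return [&]<size_t... Is>(std::index_sequence<Is...>) {
    return std::array<R, N>{f(std::move(input[Is]))...};
  }(std::make_index_sequence<N>());
}
// map_array_sketch<size_t>(std::array<std::string, 2>{"a", "bc"},
//                          [](std::string s) { return s.size(); })
//     yields {1, 2}.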
template <::sus::fn::callable::CallableWith<T&&> MapFn, int&..., class R = std::invoke_result_t<MapFn, T&&>> requires(N > 0 && !std::is_void_v<R>) Array<R, N> map(MapFn f) && noexcept { return Array<R, N>::with_initializer([this, &f, i = size_t{0}]() mutable { return f(move(storage_.data_[i++])); }); } /// sus::ops::Eq<Array<U, N>> trait. template <::sus::ops::Eq<T> U> constexpr bool operator==(const Array<U, N>& r) const& noexcept { return eq_impl(r, std::make_index_sequence<N>()); } private: enum WithInitializer { kWithInitializer }; template <class F, size_t... Is> constexpr Array(WithInitializer, F&& f, std::index_sequence<Is...>) noexcept : storage_{((void)Is, f())...} {} enum WithValue { kWithValue }; template <size_t... Is> constexpr Array(WithValue, const T& t, std::index_sequence<Is...>) noexcept : storage_{((void)Is, t)...} {} enum WithUninitialized { kWithUninitialized }; template <size_t... Is> constexpr Array(WithUninitialized) noexcept {} template <std::convertible_to<T> T1, std::convertible_to<T>... Ts> static inline void init_values(T* a, size_t index, T1&& t1, Ts&&... ts) { new (a + index) T(move(t1)); init_values(a, index + 1, move(ts)...); } template <std::convertible_to<T> T1> static inline void init_values(T* a, size_t index, T1&& t1) { new (a + index) T(move(t1)); } template <class U, size_t... Is> constexpr inline auto eq_impl(const Array<U, N>& r, std::index_sequence<Is...>) const& noexcept { return (true && ... && (get(Is) == r.get(Is))); }; // Using a union ensures that the default constructor doesn't initialize // anything. union { ::sus::containers::__private::Storage<T, N> storage_; }; sus_class_trivial_relocatable_value(unsafe_fn, ::sus::mem::relocate_array_by_memcpy<T>); }; namespace __private { template <size_t I, class O, class T, class U, size_t N> constexpr inline bool array_cmp_impl(O& val, const Array<T, N>& l, const Array<U, N>& r) noexcept { auto cmp = l.get(I) <=> r.get(I); // Allow downgrading from equal to equivalent, but not the inverse. if (cmp != 0) val = cmp; // Short circuit by returning true when we find a difference. return val == 0; }; template <class T, class U, size_t N, size_t... Is> constexpr inline auto array_cmp(auto equal, const Array<T, N>& l, const Array<U, N>& r, std::index_sequence<Is...>) noexcept { auto val = equal; (true && ... && (array_cmp_impl<Is>(val, l, r))); return val; }; } // namespace __private /// sus::ops::Ord<Option<U>> trait. template <class T, class U, size_t N> requires(::sus::ops::ExclusiveOrd<T, U>) constexpr inline auto operator<=>(const Array<T, N>& l, const Array<U, N>& r) noexcept { return __private::array_cmp(std::strong_ordering::equivalent, l, r, std::make_index_sequence<N>()); } /// sus::ops::Ord<Option<U>> trait. template <class T, class U, size_t N> requires(::sus::ops::ExclusiveWeakOrd<T, U>) constexpr inline auto operator<=>(const Array<T, N>& l, const Array<U, N>& r) noexcept { return __private::array_cmp(std::weak_ordering::equivalent, l, r, std::make_index_sequence<N>()); } /// sus::ops::Ord<Option<U>> trait. template <class T, class U, size_t N> requires(::sus::ops::ExclusivePartialOrd<T, U>) constexpr inline auto operator<=>(const Array<T, N>& l, const Array<U, N>& r) noexcept { return __private::array_cmp(std::partial_ordering::equivalent, l, r, std::make_index_sequence<N>()); } // Implicit for-ranged loop iteration via `Array::iter()`. using ::sus::iter::__private::begin; using ::sus::iter::__private::end; } // namespace sus::containers // Promote Array into the `sus` namespace. 
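The operator<=> overloads above compare arrays element-wise and keep the ordering of the first differing pair. A standalone sketch of that lexicographic comparison over std::array, assuming a strongly ordered element type (illustrative names):

#include <array>
#include <compare>
#include <cstddef>

// Lexicographic three-way comparison: return the ordering of the first
// differing pair, as array_cmp does above.
template <class T, class U, size_t N>
constexpr std::strong_ordering cmp_arrays_sketch(const std::array<T, N>& l,
                                                 const std::array<U, N>& r) {
  for (size_t i = 0; i < N; ++i) {
    if (auto c = l[i] <=> r[i]; c != 0) return c;
  }
  return std::strong_ordering::equal;
}
static_assert(cmp_arrays_sketch(std::array{1, 2, 3}, std::array{1, 2, 4}) < 0);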
namespace sus { using ::sus::containers::Array; } namespace sus::iter { using ::sus::iter::IteratorBase; using ::sus::mem::relocate_one_by_memcpy; using ::sus::mem::__private::RelocatableStorage; template <class Item, size_t InnerIterSize, size_t InnerIterAlign> class Filter : public IteratorBase<Item> { using Pred = ::sus::fn::FnMut<bool( // TODO: write a sus::const_ref<T>? const std::remove_reference_t<const std::remove_reference_t<Item>&>&)>; using InnerSizedIter = SizedIterator<Item, InnerIterSize, InnerIterAlign>; struct Data final { Pred pred_; InnerSizedIter next_iter_; sus_class_maybe_trivial_relocatable_types(unsafe_fn, decltype(pred_), decltype(next_iter_)); }; public: Option<Item> next() noexcept final { IteratorBase<Item>& next_iter = data_.storage_mut().next_iter_.iterator_mut(); Pred& pred = data_.storage_mut().pred_; // TODO: Just call find(pred) on itself? Option<Item> item = next_iter.next(); while (item.is_some() && !pred(item.as_ref().unwrap_unchecked(unsafe_fn))) { item = next_iter.next(); } return item; } protected: Filter(Pred&& pred, InnerSizedIter&& next_iter) : data_(Option<Data>::some(Data{.pred_ = ::sus::move(pred), .next_iter_ = ::sus::move(next_iter)})) {} private: RelocatableStorage<Data> data_; sus_class_maybe_trivial_relocatable_types(unsafe_fn, decltype(data_)); }; } // namespace sus::iter
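Filter::next() above keeps pulling from the inner iterator until the predicate accepts an item or the inner iterator is exhausted. A minimal standalone sketch of that loop with std::optional (illustrative names; Iter::next() is assumed to return std::optional<int>):

#include <optional>

// Pull items from an inner next()-style iterator until one satisfies the
// predicate, as Filter::next() does above.
template <class Iter, class Pred>
std::optional<int> filter_next_sketch(Iter& inner, Pred pred) {
  auto item = inner.next();
  while (item.has_value() && !pred(*item)) item = inner.next();
  return item;  // either the first accepted item or nullopt
}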