Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Algol68
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C with Coccinelle
C++ with Coccinelle
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
GLSL
Go
Haskell
HLSL
Hook
Hylo
IL
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Mojo
Nim
Numba
Nix
Objective-C
Objective-C++
OCaml
Odin
OpenCL C
Pascal
Pony
PTX
Python
Racket
Raku
Ruby
Rust
Sail
Snowball
Scala
Slang
Solidity
Spice
SPIR-V
Swift
LLVM TableGen
Toit
Triton
TypeScript Native
V
Vala
Visual Basic
Vyper
WASM
Zig
JavaScript
GIMPLE
Ygen
Sway
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.79.0
rustc 1.8.0
rustc 1.80.0
rustc 1.81.0
rustc 1.82.0
rustc 1.83.0
rustc 1.84.0
rustc 1.85.0
rustc 1.86.0
rustc 1.87.0
rustc 1.88.0
rustc 1.89.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC assertions)
x86-64 GCCRS 14.1 (GCC)
x86-64 GCCRS 14.2 (GCC assertions)
x86-64 GCCRS 14.2 (GCC)
x86-64 GCCRS 14.3 (GCC assertions)
x86-64 GCCRS 14.3 (GCC)
x86-64 GCCRS 15.1 (GCC assertions)
x86-64 GCCRS 15.1 (GCC)
x86-64 GCCRS 15.2 (GCC assertions)
x86-64 GCCRS 15.2 (GCC)
Options
Source code
#![feature(allocator_api)]
#![feature(lazy_cell)]

use std::{
    alloc::{Allocator, Global, Layout},
    arch::global_asm,
    sync::Mutex,
};
use std::cell::RefCell;
use std::thread::LocalKey;
use std::ptr::NonNull;
use std::sync::OnceLock;

/// Size of every fiber stack in bytes. This might not be enough stack, so
/// allocate more if we stack overflow.
/// NOTE(review): the stacks come from a plain heap allocation with no guard
/// page, so an overflow would silently corrupt adjacent memory — confirm this
/// is acceptable for the intended use.
const STACK_SIZE: usize = 65536;
/// Alignment of every fiber stack; 16 bytes as required by the sysv64 ABI.
const STACK_ALIGN: usize = 16;

/// Saved stack pointer of a suspended fiber.
/// `repr(transparent)` keeps it ABI-identical to `*mut u8`, so it travels in a
/// single register through the `extern "sysv64"` context-switch routines below.
#[derive(Clone, Copy)]
#[repr(transparent)]
struct StackHandle(*mut u8);
// SAFETY: handles are stored in the global, mutex-protected context; the
// surrounding scheduling logic is what ensures a given stack is only ever run
// by one fiber at a time — TODO confirm that invariant holds everywhere.
unsafe impl Send for StackHandle {}
impl StackHandle {
    /// A sentinel handle used for slots whose fiber is not currently suspended.
    fn null_mut() -> StackHandle {
        StackHandle(std::ptr::null_mut())
    }
}

/// Base pointer of the heap allocation backing a fiber stack.
/// Kept separately from `StackHandle` (which moves as the fiber runs) so the
/// original pointer can be handed back to the allocator on cleanup.
#[derive(Clone, Copy)]
struct StackAlloc(*mut u8);
// SAFETY: only ever read again under the global context's mutex, for
// deallocation — TODO confirm no aliasing use while a fiber still runs on it.
unsafe impl Send for StackAlloc {}

/// Rust-ABI entry point executed on a freshly created stack.
/// Arguments: (pointer to the closure data written onto the new stack,
/// stack handle of the fiber that spawned us, id of the new fiber).
type FiberStarterFunc = fn(*mut u8, StackHandle, usize) -> ();

// The two context-switch primitives; their bodies are the hand-written
// `global_asm!` block defined elsewhere in this file.
extern "sysv64" {
    /// Save the current callee-saved state on this stack, switch RSP to
    /// `new_stack`, and resume whatever was suspended there. Returns (once we
    /// are eventually resumed) the stack handle of the fiber that suspended
    /// itself to resume us.
    fn switch_to_other_stack(new_stack: StackHandle) -> StackHandle;
    /// Like `switch_to_other_stack`, but the target stack is brand new: instead
    /// of popping saved registers, it calls `jump_pad` with (old stack, `f_ptr`,
    /// `new_id`, `data`) to start the fiber from scratch.
    fn start_new_stack(new_stack: StackHandle, f_ptr: *const (), new_id: usize, data: *mut u8) -> StackHandle;
}

// f_ptr is a function pointer with the Rust calling convention, so it cannot be
// called directly from the assembly; this sysv64 jump-pad re-dispatches to it.
extern "sysv64" fn jump_pad(old_stack: StackHandle, f_ptr: *const (), new_id: usize, data: *mut u8) -> StackHandle {
    // SAFETY: the only caller (the `start_new_stack` assembly) passes a pointer
    // that originated as `specialization::<T> as FiberStarterFunc`, so the
    // transmute restores the pointer's true type.
    let f_ptr: FiberStarterFunc = unsafe { std::mem::transmute(f_ptr) };
    f_ptr(data, old_stack, new_id);
    // Fibers terminate by switching away inside `exit_fiber`, never by
    // returning here (there is no saved frame to return to on this stack).
    std::unreachable!("this function should never exit");
}

// all context-switches are done inside functions called with the sysv64 calling
// convention, so the only registers that need to be preserved are RBX, RSP,
// RBP, and R12, R13, R14, and R15; all other registers are preserved by the
// caller, which is the compiler in this case.
// Context-switch primitives. Both save the six sysv64 callee-saved registers on
// the current stack, move RSP to the target stack (rdi), and either restore a
// previously saved register set (switch_to_other_stack) or call `jump_pad` to
// start a brand-new fiber (start_new_stack). In both routines the old stack
// pointer is handed back in rax so the resumed code can record where the
// suspended fiber left off.
global_asm!(
    "
.global switch_to_other_stack
switch_to_other_stack:
push rbx
push r12
push r13
push r14
push r15
push rbp
mov rax, rsp
mov rsp, rdi
pop rbp
pop r15
pop r14
pop r13
pop r12
pop rbx
ret
.global start_new_stack
start_new_stack:
push rbx
push r12
push r13
push r14
push r15
push rbp
mov rbx, rsp
mov rsp, rdi
mov rdi, rbx
call {}
mov rsp, rax
pop rbp
pop r15
pop r14
pop r13
pop r12
pop rbx
ret
",
    sym jump_pad,
);

/// Policy object deciding which fiber runs next. Ids are the indices handed out
/// by `FiberContext`; `Send` because the scheduler lives inside the global
/// mutex-protected context.
trait Scheduler : Send {
    /// A fiber id was created (or recycled); make it schedulable.
    fn on_new_id(&mut self, id: usize);
    /// Fiber `id` yielded; return the id of the fiber to run next
    /// (may be `id` itself).
    fn on_yield(&mut self, id: usize) -> usize;
    /// Fiber `id` finished; remove it and return the id to run next.
    fn on_exit(&mut self, id: usize) -> usize;
}

/// Node of the intrusive doubly linked list used by `LRUScheduler`.
/// `usize::MAX` plays the role of a null link throughout.
struct SchedElem {
    id: usize,
    next: usize,
    prev: usize,
}

/// Least-recently-run scheduler: yielding fibers go to the tail, the head runs
/// next. Backed by a `Vec`-indexed linked list (ids are the vector indices).
struct LRUScheduler {
    elems: Vec<SchedElem>,
    head: usize,
    tail: usize,
}

impl LRUScheduler {
    // TODO all the linked-list logic should be factored out into its own container.

    /// Detach `idx` from the list and reset its links to the null sentinel.
    fn unlink(&mut self, idx: usize) {
        if self.head == idx {
            self.head = self.elems[idx].next;
        } else {
            let p = self.elems[idx].prev;
            self.elems[p].next = self.elems[idx].next;
        }
        if self.tail == idx {
            self.tail = self.elems[idx].prev;
        } else {
            let n = self.elems[idx].next;
            self.elems[n].prev = self.elems[idx].prev;
        }
        self.elems[idx].next = usize::MAX;
        self.elems[idx].prev = usize::MAX;
    }

    /// Push an unlinked node to the front (next to run).
    fn insert_head(&mut self, idx: usize) {
        assert_eq!(self.elems[idx].prev, usize::MAX);
        assert_eq!(self.elems[idx].next, usize::MAX);
        if self.head == usize::MAX {
            self.tail = idx;
        } else {
            self.elems[self.head].prev = idx;
        }
        self.elems[idx].next = self.head;
        self.head = idx;
    }

    /// Push an unlinked node to the back (last to run).
    fn insert_tail(&mut self, idx: usize) {
        assert_eq!(self.elems[idx].prev, usize::MAX);
        assert_eq!(self.elems[idx].next, usize::MAX);
        if self.tail == usize::MAX {
            self.head = idx;
        } else {
            self.elems[self.tail].next = idx;
        }
        self.elems[idx].prev = self.tail;
        self.tail = idx;
    }
}

impl Default for LRUScheduler {
    /// Empty list; `usize::MAX` head/tail mean "no element".
    fn default() -> Self {
        Self {
            elems: Vec::new(),
            head: usize::MAX,
            tail: usize::MAX,
        }
    }
}

impl Scheduler for LRUScheduler {
    fn on_new_id(&mut self, id: usize) {
        // Ids are dense vector indices, so a brand-new id needs a fresh node;
        // a recycled id reuses its (already unlinked) node.
        if id >= self.elems.len() {
            self.elems.push(SchedElem {
                id,
                next: usize::MAX,
                prev: usize::MAX,
            });
        }
        self.insert_head(id);
    }
    fn on_yield(&mut self, id: usize) -> usize {
        // Move the yielder to the back; whoever is now at the head runs next.
        self.unlink(id);
        self.insert_tail(id);
        self.elems[self.head].id
    }
    fn on_exit(&mut self, id: usize) -> usize {
        // Drop the exiting fiber from the rotation entirely.
        // NOTE(review): if `id` was the only fiber, `self.head` becomes
        // usize::MAX and the index below panics — confirm callers never exit
        // the last fiber.
        self.unlink(id);
        self.elems[self.head].id
    }
}

/// Process-global bookkeeping for all fibers: one saved stack handle per fiber
/// id, the raw allocations for eventual cleanup, the scheduling policy, and
/// recycled ids. Lives behind a `Mutex` in a `OnceLock` singleton.
struct FiberContext {
    // stacks[id] is the suspension point of fiber `id`; null while it runs.
    stacks: Vec<StackHandle>,
    // Base pointers of every stack allocation, for deallocation.
    cleanups: Vec<StackAlloc>,
    scheduler: Box<dyn Scheduler>,
    // Ids whose fibers exited and may be reused by later spawns.
    unused_ids: Vec<usize>,
}

impl FiberContext {
    /// Lazily-initialized global singleton.
    fn get() -> &'static Mutex<FiberContext> {
        static SINGLETON: OnceLock<Mutex<FiberContext>> = OnceLock::new();
        &SINGLETON.get_or_init(|| {
            Mutex::new(FiberContext {
                stacks: Vec::new(),
                cleanups: Vec::new(),
                scheduler: Box::new(LRUScheduler::default()),
                unused_ids: Vec::new(),
            })
        })
    }

    /// Thread-local holding the id of the fiber currently running on this
    /// thread; `usize::MAX` means "no fiber id assigned yet".
    fn fiber_id_impl() -> &'static LocalKey<RefCell<usize>> {
        thread_local! {
            static FIBER_ID: RefCell<usize> = RefCell::new(usize::MAX);
        }
        return &FIBER_ID;
    }

    /// Id of the currently running fiber (usize::MAX before the first spawn).
    pub fn fiber_id() -> usize {
        Self::fiber_id_impl().with(|fiber_id| *fiber_id.borrow())
    }

    fn set_fiber_id(id: usize) {
        Self::fiber_id_impl().with(|fiber_id| *fiber_id.borrow_mut() = id);
    }

    /// Called immediately after control arrives on this stack: record
    /// `old_stack` (the suspension point of whichever fiber just switched away
    /// to resume us) under its id — which is still in the thread-local — then
    /// mark `new_id` (our own id) as current. Order matters: the thread-local
    /// must be read *before* it is overwritten.
    fn update_fiber_metadata(new_id: usize, old_stack: StackHandle) {
        let mut ctx = Self::get().lock().unwrap();
        ctx.stacks[Self::fiber_id()] = old_stack;
        Self::set_fiber_id(new_id);
    }

    /// Core suspend-and-resume step shared by yields and exits.
    pub fn yield_impl(is_exit: bool) {
        let fiber_id = Self::fiber_id();
        // Pick the next fiber and fetch its saved stack inside a scope so the
        // mutex is released *before* we switch stacks.
        let new_stack = {
            let mut ctx = Self::get().lock().unwrap();
            let selected_id = if is_exit {
                ctx.scheduler.on_exit(fiber_id)
            } else {
                ctx.scheduler.on_yield(fiber_id)
            };
            // if the selected fiber is the current fiber, do nothing. note that
            // the stored stack address is not valid anymore (it is only a
            // snapshot from the last suspension).
            if selected_id == fiber_id {
                return;
            }
            ctx.stacks[selected_id]
        };
        // SAFETY: `new_stack` was saved by a previous switch on a live fiber
        // stack; control continues there until someone switches back to us.
        let old_stack = unsafe { switch_to_other_stack(new_stack) };
        // We have been resumed: `fiber_id` (captured before suspending) is our
        // own id again; `old_stack` belongs to the fiber that resumed us.
        Self::update_fiber_metadata(fiber_id, old_stack);
    }

    /// Cooperatively hand control to the next scheduled fiber.
    pub fn self_yield() {
        Self::yield_impl(false);
    }

    /// Allocation layout shared by every fiber stack.
    fn get_stack_layout() -> Layout {
        Layout::from_size_align(STACK_SIZE, STACK_ALIGN).expect("failed to create layout for stack")
    }

    /// Hand out a fiber id, preferring recycled ones, and register it with the
    /// scheduler. Fresh ids get a null placeholder slot in `stacks`.
    fn get_new_id() -> usize {
        let mut ctx = Self::get().lock().unwrap();
        if let Some(id) = ctx.unused_ids.pop() {
            ctx.scheduler.on_new_id(id);
            return id;
        }
        ctx.stacks.push(StackHandle::null_mut());
        let id = ctx.stacks.len() - 1;
        ctx.scheduler.on_new_id(id);
        id
    }

    /// Retire the current fiber: release its id for reuse, then switch away
    /// for good via `yield_impl(true)` (which never returns to this stack).
    fn exit_fiber() {
        let id = Self::fiber_id();
        {
            let mut ctx = Self::get().lock().unwrap();
            ctx.stacks[id] = StackHandle::null_mut();
            ctx.unused_ids.push(id);
        };
        Self::yield_impl(true);
    }

    /// Allocate a stack, remember the allocation for cleanup, and pair it with
    /// a (possibly recycled) fiber id.
    /// NOTE(review): a recycled id gets a brand-new allocation while the old
    /// one stays in `cleanups` until Drop — stacks accumulate over the
    /// program's lifetime; confirm that is intended.
    fn create_new_fiber() -> (usize, StackAlloc) {
        let ptr: *mut u8 = Global
            .allocate(Self::get_stack_layout())
            .expect("failed to allocate new stack")
            .as_ptr() as *mut u8;
        Self::get().lock().unwrap().cleanups.push(StackAlloc(ptr));
        (Self::get_new_id(), StackAlloc(ptr))
    }

    /// Id of the caller: its existing fiber id, or a fresh one the first time
    /// a plain thread (id == usize::MAX) enters the fiber machinery.
    fn get_or_create_id() -> usize {
        if Self::fiber_id() != usize::MAX {
            Self::fiber_id()
        } else {
            Self::get_new_id()
        }
    }

    // spawn a new fiber and move execution to it.
    pub fn spawn<T: FnOnce() -> ()>(func: T) {
        let fiber_id = Self::get_or_create_id();
        let (new_id, ptr) = Self::create_new_fiber();
        // Round the closure's size up to a multiple of 16 (precedence: `+`
        // binds tighter than `&`, so this is (size + 15) & !15).
        let aligned_size = std::mem::size_of::<T>() + 15 & !15;
        assert!(STACK_SIZE > aligned_size);
        // stack should be aligned on 16 bytes, so ptr is aligned on 16 bytes,
        // but any offset added to ptr should be aligned on 16 as well.
        // leave space for the closure at the top of the stack.
        // the stack grows down so we need to start from the highest address.
        let addr = unsafe { ptr.0.add(STACK_SIZE - aligned_size) };
        // write the closure at the top of the stack in the space left for it
        unsafe {
            std::ptr::write(addr as *mut T, func);
        }
        // Make sure the thread-local holds the spawner's id so that
        // `update_fiber_metadata` inside `specialization` records the
        // spawner's suspended stack under the right slot.
        Self::set_fiber_id(fiber_id);
        // Monomorphized entry point matching `FiberStarterFunc`; runs as the
        // first frame on the new stack (invoked through `jump_pad`).
        fn specialization<T: FnOnce() -> ()>(data: *mut u8, old_stack: StackHandle, new_id: usize) {
            FiberContext::update_fiber_metadata(new_id, old_stack);
            // the closure of type T was previously written to the top of the
            // stack at `data`; read moves it out so it is dropped normally.
            unsafe {
                std::ptr::read(data as *mut T)();
            }
            FiberContext::exit_fiber();
            std::unreachable!("the scheduler should never re-schedule this fiber after a exit_fiber");
        }
        // switch to the newly allocated stack and call specialization::<T> on it.
        // SAFETY: `addr` is 16-aligned, inside the fresh allocation, and the
        // closure bytes above it are only read once by `specialization`.
        let old_stack = unsafe {
            start_new_stack(
                StackHandle(addr),
                specialization::<T> as FiberStarterFunc as *const (),
                new_id,
                addr,
            )
        };
        // Control came back to the spawner: record whoever resumed us.
        Self::update_fiber_metadata(fiber_id, old_stack);
    }

    /// Replace the global scheduling policy.
    pub fn set_scheduler(scheduler: Box<dyn Scheduler>) {
        Self::get().lock().unwrap().scheduler = scheduler
    }
}

impl Drop for FiberContext {
    /// Return every fiber stack to the allocator.
    /// NOTE(review): this locks the singleton mutex that owns `self` — if the
    /// singleton's own value were ever dropped this would deadlock; in
    /// practice statics are never dropped, so this appears to be dead code.
    /// Also note it drains `self.cleanups` but clears `ctx.cleanups`, which
    /// are only the same object in that (unreachable) singleton case — confirm.
    fn drop(&mut self) {
        let mut ctx = Self::get().lock().unwrap();
        for cleanup in self.cleanups.drain(..) {
            // SAFETY: each pointer came from `Global.allocate` with exactly
            // this layout and is deallocated at most once (drain empties the list).
            unsafe {
                Global.deallocate(
                    NonNull::<u8>::new(cleanup.0).unwrap(),
                    Self::get_stack_layout(),
                );
            }
        }
        ctx.cleanups.clear();
    }
}

/// Address of a stack local — a cheap way to show which stack we are running on.
fn stack_addr() -> *const u8 {
    let i : u8 = 0;
    &i as *const u8
}

/// Demo: interleave the main "fiber" with batches of spawned fibers, printing
/// each one's stack address and fiber id to show the round-robin switching.
fn main() {
    const COUNT: usize = 3;
    for i in 0..COUNT {
        FiberContext::spawn(move || {
            println!("stack({}) 1: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 2: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 3: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
        });
    }
    println!("main stack 2: {:#?}, fiber_id={}", stack_addr(), FiberContext::fiber_id());
    FiberContext::self_yield();
    println!("main stack 3: {:#?}, fiber_id={}", stack_addr(), FiberContext::fiber_id());
    FiberContext::self_yield();
    for i in COUNT..(COUNT * 2) {
        FiberContext::spawn(move || {
            println!("stack({}) 1: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 2: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 3: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
        });
    }
    println!("main stack 4: {:#?}, fiber_id={}", stack_addr(), FiberContext::fiber_id());
    FiberContext::self_yield();
    println!("main stack 5: {:#?}, fiber_id={}", stack_addr(), FiberContext::fiber_id());
    for i in COUNT..(COUNT * 2) {
        FiberContext::spawn(move || {
            println!("stack({}) 1: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 2: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
            FiberContext::self_yield();
            println!("stack({}) 3: {:#?}, fiber_id={}", i, stack_addr(), FiberContext::fiber_id());
        });
    }
    FiberContext::self_yield();
    println!("main stack 6: {:#?}, fiber_id={}", stack_addr(), FiberContext::fiber_id());
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Compiler Explorer Shop
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
CE on Bluesky
Statistics
Changelog
Version tree