cuda source #1
Source code
#include <cooperative_groups.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <stdio.h>
#include <climits>   // INT_MAX
#include <iostream>  // std::cout
#include <iterator>  // std::iterator_traits
#include <utility>   // std::declval

namespace cg = cooperative_groups;

struct sentinel {};

struct strided_iterator_t {
  int i_;
  int size_;
  int group_size_;

  __device__ int operator*() const { return i_; }
  __device__ void operator++() { i_ += group_size_; }
  __device__ bool operator==(sentinel) const { return i_ >= size_; }
  __device__ bool operator!=(sentinel s) const { return !(*this == s); }
};

struct federated_sequence_t {
  int group_size_;
  int size_;
  int i_;

  __device__ strided_iterator_t begin() {
    return strided_iterator_t{i_, size_, group_size_};
  }
  __device__ sentinel end() { return {}; }
};

template <typename ViewFn>
struct view_closure_t;

struct view_closure_base_t {
  template <typename ProducerT, typename ConsumerConstructorT>
  __device__ friend auto operator|(ProducerT&& producer,
                                   view_closure_t<ConsumerConstructorT> consumer) {
    return consumer(producer);
  }
};

template <typename ViewFn>
struct view_closure_t
    : view_closure_base_t
    , ViewFn {
  __device__ explicit view_closure_t(ViewFn fn) : ViewFn(fn) {}
};

template <typename Fun>
__device__ view_closure_t<Fun> make_view_closure(Fun fun) {
  return view_closure_t<Fun>{fun};
}

// Indirection layer:
// abstracts data access away from the subsequent algorithms
template <class G>
__device__ federated_sequence_t federated_sequence(G&& group, int size) {
  return federated_sequence_t{static_cast<int>(group.size()),
                              size,
                              static_cast<int>(group.thread_rank())};
}

template <typename RangeT, typename IteratorT>
struct load_iterator_t {
  using i_t = decltype(std::declval<RangeT>().begin());

  RangeT rng_;
  IteratorT in_;
  i_t i_;

  using value_type = typename std::iterator_traits<IteratorT>::value_type;

  __device__ value_type& operator*() { return *(in_ + *i_); }
  __device__ void operator++() { ++i_; }
  __device__ bool operator==(sentinel s) const { return i_ == s; }
  __device__ bool operator!=(sentinel s) const { return !(*this == s); }
};

template <class IteratorT, class Rng>
struct load_t {
  IteratorT in_;
  Rng rng_;

  __device__ auto begin() {
    return load_iterator_t<Rng, IteratorT>{rng_, in_, rng_.begin()};
  }
  __device__ sentinel end() const { return {}; }
};

template <class IteratorT>
struct load_fn_t {
  IteratorT in_;

  template <class Rng>
  __device__ load_t<IteratorT, Rng> operator()(Rng rng) const {
    return load_t<IteratorT, Rng>{in_, rng};
  }
};

template <class IteratorT>
__device__ auto load(IteratorT in) {
  return make_view_closure(load_fn_t<IteratorT>{in});
}

template <typename RangeT, typename T>
struct filter_iterator_t {
  using i_t = decltype(std::declval<RangeT>().begin());

  RangeT rng_;
  T key_;
  i_t i_;

  __device__ T& operator*() { return *i_; }
  __device__ void operator++() {
    ++i_;
    for (; i_ != rng_.end(); ++i_) {
      if (*i_ == key_) {
        break;
      }
    }
  }
  __device__ bool operator==(sentinel s) const { return i_ == s; }
  __device__ bool operator!=(sentinel s) const { return !(*this == s); }
};

template <class T, class Rng>
struct filter_t {
  T key_;
  Rng rng_;

  __device__ auto begin() {
    auto i = rng_.begin();
    for (; i != rng_.end(); ++i) {
      if (*i == key_) {
        return filter_iterator_t<Rng, T>{rng_, key_, i};
      }
    }
    return filter_iterator_t<Rng, T>{rng_, key_, i};
  }
  __device__ sentinel end() const { return {}; }
};

template <class T>
struct filter_fn_t {
  T in_;

  template <class Rng>
  __device__ filter_t<T, Rng> operator()(Rng rng) const {
    return filter_t<T, Rng>{in_, rng};
  }
};

template <class T>
__device__ auto filter(T key) {
  return make_view_closure(filter_fn_t<T>{key});
}

template <typename RangeT, typename T>
struct sync_iterator_t {
  using i_t = decltype(std::declval<RangeT>().begin());

  RangeT rng_;
  T oob_;
  i_t i_;

  __device__ T& operator*() {
    if (i_ == sentinel{}) {
      return oob_;
    }
    return *i_;
  }
  __device__ void operator++() {
    if (i_ != sentinel{}) {
      ++i_;
    }
  }
  __device__ bool operator==(sentinel s) const {
    // TODO Why is there no ballot in the thread group?
    return __syncthreads_and(i_ == s);
  }
  __device__ bool operator!=(sentinel s) const { return !(*this == s); }
};

template <class T, class Rng>
struct sync_t {
  T oob_;
  Rng rng_;

  __device__ auto begin() {
    return sync_iterator_t<Rng, T>{rng_, oob_, rng_.begin()};
  }
  __device__ sentinel end() const { return {}; }
};

template <class T>
struct sync_fn_t {
  T oob_;

  template <class Rng>
  __device__ sync_t<T, Rng> operator()(Rng rng) const {
    return sync_t<T, Rng>{oob_, rng};
  }
};

template <class T>
__device__ auto sync(T oob) {
  return make_view_closure(sync_fn_t<T>{oob});
}

template <class T>
__global__ void range(T key, int size, int* in, int* out) {
  for (int i : federated_sequence(cg::this_thread_block(), size)) {
    out[i] = threadIdx.x;
  }
}

template <class T>
__global__ void for_each(T key, int size, int* in, int* out) {
  auto group = cg::this_thread_block();
  for (int i : federated_sequence(group, size) | load(in)) {
    out[i] = threadIdx.x;
  }
}

template <class T>
__global__ void filter(T key, int size, int* in, bool* out) {
  auto group = cg::this_thread_block();
  for (int& i : federated_sequence(group, size) | load(in) | filter(key)) {
    out[(int)(&i - in)] = true;
  }
}

template <class T>
__global__ void contains_unique(T key, int size, int* in, bool* out) {
  auto group = cg::this_thread_block();
  for (int& i : federated_sequence(group, size) | load(in) | filter(key)) {
    *out = true;
    break;
  }
}

template <class T>
__global__ void contains_sync(T key, int size, int* in, bool* out) {
  auto group = cg::this_thread_block();
  auto oob = INT_MAX;
  for (int& i :
       federated_sequence(group, size) | load(in) | filter(key) | sync(oob)) {
    if (i != oob) {
      *out = true;
    }
    // Just to illustrate that it's possible
    group.sync();
  }
}

int main() {
  const int size = 64;
  const int key = size / 2;

  thrust::device_vector<bool> out(1);
  thrust::device_vector<int> in(size);
  thrust::sequence(in.rbegin(), in.rend());

  contains_sync<<<1, 32>>>(key, size,
                           thrust::raw_pointer_cast(in.data()),
                           thrust::raw_pointer_cast(out.data()));
  cudaDeviceSynchronize();

  std::cout << out[0] << std::endl;
}
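Everything in the listing composes through one extension point: a view is just a function object wrapped by make_view_closure, and the friend operator| in view_closure_base_t applies it to the producer range. As a minimal sketch of how a new adaptor would slot in (assuming the definitions above; transform is a hypothetical adaptor, not part of the original listing):

// Hypothetical transform view, built on the same machinery as load/filter/sync above.
template <typename RangeT, typename Fn>
struct transform_iterator_t {
  using i_t = decltype(std::declval<RangeT>().begin());

  RangeT rng_;
  Fn fn_;
  i_t i_;

  // Apply the function on dereference; iteration and termination
  // are delegated to the underlying iterator, exactly as in load_iterator_t.
  __device__ auto operator*() { return fn_(*i_); }
  __device__ void operator++() { ++i_; }
  __device__ bool operator==(sentinel s) const { return i_ == s; }
  __device__ bool operator!=(sentinel s) const { return !(*this == s); }
};

template <class Fn, class Rng>
struct transform_t {
  Fn fn_;
  Rng rng_;

  __device__ auto begin() {
    return transform_iterator_t<Rng, Fn>{rng_, fn_, rng_.begin()};
  }
  __device__ sentinel end() const { return {}; }
};

template <class Fn>
struct transform_fn_t {
  Fn fn_;

  template <class Rng>
  __device__ transform_t<Fn, Rng> operator()(Rng rng) const {
    return transform_t<Fn, Rng>{fn_, rng};
  }
};

template <class Fn>
__device__ auto transform(Fn fn) {
  return make_view_closure(transform_fn_t<Fn>{fn});
}

// Inside a kernel it would then compose like the existing views, e.g.:
//   for (int v : federated_sequence(group, size) | load(in)
//                  | transform([](int x) { return x * x; })) { ... }

The pattern mirrors the range-adaptor closures of C++20 ranges: composition is resolved entirely at compile time, so the whole pipeline can inline into a single loop.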