Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
Go
Haskell
HLSL
Hook
Hylo
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Nim
Objective-C
Objective-C++
OCaml
OpenCL C
Pascal
Pony
Python
Racket
Ruby
Rust
Snowball
Scala
Solidity
Spice
Swift
LLVM TableGen
Toit
TypeScript Native
V
Vala
Visual Basic
Zig
Javascript
GIMPLE
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.8.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC)
Options
Source code
use crate::json::{next_json_token, JsonError, JsonToken, JsonTokenType, JsonTokenizerMode};
use std::str;

/// Demo driver: tokenizes an embedded JSON-with-comments sample and prints
/// every non-empty token as `TokenType(text)`.
///
/// `next_json_token` is stateless between calls: it returns one token at a
/// time relative to the slice it is given, and the caller owns both the
/// cursor (`offset`) and the lexical context (`tokenizer_mode`), switching
/// modes when boundary tokens come back.
fn main() {
    let string_data = r#"{
    "scalar types": {
        "bool": [true, false],
        "null": null,
        "int": [0, -42, 123],
        "float": [0.0, -0, -1.25e-3, 2.125],
        "string": [
            "Just a string",
            "Escapes: (\\), (\")",
            "UTF-16 pair: \uD83c\uDf44",
            "UTF-16 code unit: \u00B6",
            // traling comma is allowed
        ]
    },
    "hello": {
        /**
         * hey, this comment is
         * mutiline
         */
        "nested": {"one":/*должно работать*/1,"two":2,"three":{"value": 3}}
    },
    "Duplicate keys do not cause errors because I don't care": 42,
    "Duplicate keys do not cause errors because I don't care": [null, {}, []],
} // sneaky comment"#;
    let data = string_data.as_bytes();
    let mut tokenizer_mode = JsonTokenizerMode::Json;
    let mut offset = 0;
    loop {
        match next_json_token(&data[offset..], tokenizer_mode) {
            Ok(JsonToken {
                offset: token_offset,
                size,
                token_type,
            }) => {
                // Start/end boundary tokens drive the mode switches; any
                // other token leaves the current mode unchanged.
                tokenizer_mode = match token_type {
                    JsonTokenType::OpeningQuote => JsonTokenizerMode::String,
                    JsonTokenType::KeywordStartBoundary => JsonTokenizerMode::Keyword,
                    JsonTokenType::NumberStartBoundary => JsonTokenizerMode::Number,
                    JsonTokenType::SingleLineCommentStart => JsonTokenizerMode::SingleLineComment,
                    JsonTokenType::MultiLineCommentStart => JsonTokenizerMode::MultiLineComment,
                    JsonTokenType::ClosingQuote
                    | JsonTokenType::KeywordEndBoundary
                    | JsonTokenType::NumberEndBoundary
                    | JsonTokenType::SingleLineCommentEnd
                    | JsonTokenType::MultiLineCommentEnd => JsonTokenizerMode::Json,
                    _ => tokenizer_mode,
                };
                let token_as_string =
                    str::from_utf8(&data[offset + token_offset..offset + token_offset + size])
                        .unwrap();
                // Zero-sized tokens are synthetic boundaries; skip printing them.
                if size != 0 {
                    println!("{:?}({})", token_type, token_as_string);
                }
                offset += token_offset + size;
            }
            Err(error) => {
                match error {
                    JsonError::End => println!("Successfully finished tokenizing."),
                    JsonError::OutOfRange => eprintln!("Out of range."),
                    _ => eprintln!("Error while tokenizing."),
                }
                break;
            }
        }
    }
}

pub mod json {
    //! An incremental tokenizer for JSON extended with `//` and `/* */`
    //! comments and trailing commas. The caller owns the cursor and the
    //! tokenizer mode; each call scans one token from the given slice.

    use crate::ascii;
    use crate::utf8::{self, Utf8CodePoint, Utf8Error};
    use std::str;

    /// Lexical context the cursor currently sits in. The tokenizer keeps no
    /// state between calls, so the caller must pass the right mode back in.
    #[derive(PartialEq, Clone, Copy)]
    pub enum JsonTokenizerMode {
        Json,
        String,
        Keyword,
        Number,
        SingleLineComment,
        MultiLineComment,
    }

    pub enum JsonError {
        /// Token bytes were not valid UTF-8.
        Utf8Error,
        /// A character that cannot start or continue any token was found.
        UnexpectedChar,
        /// The buffer ended in the middle of a token.
        OutOfRange,
        /// The buffer was fully consumed; not an actual failure.
        End,
    }

    /// Token kinds. The `*StartBoundary`/`*EndBoundary` variants are
    /// zero-sized markers that tell the caller to switch modes; the
    /// comment `*Start`/`*End` variants carry the delimiter text itself.
    #[derive(PartialEq, Debug, Clone, Copy)]
    pub enum JsonTokenType {
        OpeningCurlyBracket,
        ClosingCurlyBracket,
        OpeningSquareBracket,
        ClosingSquareBracket,
        Colon,
        Comma,
        OpeningQuote,
        String,
        ClosingQuote,
        KeywordStartBoundary,
        Keyword,
        KeywordEndBoundary,
        NumberStartBoundary,
        Number,
        NumberEndBoundary,
        SingleLineCommentStart,
        SingleLineComment,
        SingleLineCommentEnd,
        MultiLineCommentStart,
        MultiLineComment,
        MultiLineCommentEnd,
    }

    /// One token. `offset` is relative to the slice passed to
    /// [`next_json_token`]; `size` may be zero for boundary tokens.
    pub struct JsonToken {
        pub offset: usize,
        pub size: usize,
        pub token_type: JsonTokenType,
    }

    /// True when the next character terminates a keyword or number token:
    /// whitespace or any structural/delimiter character.
    fn is_boundary(next_byte: u8, next_char: &str) -> bool {
        ascii::is_whitespace(next_byte)
            || ["{", "}", "[", "]", ":", ",", "\"", "/"].contains(&next_char)
    }

    /// Views `size` bytes at `offset` as UTF-8 text.
    fn token_as_string(buffer: &[u8], offset: usize, size: usize) -> Result<&str, JsonError> {
        str::from_utf8(&buffer[offset..offset + size]).map_err(|_| JsonError::Utf8Error)
    }

    /// Scans `buffer` for the next token according to `mode`.
    ///
    /// Returns a [`JsonToken`] whose `offset`/`size` are relative to
    /// `buffer`; the caller advances its own cursor by `offset + size`.
    /// `Err(JsonError::End)` signals clean exhaustion of the input.
    pub fn next_json_token(buffer: &[u8], mode: JsonTokenizerMode) -> Result<JsonToken, JsonError> {
        let mut offset = 0;
        let mut token_size = 0;
        let mut maybe_comment_start = false;
        let mut maybe_multi_line_comment_end = false;
        loop {
            match utf8::next_code_point(&buffer[offset + token_size..]) {
                Ok(Utf8CodePoint {
                    size: code_point_size,
                }) => {
                    let next_byte = buffer[offset + token_size];
                    let next_char = token_as_string(buffer, offset + token_size, code_point_size)?;
                    match mode {
                        JsonTokenizerMode::Json => {
                            // Whitespace between tokens is skipped, not tokenized.
                            if ascii::is_whitespace(next_byte) {
                                offset += code_point_size;
                                continue;
                            }
                            // A letter starts a keyword (true/false/null);
                            // emit a zero-sized boundary so the caller switches mode.
                            if ascii::is_latin_alpha(next_byte) {
                                return Ok(JsonToken {
                                    offset,
                                    size: 0,
                                    token_type: JsonTokenType::KeywordStartBoundary,
                                });
                            }
                            // A digit or minus sign starts a number.
                            if ascii::is_digit(next_byte) || next_char == "-" {
                                return Ok(JsonToken {
                                    offset,
                                    size: 0,
                                    token_type: JsonTokenType::NumberStartBoundary,
                                });
                            }
                            token_size += code_point_size;
                            // A lone '/' may open a comment; read one more
                            // character before deciding.
                            if next_char == "/" && !maybe_comment_start {
                                maybe_comment_start = true;
                                continue;
                            }
                            if maybe_comment_start
                                && (token_size == "//".as_bytes().len()
                                    || token_size == "/*".as_bytes().len())
                            {
                                let token_type = match token_as_string(buffer, offset, token_size)? {
                                    "//" => JsonTokenType::SingleLineCommentStart,
                                    "/*" => JsonTokenType::MultiLineCommentStart,
                                    _ => return Err(JsonError::UnexpectedChar),
                                };
                                return Ok(JsonToken {
                                    offset,
                                    size: token_size,
                                    token_type,
                                });
                            }
                            if token_size != 1 {
                                return Err(JsonError::UnexpectedChar);
                            }
                            // Single-character structural tokens.
                            let token_type = match next_char {
                                "{" => JsonTokenType::OpeningCurlyBracket,
                                "}" => JsonTokenType::ClosingCurlyBracket,
                                "[" => JsonTokenType::OpeningSquareBracket,
                                "]" => JsonTokenType::ClosingSquareBracket,
                                ":" => JsonTokenType::Colon,
                                "\"" => JsonTokenType::OpeningQuote,
                                "," => JsonTokenType::Comma,
                                _ => return Err(JsonError::UnexpectedChar),
                            };
                            return Ok(JsonToken {
                                offset,
                                size: token_size,
                                token_type,
                            });
                        }
                        JsonTokenizerMode::String => {
                            // A backslash escapes the following byte, so an
                            // escaped quote does not end the string. Escape
                            // sequences are passed through, not decoded.
                            if next_char == "\\" {
                                if token_size + "\\\"".as_bytes().len() > buffer.len() {
                                    return Err(JsonError::OutOfRange);
                                }
                                token_size += "\\\"".as_bytes().len();
                                continue;
                            }
                            if next_char == "\"" {
                                // Empty accumulation means the cursor is on the
                                // closing quote itself.
                                if token_size == 0 {
                                    return Ok(JsonToken {
                                        offset,
                                        size: code_point_size,
                                        token_type: JsonTokenType::ClosingQuote,
                                    });
                                }
                                return Ok(JsonToken {
                                    offset,
                                    size: token_size,
                                    token_type: JsonTokenType::String,
                                });
                            }
                            token_size += code_point_size;
                        }
                        JsonTokenizerMode::Keyword | JsonTokenizerMode::Number => {
                            if is_boundary(next_byte, next_char) {
                                // Empty accumulation: emit the zero-sized end
                                // boundary so the caller returns to Json mode.
                                if token_size == 0 {
                                    return Ok(JsonToken {
                                        offset,
                                        size: 0,
                                        token_type: match mode {
                                            JsonTokenizerMode::Keyword => {
                                                JsonTokenType::KeywordEndBoundary
                                            }
                                            JsonTokenizerMode::Number => {
                                                JsonTokenType::NumberEndBoundary
                                            }
                                            _ => unreachable!(),
                                        },
                                    });
                                }
                                return Ok(JsonToken {
                                    offset,
                                    size: token_size,
                                    token_type: match mode {
                                        JsonTokenizerMode::Keyword => JsonTokenType::Keyword,
                                        JsonTokenizerMode::Number => JsonTokenType::Number,
                                        _ => unreachable!(),
                                    },
                                });
                            }
                            token_size += code_point_size;
                        }
                        JsonTokenizerMode::SingleLineComment => {
                            // A line break ends the comment; the break itself
                            // is emitted as the end token on the next call.
                            if ["\n", "\r"].contains(&next_char) {
                                if token_size == 0 {
                                    return Ok(JsonToken {
                                        offset,
                                        size: code_point_size,
                                        token_type: JsonTokenType::SingleLineCommentEnd,
                                    });
                                }
                                return Ok(JsonToken {
                                    offset,
                                    size: token_size,
                                    token_type: JsonTokenType::SingleLineComment,
                                });
                            }
                            token_size += code_point_size;
                        }
                        JsonTokenizerMode::MultiLineComment => {
                            // Track a pending '*' so that "*/" is recognized
                            // while lone '*' and '/' stay part of the body.
                            if next_char == "*" {
                                maybe_multi_line_comment_end = true;
                            } else if next_char != "/" {
                                maybe_multi_line_comment_end = false;
                            }
                            if next_char == "/" && maybe_multi_line_comment_end {
                                // If the token is exactly "*/", the cursor was
                                // already on the terminator: emit the end token.
                                if token_size + code_point_size == "*/".as_bytes().len()
                                    && token_as_string(buffer, offset, token_size + code_point_size)?
                                        == "*/"
                                {
                                    return Ok(JsonToken {
                                        offset,
                                        size: token_size + code_point_size,
                                        token_type: JsonTokenType::MultiLineCommentEnd,
                                    });
                                }
                                // Otherwise emit the body, excluding the '*'
                                // that belongs to the terminator.
                                return Ok(JsonToken {
                                    offset,
                                    size: token_size - "*".as_bytes().len(),
                                    token_type: JsonTokenType::MultiLineComment,
                                });
                            }
                            token_size += code_point_size;
                        }
                    }
                }
                Err(Utf8Error::OutOfRange) => return Err(JsonError::OutOfRange),
                Err(Utf8Error::End) => {
                    // Clean end only when nothing is half-accumulated;
                    // keywords, numbers and line comments may legally end
                    // with the input, strings and block comments may not.
                    if token_size == 0 {
                        return Err(JsonError::End);
                    } else {
                        return match mode {
                            JsonTokenizerMode::Number
                            | JsonTokenizerMode::Keyword
                            | JsonTokenizerMode::SingleLineComment => Ok(JsonToken {
                                offset,
                                size: token_size,
                                token_type: match mode {
                                    JsonTokenizerMode::Number => JsonTokenType::Number,
                                    JsonTokenizerMode::Keyword => JsonTokenType::Keyword,
                                    JsonTokenizerMode::SingleLineComment => {
                                        JsonTokenType::SingleLineComment
                                    }
                                    _ => unreachable!(),
                                },
                            }),
                            _ => Err(JsonError::OutOfRange),
                        };
                    }
                }
                Err(_) => return Err(JsonError::Utf8Error),
            }
        }
    }
}

pub mod ascii {
    //! Byte-level ASCII classification helpers.

    pub fn is_latin_lowercase(byte: u8) -> bool {
        0x61 <= byte && byte <= 0x7a
    }

    pub fn is_latin_uppercase(byte: u8) -> bool {
        0x41 <= byte && byte <= 0x5a
    }

    pub fn is_latin_alpha(byte: u8) -> bool {
        is_latin_lowercase(byte) || is_latin_uppercase(byte)
    }

    pub fn is_digit(byte: u8) -> bool {
        0x30 <= byte && byte <= 0x39
    }

    /// Tab, LF, VT, FF, CR, space.
    pub fn is_whitespace(byte: u8) -> bool {
        [0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x20].contains(&byte)
    }
}

pub mod utf8 {
    //! Minimal UTF-8 scanning: enough to step over whole code points
    //! without decoding them.

    pub mod code_point {
        /// Code point length implied by its first byte (1..=4).
        pub fn size(first_byte: u8) -> usize {
            if first_byte & 0x80 == 0 {
                1
            } else if first_byte & 0xe0 == 0xc0 {
                2
            } else if first_byte & 0xf0 == 0xe0 {
                3
            } else if first_byte & 0xf8 == 0xf0 {
                4
            } else {
                // safely fallback to 1, because I don't want to deal with error handling here
                1
            }
        }

        /// True for any byte that can legally begin a code point.
        pub fn is_first_byte(first_byte: u8) -> bool {
            first_byte & 0x80 == 0
                || first_byte & 0xe0 == 0xc0
                || first_byte & 0xf0 == 0xe0
                || first_byte & 0xf8 == 0xf0
        }

        /// True for continuation bytes (0b10xx_xxxx).
        pub fn is_next_byte(byte: u8) -> bool {
            byte & 0xc0 == 0x80
        }
    }

    pub enum Utf8Error {
        InvalidFirstByte,
        NotCodePointFirstByte,
        InvalidCodePoint,
        OutOfRange,
        End,
    }

    /// Size (in bytes) of the code point at the start of a buffer.
    pub struct Utf8CodePoint {
        pub size: usize,
    }

    /// Validates the first byte of `buffer` and reports how many bytes the
    /// code point starting there occupies.
    ///
    /// Errors: `End` on an empty buffer, `NotCodePointFirstByte` when the
    /// cursor sits on a continuation byte, `InvalidFirstByte` for bytes
    /// that cannot start a code point, and `OutOfRange` when the buffer is
    /// shorter than the code point it begins.
    pub fn next_code_point(buffer: &[u8]) -> Result<Utf8CodePoint, Utf8Error> {
        let Some(&first_byte) = buffer.first() else {
            return Err(Utf8Error::End);
        };
        if code_point::is_next_byte(first_byte) {
            return Err(Utf8Error::NotCodePointFirstByte);
        }
        if !code_point::is_first_byte(first_byte) {
            return Err(Utf8Error::InvalidFirstByte);
        }
        let code_point_size = code_point::size(first_byte);
        if code_point_size > buffer.len() {
            return Err(Utf8Error::OutOfRange);
        }
        Ok(Utf8CodePoint {
            size: code_point_size,
        })
    }
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
About the author
Statistics
Changelog
Version tree