Thanks for using Compiler Explorer
Sponsors
Jakt
C++
Ada
Analysis
Android Java
Android Kotlin
Assembly
C
C3
Carbon
C++ (Circle)
CIRCT
Clean
CMake
CMakeScript
COBOL
C++ for OpenCL
MLIR
Cppx
Cppx-Blue
Cppx-Gold
Cpp2-cppfront
Crystal
C#
CUDA C++
D
Dart
Elixir
Erlang
Fortran
F#
Go
Haskell
HLSL
Hook
Hylo
ispc
Java
Julia
Kotlin
LLVM IR
LLVM MIR
Modula-2
Nim
Objective-C
Objective-C++
OCaml
OpenCL C
Pascal
Pony
Python
Racket
Ruby
Rust
Snowball
Scala
Solidity
Spice
Swift
LLVM TableGen
Toit
TypeScript Native
V
Vala
Visual Basic
Zig
JavaScript
GIMPLE
rust source #1
Output
Compile to binary object
Link to binary
Execute the code
Intel asm syntax
Demangle identifiers
Verbose demangling
Filters
Unused labels
Library functions
Directives
Comments
Horizontal whitespace
Debug intrinsics
Compiler
mrustc (master)
rustc 1.0.0
rustc 1.1.0
rustc 1.10.0
rustc 1.11.0
rustc 1.12.0
rustc 1.13.0
rustc 1.14.0
rustc 1.15.1
rustc 1.16.0
rustc 1.17.0
rustc 1.18.0
rustc 1.19.0
rustc 1.2.0
rustc 1.20.0
rustc 1.21.0
rustc 1.22.0
rustc 1.23.0
rustc 1.24.0
rustc 1.25.0
rustc 1.26.0
rustc 1.27.0
rustc 1.27.1
rustc 1.28.0
rustc 1.29.0
rustc 1.3.0
rustc 1.30.0
rustc 1.31.0
rustc 1.32.0
rustc 1.33.0
rustc 1.34.0
rustc 1.35.0
rustc 1.36.0
rustc 1.37.0
rustc 1.38.0
rustc 1.39.0
rustc 1.4.0
rustc 1.40.0
rustc 1.41.0
rustc 1.42.0
rustc 1.43.0
rustc 1.44.0
rustc 1.45.0
rustc 1.45.2
rustc 1.46.0
rustc 1.47.0
rustc 1.48.0
rustc 1.49.0
rustc 1.5.0
rustc 1.50.0
rustc 1.51.0
rustc 1.52.0
rustc 1.53.0
rustc 1.54.0
rustc 1.55.0
rustc 1.56.0
rustc 1.57.0
rustc 1.58.0
rustc 1.59.0
rustc 1.6.0
rustc 1.60.0
rustc 1.61.0
rustc 1.62.0
rustc 1.63.0
rustc 1.64.0
rustc 1.65.0
rustc 1.66.0
rustc 1.67.0
rustc 1.68.0
rustc 1.69.0
rustc 1.7.0
rustc 1.70.0
rustc 1.71.0
rustc 1.72.0
rustc 1.73.0
rustc 1.74.0
rustc 1.75.0
rustc 1.76.0
rustc 1.77.0
rustc 1.78.0
rustc 1.8.0
rustc 1.9.0
rustc beta
rustc nightly
rustc-cg-gcc (master)
x86-64 GCCRS (GCC master)
x86-64 GCCRS (GCCRS master)
x86-64 GCCRS 14.1 (GCC)
Options
Source code
use json::JsonValue;

/// Demo: parse a JSON document (with comments and trailing commas) and print it,
/// exercising escapes, UTF-16 pairs, nested objects, and duplicate keys.
fn main() {
    let source = r#"{
    "scalar types": {
        "bool": [true, false],
        "null": null,
        "int": [0, -42, 123],
        "float": [0.0, -0, -1.25e-3, 2.125],
        "string": [
            "Just a string",
            "Escapes: (\\), (\")",
            "UTF-16 pair: \uD83c\uDf44",
            "UTF-16 code unit: \u00B6",
            // traling comma is allowed
        ]
    },
    "hello": {
        /**
         * hey, this comment is
         * mutiline
         */
        "nested": {"one":/*should work*/1,"two":2,"three":{"value": 3}}
    },
    "Duplicate keys do not cause errors because I don't care": 42,
    "Duplicate keys do not cause errors because I don't care": [null, {}, []],
} // sneaky comment"#;
    if let Ok(json) = source.parse::<JsonValue>() {
        println!("{}", json);
    } else {
        println!("Error while parsing.");
    }
}

/// A hand-rolled parser for a JSON dialect that also allows `//` and `/* */`
/// comments and trailing commas. Entry point: `str::parse::<JsonValue>()`.
mod json {
    use std::{
        collections::HashMap,
        fmt::Display,
        iter::Peekable,
        str::{CharIndices, FromStr},
    };

    // these should fit all of the JS numbers and a little bit more
    pub type Int = i64;
    pub type Float = f64;

    /// A single lexical token of the JSON dialect.
    #[derive(Debug, PartialEq, Clone)]
    enum Token {
        Bool(bool),
        Null,
        Int(Int),
        Float(Float),
        String(String),
        Comma,
        Colon,
        OpeningCurlyBracket,
        ClosingCurlyBracket,
        OpeningSquareBracket,
        ClosingSquareBracket,
        Comment(String),
    }

    /// Unit error marker; the tokenizer only records *that* it failed, not why.
    #[derive(Clone, Default)]
    struct Error;

    /// Char-by-char cursor over the source. `position` is the byte offset of
    /// the next unread char; `source_iterator` always yields the byte offset at
    /// which the char at `position` ends (it is primed one step ahead in `new`).
    /// These two fields MUST stay in sync — see `consume_single_line_comment`.
    #[derive(Clone)]
    struct Tokenizer<'source> {
        error: Option<Error>,
        position: usize,
        source: &'source str,
        source_iterator: Peekable<CharIndices<'source>>,
    }

    /// What separated a token from the following input.
    enum Boundary<'source> {
        Char(&'source str),
        // NOTE(review): also used for end of *input*, not just end of line.
        EndOfLine,
    }

    const LINE_ENDINGS: [char; 2] = ['\n', '\r'];
    const WHITESPACES: [char; 4] = [' ', '\n', '\r', '\t'];
    const BOUNDARIES: [char; 9] = [' ', '\n', '\r', '\t', ':', ',', ']', '}', '/'];
    const NON_ZERO_DIGITS: [char; 9] = ['1', '2', '3', '4', '5', '6', '7', '8', '9'];
    const DIGITS: [char; 10] = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
    const HEXADECIMAL_DIGITS: [char; 22] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'A', 'b', 'B', 'c', 'C', 'd', 'D',
        'e', 'E', 'f', 'F',
    ];

    // FIXME spaghetti
    // Runs `$consumer` (and then each `$post_consumer`) on a CLONE of the
    // tokenizer; the clone is committed back only if everything succeeded, so a
    // failed attempt leaves the tokenizer untouched. It returns the result of
    // using only the first consumer; the post_consumers are only used to check
    // that something important follows the consumed token (for example a
    // boundary, to prevent "truefalse" from parsing as two bool tokens).
    macro_rules! try_consume {
        (
            $tokenizer:expr,
            $consumer:ident($($args:expr)*) $(.$post_consumers:ident($($post_args:expr)*))*
        ) => {{
            let mut updated_tokenizer = $tokenizer.clone();
            let consumed_result = updated_tokenizer.$consumer($($args)*);
            $(updated_tokenizer.$post_consumers($($post_args)*);)*
            if updated_tokenizer.has_errors() {
                None
            } else if let Some(value) = consumed_result {
                *$tokenizer = updated_tokenizer;
                Some(value)
            } else {
                None
            }
        }};
    }

    /// Decodes a UTF-16 surrogate pair into the character it encodes.
    fn utf16_pair_to_char(high_code_unit: u16, low_code_unit: u16) -> Result<char, ()> {
        let string = String::from_utf16(&[high_code_unit, low_code_unit]).map_err(|_| ())?;
        string.chars().next().ok_or(())
    }

    /// Decodes a single (non-surrogate) UTF-16 code unit into a character.
    fn utf16_code_unit_to_char(code_unit: u16) -> Result<char, ()> {
        let string = String::from_utf16(&[code_unit]).map_err(|_| ())?;
        string.chars().next().ok_or(())
    }

    impl<'source> Tokenizer<'source> {
        fn new(source: &'source str) -> Self {
            let mut tokenizer = Self {
                error: None,
                source,
                source_iterator: source.char_indices().peekable(),
                position: 0,
            };
            // Prime the iterator one step ahead: from now on, each `next()`
            // yields the byte offset where the char at `position` ends.
            tokenizer.source_iterator.next();
            tokenizer
        }

        fn is_empty(&self) -> bool {
            self.position == self.source.len()
        }

        fn has_errors(&self) -> bool {
            self.error.is_some()
        }

        fn got_error(&mut self) {
            self.error = Some(Error);
        }

        /// Consumes and returns the next char as a subslice of the source.
        /// Panics if the tokenizer is empty.
        fn next_char(&mut self) -> &'source str {
            let start_position = self.position;
            let end_position = self
                .source_iterator
                .next()
                .map(|pair| pair.0)
                .unwrap_or(self.source.len());
            self.position = end_position;
            &self.source[start_position..end_position]
        }

        /// Consumes exactly `required_char`, or records an error.
        fn consume_char(&mut self, required_char: char) -> Option<&'source str> {
            // FIXME checking if the tokenizer is empty every time you read a char is kinda dumb
            // add an unchecked version of this method so that the caller could decide for themselves?
            if self.is_empty() {
                self.got_error();
            }
            if self.has_errors() {
                return None;
            }
            let consumed_char_slice = self.next_char();
            if consumed_char_slice.chars().next().unwrap() != required_char {
                self.got_error();
                return None;
            }
            Some(consumed_char_slice)
        }

        /// Consumes `required_string` verbatim, or records an error (possibly
        /// after a partial consume — callers wrap this in `try_consume!`).
        fn consume_string(&mut self, required_string: &str) -> Option<&'source str> {
            let start_position = self.position;
            for required_char in required_string.chars() {
                self.consume_char(required_char)?;
            }
            Some(&self.source[start_position..self.position])
        }

        /// Consumes one char out of `required_chars`, or records an error.
        fn consume_any_of_chars(&mut self, required_chars: &[char]) -> Option<&'source str> {
            for &required_char in required_chars {
                if let Some(consumed_value) = try_consume!(self, consume_char(required_char)) {
                    return Some(consumed_value);
                }
            }
            self.got_error();
            None
        }

        /// Does not actually consume any characters. It consumes a boundary between the characters.
        fn consume_boundary(&mut self) -> Option<Boundary> {
            if self.is_empty() {
                return Some(Boundary::EndOfLine);
            }
            // Peek via a clone so the boundary char itself stays unconsumed.
            if let Some(boundary_char) = self.clone().consume_any_of_chars(&BOUNDARIES) {
                return Some(Boundary::Char(boundary_char));
            }
            self.got_error();
            None
        }

        fn maybe_consume_whitespaces(&mut self) {
            while try_consume!(self, consume_any_of_chars(&WHITESPACES)).is_some() {}
        }

        fn consume_opening_curly_bracket(&mut self) -> Option<&'source str> {
            self.consume_char('{')
        }

        fn consume_closing_curly_bracket(&mut self) -> Option<&'source str> {
            self.consume_char('}')
        }

        fn consume_opening_square_bracket(&mut self) -> Option<&'source str> {
            self.consume_char('[')
        }

        fn consume_closing_square_bracket(&mut self) -> Option<&'source str> {
            self.consume_char(']')
        }

        fn consume_colon(&mut self) -> Option<&'source str> {
            self.consume_char(':')
        }

        fn consume_comma(&mut self) -> Option<&'source str> {
            self.consume_char(',')
        }

        fn consume_bool(&mut self) -> Option<bool> {
            let consumed_bool =
                try_consume!(self, consume_string("true")).or_else(|| self.consume_string("false"));
            match consumed_bool {
                Some("true") => Some(true),
                Some("false") => Some(false),
                _ => None,
            }
        }

        fn consume_null(&mut self) -> Option<()> {
            self.consume_string("null").map(|_| ())
        }

        fn consume_int(&mut self) -> Option<Int> {
            let start_position = self.position;
            // treat -0 as a float instead of an int
            if try_consume!(self, consume_char('-')).is_some()
                && try_consume!(self, consume_char('0')).is_some()
            {
                self.got_error();
            }
            // consume at least a single non zero digit first OR just a single zero digit
            if try_consume!(self, consume_char('0')).is_none() {
                self.consume_any_of_chars(&NON_ZERO_DIGITS);
                while try_consume!(self, consume_any_of_chars(&DIGITS)).is_some() {}
            }
            if self.has_errors() {
                return None;
            }
            let parsed_int = self.source[start_position..self.position].parse::<Int>();
            assert!(parsed_int.is_ok(), "Consumed chars must represent an int.");
            parsed_int.ok()
        }

        fn consume_float(&mut self) -> Option<Float> {
            let start_position = self.position;
            // FIXME spaghetti
            let is_negative = try_consume!(self, consume_char('-')).is_some();
            let mut is_float = false;
            // mantissa integer part
            if try_consume!(self, consume_char('0')).is_none() {
                self.consume_any_of_chars(&NON_ZERO_DIGITS);
                while try_consume!(self, consume_any_of_chars(&DIGITS)).is_some() {}
            } else if is_negative {
                // treat -0 as a float
                is_float = true;
            }
            // mantissa fractional part
            if try_consume!(self, consume_char('.')).is_some() {
                is_float = true;
                self.consume_any_of_chars(&DIGITS);
                while try_consume!(self, consume_any_of_chars(&DIGITS)).is_some() {}
            }
            // exponent
            if try_consume!(self, consume_any_of_chars(&['e', 'E'])).is_some() {
                is_float = true;
                let _ = try_consume!(self, consume_any_of_chars(&['-', '+']));
                self.consume_any_of_chars(&DIGITS);
                while try_consume!(self, consume_any_of_chars(&DIGITS)).is_some() {}
            }
            // there is a separate consumer method for handling ints
            // FIXME maybe instead make a single method for both int and float numbers which
            // returns a (is_float, &[char]), to get rid of the repeated code in
            // consume_int and consume_float
            if !is_float {
                self.got_error();
            }
            if self.has_errors() {
                return None;
            }
            let parsed_float = self.source[start_position..self.position].parse::<Float>();
            assert!(parsed_float.is_ok(), "Consumed chars must represent a float.");
            parsed_float.ok()
        }

        /// Consumes a two-char escape sequence such as `\n` or `\"`.
        fn consume_escaped_char(&mut self) -> Option<char> {
            self.consume_char('\\');
            if let Some(escaped_char) =
                self.consume_any_of_chars(&['b', 'f', 'n', 'r', 't', '"', '\\'])
            {
                return match escaped_char {
                    "b" => Some('\u{0008}'),
                    "f" => Some('\u{000C}'),
                    "n" => Some('\n'),
                    "r" => Some('\r'),
                    "t" => Some('\t'),
                    "\"" => Some('"'),
                    "\\" => Some('\\'),
                    // shouldn't happen
                    _ => {
                        self.got_error();
                        None
                    }
                };
            }
            None
        }

        /// Consumes a `\uXXXX` sequence and returns the raw code unit.
        fn consume_utf16_code_unit(&mut self) -> Option<u16> {
            self.consume_string("\\u");
            let code_unit_start_position = self.position;
            for _ in 0..4 {
                self.consume_any_of_chars(&HEXADECIMAL_DIGITS);
            }
            if self.has_errors() {
                return None;
            }
            if let Ok(code_unit) =
                u16::from_str_radix(&self.source[code_unit_start_position..self.position], 16)
            {
                Some(code_unit)
            } else {
                self.got_error();
                None
            }
        }

        /// Consumes two `\uXXXX` sequences forming a surrogate pair.
        fn consume_utf16_surrogate_pair(&mut self) -> Option<char> {
            let high_code_unit = self.consume_utf16_code_unit()?;
            let low_code_unit = self.consume_utf16_code_unit()?;
            // A valid pair is high surrogate (D800..DC00) + low surrogate
            // (DC00..E000); `String::from_utf16` would reject anything else
            // anyway, but checking up front keeps the error local.
            let is_valid_pair = (0xD800..0xDC00).contains(&high_code_unit)
                && (0xDC00..0xE000).contains(&low_code_unit);
            if !is_valid_pair {
                self.got_error();
                return None;
            }
            if let Ok(consumed_char) = utf16_pair_to_char(high_code_unit, low_code_unit) {
                Some(consumed_char)
            } else {
                self.got_error();
                None
            }
        }

        /// Consumes a single `\uXXXX` sequence outside the surrogate range.
        fn consume_utf16_code_unit_char(&mut self) -> Option<char> {
            let code_unit = self.consume_utf16_code_unit()?;
            if (0xD800..0xE000).contains(&code_unit) {
                self.got_error();
                return None;
            }
            if let Ok(consumed_char) = utf16_code_unit_to_char(code_unit) {
                Some(consumed_char)
            } else {
                self.got_error();
                None
            }
        }

        /// Consumes a double-quoted string literal, decoding escapes.
        fn consume_string_literal(&mut self) -> Option<String> {
            self.consume_char('"');
            if self.has_errors() {
                return None;
            }
            let mut consumed_string = String::new();
            loop {
                if let Some(consumed_char) = try_consume!(self, consume_escaped_char()) {
                    consumed_string.push(consumed_char);
                } else if let Some(consumed_char) =
                    try_consume!(self, consume_utf16_code_unit_char())
                {
                    consumed_string.push(consumed_char);
                } else if let Some(consumed_char) =
                    try_consume!(self, consume_utf16_surrogate_pair())
                {
                    consumed_string.push(consumed_char);
                } else if try_consume!(self, consume_char('"')).is_some() {
                    break;
                } else {
                    // line endings in the middle of a string literal are not allowed
                    if self.is_empty()
                        || try_consume!(self, consume_any_of_chars(&LINE_ENDINGS)).is_some()
                    {
                        self.got_error();
                        return None;
                    }
                    let consumed_char = self.next_char().chars().next().unwrap();
                    consumed_string.push(consumed_char);
                }
            }
            if !self.has_errors() {
                Some(consumed_string)
            } else {
                None
            }
        }

        /// Consumes `// ...` up to (but NOT including) the line ending.
        ///
        /// BUGFIX: the previous version consumed the line ending and then
        /// rewound `position` without rewinding `source_iterator`, desyncing
        /// the two so the first character after the line ending was silently
        /// swallowed by the next read (e.g. "//c\n5" lost the `5`). Peeking
        /// for the ending with a clone keeps both fields in sync.
        fn consume_single_line_comment(&mut self) -> Option<&'source str> {
            if self.consume_string("//").is_none() {
                return None;
            }
            let start_position = self.position;
            while !self.is_empty() {
                // Stop before '\r' or '\n'; it is left for the whitespace
                // consumer, which also handles a following '\n' of a "\r\n".
                if self.clone().consume_any_of_chars(&LINE_ENDINGS).is_some() {
                    break;
                }
                self.next_char();
            }
            if !self.has_errors() {
                Some(&self.source[start_position..self.position])
            } else {
                None
            }
        }

        /// Consumes `/* ... */` and returns the text between the delimiters.
        fn consume_multiline_comment(&mut self) -> Option<&'source str> {
            if self.consume_string("/*").is_none() {
                return None;
            }
            let start_position = self.position;
            while try_consume!(self, consume_string("*/")).is_none() {
                if self.is_empty() {
                    // unterminated comment
                    self.got_error();
                    return None;
                }
                self.next_char();
            }
            if !self.has_errors() {
                // `position` is past the closing "*/"; trim it off the slice.
                Some(&self.source[start_position..self.position - "*/".len()])
            } else {
                None
            }
        }

        /// Returns the next token, or None at end of input (no error recorded)
        /// or on a lexing failure (error recorded).
        fn consume_next_token(&mut self) -> Option<Token> {
            if self.has_errors() {
                return None;
            }
            self.maybe_consume_whitespaces();
            if self.is_empty() {
                return None;
            }
            // Value-like tokens require a following boundary so that e.g.
            // "truefalse" does not lex as two bools.
            if let Some(bool_value) = try_consume!(self, consume_bool().consume_boundary()) {
                return Some(Token::Bool(bool_value));
            }
            if try_consume!(self, consume_null().consume_boundary()).is_some() {
                return Some(Token::Null);
            }
            if let Some(string_value) =
                try_consume!(self, consume_string_literal().consume_boundary())
            {
                return Some(Token::String(string_value));
            }
            if let Some(int_value) = try_consume!(self, consume_int().consume_boundary()) {
                return Some(Token::Int(int_value));
            }
            if let Some(float_value) = try_consume!(self, consume_float().consume_boundary()) {
                return Some(Token::Float(float_value));
            }
            if try_consume!(self, consume_opening_curly_bracket()).is_some() {
                return Some(Token::OpeningCurlyBracket);
            }
            if try_consume!(self, consume_closing_curly_bracket()).is_some() {
                return Some(Token::ClosingCurlyBracket);
            }
            if try_consume!(self, consume_opening_square_bracket()).is_some() {
                return Some(Token::OpeningSquareBracket);
            }
            if try_consume!(self, consume_closing_square_bracket()).is_some() {
                return Some(Token::ClosingSquareBracket);
            }
            if try_consume!(self, consume_colon()).is_some() {
                return Some(Token::Colon);
            }
            if try_consume!(self, consume_comma()).is_some() {
                return Some(Token::Comma);
            }
            // FIXME store a str instead?
            if let Some(comment_string) = try_consume!(self, consume_single_line_comment()) {
                return Some(Token::Comment(comment_string.to_string()));
            }
            if let Some(comment_string) = try_consume!(self, consume_multiline_comment()) {
                return Some(Token::Comment(comment_string.to_string()));
            }
            self.got_error();
            None
        }
    }

    #[cfg(test)]
    mod tokenizer_tests {
        use super::*;

        #[test]
        fn works_for_empty_string() {
            assert_eq!(None, Tokenizer::new("").consume_next_token());
        }

        #[test]
        fn fails_for_ints_with_double_zero() {
            let mut tokenizer = Tokenizer::new("00");
            assert_eq!(None, tokenizer.consume_next_token());
            assert!(tokenizer.has_errors());
        }

        #[test]
        fn fails_for_floats_with_double_zero() {
            let mut tokenizer = Tokenizer::new("00.125");
            assert_eq!(None, tokenizer.consume_next_token());
            assert!(tokenizer.has_errors());
        }

        #[test]
        fn minus_zero_is_parsed_as_float() {
            assert_eq!(
                Some(Token::Float(-0.0)),
                Tokenizer::new("-0").consume_next_token()
            );
        }

        #[test]
        fn does_not_panic_when_string_literal_quotes_are_not_closed() {
            Tokenizer::new("\"").consume_next_token();
        }

        #[test]
        fn fails_when_there_is_no_boundary_between_tokens() {
            let mut tokenizer = Tokenizer::new("truefalse");
            assert_eq!(None, tokenizer.consume_next_token());
            assert!(tokenizer.has_errors());
        }

        #[test]
        fn works_for_multiple_tokens() {
            let line = " [true] {false, -1.125, -1.25E-2, 123} \"ab \\n\\uD83d\\uDe10 \\u00B6\" /*multi-
line comment*/ null: -321::0 0.0 //single-line!\r\n //sneaky comment";
            let expected_tokens = [
                Token::OpeningSquareBracket,
                Token::Bool(true),
                Token::ClosingSquareBracket,
                Token::OpeningCurlyBracket,
                Token::Bool(false),
                Token::Comma,
                Token::Float(-1.125),
                Token::Comma,
                Token::Float(-1.25e-2),
                Token::Comma,
                Token::Int(123),
                Token::ClosingCurlyBracket,
                Token::String("ab \n😐 ¶".to_owned()),
                Token::Comment("multi-\nline comment".to_owned()),
                Token::Null,
                Token::Colon,
                Token::Int(-321),
                Token::Colon,
                Token::Colon,
                Token::Int(0),
                Token::Float(0.0),
                Token::Comment("single-line!".to_owned()),
                Token::Comment("sneaky comment".to_owned()),
            ];
            let mut tokenizer = Tokenizer::new(line);
            for expected_token in expected_tokens {
                assert_eq!(Some(expected_token), tokenizer.consume_next_token());
            }
            assert_eq!(None, tokenizer.consume_next_token());
        }
    }

    /// A parsed JSON value. Object keys are deduplicated: later duplicates
    /// overwrite earlier ones.
    #[derive(PartialEq, Debug)]
    pub enum JsonValue {
        Bool(bool),
        Int(Int),
        Float(Float),
        String(String),
        Array(Vec<JsonValue>),
        Object(HashMap<String, JsonValue>),
        Null,
    }

    impl Display for JsonValue {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            match self {
                Self::Null => write!(f, "null"),
                Self::Bool(value) => write!(f, "{}", value),
                Self::Float(value) => write!(f, "{}", value),
                Self::Int(value) => write!(f, "{}", value),
                Self::String(value) => write!(f, "\"{}\"", value),
                Self::Array(vector) => {
                    let items = vector
                        .iter()
                        .map(|value| value.to_string())
                        .collect::<Vec<_>>()
                        .join(", ");
                    write!(f, "[{}]", items)
                }
                Self::Object(object) => {
                    if object.is_empty() {
                        return write!(f, "{{}}");
                    }
                    // NOTE(review): a single-space indent as in the source as
                    // received — possibly collapsed from wider whitespace.
                    let indent = " ";
                    let mut string = String::new();
                    let values_count = object.len();
                    for (entry_index, (key, value)) in object.iter().enumerate() {
                        string.push_str(&format!("{}\"{}\": ", indent, key));
                        let value_as_string = value.to_string();
                        let lines_count = value_as_string.lines().count();
                        // Re-indent every line of the nested value except the
                        // first (which sits right after `"key": `).
                        for (line_index, line) in value_as_string.lines().enumerate() {
                            if line_index != 0 {
                                string.push_str(indent);
                            }
                            string.push_str(line);
                            if line_index != lines_count - 1 {
                                string.push('\n');
                            }
                        }
                        if entry_index != values_count - 1 {
                            string.push_str(",\n");
                        }
                    }
                    write!(f, "{{\n{}\n}}", string)
                }
            }
        }
    }

    // FIXME spaghetti
    // Consumes the next non-comment token if it matches one of the given
    // patterns; on mismatch or error the tokenizer is left untouched.
    macro_rules! try_consume_token {
        ($tokenizer:expr, $($token_type:pat_param)|+) => {{
            let mut updated_tokenizer = $tokenizer.clone();
            let mut consumed_token = updated_tokenizer.consume_next_token();
            while let Some(Token::Comment(_)) = consumed_token {
                consumed_token = updated_tokenizer.consume_next_token();
            }
            if !updated_tokenizer.has_errors()
                && matches!(consumed_token, $(Some($token_type))|+)
            {
                *$tokenizer = updated_tokenizer;
                consumed_token
            } else {
                None
            }
        }};
    }

    /// Parses a single scalar (null/bool/int/float/string) token into a value.
    fn parse_scalar_value(tokenizer: &mut Tokenizer) -> Result<JsonValue, ()> {
        let token = try_consume_token!(
            tokenizer,
            Token::Null | Token::Bool(_) | Token::Int(_) | Token::Float(_) | Token::String(_)
        )
        .ok_or(())?;
        match token {
            Token::Null => Ok(JsonValue::Null),
            Token::Bool(value) => Ok(JsonValue::Bool(value)),
            Token::Int(value) => Ok(JsonValue::Int(value)),
            Token::Float(value) => Ok(JsonValue::Float(value)),
            Token::String(value) => Ok(JsonValue::String(value)),
            // unreachable: the pattern above only admits the scalar tokens
            _ => Err(()),
        }
    }

    // pls? https://github.com/rust-lang/rfcs/issues/2616
    /// Parses `[ value, value, ... ]`; a trailing comma is allowed.
    fn parse_array(tokenizer: &mut Tokenizer) -> Result<JsonValue, ()> {
        if try_consume_token!(tokenizer, Token::OpeningSquareBracket).is_none() {
            return Err(());
        }
        let mut array = vec![];
        let mut first_element = true;
        loop {
            // FIXME ugly
            // a comma is required between elements; checking it before the
            // closing bracket is what makes the trailing comma legal
            let comma_consumed = if first_element {
                false
            } else {
                try_consume_token!(tokenizer, Token::Comma).is_some()
            };
            if try_consume_token!(tokenizer, Token::ClosingSquareBracket).is_some() {
                break;
            }
            if !first_element && !comma_consumed {
                return Err(());
            }
            array.push(parse_json_value(tokenizer)?);
            first_element = false;
        }
        Ok(JsonValue::Array(array))
    }

    /// Parses `{ "key": value, ... }`; a trailing comma is allowed and
    /// duplicate keys overwrite earlier entries.
    fn parse_object(tokenizer: &mut Tokenizer) -> Result<JsonValue, ()> {
        if try_consume_token!(tokenizer, Token::OpeningCurlyBracket).is_none() {
            return Err(());
        }
        let mut object = HashMap::<String, JsonValue>::new();
        let mut first_element = true;
        loop {
            // FIXME ugly
            let comma_consumed = if first_element {
                false
            } else {
                try_consume_token!(tokenizer, Token::Comma).is_some()
            };
            if try_consume_token!(tokenizer, Token::ClosingCurlyBracket).is_some() {
                break;
            }
            if !first_element && !comma_consumed {
                return Err(());
            }
            let Some(Token::String(key)) = try_consume_token!(tokenizer, Token::String(_)) else {
                return Err(());
            };
            try_consume_token!(tokenizer, Token::Colon).ok_or(())?;
            let value = parse_json_value(tokenizer)?;
            object.insert(key, value);
            first_element = false;
        }
        Ok(JsonValue::Object(object))
    }

    /// Parses any JSON value: scalar, array, or object.
    fn parse_json_value(tokenizer: &mut Tokenizer) -> Result<JsonValue, ()> {
        if let Ok(value) = parse_scalar_value(tokenizer) {
            Ok(value)
        } else if let Ok(value) = parse_array(tokenizer) {
            Ok(value)
        } else if let Ok(value) = parse_object(tokenizer) {
            Ok(value)
        } else {
            Err(())
        }
    }

    impl FromStr for JsonValue {
        type Err = ();

        fn from_str(source: &str) -> Result<Self, Self::Err> {
            let mut tokenizer = Tokenizer::new(source);
            let value = parse_json_value(&mut tokenizer)?;
            // Error out if anything other than comments is left over.
            // BUGFIX: the previous `while let Some(Token::Comment(_)) =
            // consume_next_token()` loop consumed and DISCARDED the first
            // trailing non-comment token before the leftover check ran, so
            // input like "5 6" parsed as Ok(5).
            loop {
                match tokenizer.consume_next_token() {
                    Some(Token::Comment(_)) => continue,
                    Some(_) => return Err(()),
                    None => break,
                }
            }
            // consume_next_token returns None with an error recorded when the
            // trailing bytes are not even a valid token
            if tokenizer.has_errors() {
                return Err(());
            }
            Ok(value)
        }
    }
}
Become a Patron
Sponsor on GitHub
Donate via PayPal
Source on GitHub
Mailing list
Installed libraries
Wiki
Report an issue
How it works
Contact the author
CE on Mastodon
About the author
Statistics
Changelog
Version tree