/third_party/protobuf/src/google/protobuf/io/

tokenizer_unittest.cc
   183  EXPECT_TRUE(Tokenizer::ParseInteger(text, kuint64max, &result));  in ParseInteger()
   198  Tokenizer::TokenType type;
   208  {"hello", Tokenizer::TYPE_IDENTIFIER},
   211  {"123", Tokenizer::TYPE_INTEGER},
   212  {"0xab6", Tokenizer::TYPE_INTEGER},
   213  {"0XAB6", Tokenizer::TYPE_INTEGER},
   214  {"0X1234567", Tokenizer::TYPE_INTEGER},
   215  {"0x89abcdef", Tokenizer::TYPE_INTEGER},
   216  {"0x89ABCDEF", Tokenizer::TYPE_INTEGER},
   217  {"01234567", Tokenizer [all...]

tokenizer.h
    56  class Tokenizer;
    94  class PROTOBUF_EXPORT Tokenizer {
    96  // Construct a Tokenizer that reads and tokenizes text from the given
    99  Tokenizer(ZeroCopyInputStream* input, ErrorCollector* error_collector);
   100  ~Tokenizer();
   203  // comes from a TYPE_FLOAT token parsed by Tokenizer. If it doesn't, the
   208  // comes from a TYPE_STRING token parsed by Tokenizer. If it doesn't, the
   218  // parsed by a Tokenizer, the result is undefined (possibly an assert
   260  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Tokenizer);
   397  inline const Tokenizer [all...]

tokenizer.cc
   111  // For instance, Tokenizer::ConsumeZeroOrMore<Whitespace>() will eat
   195  Tokenizer::Tokenizer(ZeroCopyInputStream* input,  in Tokenizer(), function in google::protobuf::io::Tokenizer
   219  Tokenizer::~Tokenizer() {  in ~Tokenizer()
   230  void Tokenizer::NextChar() {  in NextChar()
   251  void Tokenizer::Refresh() {  in Refresh()
   282  inline void Tokenizer::RecordTo(std::string* target) {  in RecordTo()
   287  inline void Tokenizer::StopRecording() {  in StopRecording()
   300  inline void Tokenizer [all...]
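Taken together these hits show the full usage pattern: construct the tokenizer over a ZeroCopyInputStream, supply an ErrorCollector, then walk tokens with Next()/current(). A minimal sketch of that loop (the collector subclass and sample input are illustrative, not code from this tree):

    #include <iostream>
    #include <google/protobuf/io/tokenizer.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    using google::protobuf::io::ArrayInputStream;
    using google::protobuf::io::ErrorCollector;
    using google::protobuf::io::Tokenizer;

    // ErrorCollector is abstract; this minimal subclass just prints errors.
    class StderrErrorCollector : public ErrorCollector {
     public:
      void AddError(int line, int column, const std::string& message) override {
        std::cerr << line << ":" << column << ": " << message << "\n";
      }
    };

    int main() {
      const char text[] = "message Foo { optional int32 bar = 1; }";
      ArrayInputStream input(text, sizeof(text) - 1);
      StderrErrorCollector errors;
      Tokenizer tokenizer(&input, &errors);

      // Next() advances to the next token; current() exposes its text,
      // type (TYPE_IDENTIFIER, TYPE_INTEGER, ...), line, and column.
      while (tokenizer.Next()) {
        std::cout << tokenizer.current().text << "\n";
      }
      return 0;
    }
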
/third_party/gn/src/gn/ |
tokenizer_unittest.cc
    25  std::vector<Token> results = Tokenizer::Tokenize(&input_file, &err);  in CheckTokenizer()
    40  TEST(Tokenizer, Empty) {  in TEST()
    45  std::vector<Token> results = Tokenizer::Tokenize(&empty_string_input, &err);  in TEST()
    51  results = Tokenizer::Tokenize(&whitespace_input, &err);  in TEST()
    55  TEST(Tokenizer, Identifier) {  in TEST()
    60  TEST(Tokenizer, Integer) {  in TEST()
    66  TEST(Tokenizer, IntegerNoSpace) {  in TEST()
    72  TEST(Tokenizer, String) {  in TEST()
    80  TEST(Tokenizer, Operator) {  in TEST()
   103  TEST(Tokenizer, Scope [all...]

tokenizer.cc
    71  Tokenizer::Tokenizer(const InputFile* input_file,  in Tokenizer(), function in Tokenizer
    79  Tokenizer::~Tokenizer() = default;
    82  std::vector<Token> Tokenizer::Tokenize(  in Tokenize()
    86  Tokenizer t(input_file, err, whitespace_transform);  in Tokenize()
    90  std::vector<Token> Tokenizer::Run() {  in Run()
   159  size_t Tokenizer::ByteOffsetOfNthLine(std::string_view buf, int n) {  in ByteOffsetOfNthLine()
   179  bool Tokenizer::IsNewline(std::string_view buffer, size_t offset) {  in IsNewline()
   186  bool Tokenizer [all...]

tokenizer.h
    27  class Tokenizer {
    56  Tokenizer(const InputFile* input_file,
    59  ~Tokenizer();
   104  Tokenizer(const Tokenizer&) = delete;
   105  Tokenizer& operator=(const Tokenizer&) = delete;

string_utils.cc
    69  std::vector<Token> tokens = Tokenizer::Tokenize(&input_file, err);  in AppendInterpolatedExpression()
   160  has_non_ident_chars |= Tokenizer::IsIdentifierContinuingChar(input[*i]);  in AppendStringInterpolation()
   182  if (!Tokenizer::IsIdentifierFirstChar(input[*i])) {  in AppendStringInterpolation()
   192  while (*i < size && Tokenizer::IsIdentifierContinuingChar(input[*i]))  in AppendStringInterpolation()

parser_unittest.cc
    17  *result = Tokenizer::Tokenize(input, &err);  in GetTokens()
    62  std::vector<Token> tokens = Tokenizer::Tokenize(&input_file, &err);  in DoParserErrorTest()
    80  std::vector<Token> tokens = Tokenizer::Tokenize(&input_file, &err);  in DoExpressionErrorTest()
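Every caller above goes through the same static entry point. A sketch of that flow using GN's internal InputFile/SourceFile/Err types (assembled from the hits; the exact include paths are assumptions):

    #include <vector>

    #include "gn/err.h"
    #include "gn/input_file.h"
    #include "gn/tokenizer.h"

    std::vector<Token> TokenizeSnippet() {
      InputFile input_file(SourceFile("//BUILD.gn"));
      input_file.SetContents("executable(\"hello\") { sources = [ \"hello.cc\" ] }");

      Err err;
      // Tokenize() constructs a Tokenizer internally and runs it to completion,
      // reporting the first problem through |err|.
      std::vector<Token> tokens = Tokenizer::Tokenize(&input_file, &err);
      if (err.has_error())
        tokens.clear();  // Treat a tokenizer error as "no usable tokens".
      return tokens;
    }
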
/third_party/vk-gl-cts/external/amber/src/src/ |
tokenizer_test.cc
    27  Tokenizer t("");  in TEST_F()
    34  Tokenizer t("TestIdentifier");  in TEST_F()
    46  Tokenizer t("123");  in TEST_F()
    58  Tokenizer t("-123");  in TEST_F()
    70  Tokenizer t("123.456");  in TEST_F()
    84  Tokenizer t(nan_str);  in TestNaN()
   109  Tokenizer t("-123.456");  in TEST_F()
   121  Tokenizer t(".123456");  in TEST_F()
   133  Tokenizer t("BufferAccess32");  in TEST_F()
   145  Tokenizer  in TEST_F() [all...]

tokenizer.cc
    55  Tokenizer::Tokenizer(const std::string& data) : data_(data) {}  in Tokenizer(), function in amber::Tokenizer
    57  Tokenizer::~Tokenizer() = default;
    59  std::unique_ptr<Token> Tokenizer::NextToken() {  in NextToken()
   261  std::unique_ptr<Token> Tokenizer::PeekNextToken() {  in PeekNextToken()
   272  std::string Tokenizer::ExtractToNext(const std::string& str) {  in ExtractToNext()
   293  bool Tokenizer::IsWhitespace(char ch) {  in IsWhitespace()
   298  void Tokenizer::SkipWhitespace() {  in SkipWhitespace()
   305  void Tokenizer [all...]

tokenizer.h
   102  class Tokenizer {
   104  explicit Tokenizer(const std::string& data);
   105  ~Tokenizer();
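The tests drive the tokenizer one token at a time. A sketch of that loop; the Token accessors (IsEOS, IsIdentifier, IsInteger, AsString, AsInt32) are assumptions inferred from these tests, as is NextToken() returning an end-of-stream token rather than null:

    #include <iostream>

    #include "src/tokenizer.h"

    void DumpTokens() {
      amber::Tokenizer t("BUFFER buf SIZE 4");
      // NextToken() is assumed to yield an end-of-stream token when input runs out.
      for (auto token = t.NextToken(); !token->IsEOS(); token = t.NextToken()) {
        if (token->IsIdentifier())
          std::cout << "identifier: " << token->AsString() << "\n";
        else if (token->IsInteger())
          std::cout << "integer: " << token->AsInt32() << "\n";
      }
    }
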
/third_party/node/deps/npm/node_modules/node-gyp/gyp/pylib/packaging/ |
_parser.py
    10  from ._tokenizer import DEFAULT_RULES, Tokenizer
    64  return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
    67  def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
    89  tokenizer: Tokenizer,
   140  tokenizer: Tokenizer, *, span_start: int, after: str
   159  def _parse_extras(tokenizer: Tokenizer) -> List[str]:
   178  def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
   205  def _parse_specifier(tokenizer: Tokenizer) -> str:
   222  def _parse_version_many(tokenizer: Tokenizer) -> str:
   255  return _parse_full_marker(Tokenizer(sourc [all...]
/third_party/vk-gl-cts/framework/xexml/ |
xeXMLParser.cpp
    57  Tokenizer::Tokenizer (void)  in Tokenizer(), function in xe::xml::Tokenizer
    65  Tokenizer::~Tokenizer (void)  in ~Tokenizer()
    69  void Tokenizer::clear (void)  in clear()
    77  void Tokenizer::error (const std::string& what)  in error()
    82  void Tokenizer::feed (const deUint8* bytes, int numBytes)  in feed()
    98  int Tokenizer::getChar (int offset) const  in getChar()
   108  void Tokenizer::advance (void)  in advance()
   338  void Tokenizer [all...]

xeXMLParser.hpp
    83  class Tokenizer
    86  Tokenizer (void);
    87  ~Tokenizer (void);
   103  Tokenizer (const Tokenizer& other);
   104  Tokenizer& operator= (const Tokenizer& other);
   190  Tokenizer m_tokenizer;
   203  inline void Tokenizer::getTokenStr (std::string& dst) const  in getTokenStr()
   211  inline void Tokenizer [all...]
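feed()/advance() imply a push-style tokenizer: bytes arrive in chunks and tokens are pulled out as they complete. A rough sketch; the getToken() accessor and the TOKEN_END_OF_STRING/TOKEN_INCOMPLETE names are assumptions, since only feed/advance/getTokenStr appear in the hits above:

    #include <string>

    #include "xeXMLParser.hpp"

    void tokenizeChunk (const deUint8* bytes, int numBytes)
    {
        xe::xml::Tokenizer tokenizer;
        tokenizer.feed(bytes, numBytes); // further chunks may be fed later

        std::string tokenStr;
        // Stop at end of input, or when the current token needs more bytes.
        while (tokenizer.getToken() != xe::xml::TOKEN_END_OF_STRING &&
               tokenizer.getToken() != xe::xml::TOKEN_INCOMPLETE)
        {
            tokenizer.getTokenStr(tokenStr); // copy out current token text
            tokenizer.advance();             // consume it and scan the next
        }
    }
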
/third_party/skia/third_party/externals/swiftshader/src/OpenGL/compiler/preprocessor/ |
Tokenizer.h
    27  class Tokenizer : public Lexer
    44  Tokenizer(Diagnostics *diagnostics);
    45  ~Tokenizer() override;
    56  PP_DISALLOW_COPY_AND_ASSIGN(Tokenizer);

DirectiveParser.h
    28  class Tokenizer;
    33  DirectiveParser(Tokenizer *tokenizer,
    87  Tokenizer *mTokenizer;

generate_parser.sh
    35  run_flex Tokenizer.l Tokenizer.cpp
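generate_parser.sh shows the lexer body is generated by flex from Tokenizer.l. A sketch of driving it directly, bypassing DirectiveParser; the Diagnostics subclass, the init() call shape, and the token.type == 0 end-of-input check are assumptions (flex scanners conventionally return 0 at EOF):

    #include <cstring>

    #include "Diagnostics.h"
    #include "Token.h"
    #include "Tokenizer.h"

    namespace {
    // Diagnostics is abstract; this subclass drops all messages.
    class NullDiagnostics : public pp::Diagnostics {
     protected:
      void print(ID, const pp::SourceLocation&, const std::string&) override {}
    };
    }  // namespace

    void LexSource(const char* source) {
      NullDiagnostics diag;
      pp::Tokenizer tokenizer(&diag);

      const char* const strings[] = {source};
      const int lengths[] = {static_cast<int>(std::strlen(source))};
      tokenizer.init(1, strings, lengths);  // one input string

      pp::Token token;
      tokenizer.lex(&token);
      while (token.type != 0) {  // assumed end-of-input sentinel
        // token.text holds the raw spelling of the current token.
        tokenizer.lex(&token);
      }
    }
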
/third_party/protobuf/src/google/protobuf/compiler/ |
parser.cc
   200  inline bool Parser::LookingAtType(io::Tokenizer::TokenType token_type) {  in LookingAtType()
   204  inline bool Parser::AtEnd() { return LookingAtType(io::Tokenizer::TYPE_END); }  in AtEnd()
   234  if (LookingAtType(io::Tokenizer::TYPE_IDENTIFIER)) {  in ConsumeIdentifier()
   245  if (LookingAtType(io::Tokenizer::TYPE_INTEGER)) {  in ConsumeInteger()
   247  if (!io::Tokenizer::ParseInteger(input_->current().text, kint32max,  in ConsumeInteger()
   277  if (LookingAtType(io::Tokenizer::TYPE_INTEGER)) {  in ConsumeInteger64()
   278  if (!io::Tokenizer::ParseInteger(input_->current().text, max_value,  in ConsumeInteger64()
   293  if (LookingAtType(io::Tokenizer::TYPE_FLOAT)) {  in ConsumeNumber()
   294  *output = io::Tokenizer::ParseFloat(input_->current().text);  in ConsumeNumber()
   297  } else if (LookingAtType(io::Tokenizer  in ConsumeNumber() [all...]

parser.h
    75  bool Parse(io::Tokenizer* input, FileDescriptorProto* file);
   152  inline bool LookingAtType(io::Tokenizer::TokenType token_type);
   247  void StartAt(const io::Tokenizer::Token& token);
   255  void EndAt(const io::Tokenizer::Token& token);
   527  io::Tokenizer* input_;
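Parse() at parser.h:75 is the bridge from the io::Tokenizer above to a FileDescriptorProto. A minimal sketch of that pairing (the silent error collector is an illustrative assumption):

    #include <string>

    #include <google/protobuf/compiler/parser.h>
    #include <google/protobuf/descriptor.pb.h>
    #include <google/protobuf/io/tokenizer.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    namespace {
    class SilentCollector : public google::protobuf::io::ErrorCollector {
     public:
      void AddError(int, int, const std::string&) override {}
    };
    }  // namespace

    bool ParseProtoSource(const std::string& source,
                          google::protobuf::FileDescriptorProto* file) {
      google::protobuf::io::ArrayInputStream input(
          source.data(), static_cast<int>(source.size()));
      SilentCollector errors;
      google::protobuf::io::Tokenizer tokenizer(&input, &errors);

      // Parse() consumes tokens until TYPE_END, filling in |file|; it returns
      // false (reporting through the collector) on syntax errors.
      google::protobuf::compiler::Parser parser;
      return parser.Parse(&tokenizer, file);
    }
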
/third_party/skia/third_party/externals/angle2/src/compiler/preprocessor/ |
Tokenizer.h
    22  class Tokenizer : public Lexer
    39  Tokenizer(Diagnostics *diagnostics);
    40  ~Tokenizer() override;

DirectiveParser.h
    23  class Tokenizer;
    28  DirectiveParser(Tokenizer *tokenizer,
    76  Tokenizer *mTokenizer;
/third_party/python/Tools/peg_generator/pegen/ |
build.py
    15  from pegen.tokenizer import Tokenizer
   170  ) -> Tuple[Grammar, Parser, Tokenizer]:
   172  tokenizer = Tokenizer(tokenize.generate_tokens(file.readline), verbose=verbose_tokenizer)
   262  ) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
   302  ) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:

__main__.py
    15  from pegen.build import Grammar, Parser, ParserGenerator, Tokenizer
    21  ) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    50  ) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
/third_party/protobuf/src/google/protobuf/ |
text_format.cc
   269  tokenizer_.set_comment_style(io::Tokenizer::SH_COMMENT_STYLE);  in ParserImpl()
   288  if (LookingAtType(io::Tokenizer::TYPE_END)) {  in Parse()
   303  return suc && LookingAtType(io::Tokenizer::TYPE_END);  in ParseField()
   552  LookingAtType(io::Tokenizer::TYPE_STRING)) {  in ConsumeField()
   757  if (LookingAtType(io::Tokenizer::TYPE_INTEGER)) {  in ConsumeFieldValue()
   783  if (LookingAtType(io::Tokenizer::TYPE_IDENTIFIER)) {  in ConsumeFieldValue()
   789  LookingAtType(io::Tokenizer::TYPE_INTEGER)) {  in ConsumeFieldValue()
   835  if (LookingAtType(io::Tokenizer::TYPE_STRING)) {  in SkipFieldValue()
   836  while (LookingAtType(io::Tokenizer::TYPE_STRING)) {  in SkipFieldValue()
   877  if (!LookingAtType(io::Tokenizer  in SkipFieldValue() [all...]
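This tokenizer is not used directly by callers; it sits behind TextFormat's public parse functions (note the SH_COMMENT_STYLE hit: '#' starts comments in text protos). A round trip through the public API, with a hypothetical generated message type:

    #include <string>

    #include <google/protobuf/text_format.h>

    #include "my_message.pb.h"  // hypothetical generated header

    bool ParseTextProto(const std::string& text, MyMessage* message) {
      // Drives ParserImpl, which configures io::Tokenizer as seen above.
      return google::protobuf::TextFormat::ParseFromString(text, message);
    }
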
/third_party/protobuf/python/google/protobuf/internal/ |
text_format_test.py
  1892  tokenizer = text_format.Tokenizer(text.splitlines())
  1966  tokenizer = text_format.Tokenizer(text.splitlines())
  1975  tokenizer = text_format.Tokenizer(text.splitlines())
  1991  tokenizer = text_format.Tokenizer(text.splitlines())
  2010  tokenizer = text_format.Tokenizer(text.splitlines())
  2020  tokenizer = text_format.Tokenizer(text.splitlines())
  2039  tokenizer = text_format.Tokenizer(text.splitlines())
  2043  tokenizer = text_format.Tokenizer(text.splitlines())
  2047  tokenizer = text_format.Tokenizer(text.splitlines())
  2051  tokenizer = text_format.Tokenizer(tex [all...]