Lines matching refs:token (excerpted from CPython's Lib/tokenize.py)
11 the token type (see token.py)
12 the token (a string)
13 the starting (row, column) indices of the token (a 2-tuple of ints)
14 the ending (row, column) indices of the token (a 2-tuple of ints)
19 operators. Additionally, all token lists start with an ENCODING token
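
A minimal sketch (not part of the matches above) of consuming those 5-tuples through the public generate_tokens() API; the field names follow docstring lines 11-14. Note that generate_tokens() works on str input and, unlike tokenize(), does not emit the leading ENCODING token mentioned on line 19:

    import io
    import tokenize

    source = "x = 1\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        tok_type, tok_string, start, end, line = tok  # the 5-tuple from the docstring
        print(tokenize.tok_name[tok_type], repr(tok_string), start, end)
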
35 from token import *
36 from token import EXACT_TOKEN_TYPES
41 import token
42 __all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
44 del token
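
Because __all__ extends token.__all__ (line 42), the token-type constants are reachable from either module; a quick sanity check, assuming nothing beyond the lines above:

    import token
    import tokenize

    assert tokenize.NAME == token.NAME       # re-exported via "from token import *"
    assert "generate_tokens" in tokenize.__all__
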
195 tok_type, token, start, end, line = t
197 self.encoding = token
202 indents.append(token)
217 self.tokens.append(token)
224 def compat(self, token, iterable):
227 startline = token[0] in (NEWLINE, NL)
230 for tok in _itertools.chain([token], iterable):
264 token, which is the first token sequence output by tokenize.
266 Each element returned by the iterable must be a token sequence
267 with at least two elements, a token number and token value. If
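
A sketch of the two-element ("compat") form this docstring describes: untokenize() accepts bare (type, string) pairs, at the cost of exact spacing:

    import io
    import tokenize

    source = "x = 1\n"
    pairs = [(tok.type, tok.string)
             for tok in tokenize.generate_tokens(io.StringIO(source).readline)]
    print(tokenize.untokenize(pairs))  # tokenizes back to the same tokens; whitespace may differ
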
417 The generator produces 5-tuples with these members: the token type; the
418 token string; a 2-tuple (srow, scol) of ints specifying the row and
419 column where the token begins in the source; a 2-tuple (erow, ecol) of
420 ints specifying the row and column where the token ends in the source;
421 and the line on which the token was found. The line passed is the
424 The first token sequence will always be an ENCODING token
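
The contract in this docstring, sketched against the public tokenize() entry point (bytes input, ENCODING token first):

    import io
    import tokenize

    tokens = list(tokenize.tokenize(io.BytesIO(b"x = 1\n").readline))
    assert tokens[0].type == tokenize.ENCODING
    assert tokens[0].string == "utf-8"  # the default when no coding cookie is present
    for tok in tokens:
        print(tok.start, tok.end, tokenize.tok_name[tok.type], repr(tok.string))
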
535 token, initial = line[start:end], line[start]
538 (initial == '.' and token != '.' and token != '...')):
539 yield TokenInfo(NUMBER, token, spos, epos, line)
542 yield TokenInfo(NL, token, spos, epos, line)
544 yield TokenInfo(NEWLINE, token, spos, epos, line)
547 assert not token.endswith("\n")
548 yield TokenInfo(COMMENT, token, spos, epos, line)
550 elif token in triple_quoted:
551 endprog = _compile(endpats[token])
555 token = line[start:pos]
556 yield TokenInfo(STRING, token, spos, (lnum, pos), line)
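
A sketch of the triple-quoted branch above: a string spanning rows comes back as one STRING token, with the end position (lnum, pos) on the later row:

    import io
    import tokenize

    src = '"""two\nlines"""\n'
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        if tok.type == tokenize.STRING:
            print(tok.start, tok.end)  # (1, 0) (2, 8): one token across two rows
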
563 # Check up to the first 3 chars of the token to see if
567 # "rb'" (for example) at the start of the token. If
570 # Note that initial == token[:1].
574 token[:2] in single_quoted or
575 token[:3] in single_quoted):
576 if token[-1] == '\n': # continued string
579 # token. This is looking for the matching end
585 endpats.get(token[1]) or
586 endpats.get(token[2]))
591 yield TokenInfo(STRING, token, spos, epos, line)
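
This is why token[:2] and token[:3] are checked: string prefixes such as rb'' must still be recognized as the start of a single-quoted string. A sketch:

    import io
    import tokenize

    for src in ("'plain'\n", "rb'raw bytes'\n"):
        first = next(tokenize.generate_tokens(io.StringIO(src).readline))
        print(tokenize.tok_name[first.type], repr(first.string))  # STRING both times
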
594 yield TokenInfo(NAME, token, spos, epos, line)
602 yield TokenInfo(OP, token, spos, epos, line)
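
Taken together, the dispatch above maps one source line onto the token types it yields; a sketch:

    import io
    import tokenize

    src = "total = n + 1  # running sum\n"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # NAME 'total', OP '=', NAME 'n', OP '+', NUMBER '1',
    # COMMENT '# running sum', NEWLINE '\n', ENDMARKER ''
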
648 help='display token names using the exact type')
662 for token in tokens:
663 token_type = token.type
665 token_type = token.exact_type
666 token_range = "%d,%d-%d,%d:" % (token.start + token.end)
668 (token_range, tok_name[token_type], token.string))
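
These lines implement the module's command-line interface. Reconstructed from the format strings above (lines 666 and 668), a session looks roughly like this, with -e switching to exact types such as EQUAL instead of OP:

    $ echo 'x = 1' | python -m tokenize -e
    0,0-0,0:            ENCODING       'utf-8'
    1,0-1,1:            NAME           'x'
    1,2-1,3:            EQUAL          '='
    1,4-1,5:            NUMBER         '1'
    1,5-1,6:            NEWLINE        '\n'
    2,0-2,0:            ENDMARKER      ''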