Lines Matching refs:tokenize
3 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
39 # Tests for the tokenize module.
49 result = stringify_tokens_from_source(tokenize(f.readline), s)
58 tokens = list(tokenize(f.readline))
96 for tok in tokenize(readline):
223 for toktype, token, start, end, line in tokenize(f.readline):
960 g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string
1320 with mock.patch('tokenize._builtin_open', return_value=m):
1328 import tokenize as tokenize_module
1358 results = tokenize(mock_readline)
1375 toks = list(tokenize(BytesIO(buf.encode('utf-8')).readline))
1380 tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
1467 tokens = list(tokenize(BytesIO(source.encode('utf-8')).readline))
1521 tokenize.untokenize(), and the latter tokenized again to 2-tuples.
1535 tokens5 = list(tokenize(readline))
1540 tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
1545 tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
1636 print('tokenize', testfile)
1645 return untokenize(tokenize(BytesIO(code).readline)).decode('utf-8')
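
The hits above all exercise the same entry points: tokenize() consumes a readline callable that returns bytes (hence the BytesIO(...).readline wrapping in the matches at lines 960, 1375, 1380, 1467, and 1645), and untokenize() rebuilds source text from the emitted tokens. Below is a minimal sketch of that round trip, assuming nothing beyond the standard library; the helper name roundtrip is hypothetical and only loosely mirrors the one-liner matched at line 1645.

    from io import BytesIO
    from tokenize import tokenize, untokenize

    def roundtrip(source: str) -> str:
        # tokenize() wants a bytes-producing readline, so wrap the string.
        tokens = list(tokenize(BytesIO(source.encode('utf-8')).readline))
        # untokenize() returns bytes, encoded per the leading ENCODING token.
        return untokenize(tokens).decode('utf-8')

    code = "x = 1 + 2\n"
    assert roundtrip(code) == code  # full 5-tuples reproduce this source exactly

    # untokenize() also accepts bare (type, string) 2-tuples, the variant the
    # docstring matched near line 1521 describes; the rebuilt source may be
    # spaced differently, but re-tokenizing it yields the same pairs.
    two = [tok[:2] for tok in tokenize(BytesIO(code.encode('utf-8')).readline)]
    rebuilt = untokenize(two).decode('utf-8')
    again = [tok[:2] for tok in tokenize(BytesIO(rebuilt.encode('utf-8')).readline)]
    assert again == two

The 2-tuple form is the weaker guarantee the test's round-trip docstring (line 1521) relies on: only the (type, string) pairs are compared, not the exact whitespace of the regenerated source.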