gh-102856: Python tokenizer implementation for PEP 701 #104323

Merged

pablogsal merged 20 commits into python:main from mgmacias95:python_tokenizer on May 21, 2023
Changes from 1 commit (of the 20 commits in this pull request):
008f8e5 · First iteration (mgmacias95, Apr 19, 2023)
67a6ad6 · Handle escaping { (mgmacias95, Apr 27, 2023)
f58104d · nested expressions (mgmacias95, Apr 29, 2023)
26102cc · Recursive expression tokenization (mgmacias95, May 2, 2023)
a5f4b40 · Remove intermediate token created for dev purposes (mgmacias95, May 2, 2023)
598bab4 · More improvements (mgmacias95, May 3, 2023)
a0ed816 · fix handling of } tokens (mgmacias95, May 7, 2023)
90b4ab1 · other tokenizer (pablogsal, May 16, 2023)
63ef1c1 · Some progress (pablogsal, May 17, 2023)
6833b1a · Fix more bugs (pablogsal, May 18, 2023)
90da796 · Fix more problems (pablogsal, May 18, 2023)
b5ccd94 · Use IA to clean code (pablogsal, May 18, 2023)
b1c3b2a · Remove lel (pablogsal, May 18, 2023)
e941f12 · Remove whitespace (pablogsal, May 18, 2023)
67a0239 · Fix docs (mgmacias95, May 18, 2023)
dcd221f · Moar tests and fix location error (pablogsal, May 19, 2023)
fd8b60a · Some cleanups (pablogsal, May 19, 2023)
f1a5090 · pass the vacuum cleaner (pablogsal, May 19, 2023)
7fb58b0 · Fix refleaks (mgmacias95, May 20, 2023)
e1b5d35 · 📜🤖 Added by blurb_it. (blurb-it[bot], May 20, 2023)
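
For context, PEP 701 moves f-string parsing into the tokenizer: instead of one opaque STRING token, an f-string is emitted as FSTRING_START, FSTRING_MIDDLE, and FSTRING_END tokens with ordinary tokens for the embedded expressions, which is what the pure-Python tokenizer in this PR implements. A minimal sketch of what that token stream looks like, runnable on Python 3.12+ (the output shown in the comments is illustrative, not copied from this PR's tests):

import io
import tokenize

# Print the PEP 701 token stream for a simple f-string (requires Python 3.12+).
source = 'f"hello {name}!"\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)
# Expect FSTRING_START 'f"', FSTRING_MIDDLE 'hello ', OP '{', NAME 'name',
# OP '}', FSTRING_MIDDLE '!', FSTRING_END '"' among the printed tokens.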
fix handling of } tokens

mgmacias95 authored and pablogsal committed May 18, 2023
commit a0ed8162d1bf26183be56a9feb5f05080cfc484b
Lib/test/test_tokenize.py · 10 changes: 5 additions & 5 deletions
@@ -400,9 +400,9 @@ def test_string(self):
     OP '{' (1, 6) (1, 7)
     NAME 'b' (1, 7) (1, 8)
     OP '!' (1, 8) (1, 9)
-    FSTRING_MIDDLE 'r' (1, 9) (1, 10)
-    OP '}' (1, 10) (1, 12)
-    FSTRING_MIDDLE '}c' (1, 12) (1, 14)
+    NAME 'r' (1, 9) (1, 10)
+    OP '}' (1, 10) (1, 11)
+    FSTRING_MIDDLE '}c' (1, 11) (1, 14)
     FSTRING_END '"' (1, 14) (1, 15)
     """)
         self.check_tokenize('f"{{{1+1}}}"', """\
@@ -412,8 +412,8 @@ def test_string(self):
     NUMBER '1' (1, 5) (1, 6)
     OP '+' (1, 6) (1, 7)
     NUMBER '1' (1, 7) (1, 8)
-    OP '}' (1, 8) (1, 10)
-    FSTRING_MIDDLE '}' (1, 10) (1, 11)
+    OP '}' (1, 8) (1, 9)
+    FSTRING_MIDDLE '}' (1, 9) (1, 11)
     FSTRING_END '"' (1, 11) (1, 12)
     """)
         self.check_tokenize('f"{1+1"', """\
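
The updated expectations capture two behavior changes in this commit: the conversion character after ! is now tokenized as a NAME inside the expression (previously FSTRING_MIDDLE), and the closing } token now spans exactly one column. A quick way to sanity-check the shape of the stream against a released PEP 701 tokenizer (Python 3.12+; exact positions there may differ from this in-progress branch):

import io
import tokenize

# 'r' should come out as NAME, and OP '}' should end one column after it starts.
for tok in tokenize.generate_tokens(io.StringIO('f"{b!r}"\n').readline):
    print(tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)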
Lib/tokenize.py · 158 changes: 79 additions & 79 deletions
@@ -682,88 +682,88 @@ def _tokenize_fstring_middle(middle, start, line_number, line, encoding):
                 mid_expr += c
             case '}':
                 # If two }} are seen, then the first one is skipped and the
-                # second is added as part of the fstring_middle token
-                if escaping:
-                    escaping = False
-                    mid_token += c
-                    continue
-                elif len(middle) > position + 1 and middle[position + 1] == '}':
-                    escaping = True
-                    start += 1
-                    continue
-
-                if curly_brackets:
-                    curly_brackets.pop()
-                if mid_expr and not curly_brackets:
-                    yield TokenInfo(
-                        type=OP,
-                        string='{',
-                        start=end,
-                        end=(line_number, end[1] + 1),
-                        line=line)
-                    end = line_number, end[1] + 1
-
-                    mid_expr += c
-
-                    mid_expr = mid_expr[1:-1]
-
-                    # Find any first level : or !
-                    curly_level = 0
-                    break_char_index = -1
-                    for char_index, char in enumerate(mid_expr):
-                        if char == '{':
-                            curly_level += 1
-                        elif char == '}':
-                            curly_level -= 1
-                        elif char in {'!', ':'} and not curly_level:
-                            break_char_index = char_index
-                            break
-
-                    expression_chunk = mid_expr
-                    if break_char_index != -1:
-                        expression_chunk = mid_expr[:break_char_index+1]
-
-                    if encoding is not None:
-                        buffer = BytesIO(expression_chunk.encode()).readline
-                    else:
-                        buffer = StringIO(expression_chunk).readline
-                    for t in _tokenize(buffer, encoding, fstring_mode=True):
-                        yield TokenInfo(
-                            type=t.type,
-                            string=t.string,
-                            start=(t.start[0] - 1 + end[0], t.start[1] + end[1]),
-                            end=(t.end[0] - 1 + end[0], t.end[1] + end[1]),
-                            line=line
-                        )
-
-                        end = t.end[0] - 1 + end[0], t.end[1] + end[1]
-
-                    if break_char_index != -1:
-                        formatting_chunk = mid_expr[break_char_index+1:]
-                        for t in _tokenize_fstring_middle(
-                            middle=formatting_chunk,
-                            start=end[1],
-                            line_number=line_number,
-                            line=line,
-                            encoding=encoding):
-
-                            yield t
-                            end = t.end
-
-                    yield TokenInfo(
-                        type=OP,
-                        string='}',
-                        start=end,
-                        end=(line_number, start + 1),
-                        line=line)
-
-                    mid_expr = ''
-                    end = line_number, start + 1
-                else:
-                    if mid_expr:
-                        mid_expr += c
-                    else:
-                        mid_token += c
+                # second is added as part of the fstring_middle token.
+                # This is only applied when parsing fstring_middle tokens,
+                # not when parsing an expression.
+                if not mid_expr:
+                    if escaping:
+                        escaping = False
+                        mid_token += c
+                    elif len(middle) > position + 1 and middle[position + 1] == '}':
+                        escaping = True
+                    else:
+                        mid_token += c
+                else:
+                    # parsing an expression
+                    if curly_brackets:
+                        curly_brackets.pop()
+                    if not curly_brackets:
+                        yield TokenInfo(
+                            type=OP,
+                            string='{',
+                            start=end,
+                            end=(line_number, end[1] + 1),
+                            line=line)
+                        end = line_number, end[1] + 1
+
+                        mid_expr += c
+
+                        mid_expr = mid_expr[1:-1]
+
+                        # Find any first level : or !
+                        curly_level = 0
+                        break_char_index = -1
+                        for char_index, char in enumerate(mid_expr):
+                            if char == '{':
+                                curly_level += 1
+                            elif char == '}':
+                                curly_level -= 1
+                            elif char in {':'} and not curly_level:
+                                break_char_index = char_index
+                                break
+
+                        expression_chunk = mid_expr
+                        if break_char_index != -1:
+                            expression_chunk = mid_expr[:break_char_index+1]
+
+                        if encoding is not None:
+                            buffer = BytesIO(expression_chunk.encode()).readline
+                        else:
+                            buffer = StringIO(expression_chunk).readline
+                        for t in _tokenize(buffer, encoding, fstring_mode=True):
+                            yield TokenInfo(
+                                type=t.type,
+                                string=t.string,
+                                start=(t.start[0] - 1 + end[0], t.start[1] + end[1]),
+                                end=(t.end[0] - 1 + end[0], t.end[1] + end[1]),
+                                line=line
+                            )
+
+                            end = t.end[0] - 1 + end[0], t.end[1] + end[1]
+
+                        if break_char_index != -1:
+                            formatting_chunk = mid_expr[break_char_index+1:]
+                            for t in _tokenize_fstring_middle(
+                                middle=formatting_chunk,
+                                start=end[1],
+                                line_number=line_number,
+                                line=line,
+                                encoding=encoding):
+
+                                yield t
+                                end = t.end
+
+                        yield TokenInfo(
+                            type=OP,
+                            string='}',
+                            start=end,
+                            end=(line_number, start + 1),
+                            line=line)
+
+                        mid_expr = ''
+                        end = line_number, start + 1
+                    else:
+                        mid_expr += c
             case '\n':
                 if mid_expr:
                     mid_expr += c
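
The restructured case '}' separates two situations the old code conflated: while scanning literal text (mid_expr is empty), a doubled }} collapses to a single } in the FSTRING_MIDDLE value; while scanning an expression, a } either closes it or, if curly brackets are still open, stays part of the nested expression. A stripped-down sketch of just the brace-escaping rule (a hypothetical helper, not code from this PR):

# Collapse doubled braces the way the comment in the diff describes:
# the first brace of a '}}' or '{{' pair is skipped, the second is kept.
def unescape_fstring_middle(text: str) -> str:
    out = []
    i = 0
    while i < len(text):
        if text[i] in '{}' and i + 1 < len(text) and text[i + 1] == text[i]:
            i += 1  # skip the first brace of the pair
        out.append(text[i])
        i += 1
    return ''.join(out)

assert unescape_fstring_middle('a}}b{{c') == 'a}b{c'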
@@ -785,9 +785,9 @@ def _tokenize_fstring_middle(middle, start, line_number, line, encoding):
                 type=FSTRING_MIDDLE,
                 string=mid_token,
                 start=end,
-                end=(line_number, end[1] + len(mid_token)),
+                end=(line_number, start),
                 line=line)
-            end = line_number, end[1] + len(mid_token)
+            end = line_number, start

     if curly_brackets:
         lnum, pos = curly_brackets.pop()
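
The end-position fix above pairs with the test changes in test_tokenize.py: once escapes can collapse two source characters into one character of mid_token, len(mid_token) no longer measures the source span, so the end column is taken from the scan offset start instead. A tiny illustration of the mismatch (hypothetical values, not from the PR):

# '}}c' occupies three source columns but yields the two-character
# token string '}c', so a length-based end column lands one short.
source_span = '}}c'
mid_token = '}c'
assert len(mid_token) == len(source_span) - 1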
