Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit3d593ef

Browse files
committed
Use Lib/token.py and Lib/tokenize.py as the source of tokens
1 parent7e3beaf commit3d593ef

File tree

4 files changed

+49
-92
lines changed

4 files changed

+49
-92
lines changed

‎Parser/pgen/__main__.py‎

Lines changed: 10 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,12 @@
1-
importos
2-
importsys
31
importargparse
4-
importcollections
5-
6-
fromlib2to3.pgen2importgrammar,tokenize
7-
8-
from .importtoken
9-
from .importgrammaraspgen_grammar
10-
11-
defmonkey_patch_pgen2(token_lines):
12-
tokens=dict(token.generate_tokens(token_lines))
13-
forname,valueintokens.items():
14-
setattr(tokenize,name,value)
152

163
from .pgenimportParserGenerator
174

18-
19-
defmain(grammar_file,tokens_file,gramminit_h_file,gramminit_c_file,verbose):
20-
withopen(tokens_file)astok_file:
21-
token_lines=tok_file.readlines()
22-
23-
monkey_patch_pgen2(token_lines)
24-
25-
p=ParserGenerator(grammar_file,token_lines,verbose=verbose)
26-
grammar=p.make_grammar()
27-
grammar.produce_graminit_h(gramminit_h_file.write)
28-
grammar.produce_graminit_c(gramminit_c_file.write)
29-
30-
31-
if__name__=="__main__":
5+
defmain():
326
parser=argparse.ArgumentParser(description="Parser generator main program.")
337
parser.add_argument(
348
"grammar",type=str,help="The file with the grammar definition in EBNF format"
359
)
36-
parser.add_argument(
37-
"tokens",type=str,help="The file with the token definition"
38-
)
3910
parser.add_argument(
4011
"gramminit_h",
4112
type=argparse.FileType('w'),
@@ -48,4 +19,12 @@ def main(grammar_file, tokens_file, gramminit_h_file, gramminit_c_file, verbose)
4819
)
4920
parser.add_argument("--verbose","-v",action="count")
5021
args=parser.parse_args()
51-
main(args.grammar,args.tokens,args.gramminit_h,args.gramminit_c,args.verbose)
22+
23+
p=ParserGenerator(args.grammar,verbose=args.verbose)
24+
grammar=p.make_grammar()
25+
grammar.produce_graminit_h(args.gramminit_h.write)
26+
grammar.produce_graminit_c(args.gramminit_c.write)
27+
28+
29+
if__name__=="__main__":
30+
main()

‎Parser/pgen/grammar.py‎

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,5 @@
11
fromlib2to3.pgen2importgrammar
22

3-
from .importtoken
4-
5-
63
classGrammar(grammar.Grammar):
74

85
defproduce_graminit_h(self,writer):

‎Parser/pgen/pgen.py‎

Lines changed: 39 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,38 @@
1+
importos
2+
importsys
13
importcollections
2-
fromlib2to3.pgen2importtokenize
4+
importimportlib.machinery
35

4-
from .importtoken,grammar
6+
# Use Lib/token.py and Lib/tokenize.py to obtain the tokens. To keep this
7+
# compatible with older versions of Python, we need to make sure that we only
8+
# import these two files (and not any of the dependencies of these files).
9+
10+
CURRENT_FOLDER_LOCATION=os.path.dirname(os.path.realpath(__file__))
11+
LIB_LOCATION=os.path.realpath(os.path.join(CURRENT_FOLDER_LOCATION,'..','..','Lib'))
12+
TOKEN_LOCATION=os.path.join(LIB_LOCATION,'token.py')
13+
TOKENIZE_LOCATION=os.path.join(LIB_LOCATION,'tokenize.py')
14+
15+
token=importlib.machinery.SourceFileLoader('token',
16+
TOKEN_LOCATION).load_module()
17+
# Add token to the module cache so tokenize.py uses that exact one instead of
18+
# the one in the stdlib of the interpreter executing this file.
19+
sys.modules['token']=token
20+
tokenize=importlib.machinery.SourceFileLoader('tokenize',
21+
TOKENIZE_LOCATION).load_module()
22+
23+
from .importgrammar
524

625
classParserGenerator(object):
726

8-
def__init__(self,filename,tokens,stream=None,verbose=False):
27+
def__init__(self,filename,stream=None,verbose=False):
928
close_stream=None
1029
ifstreamisNone:
1130
stream=open(filename)
1231
close_stream=stream.close
13-
self.tokens=dict(token.generate_tokens(tokens))
14-
self.opmap=dict(token.generate_opmap(tokens))
32+
self.tokens=token
33+
self.opmap=token.EXACT_TOKEN_TYPES
34+
# Manually add <> so it does not collide with !=
35+
self.opmap['<>']=self.tokens.NOTEQUAL
1536
self.verbose=verbose
1637
self.filename=filename
1738
self.stream=stream
@@ -87,9 +108,9 @@ def make_label(self, c, label):
87108
returnilabel
88109
else:
89110
# A named token (NAME, NUMBER, STRING)
90-
itoken=self.tokens.get(label,None)
111+
itoken=getattr(self.tokens,label,None)
91112
assertisinstance(itoken,int),label
92-
assertitokeninself.tokens.values(),label
113+
assertitokeninself.tokens.tok_name,label
93114
ifitokeninc.tokens:
94115
returnc.tokens[itoken]
95116
else:
@@ -105,12 +126,12 @@ def make_label(self, c, label):
105126
ifvalueinc.keywords:
106127
returnc.keywords[value]
107128
else:
108-
c.labels.append((self.tokens['NAME'],value))
129+
c.labels.append((self.tokens.NAME,value))
109130
c.keywords[value]=ilabel
110131
returnilabel
111132
else:
112133
# An operator (any non-numeric token)
113-
itoken=self.tokens[self.opmap[value]]# Fails if unknown token
134+
itoken=self.opmap[value]# Fails if unknown token
114135
ifitokeninc.tokens:
115136
returnc.tokens[itoken]
116137
else:
@@ -163,16 +184,16 @@ def parse(self):
163184
dfas=collections.OrderedDict()
164185
startsymbol=None
165186
# MSTART: (NEWLINE | RULE)* ENDMARKER
166-
whileself.type!=self.tokens['ENDMARKER']:
167-
whileself.type==self.tokens['NEWLINE']:
187+
whileself.type!=self.tokens.ENDMARKER:
188+
whileself.type==self.tokens.NEWLINE:
168189
self.gettoken()
169190
# RULE: NAME ':' RHS NEWLINE
170-
name=self.expect(self.tokens['NAME'])
191+
name=self.expect(self.tokens.NAME)
171192
ifself.verbose:
172193
print("Processing rule {dfa_name}".format(dfa_name=name))
173-
self.expect(self.tokens['OP'],":")
194+
self.expect(self.tokens.OP,":")
174195
a,z=self.parse_rhs()
175-
self.expect(self.tokens['NEWLINE'])
196+
self.expect(self.tokens.NEWLINE)
176197
ifself.verbose:
177198
self.dump_nfa(name,a,z)
178199
dfa=self.make_dfa(a,z)
@@ -288,7 +309,7 @@ def parse_alt(self):
288309
# ALT: ITEM+
289310
a,b=self.parse_item()
290311
while (self.valuein ("(","[")or
291-
self.typein (self.tokens['NAME'],self.tokens['STRING'])):
312+
self.typein (self.tokens.NAME,self.tokens.STRING)):
292313
c,d=self.parse_item()
293314
b.addarc(c)
294315
b=d
@@ -299,7 +320,7 @@ def parse_item(self):
299320
ifself.value=="[":
300321
self.gettoken()
301322
a,z=self.parse_rhs()
302-
self.expect(self.tokens['OP'],"]")
323+
self.expect(self.tokens.OP,"]")
303324
a.addarc(z)
304325
returna,z
305326
else:
@@ -319,9 +340,9 @@ def parse_atom(self):
319340
ifself.value=="(":
320341
self.gettoken()
321342
a,z=self.parse_rhs()
322-
self.expect(self.tokens['OP'],")")
343+
self.expect(self.tokens.OP,")")
323344
returna,z
324-
elifself.typein (self.tokens['NAME'],self.tokens['STRING']):
345+
elifself.typein (self.tokens.NAME,self.tokens.STRING):
325346
a=NFAState()
326347
z=NFAState()
327348
a.addarc(z,self.value)

‎Parser/pgen/token.py‎

Lines changed: 0 additions & 40 deletions
This file was deleted.

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp