ext/ply/ply/lex.py:
ext/ply/ply/yacc.py:
ext/ply/CHANGES:
ext/ply/README:
ext/ply/TODO:
ext/ply/doc/ply.html:
ext/ply/example/ansic/clex.py:
ext/ply/example/ansic/cparse.py:
ext/ply/example/calc/calc.py:
ext/ply/example/hedit/hedit.py:
ext/ply/example/optcalc/calc.py:
ext/ply/test/README:
ext/ply/test/calclex.py:
ext/ply/test/lex_doc1.exp:
ext/ply/test/lex_doc1.py:
ext/ply/test/lex_dup1.exp:
ext/ply/test/lex_dup1.py:
ext/ply/test/lex_dup2.exp:
ext/ply/test/lex_dup2.py:
ext/ply/test/lex_dup3.exp:
ext/ply/test/lex_dup3.py:
ext/ply/test/lex_empty.py:
ext/ply/test/lex_error1.py:
ext/ply/test/lex_error2.py:
ext/ply/test/lex_error3.exp:
ext/ply/test/lex_error3.py:
ext/ply/test/lex_error4.exp:
ext/ply/test/lex_error4.py:
ext/ply/test/lex_hedit.exp:
ext/ply/test/lex_hedit.py:
ext/ply/test/lex_ignore.exp:
ext/ply/test/lex_ignore.py:
ext/ply/test/lex_re1.exp:
ext/ply/test/lex_re1.py:
ext/ply/test/lex_rule1.py:
ext/ply/test/lex_token1.py:
ext/ply/test/lex_token2.py:
ext/ply/test/lex_token3.py:
ext/ply/test/lex_token4.py:
ext/ply/test/lex_token5.exp:
ext/ply/test/lex_token5.py:
ext/ply/test/yacc_badargs.exp:
ext/ply/test/yacc_badargs.py:
ext/ply/test/yacc_badprec.exp:
ext/ply/test/yacc_badprec.py:
ext/ply/test/yacc_badprec2.exp:
ext/ply/test/yacc_badprec2.py:
ext/ply/test/yacc_badrule.exp:
ext/ply/test/yacc_badrule.py:
ext/ply/test/yacc_badtok.exp:
ext/ply/test/yacc_badtok.py:
ext/ply/test/yacc_dup.exp:
ext/ply/test/yacc_dup.py:
ext/ply/test/yacc_error1.exp:
ext/ply/test/yacc_error1.py:
ext/ply/test/yacc_error2.exp:
ext/ply/test/yacc_error2.py:
ext/ply/test/yacc_error3.exp:
ext/ply/test/yacc_error3.py:
ext/ply/test/yacc_inf.exp:
ext/ply/test/yacc_inf.py:
ext/ply/test/yacc_missing1.exp:
ext/ply/test/yacc_missing1.py:
ext/ply/test/yacc_nodoc.exp:
ext/ply/test/yacc_nodoc.py:
ext/ply/test/yacc_noerror.exp:
ext/ply/test/yacc_noerror.py:
ext/ply/test/yacc_nop.exp:
ext/ply/test/yacc_nop.py:
ext/ply/test/yacc_notfunc.exp:
ext/ply/test/yacc_notfunc.py:
ext/ply/test/yacc_notok.exp:
ext/ply/test/yacc_notok.py:
ext/ply/test/yacc_rr.exp:
ext/ply/test/yacc_rr.py:
ext/ply/test/yacc_simple.exp:
ext/ply/test/yacc_simple.py:
ext/ply/test/yacc_sr.exp:
ext/ply/test/yacc_sr.py:
ext/ply/test/yacc_term1.exp:
ext/ply/test/yacc_term1.py:
ext/ply/test/yacc_unused.exp:
ext/ply/test/yacc_unused.py:
ext/ply/test/yacc_uprec.exp:
ext/ply/test/yacc_uprec.py:
Import patch ply.diff
src/arch/isa_parser.py:
everything is now within the ply package
--HG--
rename : ext/ply/lex.py => ext/ply/ply/lex.py
rename : ext/ply/yacc.py => ext/ply/ply/yacc.py
extra : convert_revision : fca8deabd5c095bdeabd52a1f236ae1404ef106e
113 lines
2.1 KiB
Python
# lexer for yacc-grammars
|
|
#
|
|
# Author: David Beazley (dave@dabeaz.com)
|
|
# Date : October 2, 2006
|
|
|
|
import sys
|
|
sys.path.append("../..")
|
|
|
|
from ply import *
|
|
|
|
# Token names produced by this lexer; consumed by the yacc-grammar parser
# built on top of it.
tokens = (
    'LITERAL','SECTION','TOKEN','LEFT','RIGHT','PREC','START','TYPE','NONASSOC','UNION','CODE',
    'ID','QLITERAL','NUMBER',
)

# Exclusive 'code' state: entered on '{' to scan embedded C action code
# without applying the normal grammar-token rules.
states = (('code','exclusive'),)

# Single-character tokens returned as themselves.
literals = [ ';', ',', '<', '>', '|',':' ]
# Spaces and tabs are skipped in the INITIAL state.
t_ignore = ' \t'

# yacc declaration keywords (simple string rules; PLY matches longest first).
t_TOKEN = r'%token'
t_LEFT = r'%left'
t_RIGHT = r'%right'
t_NONASSOC = r'%nonassoc'
t_PREC = r'%prec'
t_START = r'%start'
t_TYPE = r'%type'
t_UNION = r'%union'
# Identifiers, quoted literals (matching opening/closing quote), and numbers.
t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
t_QLITERAL = r'''(?P<quote>['"]).*?(?P=quote)'''
t_NUMBER = r'\d+'
|
def t_SECTION(t):
    r'%%'
    # A yacc file has the shape: declarations %% rules %% user code.
    # The first '%%' only records that the rules section has started; the
    # second '%%' captures everything after it as raw user code and jumps
    # the lex position to EOF so lexing stops there.
    if getattr(t.lexer, "lastsection", 0):
        t.value = t.lexer.lexdata[t.lexpos+2:]
        t.lexer.lexpos = len(t.lexer.lexdata)
    else:
        # Bug fix: this was 0, which is a no-op (getattr above already
        # defaults to 0) and made the capture branch unreachable.  Mark
        # that one '%%' has been seen so the next one captures the tail.
        t.lexer.lastsection = 1
    return t
|
# Comments
def t_ccomment(t):
    r'/\*(.|\n)*?\*/'
    # Discard C comments but keep the line count accurate.
    # Bug fix: this incremented t.lineno (the token's snapshot), which the
    # lexer never reads back, so line numbers drifted after multi-line
    # comments.  Update t.lexer.lineno like every other rule in this file.
    t.lexer.lineno += t.value.count('\n')
|
# C++-style line comments are matched and silently discarded.
t_ignore_cppcomment = r'//.*'
|
def t_LITERAL(t):
    r'%\{(.|\n)*?%\}'
    # A %{ ... %} literal block is passed through verbatim; keep the
    # lexer's line counter in step with any newlines it contains.
    embedded_newlines = t.value.count("\n")
    t.lexer.lineno += embedded_newlines
    return t
|
def t_NEWLINE(t):
    r'\n'
    # Count the line; the newline itself produces no token.
    t.lexer.lineno = t.lexer.lineno + 1
|
def t_code(t):
    r'\{'
    # An opening brace starts an embedded C action block: remember where it
    # begins, reset the brace-nesting depth, and switch to the exclusive
    # 'code' state until the matching '}' is found.
    lexer = t.lexer
    lexer.level = 1
    lexer.codestart = t.lexpos
    lexer.begin('code')
|
def t_code_ignore_string(t):
    r'\"([^\\\n]|(\\.))*?\"'
    # In the 'code' state, swallow string literals whole so braces inside
    # them do not disturb the nesting count.  No token is returned.
|
def t_code_ignore_char(t):
    r'\'([^\\\n]|(\\.))*?\''
    # Same as the string rule, for character literals: braces inside
    # '...' must not affect brace nesting.  No token is returned.
|
def t_code_ignore_comment(t):
    r'/\*(.|\n)*?\*/'
    # Swallow C comments inside the code block so braces within them are
    # not counted.  No token is returned.
|
def t_code_ignore_cppcom(t):
    r'//.*'
    # Swallow C++ line comments inside the code block.  No token is returned.
|
def t_code_lbrace(t):
    r'\{'
    # One level deeper into nested braces inside the code block.
    t.lexer.level = t.lexer.level + 1
|
def t_code_rbrace(t):
    r'\}'
    # Pop one brace level; only the brace that closes the outermost '{'
    # emits a token.
    lexer = t.lexer
    lexer.level -= 1
    if lexer.level != 0:
        return
    # Back at depth 0: hand the whole '{ ... }' span to the parser as a
    # single CODE token and resume normal grammar lexing.
    t.type = 'CODE'
    t.value = lexer.lexdata[lexer.codestart:t.lexpos + 1]
    lexer.begin('INITIAL')
    lexer.lineno += t.value.count('\n')
    return t
|
# Inside the 'code' state, runs of ordinary characters and whitespace are
# consumed without producing tokens; only braces, quotes, and comments need
# special handling above.
t_code_ignore_nonspace = r'[^\s\}\'\"\{]+'
t_code_ignore_whitespace = r'\s+'
# No additional implicitly-ignored characters in the 'code' state.
t_code_ignore = ""
|
def t_code_error(t):
    # Should be unreachable: the 'code' state rules above cover every
    # possible character.  Fail loudly if that invariant is broken.
    raise RuntimeError
|
def t_error(t):
    # Report an illegal character with its line number, echo the remaining
    # input for context, then skip one character to resynchronize the lexer.
    # (Python 2 print statements, matching the rest of this file.)
    print "%d: Illegal character '%s'" % (t.lineno, t.value[0])
    print t.value
    t.lexer.skip(1)
|
# Build the lexer from the rules defined in this module (lex comes from
# `from ply import *` above).
lex.lex()

if __name__ == '__main__':
    # Standalone debugging aid: tokenize input and dump the token stream.
    lex.runmain()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|