Diffstat (limited to 'components/script/dom/bindings/codegen/ply')
-rw-r--r--  components/script/dom/bindings/codegen/ply/ANNOUNCE | 40
-rw-r--r--  components/script/dom/bindings/codegen/ply/CHANGES | 1394
-rw-r--r--  components/script/dom/bindings/codegen/ply/MANIFEST.in | 8
-rw-r--r--  components/script/dom/bindings/codegen/ply/PKG-INFO | 22
-rw-r--r--  components/script/dom/bindings/codegen/ply/README.md | 273
-rw-r--r--  components/script/dom/bindings/codegen/ply/TODO | 16
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/README | 79
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/basic.py | 65
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/basiclex.py | 61
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/basiclog.py | 73
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/basinterp.py | 496
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/basparse.py | 474
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/dim.bas | 14
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/func.bas | 5
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/gcd.bas | 22
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/gosub.bas | 13
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/hello.bas | 4
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/linear.bas | 17
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/maxsin.bas | 12
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/powers.bas | 13
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/rand.bas | 4
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/sales.bas | 20
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/sears.bas | 18
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/sqrt1.bas | 5
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/BASIC/sqrt2.bas | 4
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/GardenSnake/GardenSnake.py | 777
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/GardenSnake/README | 5
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/README | 10
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/ansic/README | 2
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/ansic/clex.py | 168
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/ansic/cparse.py | 1048
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/calc/calc.py | 123
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/calcdebug/calc.py | 129
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/calceof/calc.py | 132
-rwxr-xr-x  components/script/dom/bindings/codegen/ply/example/classcalc/calc.py | 165
-rwxr-xr-x  components/script/dom/bindings/codegen/ply/example/cleanup.sh | 2
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/closurecalc/calc.py | 132
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/hedit/hedit.py | 48
-rwxr-xr-x  components/script/dom/bindings/codegen/ply/example/newclasscalc/calc.py | 167
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/optcalc/README | 9
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/optcalc/calc.py | 134
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/unicalc/calc.py | 133
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/yply/README | 41
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/yply/ylex.py | 119
-rw-r--r--  components/script/dom/bindings/codegen/ply/example/yply/yparse.py | 244
-rwxr-xr-x  components/script/dom/bindings/codegen/ply/example/yply/yply.py | 51
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply.egg-info/PKG-INFO | 22
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply.egg-info/SOURCES.txt | 172
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply.egg-info/dependency_links.txt | 1
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply.egg-info/top_level.txt | 1
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/__init__.py | 5
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/cpp.py | 918
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/ctokens.py | 133
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/lex.py | 1100
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/yacc.py | 3494
-rw-r--r--  components/script/dom/bindings/codegen/ply/ply/ygen.py | 74
-rw-r--r--  components/script/dom/bindings/codegen/ply/setup.cfg | 11
-rw-r--r--  components/script/dom/bindings/codegen/ply/setup.py | 31
58 files changed, 0 insertions, 12753 deletions
diff --git a/components/script/dom/bindings/codegen/ply/ANNOUNCE b/components/script/dom/bindings/codegen/ply/ANNOUNCE
deleted file mode 100644
index c430051cf45..00000000000
--- a/components/script/dom/bindings/codegen/ply/ANNOUNCE
+++ /dev/null
@@ -1,40 +0,0 @@
-January 31, 2017
-
- Announcing : PLY-3.10 (Python Lex-Yacc)
-
- http://www.dabeaz.com/ply
-
-I'm pleased to announce PLY-3.10--a pure Python implementation of the
-common parsing tools lex and yacc. PLY-3.10 is a minor bug fix
-release. It supports both Python 2 and Python 3.
-
-If you are new to PLY, here are a few highlights:
-
-- PLY is closely modeled after traditional lex/yacc. If you know how
- to use these or similar tools in other languages, you will find
- PLY to be comparable.
-
-- PLY provides very extensive error reporting and diagnostic
- information to assist in parser construction. The original
- implementation was developed for instructional purposes. As
- a result, the system tries to identify the most common types
- of errors made by novice users.
-
-- PLY provides full support for empty productions, error recovery,
- precedence rules, and ambiguous grammars.
-
-- Parsing is based on LR-parsing which is fast, memory efficient,
- better suited to large grammars, and which has a number of nice
- properties when dealing with syntax errors and other parsing
- problems. Currently, PLY can build its parsing tables using
- either SLR or LALR(1) algorithms.
-
-More information about PLY can be obtained on the PLY webpage at:
-
- http://www.dabeaz.com/ply
-
-PLY is freely available.
-
-Cheers,
-
-David Beazley (http://www.dabeaz.com)
\ No newline at end of file
diff --git a/components/script/dom/bindings/codegen/ply/CHANGES b/components/script/dom/bindings/codegen/ply/CHANGES
deleted file mode 100644
index 815c23184e4..00000000000
--- a/components/script/dom/bindings/codegen/ply/CHANGES
+++ /dev/null
@@ -1,1394 +0,0 @@
-Version 3.10
----------------------
-01/31/17: beazley
- Changed grammar signature computation to not involve hashing
- functions. Parts are just combined into a big string.
-
-10/07/16: beazley
- Fixed Issue #101: Incorrect shift-reduce conflict resolution with
- precedence specifier.
-
- PLY was incorrectly resolving shift-reduce conflicts in certain
- cases. For example, in the example/calc/calc.py example, you
- could trigger it doing this:
-
- calc > -3 - 4
- 1 (correct answer should be -7)
- calc >
-
- Issue and suggested patch contributed by https://github.com/RomaVis
-
-Version 3.9
----------------------
-08/30/16: beazley
- Exposed the parser state number as the parser.state attribute
- in productions and error functions. For example:
-
- def p_somerule(p):
- '''
- rule : A B C
- '''
- print('State:', p.parser.state)
-
- May address issue #65 (publish current state in error callback).
-
-08/30/16: beazley
- Fixed Issue #88. Python3 compatibility with ply/cpp.
-
-08/30/16: beazley
- Fixed Issue #93. Ply can crash if SyntaxError is raised inside
- a production. Not actually sure if the original implementation
- worked as documented at all. Yacc has been modified to follow
- the spec as outlined in the CHANGES noted for 11/27/07 below.
-
-08/30/16: beazley
- Fixed Issue #97. Failure with code validation when the original
- source files aren't present. Validation step now ignores
- the missing file.
-
-08/30/16: beazley
- Minor fixes to version numbers.
-
-Version 3.8
----------------------
-10/02/15: beazley
- Fixed issues related to Python 3.5. Patch contributed by Barry Warsaw.
-
-Version 3.7
----------------------
-08/25/15: beazley
- Fixed problems when reading table files from pickled data.
-
-05/07/15: beazley
- Fixed regression in handling of table modules if specified as module
- objects. See https://github.com/dabeaz/ply/issues/63
-
-Version 3.6
----------------------
-04/25/15: beazley
- If PLY is unable to create the 'parser.out' or 'parsetab.py' files due
- to permission issues, it now just issues a warning message and
- continues to operate. This could happen if a module using PLY
- is installed in a funny way where tables have to be regenerated, but
- for whatever reason, the user doesn't have write permission on
- the directory where PLY wants to put them.
-
-04/24/15: beazley
- Fixed some issues related to use of packages and table file
- modules. Just to emphasize, PLY now generates its special
- files such as 'parsetab.py' and 'lextab.py' in the *SAME*
- directory as the source file that uses lex() and yacc().
-
- If for some reason, you want to change the name of the table
- module, use the tabmodule and lextab options:
-
- lexer = lex.lex(lextab='spamlextab')
- parser = yacc.yacc(tabmodule='spamparsetab')
-
- If you specify a simple name as shown, the module will still be
- created in the same directory as the file invoking lex() or yacc().
- If you want the table files to be placed into a different package,
- then give a fully qualified package name. For example:
-
- lexer = lex.lex(lextab='pkgname.files.lextab')
- parser = yacc.yacc(tabmodule='pkgname.files.parsetab')
-
- For this to work, 'pkgname.files' must already exist as a valid
- Python package (i.e., the directories must already exist and be
- set up with the proper __init__.py files, etc.).
-
-Version 3.5
----------------------
-04/21/15: beazley
- Added support for defaulted_states in the parser. A
- defaulted_state is a state where the only legal action is a
- reduction of a single grammar rule across all valid input
- tokens. For such states, the rule is reduced and the
- reading of the next lookahead token is delayed until it is
- actually needed at a later point in time.
-
- This delay in consuming the next lookahead token is a
- potentially important feature in advanced parsing
- applications that require tight interaction between the
- lexer and the parser. For example, a grammar rule might
- modify the lexer state upon reduction and have such changes
- take effect before the next input token is read.
-
- *** POTENTIAL INCOMPATIBILITY ***
- One potential danger of defaulted_states is that syntax
- errors might be deferred to a later point of processing
- than where they were detected in past versions of PLY.
- Thus, it's possible that your error handling could change
- slightly on the same inputs. defaulted_states do not change
- the overall parsing of the input (i.e., the same grammar is
- accepted).
-
- If for some reason, you need to disable defaulted states,
- you can do this:
-
- parser = yacc.yacc()
- parser.defaulted_states = {}
-
-04/21/15: beazley
- Fixed debug logging in the parser. It wasn't properly reporting goto states
- on grammar rule reductions.
-
-04/20/15: beazley
- Added the ability for actions to be defined for character literals (Issue #32). For example:
-
- literals = [ '{', '}' ]
-
- def t_lbrace(t):
- r'\{'
- # Some action
- t.type = '{'
- return t
-
- def t_rbrace(t):
- r'\}'
- # Some action
- t.type = '}'
- return t
-
-04/19/15: beazley
- Import of the 'parsetab.py' file is now constrained to only consider the
- directory specified by the outputdir argument to yacc(). If not supplied,
- the import will only consider the directory in which the grammar is defined.
- This should greatly reduce problems with the wrong parsetab.py file being
- imported by mistake. For example, if it's found somewhere else on the path
- by accident.
-
- *** POTENTIAL INCOMPATIBILITY *** It's possible that this might break some
- packaging/deployment setup if PLY was instructed to place its parsetab.py
- in a different location. You'll have to specify a proper outputdir= argument
- to yacc() to fix this if needed.
-
-04/19/15: beazley
- Changed default output directory to be the same as that in which the
- yacc grammar is defined. If your grammar is in a file 'calc.py',
- then the parsetab.py and parser.out files should be generated in the
- same directory as that file. The destination directory can be changed
- using the outputdir= argument to yacc().
-
-04/19/15: beazley
- Changed the parsetab.py file signature slightly so that the parsetab won't
- regenerate if created on a different major version of Python (ie., a
- parsetab created on Python 2 will work with Python 3).
-
-04/16/15: beazley
- Fixed Issue #44 call_errorfunc() should return the result of errorfunc()
-
-04/16/15: beazley
- Support for versions of Python <2.7 is officially dropped. PLY may work, but
- the unit tests require Python 2.7 or newer.
-
-04/16/15: beazley
- Fixed bug related to calling yacc(start=...). PLY wasn't regenerating the
- table file correctly for this case.
-
-04/16/15: beazley
- Added skipped tests for PyPy and Java. Related to use of Python's -O option.
-
-05/29/13: beazley
- Added filter to make unit tests pass under 'python -3'.
- Reported by Neil Muller.
-
-05/29/13: beazley
- Fixed CPP_INTEGER regex in ply/cpp.py (Issue 21).
- Reported by @vbraun.
-
-05/29/13: beazley
- Fixed yacc validation bugs when from __future__ import unicode_literals
- is being used. Reported by Kenn Knowles.
-
-05/29/13: beazley
- Added support for Travis-CI. Contributed by Kenn Knowles.
-
-05/29/13: beazley
- Added a .gitignore file. Suggested by Kenn Knowles.
-
-05/29/13: beazley
- Fixed validation problems for source files that include a
- different source code encoding specifier. Fix relies on
- the inspect module. Should work on Python 2.6 and newer.
- Not sure about older versions of Python.
- Contributed by Michael Droettboom
-
-05/21/13: beazley
- Fixed unit tests for yacc to eliminate random failures due to dict hash value
- randomization in Python 3.3
- Reported by Arfrever
-
-10/15/12: beazley
- Fixed comment whitespace processing bugs in ply/cpp.py.
- Reported by Alexei Pososin.
-
-10/15/12: beazley
- Fixed token names in ply/ctokens.py to match rule names.
- Reported by Alexei Pososin.
-
-04/26/12: beazley
- Changes to functions available in panic mode error recovery. In previous versions
- of PLY, the following global functions were available for use in the p_error() rule:
-
- yacc.errok() # Reset error state
- yacc.token() # Get the next token
- yacc.restart() # Reset the parsing stack
-
- The use of global variables was problematic for code involving multiple parsers
- and frankly was a poor design overall. These functions have been moved to methods
- of the parser instance created by the yacc() function. You should write code like
- this:
-
- def p_error(p):
- ...
- parser.errok()
-
- parser = yacc.yacc()
-
- *** POTENTIAL INCOMPATIBILITY *** The original global functions now issue a
- DeprecationWarning.
-
-04/19/12: beazley
- Fixed some problems with line and position tracking and the use of error
- symbols. If you have a grammar rule involving an error rule like this:
-
- def p_assignment_bad(p):
- '''assignment : location EQUALS error SEMI'''
- ...
-
- You can now do line and position tracking on the error token. For example:
-
- def p_assignment_bad(p):
- '''assignment : location EQUALS error SEMI'''
- start_line = p.lineno(3)
- start_pos = p.lexpos(3)
-
- If the tracking=True option is supplied to parse(), you can additionally get
- spans:
-
- def p_assignment_bad(p):
- '''assignment : location EQUALS error SEMI'''
- start_line, end_line = p.linespan(3)
- start_pos, end_pos = p.lexspan(3)
-
- Note that error handling is still a hairy thing in PLY. This won't work
- unless your lexer is providing accurate information. Please report bugs.
- Suggested by a bug reported by Davis Herring.
-
-04/18/12: beazley
- Change to doc string handling in lex module. Regex patterns are now first
- pulled from a function's .regex attribute. If that doesn't exist, then
- .doc is checked as a fallback. The @TOKEN decorator now sets the .regex
- attribute of a function instead of its doc string.
- Change suggested by Kristoffer Ellersgaard Koch.
-
-04/18/12: beazley
- Fixed issue #1: Fixed _tabversion. It should use __tabversion__ instead of __version__
- Reported by Daniele Tricoli
-
-04/18/12: beazley
- Fixed issue #8: Literals empty list causes IndexError
- Reported by Walter Nissen.
-
-04/18/12: beazley
- Fixed issue #12: Typo in code snippet in documentation
- Reported by florianschanda.
-
-04/18/12: beazley
- Fixed issue #10: Correctly escape t_XOREQUAL pattern.
- Reported by Andy Kittner.
-
-Version 3.4
----------------------
-02/17/11: beazley
- Minor patch to make cpp.py compatible with Python 3. Note: This
- is an experimental file not currently used by the rest of PLY.
-
-02/17/11: beazley
- Fixed setup.py trove classifiers to properly list PLY as
- Python 3 compatible.
-
-01/02/11: beazley
- Migration of repository to github.
-
-Version 3.3
------------------------------
-08/25/09: beazley
- Fixed issue 15 related to the set_lineno() method in yacc. Reported by
- mdsherry.
-
-08/25/09: beazley
- Fixed a bug related to regular expression compilation flags not being
- properly stored in lextab.py files created by the lexer when running
- in optimize mode. Reported by Bruce Frederiksen.
-
-
-Version 3.2
------------------------------
-03/24/09: beazley
- Added an extra check to not print duplicated warning messages
- about reduce/reduce conflicts.
-
-03/24/09: beazley
- Switched PLY over to a BSD-license.
-
-03/23/09: beazley
- Performance optimization. Discovered a few places to make
- speedups in LR table generation.
-
-03/23/09: beazley
- New warning message. PLY now warns about rules never
- reduced due to reduce/reduce conflicts. Suggested by
- Bruce Frederiksen.
-
-03/23/09: beazley
- Some clean-up of warning messages related to reduce/reduce errors.
-
-03/23/09: beazley
- Added a new picklefile option to yacc() to write the parsing
- tables to a filename using the pickle module. Here is how
- it works:
-
- yacc(picklefile="parsetab.p")
-
- This option can be used if the normal parsetab.py file is
- extremely large. For example, on jython, it is impossible
- to read parsing tables if the parsetab.py exceeds a certain
- threshold.
-
- The filename supplied to the picklefile option is opened
- relative to the current working directory of the Python
- interpreter. If you need to refer to the file elsewhere,
- you will need to supply an absolute or relative path.
-
- For maximum portability, the pickle file is written
- using protocol 0.
-
-03/13/09: beazley
- Fixed a bug in parser.out generation where the rule numbers
- were off by one.
-
-03/13/09: beazley
- Fixed a string formatting bug with one of the error messages.
- Reported by Richard Reitmeyer
-
-Version 3.1
------------------------------
-02/28/09: beazley
- Fixed broken start argument to yacc(). PLY-3.0 broke this
- feature by accident.
-
-02/28/09: beazley
- Fixed debugging output. yacc() no longer reports shift/reduce
- or reduce/reduce conflicts if debugging is turned off. This
- restores similar behavior in PLY-2.5. Reported by Andrew Waters.
-
-Version 3.0
------------------------------
-02/03/09: beazley
- Fixed missing lexer attribute on certain tokens when
- invoking the parser p_error() function. Reported by
- Bart Whiteley.
-
-02/02/09: beazley
- The lex() command now does all error-reporting and diagnostics
- using the logging module interface. Pass in a Logger object
- using the errorlog parameter to specify a different logger.
-
-02/02/09: beazley
- Refactored ply.lex to use a more object-oriented and organized
- approach to collecting lexer information.
-
-02/01/09: beazley
- Removed the nowarn option from lex(). All output is controlled
- by passing in a logger object. Just pass in a logger with a high
- level setting to suppress output. This argument was never
- documented to begin with so hopefully no one was relying upon it.
-
-02/01/09: beazley
- Discovered and removed a dead if-statement in the lexer. This
- resulted in a 6-7% speedup in lexing when I tested it.
-
-01/13/09: beazley
- Minor change to the procedure for signalling a syntax error in a
- production rule. A normal SyntaxError exception should be raised
- instead of yacc.SyntaxError.
-
-01/13/09: beazley
- Added a new method p.set_lineno(n,lineno) that can be used to set the
- line number of symbol n in grammar rules. This simplifies manual
- tracking of line numbers.
-
-01/11/09: beazley
- Vastly improved debugging support for yacc.parse(). Instead of passing
- debug as an integer, you can supply a Logging object (see the logging
- module). Messages will be generated at the ERROR, INFO, and DEBUG
- logging levels, each level providing progressively more information.
- The debugging trace also shows states, grammar rule, values passed
- into grammar rules, and the result of each reduction.
-
-01/09/09: beazley
- The yacc() command now does all error-reporting and diagnostics using
- the interface of the logging module. Use the errorlog parameter to
- specify a logging object for error messages. Use the debuglog parameter
- to specify a logging object for the 'parser.out' output.
-
-01/09/09: beazley
- *HUGE* refactoring of the ply.yacc() implementation. The high-level
- user interface is backwards compatible, but the internals are completely
- reorganized into classes. No more global variables. The internals
- are also more extensible. For example, you can use the classes to
- construct a LALR(1) parser in an entirely different manner than
- what is currently the case. Documentation is forthcoming.
-
-01/07/09: beazley
- Various cleanup and refactoring of yacc internals.
-
-01/06/09: beazley
- Fixed a bug with precedence assignment. yacc was assigning the precedence
- of each rule based on the left-most token, when in fact, it should have been
- using the right-most token. Reported by Bruce Frederiksen.
-
-11/27/08: beazley
- Numerous changes to support Python 3.0 including removal of deprecated
- statements (e.g., has_key) and the addition of compatibility code
- to emulate features from Python 2 that have been removed, but which
- are needed. Fixed the unit testing suite to work with Python 3.0.
- The code should be backwards compatible with Python 2.
-
-11/26/08: beazley
- Loosened the rules on what kind of objects can be passed in as the
- "module" parameter to lex() and yacc(). Previously, you could only use
- a module or an instance. Now, PLY just uses dir() to get a list of
- symbols on whatever the object is without regard for its type.
-
-11/26/08: beazley
- Changed all except: statements to be compatible with Python2.x/3.x syntax.
-
-11/26/08: beazley
- Changed all raise Exception, value statements to raise Exception(value) for
- forward compatibility.
-
-11/26/08: beazley
- Removed all print statements from lex and yacc, using sys.stdout and sys.stderr
- directly. Preparation for Python 3.0 support.
-
-11/04/08: beazley
- Fixed a bug with referring to symbols on the parsing stack using negative
- indices.
-
-05/29/08: beazley
- Completely revamped the testing system to use the unittest module for everything.
- Added additional tests to cover new errors/warnings.
-
-Version 2.5
------------------------------
-05/28/08: beazley
- Fixed a bug with writing lex-tables in optimized mode and start states.
- Reported by Kevin Henry.
-
-Version 2.4
------------------------------
-05/04/08: beazley
- A version number is now embedded in the table file signature so that
- yacc can more gracefully accommodate changes to the output format
- in the future.
-
-05/04/08: beazley
- Removed undocumented .pushback() method on grammar productions. I'm
- not sure this ever worked and can't recall ever using it. Might have
- been an abandoned idea that never really got fleshed out. This
- feature was never described or tested so removing it is hopefully
- harmless.
-
-05/04/08: beazley
- Added extra error checking to yacc() to detect precedence rules defined
- for undefined terminal symbols. This allows yacc() to detect a potential
- problem that can be really tricky to debug if no warning message or error
- message is generated about it.
-
-05/04/08: beazley
- lex() now has an outputdir option that can specify the output directory for
- tables when running in optimize mode. For example:
-
- lexer = lex.lex(optimize=True, lextab="ltab", outputdir="foo/bar")
-
- The behavior of specifying a table module and output directory are
- more aligned with the behavior of yacc().
-
-05/04/08: beazley
- [Issue 9]
- Fixed filename bug when specifying the modulename in lex() and yacc().
- If you specified options such as the following:
-
- parser = yacc.yacc(tabmodule="foo.bar.parsetab",outputdir="foo/bar")
-
- yacc would create a file "foo.bar.parsetab.py" in the given directory.
- Now, it simply generates a file "parsetab.py" in that directory.
- Bug reported by cptbinho.
-
-05/04/08: beazley
- Slight modification to lex() and yacc() to allow their table files
- to be loaded from a previously loaded module. This might make
- it easier to load the parsing tables from a complicated package
- structure. For example:
-
- import foo.bar.spam.parsetab as parsetab
- parser = yacc.yacc(tabmodule=parsetab)
-
- Note: lex and yacc will never regenerate the table file if used
- in this form---you will get a warning message instead.
- This idea suggested by Brian Clapper.
-
-
-04/28/08: beazley
- Fixed a bug with p_error() functions not being picked up correctly
- when running in yacc(optimize=1) mode. Patch contributed by
- Bart Whiteley.
-
-02/28/08: beazley
- Fixed a bug with 'nonassoc' precedence rules. Basically the
- 'nonassoc' precedence was being ignored and not producing the correct
- run-time behavior in the parser.
-
-02/16/08: beazley
- Slight relaxation of what the input() method to a lexer will
- accept as a string. Instead of testing the input to see
- if the input is a string or unicode string, it checks to see
- if the input object looks like it contains string data.
- This change makes it possible to pass string-like objects
- in as input. For example, the object returned by mmap.
-
- import mmap, os
- data = mmap.mmap(os.open(filename,os.O_RDONLY),
- os.path.getsize(filename),
- access=mmap.ACCESS_READ)
- lexer.input(data)
-
-
-11/29/07: beazley
- Modification of ply.lex to allow token functions to be aliased.
- This is subtle, but it makes it easier to create libraries and
- to reuse token specifications. For example, suppose you defined
- a function like this:
-
- def number(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
- This change would allow you to define a token rule as follows:
-
- t_NUMBER = number
-
- In this case, the token type will be set to 'NUMBER' and use
- the associated number() function to process tokens.
-
-11/28/07: beazley
- Slight modification to lex and yacc to grab symbols from both
- the local and global dictionaries of the caller. This
- modification allows lexers and parsers to be defined using
- inner functions and closures.
-
-11/28/07: beazley
- Performance optimization: The lexer.lexmatch and t.lexer
- attributes are no longer set for lexer tokens that are not
- defined by functions. The only normal use of these attributes
- would be in lexer rules that need to perform some kind of
- special processing. Thus, it doesn't make any sense to set
- them on every token.
-
- *** POTENTIAL INCOMPATIBILITY *** This might break code
- that is mucking around with internal lexer state in some
- sort of magical way.
-
-11/27/07: beazley
- Added the ability to put the parser into error-handling mode
- from within a normal production. To do this, simply raise
- a yacc.SyntaxError exception like this:
-
- def p_some_production(p):
- 'some_production : prod1 prod2'
- ...
- raise yacc.SyntaxError # Signal an error
-
- A number of things happen after this occurs:
-
- - The last symbol shifted onto the symbol stack is discarded
- and parser state backed up to what it was before the
- rule reduction.
-
- - The current lookahead symbol is saved and replaced by
- the 'error' symbol.
-
- - The parser enters error recovery mode where it tries
- to either reduce the 'error' rule or it starts
- discarding items off of the stack until the parser
- resets.
-
- When an error is manually set, the parser does *not* call
- the p_error() function (if any is defined).
- *** NEW FEATURE *** Suggested on the mailing list
-
-11/27/07: beazley
- Fixed structure bug in examples/ansic. Reported by Dion Blazakis.
-
-11/27/07: beazley
- Fixed a bug in the lexer related to start conditions and ignored
- token rules. If a rule was defined that changed state, but
- returned no token, the lexer could be left in an inconsistent
- state. Reported by
-
-11/27/07: beazley
- Modified setup.py to support Python Eggs. Patch contributed by
- Simon Cross.
-
-11/09/07: beazley
- Fixed a bug in error handling in yacc. If a syntax error occurred and the
- parser rolled the entire parse stack back, the parser would be left in an
- inconsistent state that would cause it to trigger incorrect actions on
- subsequent input. Reported by Ton Biegstraaten, Justin King, and others.
-
-11/09/07: beazley
- Fixed a bug when passing empty input strings to yacc.parse(). This
- would result in an error message about "No input given". Reported
- by Andrew Dalke.
-
-Version 2.3
------------------------------
-02/20/07: beazley
- Fixed a bug with character literals if the literal '.' appeared as the
- last symbol of a grammar rule. Reported by Ales Smrcka.
-
-02/19/07: beazley
- Warning messages are now redirected to stderr instead of being printed
- to standard output.
-
-02/19/07: beazley
- Added a warning message to lex.py if it detects a literal backslash
- character inside the t_ignore declaration. This is to help with
- problems that might occur if someone accidentally defines t_ignore
- as a Python raw string. For example:
-
- t_ignore = r' \t'
-
- The idea for this is from an email I received from David Cimimi who
- reported bizarre behavior in lexing as a result of defining t_ignore
- as a raw string by accident.
-
-02/18/07: beazley
- Performance improvements. Made some changes to the internal
- table organization and LR parser to improve parsing performance.
-
-02/18/07: beazley
- Automatic tracking of line number and position information must now be
- enabled by a special flag to parse(). For example:
-
- yacc.parse(data,tracking=True)
-
- In many applications, it's just not that important to have the
- parser automatically track all line numbers. By making this an
- optional feature, it allows the parser to run significantly faster
- (more than a 20% speed increase in many cases). Note: positional
- information is always available for raw tokens---this change only
- applies to positional information associated with nonterminal
- grammar symbols.
- *** POTENTIAL INCOMPATIBILITY ***
-
-02/18/07: beazley
- Yacc no longer supports extended slices of grammar productions.
- However, it does support regular slices. For example:
-
- def p_foo(p):
- '''foo: a b c d e'''
- p[0] = p[1:3]
-
- This change is a performance improvement to the parser--it streamlines
- normal access to the grammar values since slices are now handled in
- a __getslice__() method as opposed to __getitem__().
-
-02/12/07: beazley
- Fixed a bug in the handling of token names when combined with
- start conditions. Bug reported by Todd O'Bryan.
-
-Version 2.2
-------------------------------
-11/01/06: beazley
- Added lexpos() and lexspan() methods to grammar symbols. These
- mirror the same functionality of lineno() and linespan(). For
- example:
-
- def p_expr(p):
- 'expr : expr PLUS expr'
- p.lexpos(1) # Lexing position of left-hand-expression
- p.lexpos(2) # Lexing position of PLUS
- start,end = p.lexspan(3) # Lexing range of right hand expression
-
-11/01/06: beazley
- Minor change to error handling. The recommended way to skip characters
- in the input is to use t.lexer.skip() as shown here:
-
- def t_error(t):
- print "Illegal character '%s'" % t.value[0]
- t.lexer.skip(1)
-
- The old approach of just using t.skip(1) will still work, but won't
- be documented.
-
-10/31/06: beazley
- Discarded tokens can now be specified as simple strings instead of
- functions. To do this, simply include the text "ignore_" in the
- token declaration. For example:
-
- t_ignore_cppcomment = r'//.*'
-
- Previously, this had to be done with a function. For example:
-
- def t_ignore_cppcomment(t):
- r'//.*'
- pass
-
- If start conditions/states are being used, state names should appear
- before the "ignore_" text.
-
-10/19/06: beazley
- The Lex module now provides support for flex-style start conditions
- as described at http://www.gnu.org/software/flex/manual/html_chapter/flex_11.html.
- Please refer to this document to understand this change note. Refer to
- the PLY documentation for PLY-specific explanation of how this works.
-
- To use start conditions, you first need to declare a set of states in
- your lexer file:
-
- states = (
- ('foo','exclusive'),
- ('bar','inclusive')
- )
-
- This serves the same role as the %s and %x specifiers in flex.
-
- Once a state has been declared, tokens for that state can be
- declared by defining rules of the form t_state_TOK. For example:
-
- t_PLUS = '\+' # Rule defined in INITIAL state
- t_foo_NUM = '\d+' # Rule defined in foo state
- t_bar_NUM = '\d+' # Rule defined in bar state
-
- t_foo_bar_NUM = '\d+' # Rule defined in both foo and bar
- t_ANY_NUM = '\d+' # Rule defined in all states
-
- In addition to defining tokens for each state, the t_ignore and t_error
- specifications can be customized for specific states. For example:
-
- t_foo_ignore = " " # Ignored characters for foo state
- def t_bar_error(t):
- # Handle errors in bar state
-
- Within token rules, the following methods can be used to change states:
-
- def t_TOKNAME(t):
- t.lexer.begin('foo') # Begin state 'foo'
- t.lexer.push_state('foo') # Begin state 'foo', push old state
- # onto a stack
- t.lexer.pop_state() # Restore previous state
- t.lexer.current_state() # Returns name of current state
-
- These methods mirror the BEGIN(), yy_push_state(), yy_pop_state(), and
- yy_top_state() functions in flex.
-
- Start states can be used as one way to write sub-lexers.
- For example, the lexer or parser might instruct the lexer to start
- generating a different set of tokens depending on the context.
-
- example/yply/ylex.py shows the use of start states to grab C/C++
- code fragments out of traditional yacc specification files.
-
- *** NEW FEATURE *** Suggested by Daniel Larraz with whom I also
- discussed various aspects of the design.
-
-10/19/06: beazley
- Minor change to the way in which yacc.py was reporting shift/reduce
- conflicts. Although the underlying LALR(1) algorithm was correct,
- PLY was under-reporting the number of conflicts compared to yacc/bison
- when precedence rules were in effect. This change should make PLY
- report the same number of conflicts as yacc.
-
-10/19/06: beazley
- Modified yacc so that grammar rules could also include the '-'
- character. For example:
-
- def p_expr_list(p):
- 'expression-list : expression-list expression'
-
- Suggested by Oldrich Jedlicka.
-
-10/18/06: beazley
- Attribute lexer.lexmatch added so that token rules can access the re
- match object that was generated. For example:
-
- def t_FOO(t):
- r'some regex'
- m = t.lexer.lexmatch
- # Do something with m
-
-
- This may be useful if you want to access named groups specified within
- the regex for a specific token. Suggested by Oldrich Jedlicka.
-
-10/16/06: beazley
- Changed the error message that results if an illegal character
- is encountered and no default error function is defined in lex.
- The exception is now more informative about the actual cause of
- the error.
-
-Version 2.1
-------------------------------
-10/02/06: beazley
- The last Lexer object built by lex() can be found in lex.lexer.
- The last Parser object built by yacc() can be found in yacc.parser.
-
-10/02/06: beazley
- New example added: examples/yply
-
- This example uses PLY to convert Unix-yacc specification files to
- PLY programs with the same grammar. This may be useful if you
- want to convert a grammar from bison/yacc to use with PLY.
-
-10/02/06: beazley
- Added support for a start symbol to be specified in the yacc
- input file itself. Just do this:
-
- start = 'name'
-
- where 'name' matches some grammar rule. For example:
-
- def p_name(p):
- 'name : A B C'
- ...
-
- This mirrors the functionality of the yacc %start specifier.
-
-09/30/06: beazley
- Some new examples added:
-
- examples/GardenSnake : A simple indentation based language similar
- to Python. Shows how you might handle
- whitespace. Contributed by Andrew Dalke.
-
- examples/BASIC : An implementation of 1964 Dartmouth BASIC.
- Contributed by Dave against his better
- judgement.
-
-09/28/06: beazley
- Minor patch to allow named groups to be used in lex regular
- expression rules. For example:
-
- t_QSTRING = r'''(?P<quote>['"]).*?(?P=quote)'''
-
- Patch submitted by Adam Ring.
-
-09/28/06: beazley
- LALR(1) is now the default parsing method. To use SLR, use
- yacc.yacc(method="SLR"). Note: there is no performance impact
- on parsing when using LALR(1) instead of SLR. However, constructing
- the parsing tables will take a little longer.
-
-09/26/06: beazley
- Change to line number tracking. To modify line numbers, modify
- the line number of the lexer itself. For example:
-
- def t_NEWLINE(t):
- r'\n'
- t.lexer.lineno += 1
-
- This modification is both cleanup and a performance optimization.
- In past versions, lex was monitoring every token for changes in
- the line number. This extra processing is unnecessary for a vast
- majority of tokens. Thus, this new approach cleans it up a bit.
-
- *** POTENTIAL INCOMPATIBILITY ***
- You will need to change code in your lexer that updates the line
- number. For example, "t.lineno += 1" becomes "t.lexer.lineno += 1"
-
-09/26/06: beazley
- Added the lexing position to tokens as an attribute lexpos. This
- is the raw index into the input text at which a token appears.
- This information can be used to compute column numbers and other
- details (e.g., scan backwards from lexpos to the first newline
- to get a column position).
-
-09/25/06: beazley
- Changed the name of the __copy__() method on the Lexer class
- to clone(). This is used to clone a Lexer object (e.g., if
- you're running different lexers at the same time).
-
-09/21/06: beazley
- Limitations related to the use of the re module have been eliminated.
- Several users reported problems with regular expressions exceeding
- more than 100 named groups. To solve this, lex.py is now capable
- of automatically splitting its master regular expression into
- smaller expressions as needed. This should, in theory, make it
- possible to specify an arbitrarily large number of tokens.
-
-09/21/06: beazley
- Improved error checking in lex.py. Rules that match the empty string
- are now rejected (otherwise they cause the lexer to enter an infinite
- loop). An extra check for rules containing '#' has also been added.
- Since lex compiles regular expressions in verbose mode and '#' is interpreted
- as a regex comment, it is critical to use '\#' instead.
-
-09/18/06: beazley
- Added a @TOKEN decorator function to lex.py that can be used to
- define token rules where the documentation string might be computed
- in some way.
-
- digit = r'([0-9])'
- nondigit = r'([_A-Za-z])'
- identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
-
- from ply.lex import TOKEN
-
- @TOKEN(identifier)
- def t_ID(t):
- # Do whatever
-
- The @TOKEN decorator merely sets the documentation string of the
- associated token function as needed for lex to work.
-
- Note: An alternative solution is the following:
-
- def t_ID(t):
- # Do whatever
-
- t_ID.__doc__ = identifier
-
- Note: Decorators require the use of Python 2.4 or later. If compatibility
- with old versions is needed, use the latter solution.
-
- The need for this feature was suggested by Cem Karan.
-
-09/14/06: beazley
- Support for single-character literal tokens has been added to yacc.
- These literals must be enclosed in quotes. For example:
-
- def p_expr(p):
- "expr : expr '+' expr"
- ...
-
- def p_expr(p):
- 'expr : expr "-" expr'
- ...
-
- In addition to this, it is necessary to tell the lexer module about
- literal characters. This is done by defining the variable 'literals'
- as a list of characters. This should be defined in the module that
- invokes the lex.lex() function. For example:
-
- literals = ['+','-','*','/','(',')','=']
-
- or simply
-
- literals = '+=*/()='
-
- It is important to note that literals can only be a single character.
- When the lexer fails to match a token using its normal regular expression
- rules, it will check the current character against the literal list.
- If found, it will be returned with a token type set to match the literal
- character. Otherwise, an illegal character will be signalled.
-
-
-09/14/06: beazley
- Modified PLY to install itself as a proper Python package called 'ply'.
- This will make it a little more friendly to other modules. This
- changes the usage of PLY only slightly. Just do this to import the
- modules
-
- import ply.lex as lex
- import ply.yacc as yacc
-
- Alternatively, you can do this:
-
- from ply import *
-
- Which imports both the lex and yacc modules.
- Change suggested by Lee June.
-
-09/13/06: beazley
- Changed the handling of negative indices when used in production rules.
- A negative production index now accesses already parsed symbols on the
- parsing stack. For example,
-
- def p_foo(p):
- "foo: A B C D"
- print p[1] # Value of 'A' symbol
- print p[2] # Value of 'B' symbol
- print p[-1] # Value of whatever symbol appears before A
- # on the parsing stack.
-
- p[0] = some_val # Sets the value of the 'foo' grammar symbol
-
- This behavior makes it easier to work with embedded actions within the
- parsing rules. For example, in C-yacc, it is possible to write code like
- this:
-
- bar: A { printf("seen an A = %d\n", $1); } B { do_stuff; }
-
- In this example, the printf() code executes immediately after A has been
- parsed. Within the embedded action code, $1 refers to the A symbol on
- the stack.
-
- To perform this equivalent action in PLY, you need to write a pair
- of rules like this:
-
- def p_bar(p):
- "bar : A seen_A B"
- do_stuff
-
- def p_seen_A(p):
- "seen_A :"
- print "seen an A =", p[-1]
-
- The second rule "seen_A" is merely an empty production which should be
- reduced as soon as A is parsed in the "bar" rule above. The use
- of the negative index p[-1] is used to access whatever symbol appeared
- before the seen_A symbol.
-
- This feature also makes it possible to support inherited attributes.
- For example:
-
- def p_decl(p):
- "decl : scope name"
-
- def p_scope(p):
- """scope : GLOBAL
- | LOCAL"""
- p[0] = p[1]
-
- def p_name(p):
- "name : ID"
- if p[-1] == "GLOBAL":
- # ...
- elif p[-1] == "LOCAL":
- #...
-
- In this case, the name rule is inheriting an attribute from the
- scope declaration that precedes it.
-
- *** POTENTIAL INCOMPATIBILITY ***
- If you are currently using negative indices within existing grammar rules,
- your code will break. This should be extremely rare, if not non-existent, in
- most cases. The argument to various grammar rules is usually not
- processed in the same way as a list of items.
-
-Version 2.0
-------------------------------
-09/07/06: beazley
- Major cleanup and refactoring of the LR table generation code. Both SLR
- and LALR(1) table generation is now performed by the same code base with
- only minor extensions for extra LALR(1) processing.
-
-09/07/06: beazley
- Completely reimplemented the entire LALR(1) parsing engine to use the
- DeRemer and Pennello algorithm for calculating lookahead sets. This
- significantly improves the performance of generating LALR(1) tables
- and has the added feature of actually working correctly! If you
- experienced weird behavior with LALR(1) in prior releases, this should
- hopefully resolve all of those problems. Many thanks to
- Andrew Waters and Markus Schoepflin for submitting bug reports
- and helping me test out the revised LALR(1) support.
-
-Version 1.8
-------------------------------
-08/02/06: beazley
- Fixed a problem related to the handling of default actions in LALR(1)
- parsing. If you experienced subtle and/or bizarre behavior when trying
- to use the LALR(1) engine, this may correct those problems. Patch
- contributed by Russ Cox. Note: This patch has been superseded by
- revisions for LALR(1) parsing in Ply-2.0.
-
-08/02/06: beazley
- Added support for slicing of productions in yacc.
- Patch contributed by Patrick Mezard.
-
-Version 1.7
-------------------------------
-03/02/06: beazley
- Fixed infinite recursion problem in the ReduceToTerminals() function that
- would sometimes come up in LALR(1) table generation. Reported by
- Markus Schoepflin.
-
-03/01/06: beazley
- Added "reflags" argument to lex(). For example:
-
- lex.lex(reflags=re.UNICODE)
-
- This can be used to specify optional flags to the re.compile() function
- used inside the lexer. This may be necessary for special situations such
- as processing Unicode (e.g., if you want escapes like \w and \b to consult
- the Unicode character property database). The need for this suggested by
- Andreas Jung.
-
-03/01/06: beazley
- Fixed a bug with an uninitialized variable on repeated instantiations of parser
- objects when the write_tables=0 argument was used. Reported by Michael Brown.
-
-03/01/06: beazley
- Modified lex.py to accept Unicode strings both as the regular expressions for
- tokens and as input. Hopefully this is the only change needed for Unicode support.
- Patch contributed by Johan Dahl.
-
-03/01/06: beazley
- Modified the class-based interface to work with new-style or old-style classes.
- Patch contributed by Michael Brown (although I tweaked it slightly so it would work
- with older versions of Python).
-
-Version 1.6
-------------------------------
-05/27/05: beazley
- Incorporated patch contributed by Christopher Stawarz to fix an extremely
- devious bug in LALR(1) parser generation. This patch should fix problems
- numerous people reported with LALR parsing.
-
-05/27/05: beazley
- Fixed problem with lex.py copy constructor. Reported by Dave Aitel, Aaron Lav,
- and Thad Austin.
-
-05/27/05: beazley
- Added outputdir option to yacc() to control output directory. Contributed
- by Christopher Stawarz.
-
-05/27/05: beazley
- Added rununit.py test script to run tests using the Python unittest module.
- Contributed by Miki Tebeka.
-
-Version 1.5
-------------------------------
-05/26/04: beazley
- Major enhancement. LALR(1) parsing support is now working.
- This feature was implemented by Elias Ioup (ezioup@alumni.uchicago.edu)
- and optimized by David Beazley. To use LALR(1) parsing do
- the following:
-
- yacc.yacc(method="LALR")
-
- Computing LALR(1) parsing tables takes about twice as long as
- the default SLR method. However, LALR(1) allows you to handle
- more complex grammars. For example, the ANSI C grammar
- (in example/ansic) has 13 shift-reduce conflicts with SLR, but
- only has 1 shift-reduce conflict with LALR(1).
-
-05/20/04: beazley
- Added a __len__ method to parser production lists. Can
- be used in parser rules like this:
-
- def p_somerule(p):
- """a : B C D
- | E F"
- if (len(p) == 3):
- # Must have been first rule
- elif (len(p) == 2):
- # Must be second rule
-
- Suggested by Joshua Gerth and others.
-
-Version 1.4
-------------------------------
-04/23/04: beazley
- Incorporated a variety of patches contributed by Eric Raymond.
- These include:
-
- 0. Cleans up some comments so they don't wrap on an 80-column display.
- 1. Directs compiler errors to stderr where they belong.
- 2. Implements and documents automatic line counting when \n is ignored.
- 3. Changes the way progress messages are dumped when debugging is on.
- The new format is both less verbose and conveys more information than
- the old, including shift and reduce actions.
-
-04/23/04: beazley
- Added a Python setup.py file to simplify installation. Contributed
- by Adam Kerrison.
-
-04/23/04: beazley
- Added patches contributed by Adam Kerrison.
-
- - Some output is now only shown when debugging is enabled. This
- means that PLY will be completely silent when not in debugging mode.
-
- - An optional parameter "write_tables" can be passed to yacc() to
- control whether or not parsing tables are written. By default,
- it is true, but it can be turned off if you don't want the yacc
- table file. Note: disabling this will cause yacc() to regenerate
- the parsing table each time.
-
-04/23/04: beazley
- Added patches contributed by David McNab. This patch adds two
- features:
-
- - The parser can be supplied as a class instead of a module.
- For an example of this, see the example/classcalc directory.
-
- - Debugging output can be directed to a filename of the user's
- choice. Use
-
- yacc(debugfile="somefile.out")
-
-
-Version 1.3
-------------------------------
-12/10/02: jmdyck
- Various minor adjustments to the code that Dave checked in today.
- Updated test/yacc_{inf,unused}.exp to reflect today's changes.
-
-12/10/02: beazley
- Incorporated a variety of minor bug fixes to empty production
- handling and infinite recursion checking. Contributed by
- Michael Dyck.
-
-12/10/02: beazley
- Removed bogus recover() method call in yacc.restart()
-
-Version 1.2
-------------------------------
-11/27/02: beazley
- Lexer and parser objects are now available as an attribute
- of tokens and slices respectively. For example:
-
- def t_NUMBER(t):
- r'\d+'
- print t.lexer
-
- def p_expr_plus(t):
- 'expr: expr PLUS expr'
- print t.lexer
- print t.parser
-
- This can be used for state management (if needed).
-
-10/31/02: beazley
- Modified yacc.py to work with Python optimize mode. To make
- this work, you need to use
-
- yacc.yacc(optimize=1)
-
- Furthermore, you need to first run Python in normal mode
- to generate the necessary parsetab.py files. After that,
- you can use python -O or python -OO.
-
- Note: optimized mode turns off a lot of error checking.
- Only use when you are sure that your grammar is working.
- Make sure parsetab.py is up to date!
-
-10/30/02: beazley
- Added cloning of Lexer objects. For example:
-
- import copy
- l = lex.lex()
- lc = copy.copy(l)
-
- l.input("Some text")
- lc.input("Some other text")
- ...
-
- This might be useful if the same "lexer" is meant to
- be used in different contexts---or if multiple lexers
- are running concurrently.
-
-10/30/02: beazley
- Fixed subtle bug with first set computation and empty productions.
- Patch submitted by Michael Dyck.
-
-10/30/02: beazley
- Fixed error messages to use "filename:line: message" instead
- of "filename:line. message". This makes error reporting more
- friendly to emacs. Patch submitted by François Pinard.
-
-10/30/02: beazley
- Improvements to parser.out file. Terminals and nonterminals
- are sorted instead of being printed in random order.
- Patch submitted by François Pinard.
-
-10/30/02: beazley
- Improvements to parser.out file output. Rules are now printed
- in a way that's easier to understand. Contributed by Russ Cox.
-
-10/30/02: beazley
- Added 'nonassoc' associativity support. This can be used
- to disable the chaining of operators like a < b < c.
- To use, simply specify 'nonassoc' in the precedence table
-
- precedence = (
- ('nonassoc', 'LESSTHAN', 'GREATERTHAN'), # Nonassociative operators
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('right', 'UMINUS'), # Unary minus operator
- )
-
- Patch contributed by Russ Cox.
-
-10/30/02: beazley
- Modified the lexer to provide optional support for Python -O and -OO
- modes. To make this work, Python *first* needs to be run in
- unoptimized mode. This reads the lexing information and creates a
- file "lextab.py". Then, run lex like this:
-
- # module foo.py
- ...
- ...
- lex.lex(optimize=1)
-
- Once the lextab file has been created, subsequent calls to
- lex.lex() will read data from the lextab file instead of using
- introspection. In optimized mode (-O, -OO) everything should
- work normally despite the loss of doc strings.
-
- To change the name of the file 'lextab.py' use the following:
-
- lex.lex(lextab="footab")
-
- (this creates a file footab.py)
-
-
-Version 1.1 October 25, 2001
-------------------------------
-
-10/25/01: beazley
- Modified the table generator to produce much more compact data.
- This should greatly reduce the size of the parsetab.py[c] file.
- Caveat: the tables still need to be constructed so a little more
- work is done in parsetab on import.
-
-10/25/01: beazley
- There may be a possible bug in the cycle detector that reports errors
- about infinite recursion. I'm having a little trouble tracking it
- down, but if you get this problem, you can disable the cycle
- detector as follows:
-
- yacc.yacc(check_recursion = 0)
-
-10/25/01: beazley
- Fixed a bug in lex.py that sometimes caused illegal characters to be
- reported incorrectly. Reported by Sverre Jørgensen.
-
-7/8/01 : beazley
- Added a reference to the underlying lexer object when tokens are handled by
- functions. The lexer is available as the 'lexer' attribute. This
- was added to provide better lexing support for languages such as Fortran
- where certain types of tokens can't be conveniently expressed as regular
- expressions (and where the tokenizing function may want to perform a
- little backtracking). Suggested by Pearu Peterson.
-
-6/20/01 : beazley
- Modified yacc() function so that an optional starting symbol can be specified.
- For example:
-
- yacc.yacc(start="statement")
-
- Normally yacc always treats the first production rule as the starting symbol.
- However, if you are debugging your grammar it may be useful to specify
- an alternative starting symbol. Idea suggested by Rich Salz.
-
-Version 1.0 June 18, 2001
---------------------------
-Initial public offering
-
diff --git a/components/script/dom/bindings/codegen/ply/MANIFEST.in b/components/script/dom/bindings/codegen/ply/MANIFEST.in
deleted file mode 100644
index 0d37431b0b4..00000000000
--- a/components/script/dom/bindings/codegen/ply/MANIFEST.in
+++ /dev/null
@@ -1,8 +0,0 @@
-recursive-include example *
-recursive-include doc *
-recursive-include test *
-include ANNOUNCE
-include README.md
-include CHANGES
-include TODO
-global-exclude *.pyc
diff --git a/components/script/dom/bindings/codegen/ply/PKG-INFO b/components/script/dom/bindings/codegen/ply/PKG-INFO
deleted file mode 100644
index 6eedf425953..00000000000
--- a/components/script/dom/bindings/codegen/ply/PKG-INFO
+++ /dev/null
@@ -1,22 +0,0 @@
-Metadata-Version: 1.1
-Name: ply
-Version: 3.10
-Summary: Python Lex & Yacc
-Home-page: http://www.dabeaz.com/ply/
-Author: David Beazley
-Author-email: dave@dabeaz.com
-License: BSD
-Description:
- PLY is yet another implementation of lex and yacc for Python. Some notable
- features include the fact that it's implemented entirely in Python and it
- uses LALR(1) parsing which is efficient and well suited for larger grammars.
-
- PLY provides most of the standard lex/yacc features including support for empty
- productions, precedence rules, error recovery, and support for ambiguous grammars.
-
- PLY is extremely easy to use and provides very extensive error checking.
- It is compatible with both Python 2 and Python 3.
-
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 2
diff --git a/components/script/dom/bindings/codegen/ply/README.md b/components/script/dom/bindings/codegen/ply/README.md
deleted file mode 100644
index e428f1b14a8..00000000000
--- a/components/script/dom/bindings/codegen/ply/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-PLY (Python Lex-Yacc) Version 3.10
-
-Copyright (C) 2001-2017
-David M. Beazley (Dabeaz LLC)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-* Neither the name of the David Beazley or Dabeaz LLC may be used to
- endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Introduction
-============
-
-PLY is a 100% Python implementation of the common parsing tools lex
-and yacc. Here are a few highlights:
-
- - PLY is very closely modeled after traditional lex/yacc.
- If you know how to use these tools in C, you will find PLY
- to be similar.
-
- - PLY provides *very* extensive error reporting and diagnostic
- information to assist in parser construction. The original
- implementation was developed for instructional purposes. As
- a result, the system tries to identify the most common types
- of errors made by novice users.
-
- - PLY provides full support for empty productions, error recovery,
- precedence specifiers, and moderately ambiguous grammars.
-
- - Parsing is based on LR-parsing which is fast, memory efficient,
- better suited to large grammars, and which has a number of nice
- properties when dealing with syntax errors and other parsing problems.
- Currently, PLY builds its parsing tables using the LALR(1)
- algorithm used in yacc.
-
- - PLY uses Python introspection features to build lexers and parsers.
- This greatly simplifies the task of parser construction since it reduces
- the number of files and eliminates the need to run a separate lex/yacc
- tool before running your program.
-
- - PLY can be used to build parsers for "real" programming languages.
- Although it is not ultra-fast due to its Python implementation,
- PLY can be used to parse grammars consisting of several hundred
- rules (as might be found for a language like C). The lexer and LR
- parser are also reasonably efficient when parsing typically
- sized programs. People have used PLY to build parsers for
- C, C++, ADA, and other real programming languages.
-
-How to Use
-==========
-
-PLY consists of two files: lex.py and yacc.py. These are contained
-within the 'ply' directory which may also be used as a Python package.
-To use PLY, simply copy the 'ply' directory to your project and import
-lex and yacc from the associated 'ply' package. For example:
-
- import ply.lex as lex
- import ply.yacc as yacc
-
-Alternatively, you can copy just the files lex.py and yacc.py
-individually and use them as modules. For example:
-
- import lex
- import yacc
-
-The file setup.py can be used to install ply using distutils.
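-
-For example, from the unpacked source directory:
-
-    python setup.py install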
-
-The file doc/ply.html contains complete documentation on how to use
-the system.
-
-The example directory contains several different examples including a
-PLY specification for ANSI C as given in K&R 2nd Ed.
-
-A simple example is found at the end of this document.
-
-Requirements
-============
-PLY requires the use of Python 2.6 or greater. However, you should
-use the latest Python release if possible. It should work on just
-about any platform. PLY has been tested with both CPython and Jython.
-It also seems to work with IronPython.
-
-Resources
-=========
-More information about PLY can be obtained on the PLY webpage at:
-
- http://www.dabeaz.com/ply
-
-For a detailed overview of parsing theory, consult the excellent
-book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
-Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
-may also be useful.
-
-The GitHub page for PLY can be found at:
-
- https://github.com/dabeaz/ply
-
-An old and relatively inactive discussion group for PLY is found at:
-
- http://groups.google.com/group/ply-hack
-
-Acknowledgments
-===============
-A special thanks is in order for all of the students in CS326 who
-suffered through about 25 different versions of these tools :-).
-
-The CHANGES file acknowledges those who have contributed patches.
-
-Elias Ioup did the first implementation of LALR(1) parsing in PLY-1.x.
-Andrew Waters and Markus Schoepflin were instrumental in reporting bugs
-and testing a revised LALR(1) implementation for PLY-2.0.
-
-Special Note for PLY-3.0
-========================
-PLY-3.0 is the first PLY release to support Python 3. However, backwards
-compatibility with Python 2.6 is still preserved. PLY provides dual
-Python 2/3 compatibility by restricting its implementation to a common
-subset of basic language features. You should not convert PLY using
-2to3--it is not necessary and may in fact break the implementation.
-
-Example
-=======
-
-Here is a simple example showing a PLY implementation of a calculator
-with variables.
-
- # -----------------------------------------------------------------------------
- # calc.py
- #
- # A simple calculator with variables.
- # -----------------------------------------------------------------------------
-
- tokens = (
- 'NAME','NUMBER',
- 'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
- 'LPAREN','RPAREN',
- )
-
- # Tokens
-
- t_PLUS = r'\+'
- t_MINUS = r'-'
- t_TIMES = r'\*'
- t_DIVIDE = r'/'
- t_EQUALS = r'='
- t_LPAREN = r'\('
- t_RPAREN = r'\)'
- t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
- def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
- # Ignored characters
- t_ignore = " \t"
-
- def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
- def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
- # Build the lexer
- import ply.lex as lex
- lex.lex()
-
- # Precedence rules for the arithmetic operators
- precedence = (
- ('left','PLUS','MINUS'),
- ('left','TIMES','DIVIDE'),
- ('right','UMINUS'),
- )
-
- # dictionary of names (for storing variables)
- names = { }
-
- def p_statement_assign(p):
- 'statement : NAME EQUALS expression'
- names[p[1]] = p[3]
-
- def p_statement_expr(p):
- 'statement : expression'
- print(p[1])
-
- def p_expression_binop(p):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if p[2] == '+' : p[0] = p[1] + p[3]
- elif p[2] == '-': p[0] = p[1] - p[3]
- elif p[2] == '*': p[0] = p[1] * p[3]
- elif p[2] == '/': p[0] = p[1] / p[3]
-
- def p_expression_uminus(p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
- def p_expression_group(p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
- def p_expression_number(p):
- 'expression : NUMBER'
- p[0] = p[1]
-
- def p_expression_name(p):
- 'expression : NAME'
- try:
- p[0] = names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
- def p_error(p):
- print("Syntax error at '%s'" % p.value)
-
- import ply.yacc as yacc
- yacc.yacc()
-
- while True:
- try:
- s = raw_input('calc > ') # use input() on Python 3
- except EOFError:
- break
- yacc.parse(s)
-
-
-Bug Reports and Patches
-=======================
-My goal with PLY is to simply have a decent lex/yacc implementation
-for Python. As a general rule, I don't spend huge amounts of time
-working on it unless I receive very specific bug reports and/or
-patches to fix problems. I also try to incorporate submitted feature
-requests and enhancements into each new version. Please visit the PLY
-github page at https://github.com/dabeaz/ply to submit issues and pull
-requests. To contact me about bugs and/or new features, please send
-email to dave@dabeaz.com.
-
--- Dave
-
-
-
-
-
-
-
-
-
diff --git a/components/script/dom/bindings/codegen/ply/TODO b/components/script/dom/bindings/codegen/ply/TODO
deleted file mode 100644
index f4800aacf47..00000000000
--- a/components/script/dom/bindings/codegen/ply/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-The PLY to-do list:
-
-1. Finish writing the C Preprocessor module. Started in the
- file ply/cpp.py
-
-2. Create and document libraries of useful tokens.
-
-3. Expand the examples/yply tool that parses bison/yacc
- files.
-
-4. Think of various diabolical things to do with the
- new yacc internals. For example, it is now possible
- to specify grammars using completely different schemes
- than the reflection approach used by PLY.
-
-
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/README b/components/script/dom/bindings/codegen/ply/example/BASIC/README
deleted file mode 100644
index be24a3005e7..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/README
+++ /dev/null
@@ -1,79 +0,0 @@
-Inspired by a September 14, 2006 Salon article "Why Johnny Can't Code" by
-David Brin (http://www.salon.com/tech/feature/2006/09/14/basic/index.html),
-I thought that a fully working BASIC interpreter might be an interesting,
-if not questionable, PLY example. Uh, okay, so maybe it's just a bad idea,
-but in any case, here it is.
-
-In this example, you'll find a rough implementation of 1964 Dartmouth BASIC
-as described in the manual at:
-
- http://www.bitsavers.org/pdf/dartmouth/BASIC_Oct64.pdf
-
-See also:
-
- http://en.wikipedia.org/wiki/Dartmouth_BASIC
-
-This dialect is downright primitive---there are no string variables
-and no facilities for interactive input. Moreover, subroutines and functions
-are brain-dead even more than they usually are for BASIC. Of course,
-the GOTO statement is provided.
-
-Nevertheless, there are a few interesting aspects of this example:
-
- - It illustrates a fully working interpreter including lexing, parsing,
- and interpretation of instructions.
-
- - The parser shows how to catch and report various kinds of parsing
- errors in a more graceful way.
-
- - The example both parses files (supplied on command line) and
- interactive input entered line by line.
-
- - It shows how you might represent parsed information. In this case,
- each BASIC statement is encoded into a Python tuple containing the
- statement type and parameters. These tuples are then stored in
- a dictionary indexed by program line numbers. (A sketch of this
- encoding is shown just after this list.)
-
- - Even though it's just BASIC, the parser contains more than 80
- rules and 150 parsing states. Thus, it's a little more meaty than
- the calculator example.
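-
- As a rough sketch of the encoding mentioned above, the two-line program
-
-     10 PRINT "HELLO WORLD"
-     99 END
-
- is parsed into a dictionary along the lines of:
-
-     {10: ('PRINT', [('HELLO WORLD', None)], None),
-      99: ('END',)}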
-
-To use the example, run it as follows:
-
- % python basic.py hello.bas
- HELLO WORLD
- %
-
-or use it interactively:
-
- % python basic.py
- [BASIC] 10 PRINT "HELLO WORLD"
- [BASIC] 20 END
- [BASIC] RUN
- HELLO WORLD
- [BASIC]
-
-The following files are defined:
-
- basic.py - High level script that controls everything
- basiclex.py - BASIC tokenizer
- basparse.py - BASIC parser
- basinterp.py - BASIC interpreter that runs parsed programs.
-
-In addition, a number of sample BASIC programs (.bas suffix) are
-provided. These were taken out of the Dartmouth manual.
-
-Disclaimer: I haven't spent a ton of time testing this and it's likely that
-I've skimped here and there on a few finer details (e.g., strictly enforcing
-variable naming rules). However, the interpreter seems to be able to run
-the examples in the BASIC manual.
-
-Have fun!
-
--Dave
-
-
-
-
-
-
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/basic.py b/components/script/dom/bindings/codegen/ply/example/BASIC/basic.py
deleted file mode 100644
index 70ac9e7c740..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/basic.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-#
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-import basiclex
-import basparse
-import basinterp
-
-# If a filename has been specified, we try to run it.
-# If a runtime error occurs, we bail out and enter
-# interactive mode below
-if len(sys.argv) == 2:
- data = open(sys.argv[1]).read()
- prog = basparse.parse(data)
- if not prog:
- raise SystemExit
- b = basinterp.BasicInterpreter(prog)
- try:
- b.run()
- raise SystemExit
- except RuntimeError:
- pass
-
-else:
- b = basinterp.BasicInterpreter({})
-
-# Interactive mode. This incrementally adds/deletes statements
-# from the program stored in the BasicInterpreter object. In
-# addition, special commands 'NEW', 'LIST', and 'RUN' are added.
-# Specifying a line number with no code deletes that line from
-# the program.
-
-while 1:
- try:
- line = raw_input("[BASIC] ")
- except EOFError:
- raise SystemExit
- if not line:
- continue
- line += "\n"
- prog = basparse.parse(line)
- if not prog:
- continue
-
- keys = list(prog)
- if keys[0] > 0:
- b.add_statements(prog)
- else:
- stat = prog[keys[0]]
- if stat[0] == 'RUN':
- try:
- b.run()
- except RuntimeError:
- pass
- elif stat[0] == 'LIST':
- b.list()
- elif stat[0] == 'BLANK':
- b.del_line(stat[1])
- elif stat[0] == 'NEW':
- b.new()
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/basiclex.py b/components/script/dom/bindings/codegen/ply/example/BASIC/basiclex.py
deleted file mode 100644
index 4151f4c34fb..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/basiclex.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-
-from ply import *
-
-keywords = (
- 'LET', 'READ', 'DATA', 'PRINT', 'GOTO', 'IF', 'THEN', 'FOR', 'NEXT', 'TO', 'STEP',
- 'END', 'STOP', 'DEF', 'GOSUB', 'DIM', 'REM', 'RETURN', 'RUN', 'LIST', 'NEW',
-)
-
-tokens = keywords + (
- 'EQUALS', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'POWER',
- 'LPAREN', 'RPAREN', 'LT', 'LE', 'GT', 'GE', 'NE',
- 'COMMA', 'SEMI', 'INTEGER', 'FLOAT', 'STRING',
- 'ID', 'NEWLINE'
-)
-
-t_ignore = ' \t'
-
-
-def t_REM(t):
- r'REM .*'
- return t
-
-
-def t_ID(t):
- r'[A-Z][A-Z0-9]*'
- if t.value in keywords:
- t.type = t.value
- return t
-
-t_EQUALS = r'='
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_POWER = r'\^'
-t_DIVIDE = r'/'
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_LT = r'<'
-t_LE = r'<='
-t_GT = r'>'
-t_GE = r'>='
-t_NE = r'<>'
-t_COMMA = r'\,'
-t_SEMI = r';'
-t_INTEGER = r'\d+'
-t_FLOAT = r'((\d*\.\d+)(E[\+-]?\d+)?|([1-9]\d*E[\+-]?\d+))'
-t_STRING = r'\".*?\"'
-
-
-def t_NEWLINE(t):
- r'\n'
- t.lexer.lineno += 1
- return t
-
-
-def t_error(t):
- print("Illegal character %s" % t.value[0])
- t.lexer.skip(1)
-
-lex.lex(debug=0)
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/basiclog.py b/components/script/dom/bindings/codegen/ply/example/BASIC/basiclog.py
deleted file mode 100644
index 9dcc7feda69..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/basiclog.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-#
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-import logging
-logging.basicConfig(
- level=logging.INFO,
- filename="parselog.txt",
- filemode="w"
-)
-log = logging.getLogger()
-
-import basiclex
-import basparse
-import basinterp
-
-# If a filename has been specified, we try to run it.
-# If a runtime error occurs, we bail out and enter
-# interactive mode below
-if len(sys.argv) == 2:
- data = open(sys.argv[1]).read()
- prog = basparse.parse(data, debug=log)
- if not prog:
- raise SystemExit
- b = basinterp.BasicInterpreter(prog)
- try:
- b.run()
- raise SystemExit
- except RuntimeError:
- pass
-
-else:
- b = basinterp.BasicInterpreter({})
-
-# Interactive mode. This incrementally adds/deletes statements
-# from the program stored in the BasicInterpreter object. In
-# addition, special commands 'NEW', 'LIST', and 'RUN' are added.
-# Specifying a line number with no code deletes that line from
-# the program.
-
-while 1:
- try:
- line = raw_input("[BASIC] ")
- except EOFError:
- raise SystemExit
- if not line:
- continue
- line += "\n"
- prog = basparse.parse(line, debug=log)
- if not prog:
- continue
-
- keys = list(prog)
- if keys[0] > 0:
- b.add_statements(prog)
- else:
- stat = prog[keys[0]]
- if stat[0] == 'RUN':
- try:
- b.run()
- except RuntimeError:
- pass
- elif stat[0] == 'LIST':
- b.list()
- elif stat[0] == 'BLANK':
- b.del_line(stat[1])
- elif stat[0] == 'NEW':
- b.new()
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/basinterp.py b/components/script/dom/bindings/codegen/ply/example/BASIC/basinterp.py
deleted file mode 100644
index 67762c797bf..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/basinterp.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# This file provides the runtime support for running a BASIC program
-# Assumes the program has been parsed using basparse.py
-
-import sys
-import math
-import random
-
-
-class BasicInterpreter:
-
- # Initialize the interpreter. prog is a dictionary
- # containing (line,statement) mappings
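- # e.g. prog = {10: ('PRINT', [('HELLO WORLD', None)], None), 99: ('END',)}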
- def __init__(self, prog):
- self.prog = prog
-
- self.functions = { # Built-in function table
- 'SIN': lambda z: math.sin(self.eval(z)),
- 'COS': lambda z: math.cos(self.eval(z)),
- 'TAN': lambda z: math.tan(self.eval(z)),
- 'ATN': lambda z: math.atan(self.eval(z)),
- 'EXP': lambda z: math.exp(self.eval(z)),
- 'ABS': lambda z: abs(self.eval(z)),
- 'LOG': lambda z: math.log(self.eval(z)),
- 'SQR': lambda z: math.sqrt(self.eval(z)),
- 'INT': lambda z: int(self.eval(z)),
- 'RND': lambda z: random.random()
- }
-
- # Collect all data statements
- def collect_data(self):
- self.data = []
- for lineno in self.stat:
- if self.prog[lineno][0] == 'DATA':
- self.data = self.data + self.prog[lineno][1]
- self.dc = 0 # Initialize the data counter
-
- # Check for end statements
- def check_end(self):
- has_end = 0
- for lineno in self.stat:
- if self.prog[lineno][0] == 'END' and not has_end:
- has_end = lineno
- if not has_end:
- print("NO END INSTRUCTION")
- self.error = 1
- return
- if has_end != lineno:
- print("END IS NOT LAST")
- self.error = 1
-
- # Check loops
- def check_loops(self):
- for pc in range(len(self.stat)):
- lineno = self.stat[pc]
- if self.prog[lineno][0] == 'FOR':
- forinst = self.prog[lineno]
- loopvar = forinst[1]
- for i in range(pc + 1, len(self.stat)):
- if self.prog[self.stat[i]][0] == 'NEXT':
- nextvar = self.prog[self.stat[i]][1]
- if nextvar != loopvar:
- continue
- self.loopend[pc] = i
- break
- else:
- print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
- self.error = 1
-
- # Evaluate an expression
- def eval(self, expr):
- etype = expr[0]
- if etype == 'NUM':
- return expr[1]
- elif etype == 'GROUP':
- return self.eval(expr[1])
- elif etype == 'UNARY':
- if expr[1] == '-':
- return -self.eval(expr[2])
- elif etype == 'BINOP':
- if expr[1] == '+':
- return self.eval(expr[2]) + self.eval(expr[3])
- elif expr[1] == '-':
- return self.eval(expr[2]) - self.eval(expr[3])
- elif expr[1] == '*':
- return self.eval(expr[2]) * self.eval(expr[3])
- elif expr[1] == '/':
- return float(self.eval(expr[2])) / self.eval(expr[3])
- elif expr[1] == '^':
- return abs(self.eval(expr[2]))**self.eval(expr[3])
- elif etype == 'VAR':
- var, dim1, dim2 = expr[1]
- if not dim1 and not dim2:
- if var in self.vars:
- return self.vars[var]
- else:
- print("UNDEFINED VARIABLE %s AT LINE %s" %
- (var, self.stat[self.pc]))
- raise RuntimeError
- # May be a list lookup or a function evaluation
- if dim1 and not dim2:
- if var in self.functions:
- # A function
- return self.functions[var](dim1)
- else:
- # A list evaluation
- if var in self.lists:
- dim1val = self.eval(dim1)
- if dim1val < 1 or dim1val > len(self.lists[var]):
- print("LIST INDEX OUT OF BOUNDS AT LINE %s" %
- self.stat[self.pc])
- raise RuntimeError
- return self.lists[var][dim1val - 1]
- if dim1 and dim2:
- if var in self.tables:
- dim1val = self.eval(dim1)
- dim2val = self.eval(dim2)
- if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
- print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" %
- self.stat[self.pc])
- raise RuntimeError
- return self.tables[var][dim1val - 1][dim2val - 1]
- print("UNDEFINED VARIABLE %s AT LINE %s" %
- (var, self.stat[self.pc]))
- raise RuntimeError
-
- # Evaluate a relational expression
- def releval(self, expr):
- etype = expr[1]
- lhs = self.eval(expr[2])
- rhs = self.eval(expr[3])
- if etype == '<':
- if lhs < rhs:
- return 1
- else:
- return 0
-
- elif etype == '<=':
- if lhs <= rhs:
- return 1
- else:
- return 0
-
- elif etype == '>':
- if lhs > rhs:
- return 1
- else:
- return 0
-
- elif etype == '>=':
- if lhs >= rhs:
- return 1
- else:
- return 0
-
- elif etype == '=':
- if lhs == rhs:
- return 1
- else:
- return 0
-
- elif etype == '<>':
- if lhs != rhs:
- return 1
- else:
- return 0
-
- # Assignment
- def assign(self, target, value):
- var, dim1, dim2 = target
- if not dim1 and not dim2:
- self.vars[var] = self.eval(value)
- elif dim1 and not dim2:
- # List assignment
- dim1val = self.eval(dim1)
- if not var in self.lists:
- self.lists[var] = [0] * 10
-
- if dim1val > len(self.lists[var]):
- print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
- raise RuntimeError
- self.lists[var][dim1val - 1] = self.eval(value)
- elif dim1 and dim2:
- dim1val = self.eval(dim1)
- dim2val = self.eval(dim2)
- if not var in self.tables:
- temp = [0] * 10
- v = []
- for i in range(10):
- v.append(temp[:])
- self.tables[var] = v
- # Variable already exists
- if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
- print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
- raise RuntimeError
- self.tables[var][dim1val - 1][dim2val - 1] = self.eval(value)
-
- # Change the current line number
- def goto(self, linenum):
- if not linenum in self.prog:
- print("UNDEFINED LINE NUMBER %d AT LINE %d" %
- (linenum, self.stat[self.pc]))
- raise RuntimeError
- self.pc = self.stat.index(linenum)
-
- # Run it
- def run(self):
- self.vars = {} # All variables
- self.lists = {} # List variables
- self.tables = {} # Tables
- self.loops = [] # Currently active loops
- self.loopend = {} # Mapping saying where loops end
- self.gosub = None # Gosub return point (if any)
- self.error = 0 # Indicates program error
-
- self.stat = list(self.prog) # Ordered list of all line numbers
- self.stat.sort()
- self.pc = 0 # Current program counter
-
- # Processing prior to running
-
- self.collect_data() # Collect all of the data statements
- self.check_end()
- self.check_loops()
-
- if self.error:
- raise RuntimeError
-
- while 1:
- line = self.stat[self.pc]
- instr = self.prog[line]
-
- op = instr[0]
-
- # END and STOP statements
- if op == 'END' or op == 'STOP':
- break # We're done
-
- # GOTO statement
- elif op == 'GOTO':
- newline = instr[1]
- self.goto(newline)
- continue
-
- # PRINT statement
- elif op == 'PRINT':
- plist = instr[1]
- out = ""
- for label, val in plist:
- if out:
- out += ' ' * (15 - (len(out) % 15))
- out += label
- if val:
- if label:
- out += " "
- eval = self.eval(val)
- out += str(eval)
- sys.stdout.write(out)
- end = instr[2]
- if not (end == ',' or end == ';'):
- sys.stdout.write("\n")
- if end == ',':
- sys.stdout.write(" " * (15 - (len(out) % 15)))
- if end == ';':
- sys.stdout.write(" " * (3 - (len(out) % 3)))
-
- # LET statement
- elif op == 'LET':
- target = instr[1]
- value = instr[2]
- self.assign(target, value)
-
- # READ statement
- elif op == 'READ':
- for target in instr[1]:
- if self.dc < len(self.data):
- value = ('NUM', self.data[self.dc])
- self.assign(target, value)
- self.dc += 1
- else:
- # No more data. Program ends
- return
- elif op == 'IF':
- relop = instr[1]
- newline = instr[2]
- if (self.releval(relop)):
- self.goto(newline)
- continue
-
- elif op == 'FOR':
- loopvar = instr[1]
- initval = instr[2]
- finval = instr[3]
- stepval = instr[4]
-
- # Check to see if this is a new loop
- if not self.loops or self.loops[-1][0] != self.pc:
- # Looks like a new loop. Make the initial assignment
- newvalue = initval
- self.assign((loopvar, None, None), initval)
- if not stepval:
- stepval = ('NUM', 1)
- stepval = self.eval(stepval) # Evaluate step here
- self.loops.append((self.pc, stepval))
- else:
- # It's a repeat of the previous loop
- # Update the value of the loop variable according to the
- # step
- stepval = ('NUM', self.loops[-1][1])
- newvalue = (
- 'BINOP', '+', ('VAR', (loopvar, None, None)), stepval)
-
- if self.loops[-1][1] < 0:
- relop = '>='
- else:
- relop = '<='
- if not self.releval(('RELOP', relop, newvalue, finval)):
- # Loop is done. Jump to the NEXT
- self.pc = self.loopend[self.pc]
- self.loops.pop()
- else:
- self.assign((loopvar, None, None), newvalue)
-
- elif op == 'NEXT':
- if not self.loops:
- print("NEXT WITHOUT FOR AT LINE %s" % line)
- return
-
- nextvar = instr[1]
- self.pc = self.loops[-1][0]
- loopinst = self.prog[self.stat[self.pc]]
- forvar = loopinst[1]
- if nextvar != forvar:
- print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
- return
- continue
- elif op == 'GOSUB':
- newline = instr[1]
- if self.gosub:
- print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
- return
- self.gosub = self.stat[self.pc]
- self.goto(newline)
- continue
-
- elif op == 'RETURN':
- if not self.gosub:
- print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
- return
- self.goto(self.gosub)
- self.gosub = None
-
- elif op == 'FUNC':
- fname = instr[1]
- pname = instr[2]
- expr = instr[3]
-
- def eval_func(pvalue, name=pname, self=self, expr=expr):
- self.assign((name, None, None), pvalue)  # use the default-bound parameter name, not the loop variable
- return self.eval(expr)
- self.functions[fname] = eval_func
-
- elif op == 'DIM':
- for vname, x, y in instr[1]:
- if y == 0:
- # Single dimension variable
- self.lists[vname] = [0] * x
- else:
- # Double dimension variable
- temp = [0] * y
- v = []
- for i in range(x):
- v.append(temp[:])
- self.tables[vname] = v
-
- self.pc += 1
-
- # Utility functions for program listing
- def expr_str(self, expr):
- etype = expr[0]
- if etype == 'NUM':
- return str(expr[1])
- elif etype == 'GROUP':
- return "(%s)" % self.expr_str(expr[1])
- elif etype == 'UNARY':
- if expr[1] == '-':
- return "-" + str(expr[2])
- elif etype == 'BINOP':
- return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
- elif etype == 'VAR':
- return self.var_str(expr[1])
-
- def relexpr_str(self, expr):
- return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
-
- def var_str(self, var):
- varname, dim1, dim2 = var
- if not dim1 and not dim2:
- return varname
- if dim1 and not dim2:
- return "%s(%s)" % (varname, self.expr_str(dim1))
- return "%s(%s,%s)" % (varname, self.expr_str(dim1), self.expr_str(dim2))
-
- # Create a program listing
- def list(self):
- stat = list(self.prog) # Ordered list of all line numbers
- stat.sort()
- for line in stat:
- instr = self.prog[line]
- op = instr[0]
- if op in ['END', 'STOP', 'RETURN']:
- print("%s %s" % (line, op))
- continue
- elif op == 'REM':
- print("%s %s" % (line, instr[1]))
- elif op == 'PRINT':
- _out = "%s %s " % (line, op)
- first = 1
- for p in instr[1]:
- if not first:
- _out += ", "
- if p[0] and p[1]:
- _out += '"%s"%s' % (p[0], self.expr_str(p[1]))
- elif p[1]:
- _out += self.expr_str(p[1])
- else:
- _out += '"%s"' % (p[0],)
- first = 0
- if instr[2]:
- _out += instr[2]
- print(_out)
- elif op == 'LET':
- print("%s LET %s = %s" %
- (line, self.var_str(instr[1]), self.expr_str(instr[2])))
- elif op == 'READ':
- _out = "%s READ " % line
- first = 1
- for r in instr[1]:
- if not first:
- _out += ","
- _out += self.var_str(r)
- first = 0
- print(_out)
- elif op == 'IF':
- print("%s IF %s THEN %d" %
- (line, self.relexpr_str(instr[1]), instr[2]))
- elif op == 'GOTO' or op == 'GOSUB':
- print("%s %s %s" % (line, op, instr[1]))
- elif op == 'FOR':
- _out = "%s FOR %s = %s TO %s" % (
- line, instr[1], self.expr_str(instr[2]), self.expr_str(instr[3]))
- if instr[4]:
- _out += " STEP %s" % (self.expr_str(instr[4]))
- print(_out)
- elif op == 'NEXT':
- print("%s NEXT %s" % (line, instr[1]))
- elif op == 'FUNC':
- print("%s DEF %s(%s) = %s" %
- (line, instr[1], instr[2], self.expr_str(instr[3])))
- elif op == 'DIM':
- _out = "%s DIM " % line
- first = 1
- for vname, x, y in instr[1]:
- if not first:
- _out += ","
- first = 0
- if y == 0:
- _out += "%s(%d)" % (vname, x)
- else:
- _out += "%s(%d,%d)" % (vname, x, y)
-
- print(_out)
- elif op == 'DATA':
- _out = "%s DATA " % line
- first = 1
- for v in instr[1]:
- if not first:
- _out += ","
- first = 0
- _out += v
- print(_out)
-
- # Erase the current program
- def new(self):
- self.prog = {}
-
- # Insert statements
- def add_statements(self, prog):
- for line, stat in prog.items():
- self.prog[line] = stat
-
- # Delete a statement
- def del_line(self, lineno):
- try:
- del self.prog[lineno]
- except KeyError:
- pass
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/basparse.py b/components/script/dom/bindings/codegen/ply/example/BASIC/basparse.py
deleted file mode 100644
index d610c7d9094..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/basparse.py
+++ /dev/null
@@ -1,474 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-#
-
-from ply import *
-import basiclex
-
-tokens = basiclex.tokens
-
-precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('left', 'POWER'),
- ('right', 'UMINUS')
-)
-
-# A BASIC program is a series of statements. We represent the program as a
-# dictionary of tuples indexed by line number.
-
-
-def p_program(p):
- '''program : program statement
- | statement'''
-
- if len(p) == 2 and p[1]:
- p[0] = {}
- line, stat = p[1]
- p[0][line] = stat
- elif len(p) == 3:
- p[0] = p[1]
- if not p[0]:
- p[0] = {}
- if p[2]:
- line, stat = p[2]
- p[0][line] = stat
-
-# This catch-all rule is used for any catastrophic errors. In this case,
-# we simply return nothing
-
-
-def p_program_error(p):
- '''program : error'''
- p[0] = None
- p.parser.error = 1
-
-# Format of all BASIC statements.
-
-
-def p_statement(p):
- '''statement : INTEGER command NEWLINE'''
- if isinstance(p[2], str):
- print("%s %s %s" % (p[2], "AT LINE", p[1]))
- p[0] = None
- p.parser.error = 1
- else:
- lineno = int(p[1])
- p[0] = (lineno, p[2])
-
-# Interactive statements.
-
-
-def p_statement_interactive(p):
- '''statement : RUN NEWLINE
- | LIST NEWLINE
- | NEW NEWLINE'''
- p[0] = (0, (p[1], 0))
-
-# Blank line number
-
-
-def p_statement_blank(p):
- '''statement : INTEGER NEWLINE'''
- p[0] = (0, ('BLANK', int(p[1])))
-
-# Error handling for malformed statements
-
-
-def p_statement_bad(p):
- '''statement : INTEGER error NEWLINE'''
- print("MALFORMED STATEMENT AT LINE %s" % p[1])
- p[0] = None
- p.parser.error = 1
-
-# Blank line
-
-
-def p_statement_newline(p):
- '''statement : NEWLINE'''
- p[0] = None
-
-# LET statement
-
-
-def p_command_let(p):
- '''command : LET variable EQUALS expr'''
- p[0] = ('LET', p[2], p[4])
-
-
-def p_command_let_bad(p):
- '''command : LET variable EQUALS error'''
- p[0] = "BAD EXPRESSION IN LET"
-
-# READ statement
-
-
-def p_command_read(p):
- '''command : READ varlist'''
- p[0] = ('READ', p[2])
-
-
-def p_command_read_bad(p):
- '''command : READ error'''
- p[0] = "MALFORMED VARIABLE LIST IN READ"
-
-# DATA statement
-
-
-def p_command_data(p):
- '''command : DATA numlist'''
- p[0] = ('DATA', p[2])
-
-
-def p_command_data_bad(p):
- '''command : DATA error'''
- p[0] = "MALFORMED NUMBER LIST IN DATA"
-
-# PRINT statement
-
-
-def p_command_print(p):
- '''command : PRINT plist optend'''
- p[0] = ('PRINT', p[2], p[3])
-
-
-def p_command_print_bad(p):
- '''command : PRINT error'''
- p[0] = "MALFORMED PRINT STATEMENT"
-
-# Optional ending on PRINT. Either a comma (,) or semicolon (;)
-
-
-def p_optend(p):
- '''optend : COMMA
- | SEMI
- |'''
- if len(p) == 2:
- p[0] = p[1]
- else:
- p[0] = None
-
-# PRINT statement with no arguments
-
-
-def p_command_print_empty(p):
- '''command : PRINT'''
- p[0] = ('PRINT', [], None)
-
-# GOTO statement
-
-
-def p_command_goto(p):
- '''command : GOTO INTEGER'''
- p[0] = ('GOTO', int(p[2]))
-
-
-def p_command_goto_bad(p):
- '''command : GOTO error'''
- p[0] = "INVALID LINE NUMBER IN GOTO"
-
-# IF-THEN statement
-
-
-def p_command_if(p):
- '''command : IF relexpr THEN INTEGER'''
- p[0] = ('IF', p[2], int(p[4]))
-
-
-def p_command_if_bad(p):
- '''command : IF error THEN INTEGER'''
- p[0] = "BAD RELATIONAL EXPRESSION"
-
-
-def p_command_if_bad2(p):
- '''command : IF relexpr THEN error'''
- p[0] = "INVALID LINE NUMBER IN THEN"
-
-# FOR statement
-
-
-def p_command_for(p):
- '''command : FOR ID EQUALS expr TO expr optstep'''
- p[0] = ('FOR', p[2], p[4], p[6], p[7])
-
-
-def p_command_for_bad_initial(p):
- '''command : FOR ID EQUALS error TO expr optstep'''
- p[0] = "BAD INITIAL VALUE IN FOR STATEMENT"
-
-
-def p_command_for_bad_final(p):
- '''command : FOR ID EQUALS expr TO error optstep'''
- p[0] = "BAD FINAL VALUE IN FOR STATEMENT"
-
-
-def p_command_for_bad_step(p):
- '''command : FOR ID EQUALS expr TO expr STEP error'''
- p[0] = "MALFORMED STEP IN FOR STATEMENT"
-
-# Optional STEP qualifier on FOR statement
-
-
-def p_optstep(p):
- '''optstep : STEP expr
- | empty'''
- if len(p) == 3:
- p[0] = p[2]
- else:
- p[0] = None
-
-# NEXT statement
-
-
-def p_command_next(p):
- '''command : NEXT ID'''
-
- p[0] = ('NEXT', p[2])
-
-
-def p_command_next_bad(p):
- '''command : NEXT error'''
- p[0] = "MALFORMED NEXT"
-
-# END statement
-
-
-def p_command_end(p):
- '''command : END'''
- p[0] = ('END',)
-
-# REM statement
-
-
-def p_command_rem(p):
- '''command : REM'''
- p[0] = ('REM', p[1])
-
-# STOP statement
-
-
-def p_command_stop(p):
- '''command : STOP'''
- p[0] = ('STOP',)
-
-# DEF statement
-
-
-def p_command_def(p):
- '''command : DEF ID LPAREN ID RPAREN EQUALS expr'''
- p[0] = ('FUNC', p[2], p[4], p[7])
-
-
-def p_command_def_bad_rhs(p):
- '''command : DEF ID LPAREN ID RPAREN EQUALS error'''
- p[0] = "BAD EXPRESSION IN DEF STATEMENT"
-
-
-def p_command_def_bad_arg(p):
- '''command : DEF ID LPAREN error RPAREN EQUALS expr'''
- p[0] = "BAD ARGUMENT IN DEF STATEMENT"
-
-# GOSUB statement
-
-
-def p_command_gosub(p):
- '''command : GOSUB INTEGER'''
- p[0] = ('GOSUB', int(p[2]))
-
-
-def p_command_gosub_bad(p):
- '''command : GOSUB error'''
- p[0] = "INVALID LINE NUMBER IN GOSUB"
-
-# RETURN statement
-
-
-def p_command_return(p):
- '''command : RETURN'''
- p[0] = ('RETURN',)
-
-# DIM statement
-
-
-def p_command_dim(p):
- '''command : DIM dimlist'''
- p[0] = ('DIM', p[2])
-
-
-def p_command_dim_bad(p):
- '''command : DIM error'''
- p[0] = "MALFORMED VARIABLE LIST IN DIM"
-
-# List of variables supplied to DIM statement
-
-
-def p_dimlist(p):
- '''dimlist : dimlist COMMA dimitem
- | dimitem'''
- if len(p) == 4:
- p[0] = p[1]
- p[0].append(p[3])
- else:
- p[0] = [p[1]]
-
-# DIM items
-
-
-def p_dimitem_single(p):
- '''dimitem : ID LPAREN INTEGER RPAREN'''
- p[0] = (p[1], eval(p[3]), 0)
-
-
-def p_dimitem_double(p):
- '''dimitem : ID LPAREN INTEGER COMMA INTEGER RPAREN'''
- p[0] = (p[1], eval(p[3]), eval(p[5]))
-
-# Arithmetic expressions
-
-
-def p_expr_binary(p):
- '''expr : expr PLUS expr
- | expr MINUS expr
- | expr TIMES expr
- | expr DIVIDE expr
- | expr POWER expr'''
-
- p[0] = ('BINOP', p[2], p[1], p[3])
-
-
-def p_expr_number(p):
- '''expr : INTEGER
- | FLOAT'''
- p[0] = ('NUM', eval(p[1]))
-
-
-def p_expr_variable(p):
- '''expr : variable'''
- p[0] = ('VAR', p[1])
-
-
-def p_expr_group(p):
- '''expr : LPAREN expr RPAREN'''
- p[0] = ('GROUP', p[2])
-
-
-def p_expr_unary(p):
- '''expr : MINUS expr %prec UMINUS'''
- p[0] = ('UNARY', '-', p[2])
-
-# Relational expressions
-
-
-def p_relexpr(p):
- '''relexpr : expr LT expr
- | expr LE expr
- | expr GT expr
- | expr GE expr
- | expr EQUALS expr
- | expr NE expr'''
- p[0] = ('RELOP', p[2], p[1], p[3])
-
-# Variables
-
-
-def p_variable(p):
- '''variable : ID
- | ID LPAREN expr RPAREN
- | ID LPAREN expr COMMA expr RPAREN'''
- if len(p) == 2:
- p[0] = (p[1], None, None)
- elif len(p) == 5:
- p[0] = (p[1], p[3], None)
- else:
- p[0] = (p[1], p[3], p[5])
-
-# Builds a list of variable targets as a Python list
-
-
-def p_varlist(p):
- '''varlist : varlist COMMA variable
- | variable'''
- if len(p) > 2:
- p[0] = p[1]
- p[0].append(p[3])
- else:
- p[0] = [p[1]]
-
-
-# Builds a list of numbers as a Python list
-
-def p_numlist(p):
- '''numlist : numlist COMMA number
- | number'''
-
- if len(p) > 2:
- p[0] = p[1]
- p[0].append(p[3])
- else:
- p[0] = [p[1]]
-
-# A number. May be an integer or a float
-
-
-def p_number(p):
- '''number : INTEGER
- | FLOAT'''
- p[0] = eval(p[1])
-
-# A signed number.
-
-
-def p_number_signed(p):
- '''number : MINUS INTEGER
- | MINUS FLOAT'''
- p[0] = eval("-" + p[2])
-
-# List of targets for a print statement
-# Returns a list of tuples (label,expr)
-
-
-def p_plist(p):
- '''plist : plist COMMA pitem
- | pitem'''
- if len(p) > 3:
- p[0] = p[1]
- p[0].append(p[3])
- else:
- p[0] = [p[1]]
-
-
-def p_item_string(p):
- '''pitem : STRING'''
- p[0] = (p[1][1:-1], None)
-
-
-def p_item_string_expr(p):
- '''pitem : STRING expr'''
- p[0] = (p[1][1:-1], p[2])
-
-
-def p_item_expr(p):
- '''pitem : expr'''
- p[0] = ("", p[1])
-
-# Empty
-
-
-def p_empty(p):
- '''empty : '''
-
-# Catastrophic error handler
-
-
-def p_error(p):
- if not p:
- print("SYNTAX ERROR AT EOF")
-
-bparser = yacc.yacc()
-
-
-def parse(data, debug=0):
- bparser.error = 0
- p = bparser.parse(data, debug=debug)
- if bparser.error:
- return None
- return p
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/dim.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/dim.bas
deleted file mode 100644
index 87bd95b32ec..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/dim.bas
+++ /dev/null
@@ -1,14 +0,0 @@
-5 DIM A(50,15)
-10 FOR I = 1 TO 50
-20 FOR J = 1 TO 15
-30 LET A(I,J) = I + J
-35 REM PRINT I,J, A(I,J)
-40 NEXT J
-50 NEXT I
-100 FOR I = 1 TO 50
-110 FOR J = 1 TO 15
-120 PRINT A(I,J),
-130 NEXT J
-140 PRINT
-150 NEXT I
-999 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/func.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/func.bas
deleted file mode 100644
index 447ee16a927..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/func.bas
+++ /dev/null
@@ -1,5 +0,0 @@
-10 DEF FDX(X) = 2*X
-20 FOR I = 0 TO 100
-30 PRINT FDX(I)
-40 NEXT I
-50 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/gcd.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/gcd.bas
deleted file mode 100644
index d0b77460894..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/gcd.bas
+++ /dev/null
@@ -1,22 +0,0 @@
-10 PRINT "A","B","C","GCD"
-20 READ A,B,C
-30 LET X = A
-40 LET Y = B
-50 GOSUB 200
-60 LET X = G
-70 LET Y = C
-80 GOSUB 200
-90 PRINT A, B, C, G
-100 GOTO 20
-110 DATA 60, 90, 120
-120 DATA 38456, 64872, 98765
-130 DATA 32, 384, 72
-200 LET Q = INT(X/Y)
-210 LET R = X - Q*Y
-220 IF R = 0 THEN 300
-230 LET X = Y
-240 LET Y = R
-250 GOTO 200
-300 LET G = Y
-310 RETURN
-999 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/gosub.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/gosub.bas
deleted file mode 100644
index 99737b16f15..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/gosub.bas
+++ /dev/null
@@ -1,13 +0,0 @@
-100 LET X = 3
-110 GOSUB 400
-120 PRINT U, V, W
-200 LET X = 5
-210 GOSUB 400
-220 LET Z = U + 2*V + 3*W
-230 PRINT Z
-240 GOTO 999
-400 LET U = X*X
-410 LET V = X*X*X
-420 LET W = X*X*X*X + X*X*X + X*X + X
-430 RETURN
-999 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/hello.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/hello.bas
deleted file mode 100644
index cc6f0b0b511..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/hello.bas
+++ /dev/null
@@ -1,4 +0,0 @@
-5 REM HELLO WORLD PROGRAM
-10 PRINT "HELLO WORLD"
-99 END
-
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/linear.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/linear.bas
deleted file mode 100644
index 56c08220b3e..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/linear.bas
+++ /dev/null
@@ -1,17 +0,0 @@
-1 REM ::: SOLVE A SYSTEM OF LINEAR EQUATIONS
-2 REM ::: A1*X1 + A2*X2 = B1
-3 REM ::: A3*X1 + A4*X2 = B2
-4 REM --------------------------------------
-10 READ A1, A2, A3, A4
-15 LET D = A1 * A4 - A3 * A2
-20 IF D = 0 THEN 65
-30 READ B1, B2
-37 LET X1 = (B1*A4 - B2*A2) / D
-42 LET X2 = (A1*B2 - A3*B1) / D
-55 PRINT X1, X2
-60 GOTO 30
-65 PRINT "NO UNIQUE SOLUTION"
-70 DATA 1, 2, 4
-80 DATA 2, -7, 5
-85 DATA 1, 3, 4, -7
-90 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/maxsin.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/maxsin.bas
deleted file mode 100644
index b96901530c2..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/maxsin.bas
+++ /dev/null
@@ -1,12 +0,0 @@
-5 PRINT "X VALUE", "SINE", "RESOLUTION"
-10 READ D
-20 LET M = -1
-30 FOR X = 0 TO 3 STEP D
-40 IF SIN(X) <= M THEN 80
-50 LET X0 = X
-60 LET M = SIN(X)
-80 NEXT X
-85 PRINT X0, M, D
-90 GOTO 10
-100 DATA .1, .01, .001
-110 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/powers.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/powers.bas
deleted file mode 100644
index a454dc3e211..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/powers.bas
+++ /dev/null
@@ -1,13 +0,0 @@
-5 PRINT "THIS PROGRAM COMPUTES AND PRINTS THE NTH POWERS"
-6 PRINT "OF THE NUMBERS LESS THAN OR EQUAL TO N FOR VARIOUS"
-7 PRINT "N FROM 1 THROUGH 7"
-8 PRINT
-10 FOR N = 1 TO 7
-15 PRINT "N = "N
-20 FOR I = 1 TO N
-30 PRINT I^N,
-40 NEXT I
-50 PRINT
-60 PRINT
-70 NEXT N
-80 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/rand.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/rand.bas
deleted file mode 100644
index 4ff7a146702..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/rand.bas
+++ /dev/null
@@ -1,4 +0,0 @@
-10 FOR I = 1 TO 20
-20 PRINT INT(10*RND(0))
-30 NEXT I
-40 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/sales.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/sales.bas
deleted file mode 100644
index a39aefb762c..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/sales.bas
+++ /dev/null
@@ -1,20 +0,0 @@
-10 FOR I = 1 TO 3
-20 READ P(I)
-30 NEXT I
-40 FOR I = 1 TO 3
-50 FOR J = 1 TO 5
-60 READ S(I,J)
-70 NEXT J
-80 NEXT I
-90 FOR J = 1 TO 5
-100 LET S = 0
-110 FOR I = 1 TO 3
-120 LET S = S + P(I) * S(I,J)
-130 NEXT I
-140 PRINT "TOTAL SALES FOR SALESMAN"J, "$"S
-150 NEXT J
-200 DATA 1.25, 4.30, 2.50
-210 DATA 40, 20, 37, 29, 42
-220 DATA 10, 16, 3, 21, 8
-230 DATA 35, 47, 29, 16, 33
-300 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/sears.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/sears.bas
deleted file mode 100644
index 5ced3974e24..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/sears.bas
+++ /dev/null
@@ -1,18 +0,0 @@
-1 REM :: THIS PROGRAM COMPUTES HOW MANY TIMES YOU HAVE TO FOLD
-2 REM :: A PIECE OF PAPER SO THAT IT IS TALLER THAN THE
-3 REM :: SEARS TOWER.
-4 REM :: S = HEIGHT OF TOWER (METERS)
-5 REM :: T = THICKNESS OF PAPER (MILLIMETERS)
-10 LET S = 442
-20 LET T = 0.1
-30 REM CONVERT T TO METERS
-40 LET T = T * .001
-50 LET F = 1
-60 LET H = T
-100 IF H > S THEN 200
-120 LET H = 2 * H
-125 LET F = F + 1
-130 GOTO 100
-200 PRINT "NUMBER OF FOLDS ="F
-220 PRINT "FINAL HEIGHT ="H
-999 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt1.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt1.bas
deleted file mode 100644
index 6673a91524f..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt1.bas
+++ /dev/null
@@ -1,5 +0,0 @@
-10 LET X = 0
-20 LET X = X + 1
-30 PRINT X, SQR(X)
-40 IF X < 100 THEN 20
-50 END
diff --git a/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt2.bas b/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt2.bas
deleted file mode 100644
index 862d85ef269..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/BASIC/sqrt2.bas
+++ /dev/null
@@ -1,4 +0,0 @@
-10 FOR X = 1 TO 100
-20 PRINT X, SQR(X)
-30 NEXT X
-40 END
diff --git a/components/script/dom/bindings/codegen/ply/example/GardenSnake/GardenSnake.py b/components/script/dom/bindings/codegen/ply/example/GardenSnake/GardenSnake.py
deleted file mode 100644
index 8b493b40dca..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/GardenSnake/GardenSnake.py
+++ /dev/null
@@ -1,777 +0,0 @@
-# GardenSnake - a parser generator demonstration program
-#
-# This implements a modified version of a subset of Python:
-# - only 'def', 'return' and 'if' statements
-# - 'if' only has 'then' clause (no elif nor else)
-# - single-quoted strings only, content in raw format
-# - numbers are decimal.Decimal instances (not integers or floats)
-# - no print statement; use the built-in 'print' function
-# - only < > == + - / * implemented (and unary + -)
-# - assignment and tuple assignment work
-# - no generators of any sort
-# - no ... well, no quite a lot
-
-# Why? I'm thinking about a new indentation-based configuration
-# language for a project and wanted to figure out how to do it. Once
-# I got that working I needed a way to test it out. My original AST
-# was dumb so I decided to target Python's AST and compile it into
-# Python code. Plus, it's pretty cool that it only took a day or so
-# from sitting down with Ply to having working code.
-
-# This uses David Beazley's Ply from http://www.dabeaz.com/ply/
-
-# This work is hereby released into the Public Domain. To view a copy of
-# the public domain dedication, visit
-# http://creativecommons.org/licenses/publicdomain/ or send a letter to
-# Creative Commons, 543 Howard Street, 5th Floor, San Francisco,
-# California, 94105, USA.
-#
-# Portions of this work are derived from Python's Grammar definition
-# and may be covered under the Python copyright and license
-#
-# Andrew Dalke / Dalke Scientific Software, LLC
-# 30 August 2006 / Cape Town, South Africa
-
-# Changelog:
-# 30 August - added link to CC license; removed the "swapcase" encoding
-
-# Modifications for inclusion in PLY distribution
-import sys
-sys.path.insert(0, "../..")
-from ply import *
-
-##### Lexer ######
-#import lex
-import decimal
-
-tokens = (
- 'DEF',
- 'IF',
- 'NAME',
- 'NUMBER', # Python decimals
- 'STRING', # single quoted strings only; syntax of raw strings
- 'LPAR',
- 'RPAR',
- 'COLON',
- 'EQ',
- 'ASSIGN',
- 'LT',
- 'GT',
- 'PLUS',
- 'MINUS',
- 'MULT',
- 'DIV',
- 'RETURN',
- 'WS',
- 'NEWLINE',
- 'COMMA',
- 'SEMICOLON',
- 'INDENT',
- 'DEDENT',
- 'ENDMARKER',
-)
-
-#t_NUMBER = r'\d+'
-# taken from decimal.py but without the leading sign
-
-
-def t_NUMBER(t):
- r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
- t.value = decimal.Decimal(t.value)
- return t
-
-
-def t_STRING(t):
- r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
- t.value = t.value[1:-1].decode("string-escape") # .swapcase() # for fun
- return t
-
-t_COLON = r':'
-t_EQ = r'=='
-t_ASSIGN = r'='
-t_LT = r'<'
-t_GT = r'>'
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_MULT = r'\*'
-t_DIV = r'/'
-t_COMMA = r','
-t_SEMICOLON = r';'
-
-# Ply nicely documented how to do this.
-
-RESERVED = {
- "def": "DEF",
- "if": "IF",
- "return": "RETURN",
-}
-
-
-def t_NAME(t):
- r'[a-zA-Z_][a-zA-Z0-9_]*'
- t.type = RESERVED.get(t.value, "NAME")
- return t
-
-# Putting this before t_WS lets it consume lines with only comments in
-# them so the latter code never sees the WS part. Not consuming the
-# newline. Needed for "if 1: #comment"
-
-
-def t_comment(t):
- r"[ ]*\043[^\n]*" # \043 is '#'
- pass
-
-
-# Whitespace
-def t_WS(t):
- r' [ ]+ '
- if t.lexer.at_line_start and t.lexer.paren_count == 0:
- return t
-
-# Don't generate newline tokens when inside of parentheses, e.g.
-# a = (1,
-# 2, 3)
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += len(t.value)
- t.type = "NEWLINE"
- if t.lexer.paren_count == 0:
- return t
-
-
-def t_LPAR(t):
- r'\('
- t.lexer.paren_count += 1
- return t
-
-
-def t_RPAR(t):
- r'\)'
- # check for underflow? should be the job of the parser
- t.lexer.paren_count -= 1
- return t
-
-
-def t_error(t):
- raise SyntaxError("Unknown symbol %r" % (t.value[0],))
- print "Skipping", repr(t.value[0])
- t.lexer.skip(1)
-
-# I implemented INDENT / DEDENT generation as a post-processing filter
-
-# The original lex token stream contains WS and NEWLINE characters.
-# WS will only occur before any other tokens on a line.
-
-# I have three filters. One tags tokens by adding two attributes.
-# "must_indent" is True if the token must be indented from the
-# previous code. The other is "at_line_start" which is True for WS
-# and the first non-WS/non-NEWLINE on a line. It flags the check to
-# see if the new line has changed indentation level.
-
-# Python's syntax has three INDENT states
-# 0) no colon hence no need to indent
-# 1) "if 1: go()" - simple statements have a COLON but no need for an indent
-# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
-NO_INDENT = 0
-MAY_INDENT = 1
-MUST_INDENT = 2
-
-# only care about whitespace at the start of a line
-
-
-def track_tokens_filter(lexer, tokens):
- lexer.at_line_start = at_line_start = True
- indent = NO_INDENT
- saw_colon = False
- for token in tokens:
- token.at_line_start = at_line_start
-
- if token.type == "COLON":
- at_line_start = False
- indent = MAY_INDENT
- token.must_indent = False
-
- elif token.type == "NEWLINE":
- at_line_start = True
- if indent == MAY_INDENT:
- indent = MUST_INDENT
- token.must_indent = False
-
- elif token.type == "WS":
- assert token.at_line_start == True
- at_line_start = True
- token.must_indent = False
-
- else:
- # A real token; only indent after COLON NEWLINE
- if indent == MUST_INDENT:
- token.must_indent = True
- else:
- token.must_indent = False
- at_line_start = False
- indent = NO_INDENT
-
- yield token
- lexer.at_line_start = at_line_start
-
-
-def _new_token(type, lineno):
- tok = lex.LexToken()
- tok.type = type
- tok.value = None
- tok.lineno = lineno
- return tok
-
-# Synthesize a DEDENT tag
-
-
-def DEDENT(lineno):
- return _new_token("DEDENT", lineno)
-
-# Synthesize an INDENT tag
-
-
-def INDENT(lineno):
- return _new_token("INDENT", lineno)
-
-
-# Track the indentation level and emit the right INDENT / DEDENT events.
-def indentation_filter(tokens):
- # A stack of indentation levels; will never pop item 0
- levels = [0]
- token = None
- depth = 0
- prev_was_ws = False
- for token in tokens:
- # if 1:
- # print "Process", token,
- # if token.at_line_start:
- # print "at_line_start",
- # if token.must_indent:
- # print "must_indent",
- # print
-
- # WS only occurs at the start of the line
- # There may be WS followed by NEWLINE so
- # only track the depth here. Don't indent/dedent
- # until there's something real.
- if token.type == "WS":
- assert depth == 0
- depth = len(token.value)
- prev_was_ws = True
- # WS tokens are never passed to the parser
- continue
-
- if token.type == "NEWLINE":
- depth = 0
- if prev_was_ws or token.at_line_start:
- # ignore blank lines
- continue
- # pass the other cases on through
- yield token
- continue
-
- # then it must be a real token (not WS, not NEWLINE)
- # which can affect the indentation level
-
- prev_was_ws = False
- if token.must_indent:
- # The current depth must be larger than the previous level
- if not (depth > levels[-1]):
- raise IndentationError("expected an indented block")
-
- levels.append(depth)
- yield INDENT(token.lineno)
-
- elif token.at_line_start:
- # Must be on the same level or one of the previous levels
- if depth == levels[-1]:
- # At the same level
- pass
- elif depth > levels[-1]:
- raise IndentationError(
- "indentation increase but not in new block")
- else:
- # Back up; but only if it matches a previous level
- try:
- i = levels.index(depth)
- except ValueError:
- raise IndentationError("inconsistent indentation")
- for _ in range(i + 1, len(levels)):
- yield DEDENT(token.lineno)
- levels.pop()
-
- yield token
-
- ### Finished processing ###
-
- # Must dedent any remaining levels
- if len(levels) > 1:
- assert token is not None
- for _ in range(1, len(levels)):
- yield DEDENT(token.lineno)
-
-
-# The top-level filter adds an ENDMARKER, if requested.
-# Python's grammar uses it.
-def filter(lexer, add_endmarker=True):
- token = None
- tokens = iter(lexer.token, None)
- tokens = track_tokens_filter(lexer, tokens)
- for token in indentation_filter(tokens):
- yield token
-
- if add_endmarker:
- lineno = 1
- if token is not None:
- lineno = token.lineno
- yield _new_token("ENDMARKER", lineno)
-
-# Combine Ply and my filters into a new lexer
-
-
-class IndentLexer(object):
-
- def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
- self.lexer = lex.lex(debug=debug, optimize=optimize,
- lextab=lextab, reflags=reflags)
- self.token_stream = None
-
- def input(self, s, add_endmarker=True):
- self.lexer.paren_count = 0
- self.lexer.input(s)
- self.token_stream = filter(self.lexer, add_endmarker)
-
- def token(self):
- try:
- return self.token_stream.next()
- except StopIteration:
- return None
-
-########## Parser (tokens -> AST) ######
-
-# also part of Ply
-#import yacc
-
-# I use the Python AST
-from compiler import ast
-
-# Helper function
-
-
-def Assign(left, right):
- names = []
- if isinstance(left, ast.Name):
- # Single assignment on left
- return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right)
- elif isinstance(left, ast.Tuple):
- # List of things - make sure they are Name nodes
- names = []
- for child in left.getChildren():
- if not isinstance(child, ast.Name):
- raise SyntaxError("that assignment not supported")
- names.append(child.name)
- ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
- return ast.Assign([ast.AssTuple(ass_list)], right)
- else:
- raise SyntaxError("Can't do that yet")
-
-
-# The grammar comments come from Python's Grammar/Grammar file
-
-# NB: compound_stmt in single_input is followed by extra NEWLINE!
-# file_input: (NEWLINE | stmt)* ENDMARKER
-def p_file_input_end(p):
- """file_input_end : file_input ENDMARKER"""
- p[0] = ast.Stmt(p[1])
-
-
-def p_file_input(p):
- """file_input : file_input NEWLINE
- | file_input stmt
- | NEWLINE
- | stmt"""
- if isinstance(p[len(p) - 1], basestring):
- if len(p) == 3:
- p[0] = p[1]
- else:
- p[0] = [] # p == 2 --> only a blank line
- else:
- if len(p) == 3:
- p[0] = p[1] + p[2]
- else:
- p[0] = p[1]
-
-
-# funcdef: [decorators] 'def' NAME parameters ':' suite
-# ignoring decorators
-def p_funcdef(p):
- "funcdef : DEF NAME parameters COLON suite"
- p[0] = ast.Function(None, p[2], tuple(p[3]), (), 0, None, p[5])
-
-# parameters: '(' [varargslist] ')'
-
-
-def p_parameters(p):
- """parameters : LPAR RPAR
- | LPAR varargslist RPAR"""
- if len(p) == 3:
- p[0] = []
- else:
- p[0] = p[2]
-
-
-# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
-# highly simplified
-def p_varargslist(p):
- """varargslist : varargslist COMMA NAME
- | NAME"""
- if len(p) == 4:
- p[0] = p[1] + p[3]
- else:
- p[0] = [p[1]]
-
-# stmt: simple_stmt | compound_stmt
-
-
-def p_stmt_simple(p):
- """stmt : simple_stmt"""
- # simple_stmt is a list
- p[0] = p[1]
-
-
-def p_stmt_compound(p):
- """stmt : compound_stmt"""
- p[0] = [p[1]]
-
-# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
-
-
-def p_simple_stmt(p):
- """simple_stmt : small_stmts NEWLINE
- | small_stmts SEMICOLON NEWLINE"""
- p[0] = p[1]
-
-
-def p_small_stmts(p):
- """small_stmts : small_stmts SEMICOLON small_stmt
- | small_stmt"""
- if len(p) == 4:
- p[0] = p[1] + [p[3]]
- else:
- p[0] = [p[1]]
-
-# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
-# import_stmt | global_stmt | exec_stmt | assert_stmt
-
-
-def p_small_stmt(p):
- """small_stmt : flow_stmt
- | expr_stmt"""
- p[0] = p[1]
-
-# expr_stmt: testlist (augassign (yield_expr|testlist) |
-# ('=' (yield_expr|testlist))*)
-# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
-# '<<=' | '>>=' | '**=' | '//=')
-
-
-def p_expr_stmt(p):
- """expr_stmt : testlist ASSIGN testlist
- | testlist """
- if len(p) == 2:
- # a list of expressions
- p[0] = ast.Discard(p[1])
- else:
- p[0] = Assign(p[1], p[3])
-
-
-def p_flow_stmt(p):
- "flow_stmt : return_stmt"
- p[0] = p[1]
-
-# return_stmt: 'return' [testlist]
-
-
-def p_return_stmt(p):
- "return_stmt : RETURN testlist"
- p[0] = ast.Return(p[2])
-
-
-def p_compound_stmt(p):
- """compound_stmt : if_stmt
- | funcdef"""
- p[0] = p[1]
-
-
-def p_if_stmt(p):
- 'if_stmt : IF test COLON suite'
- p[0] = ast.If([(p[2], p[4])], None)
-
-
-def p_suite(p):
- """suite : simple_stmt
- | NEWLINE INDENT stmts DEDENT"""
- if len(p) == 2:
- p[0] = ast.Stmt(p[1])
- else:
- p[0] = ast.Stmt(p[3])
-
-
-def p_stmts(p):
- """stmts : stmts stmt
- | stmt"""
- if len(p) == 3:
- p[0] = p[1] + p[2]
- else:
- p[0] = p[1]
-
-# Not using Python's approach because Ply supports precedence
-
-# comparison: expr (comp_op expr)*
-# arith_expr: term (('+'|'-') term)*
-# term: factor (('*'|'/'|'%'|'//') factor)*
-# factor: ('+'|'-'|'~') factor | power
-# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-
-
-def make_lt_compare((left, right)):
- return ast.Compare(left, [('<', right), ])
-
-
-def make_gt_compare((left, right)):
- return ast.Compare(left, [('>', right), ])
-
-
-def make_eq_compare((left, right)):
- return ast.Compare(left, [('==', right), ])
-
-
-binary_ops = {
- "+": ast.Add,
- "-": ast.Sub,
- "*": ast.Mul,
- "/": ast.Div,
- "<": make_lt_compare,
- ">": make_gt_compare,
- "==": make_eq_compare,
-}
-unary_ops = {
- "+": ast.UnaryAdd,
- "-": ast.UnarySub,
-}
-precedence = (
- ("left", "EQ", "GT", "LT"),
- ("left", "PLUS", "MINUS"),
- ("left", "MULT", "DIV"),
-)
-
-
-def p_comparison(p):
- """comparison : comparison PLUS comparison
- | comparison MINUS comparison
- | comparison MULT comparison
- | comparison DIV comparison
- | comparison LT comparison
- | comparison EQ comparison
- | comparison GT comparison
- | PLUS comparison
- | MINUS comparison
- | power"""
- if len(p) == 4:
- p[0] = binary_ops[p[2]]((p[1], p[3]))
- elif len(p) == 3:
- p[0] = unary_ops[p[1]](p[2])
- else:
- p[0] = p[1]
-
-# power: atom trailer* ['**' factor]
-# trailers enable function calls. I only allow one level of calls
-# so this is 'trailer'
-
-
-def p_power(p):
- """power : atom
- | atom trailer"""
- if len(p) == 2:
- p[0] = p[1]
- else:
- if p[2][0] == "CALL":
- p[0] = ast.CallFunc(p[1], p[2][1], None, None)
- else:
- raise AssertionError("not implemented")
-
-
-def p_atom_name(p):
- """atom : NAME"""
- p[0] = ast.Name(p[1])
-
-
-def p_atom_number(p):
- """atom : NUMBER
- | STRING"""
- p[0] = ast.Const(p[1])
-
-
-def p_atom_tuple(p):
- """atom : LPAR testlist RPAR"""
- p[0] = p[2]
-
-# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-
-
-def p_trailer(p):
- "trailer : LPAR arglist RPAR"
- p[0] = ("CALL", p[2])
-
-# testlist: test (',' test)* [',']
-# Contains shift/reduce error
-
-
-def p_testlist(p):
- """testlist : testlist_multi COMMA
- | testlist_multi """
- if len(p) == 2:
- p[0] = p[1]
- else:
- # May need to promote singleton to tuple
- if isinstance(p[1], list):
- p[0] = p[1]
- else:
- p[0] = [p[1]]
- # Convert into a tuple?
- if isinstance(p[0], list):
- p[0] = ast.Tuple(p[0])
-
-
-def p_testlist_multi(p):
- """testlist_multi : testlist_multi COMMA test
- | test"""
- if len(p) == 2:
- # singleton
- p[0] = p[1]
- else:
- if isinstance(p[1], list):
- p[0] = p[1] + [p[3]]
- else:
- # singleton -> tuple
- p[0] = [p[1], p[3]]
-
-
-# test: or_test ['if' or_test 'else' test] | lambdef
-# as I don't support 'and', 'or', and 'not' this works down to 'comparison'
-def p_test(p):
- "test : comparison"
- p[0] = p[1]
-
-
-# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
-# XXX INCOMPLETE: this doesn't allow the trailing comma
-def p_arglist(p):
- """arglist : arglist COMMA argument
- | argument"""
- if len(p) == 4:
- p[0] = p[1] + [p[3]]
- else:
- p[0] = [p[1]]
-
-# argument: test [gen_for] | test '=' test # Really [keyword '='] test
-
-
-def p_argument(p):
- "argument : test"
- p[0] = p[1]
-
-
-def p_error(p):
- # print "Error!", repr(p)
- raise SyntaxError(p)
-
-
-class GardenSnakeParser(object):
-
- def __init__(self, lexer=None):
- if lexer is None:
- lexer = IndentLexer()
- self.lexer = lexer
- self.parser = yacc.yacc(start="file_input_end")
-
- def parse(self, code):
- self.lexer.input(code)
- result = self.parser.parse(lexer=self.lexer)
- return ast.Module(None, result)
-
-
-###### Code generation ######
-
-from compiler import misc, syntax, pycodegen
-
-
-class GardenSnakeCompiler(object):
-
- def __init__(self):
- self.parser = GardenSnakeParser()
-
- def compile(self, code, filename="<string>"):
- tree = self.parser.parse(code)
- # print tree
- misc.set_filename(filename, tree)
- syntax.check(tree)
- gen = pycodegen.ModuleCodeGenerator(tree)
- code = gen.getCode()
- return code
-
-####### Test code #######
-
-compile = GardenSnakeCompiler().compile
-
-code = r"""
-
-print('LET\'S TRY THIS \\OUT')
-
-#Comment here
-def x(a):
- print('called with',a)
- if a == 1:
- return 2
- if a*2 > 10: return 999 / 4
- # Another comment here
-
- return a+2*3
-
-ints = (1, 2,
- 3, 4,
-5)
-print('multiline-expression', ints)
-
-t = 4+1/3*2+6*(9-5+1)
-print('precedence test; should be 34+2/3:', t, t==(34+2/3))
-
-print('numbers', 1,2,3,4,5)
-if 1:
- 8
- a=9
- print(x(a))
-
-print(x(1))
-print(x(2))
-print(x(8),'3')
-print('this is decimal', 1/5)
-print('BIG DECIMAL', 1.234567891234567e12345)
-
-"""
-
-# Set up the GardenSnake run-time environment
-
-
-def print_(*args):
- print "-->", " ".join(map(str, args))
-
-globals()["print"] = print_
-
-compiled_code = compile(code)
-
-exec compiled_code in globals()
-print "Done"
diff --git a/components/script/dom/bindings/codegen/ply/example/GardenSnake/README b/components/script/dom/bindings/codegen/ply/example/GardenSnake/README
deleted file mode 100644
index 4d8be2db050..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/GardenSnake/README
+++ /dev/null
@@ -1,5 +0,0 @@
-This example is Andrew Dalke's GardenSnake language. It shows how to process an
-indentation-based language like Python. Further details can be found here:
-
-http://dalkescientific.com/writings/diary/archive/2006/08/30/gardensnake_language.html
-
diff --git a/components/script/dom/bindings/codegen/ply/example/README b/components/script/dom/bindings/codegen/ply/example/README
deleted file mode 100644
index 63519b557f1..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/README
+++ /dev/null
@@ -1,10 +0,0 @@
-Simple examples:
- calc - Simple calculator
- classcalc - Simple calculator defined as a class
-
-Complex examples
- ansic - ANSI C grammar from K&R
- BASIC - A small BASIC interpreter
- GardenSnake - A simple python-like language
- yply - Converts Unix yacc files to PLY programs.
-
diff --git a/components/script/dom/bindings/codegen/ply/example/ansic/README b/components/script/dom/bindings/codegen/ply/example/ansic/README
deleted file mode 100644
index e049d3b4e48..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/ansic/README
+++ /dev/null
@@ -1,2 +0,0 @@
-This example is incomplete. It was going to specify an ANSI C parser.
-This is part of it.
diff --git a/components/script/dom/bindings/codegen/ply/example/ansic/clex.py b/components/script/dom/bindings/codegen/ply/example/ansic/clex.py
deleted file mode 100644
index 4bde1d730b0..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/ansic/clex.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# ----------------------------------------------------------------------
-# clex.py
-#
-# A lexer for ANSI C.
-# ----------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-import ply.lex as lex
-
-# Reserved words
-reserved = (
- 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE',
- 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
- 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF',
- 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE',
-)
-
-tokens = reserved + (
- # Literals (identifier, integer constant, float constant, string constant,
- # char const)
- 'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
-
- # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
- 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
- 'LOR', 'LAND', 'LNOT',
- 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
-
- # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
- 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
- 'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
-
- # Increment/decrement (++,--)
- 'PLUSPLUS', 'MINUSMINUS',
-
- # Structure dereference (->)
- 'ARROW',
-
- # Conditional operator (?)
- 'CONDOP',
-
- # Delimiters ( ) [ ] { } , . ; :
- 'LPAREN', 'RPAREN',
- 'LBRACKET', 'RBRACKET',
- 'LBRACE', 'RBRACE',
- 'COMMA', 'PERIOD', 'SEMI', 'COLON',
-
- # Ellipsis (...)
- 'ELLIPSIS',
-)
-
-# Completely ignored characters
-t_ignore = ' \t\x0c'
-
-# Newlines
-
-
-def t_NEWLINE(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-# Operators
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_DIVIDE = r'/'
-t_MOD = r'%'
-t_OR = r'\|'
-t_AND = r'&'
-t_NOT = r'~'
-t_XOR = r'\^'
-t_LSHIFT = r'<<'
-t_RSHIFT = r'>>'
-t_LOR = r'\|\|'
-t_LAND = r'&&'
-t_LNOT = r'!'
-t_LT = r'<'
-t_GT = r'>'
-t_LE = r'<='
-t_GE = r'>='
-t_EQ = r'=='
-t_NE = r'!='
-
-# Assignment operators
-
-t_EQUALS = r'='
-t_TIMESEQUAL = r'\*='
-t_DIVEQUAL = r'/='
-t_MODEQUAL = r'%='
-t_PLUSEQUAL = r'\+='
-t_MINUSEQUAL = r'-='
-t_LSHIFTEQUAL = r'<<='
-t_RSHIFTEQUAL = r'>>='
-t_ANDEQUAL = r'&='
-t_OREQUAL = r'\|='
-t_XOREQUAL = r'\^='
-
-# Increment/decrement
-t_PLUSPLUS = r'\+\+'
-t_MINUSMINUS = r'--'
-
-# ->
-t_ARROW = r'->'
-
-# ?
-t_CONDOP = r'\?'
-
-# Delimiters
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_LBRACKET = r'\['
-t_RBRACKET = r'\]'
-t_LBRACE = r'\{'
-t_RBRACE = r'\}'
-t_COMMA = r','
-t_PERIOD = r'\.'
-t_SEMI = r';'
-t_COLON = r':'
-t_ELLIPSIS = r'\.\.\.'
-
-# Identifiers and reserved words
-
-reserved_map = {}
-for r in reserved:
- reserved_map[r.lower()] = r
-
-
-def t_ID(t):
- r'[A-Za-z_][\w_]*'
- t.type = reserved_map.get(t.value, "ID")
- return t
-
-# Integer literal
-t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
-
-# Floating literal
-t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
-
-# String literal
-t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
-
-# Character constant 'c' or L'c'
-t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
-
-# Comments
-
-
-def t_comment(t):
- r'/\*(.|\n)*?\*/'
- t.lexer.lineno += t.value.count('\n')
-
-# Preprocessor directive (ignored)
-
-
-def t_preprocessor(t):
- r'\#(.)*?\n'
- t.lexer.lineno += 1
-
-
-def t_error(t):
- print("Illegal character %s" % repr(t.value[0]))
- t.lexer.skip(1)
-
-lexer = lex.lex()
-if __name__ == "__main__":
- lex.runmain(lexer)
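
Besides lex.runmain(), the lexer above can be driven directly from another module. A small hypothetical driver (the C snippet and names here are illustrative only):

    import clex   # the module above; importing it builds `clex.lexer`

    c_source = """
    int add(int a, int b) {
        /* sum two ints */
        return a + b;
    }
    """

    clex.lexer.input(c_source)
    for tok in clex.lexer:
        print(tok.type, tok.value, tok.lineno)
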
diff --git a/components/script/dom/bindings/codegen/ply/example/ansic/cparse.py b/components/script/dom/bindings/codegen/ply/example/ansic/cparse.py
deleted file mode 100644
index 5fe9bce0428..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/ansic/cparse.py
+++ /dev/null
@@ -1,1048 +0,0 @@
-# -----------------------------------------------------------------------------
-# cparse.py
-#
-# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed.
-# -----------------------------------------------------------------------------
-
-import sys
-import clex
-import ply.yacc as yacc
-
-# Get the token map
-tokens = clex.tokens
-
-# translation-unit:
-
-
-def p_translation_unit_1(t):
- 'translation_unit : external_declaration'
- pass
-
-
-def p_translation_unit_2(t):
- 'translation_unit : translation_unit external_declaration'
- pass
-
-# external-declaration:
-
-
-def p_external_declaration_1(t):
- 'external_declaration : function_definition'
- pass
-
-
-def p_external_declaration_2(t):
- 'external_declaration : declaration'
- pass
-
-# function-definition:
-
-
-def p_function_definition_1(t):
- 'function_definition : declaration_specifiers declarator declaration_list compound_statement'
- pass
-
-
-def p_function_definition_2(t):
- 'function_definition : declarator declaration_list compound_statement'
- pass
-
-
-def p_function_definition_3(t):
- 'function_definition : declarator compound_statement'
- pass
-
-
-def p_function_definition_4(t):
- 'function_definition : declaration_specifiers declarator compound_statement'
- pass
-
-# declaration:
-
-
-def p_declaration_1(t):
- 'declaration : declaration_specifiers init_declarator_list SEMI'
- pass
-
-
-def p_declaration_2(t):
- 'declaration : declaration_specifiers SEMI'
- pass
-
-# declaration-list:
-
-
-def p_declaration_list_1(t):
- 'declaration_list : declaration'
- pass
-
-
-def p_declaration_list_2(t):
- 'declaration_list : declaration_list declaration '
- pass
-
-# declaration-specifiers
-
-
-def p_declaration_specifiers_1(t):
- 'declaration_specifiers : storage_class_specifier declaration_specifiers'
- pass
-
-
-def p_declaration_specifiers_2(t):
- 'declaration_specifiers : type_specifier declaration_specifiers'
- pass
-
-
-def p_declaration_specifiers_3(t):
- 'declaration_specifiers : type_qualifier declaration_specifiers'
- pass
-
-
-def p_declaration_specifiers_4(t):
- 'declaration_specifiers : storage_class_specifier'
- pass
-
-
-def p_declaration_specifiers_5(t):
- 'declaration_specifiers : type_specifier'
- pass
-
-
-def p_declaration_specifiers_6(t):
- 'declaration_specifiers : type_qualifier'
- pass
-
-# storage-class-specifier
-
-
-def p_storage_class_specifier(t):
- '''storage_class_specifier : AUTO
- | REGISTER
- | STATIC
- | EXTERN
- | TYPEDEF
- '''
- pass
-
-# type-specifier:
-
-
-def p_type_specifier(t):
- '''type_specifier : VOID
- | CHAR
- | SHORT
- | INT
- | LONG
- | FLOAT
- | DOUBLE
- | SIGNED
- | UNSIGNED
- | struct_or_union_specifier
- | enum_specifier
- | TYPEID
- '''
- pass
-
-# type-qualifier:
-
-
-def p_type_qualifier(t):
- '''type_qualifier : CONST
- | VOLATILE'''
- pass
-
-# struct-or-union-specifier
-
-
-def p_struct_or_union_specifier_1(t):
- 'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE'
- pass
-
-
-def p_struct_or_union_specifier_2(t):
- 'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE'
- pass
-
-
-def p_struct_or_union_specifier_3(t):
- 'struct_or_union_specifier : struct_or_union ID'
- pass
-
-# struct-or-union:
-
-
-def p_struct_or_union(t):
- '''struct_or_union : STRUCT
- | UNION
- '''
- pass
-
-# struct-declaration-list:
-
-
-def p_struct_declaration_list_1(t):
- 'struct_declaration_list : struct_declaration'
- pass
-
-
-def p_struct_declaration_list_2(t):
- 'struct_declaration_list : struct_declaration_list struct_declaration'
- pass
-
-# init-declarator-list:
-
-
-def p_init_declarator_list_1(t):
- 'init_declarator_list : init_declarator'
- pass
-
-
-def p_init_declarator_list_2(t):
- 'init_declarator_list : init_declarator_list COMMA init_declarator'
- pass
-
-# init-declarator
-
-
-def p_init_declarator_1(t):
- 'init_declarator : declarator'
- pass
-
-
-def p_init_declarator_2(t):
- 'init_declarator : declarator EQUALS initializer'
- pass
-
-# struct-declaration:
-
-
-def p_struct_declaration(t):
- 'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI'
- pass
-
-# specifier-qualifier-list:
-
-
-def p_specifier_qualifier_list_1(t):
- 'specifier_qualifier_list : type_specifier specifier_qualifier_list'
- pass
-
-
-def p_specifier_qualifier_list_2(t):
- 'specifier_qualifier_list : type_specifier'
- pass
-
-
-def p_specifier_qualifier_list_3(t):
- 'specifier_qualifier_list : type_qualifier specifier_qualifier_list'
- pass
-
-
-def p_specifier_qualifier_list_4(t):
- 'specifier_qualifier_list : type_qualifier'
- pass
-
-# struct-declarator-list:
-
-
-def p_struct_declarator_list_1(t):
- 'struct_declarator_list : struct_declarator'
- pass
-
-
-def p_struct_declarator_list_2(t):
- 'struct_declarator_list : struct_declarator_list COMMA struct_declarator'
- pass
-
-# struct-declarator:
-
-
-def p_struct_declarator_1(t):
- 'struct_declarator : declarator'
- pass
-
-
-def p_struct_declarator_2(t):
- 'struct_declarator : declarator COLON constant_expression'
- pass
-
-
-def p_struct_declarator_3(t):
- 'struct_declarator : COLON constant_expression'
- pass
-
-# enum-specifier:
-
-
-def p_enum_specifier_1(t):
- 'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE'
- pass
-
-
-def p_enum_specifier_2(t):
- 'enum_specifier : ENUM LBRACE enumerator_list RBRACE'
- pass
-
-
-def p_enum_specifier_3(t):
- 'enum_specifier : ENUM ID'
- pass
-
-# enumerator_list:
-
-
-def p_enumerator_list_1(t):
- 'enumerator_list : enumerator'
- pass
-
-
-def p_enumerator_list_2(t):
- 'enumerator_list : enumerator_list COMMA enumerator'
- pass
-
-# enumerator:
-
-
-def p_enumerator_1(t):
- 'enumerator : ID'
- pass
-
-
-def p_enumerator_2(t):
- 'enumerator : ID EQUALS constant_expression'
- pass
-
-# declarator:
-
-
-def p_declarator_1(t):
- 'declarator : pointer direct_declarator'
- pass
-
-
-def p_declarator_2(t):
- 'declarator : direct_declarator'
- pass
-
-# direct-declarator:
-
-
-def p_direct_declarator_1(t):
- 'direct_declarator : ID'
- pass
-
-
-def p_direct_declarator_2(t):
- 'direct_declarator : LPAREN declarator RPAREN'
- pass
-
-
-def p_direct_declarator_3(t):
- 'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET'
- pass
-
-
-def p_direct_declarator_4(t):
- 'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN '
- pass
-
-
-def p_direct_declarator_5(t):
- 'direct_declarator : direct_declarator LPAREN identifier_list RPAREN '
- pass
-
-
-def p_direct_declarator_6(t):
- 'direct_declarator : direct_declarator LPAREN RPAREN '
- pass
-
-# pointer:
-
-
-def p_pointer_1(t):
- 'pointer : TIMES type_qualifier_list'
- pass
-
-
-def p_pointer_2(t):
- 'pointer : TIMES'
- pass
-
-
-def p_pointer_3(t):
- 'pointer : TIMES type_qualifier_list pointer'
- pass
-
-
-def p_pointer_4(t):
- 'pointer : TIMES pointer'
- pass
-
-# type-qualifier-list:
-
-
-def p_type_qualifier_list_1(t):
- 'type_qualifier_list : type_qualifier'
- pass
-
-
-def p_type_qualifier_list_2(t):
- 'type_qualifier_list : type_qualifier_list type_qualifier'
- pass
-
-# parameter-type-list:
-
-
-def p_parameter_type_list_1(t):
- 'parameter_type_list : parameter_list'
- pass
-
-
-def p_parameter_type_list_2(t):
- 'parameter_type_list : parameter_list COMMA ELLIPSIS'
- pass
-
-# parameter-list:
-
-
-def p_parameter_list_1(t):
- 'parameter_list : parameter_declaration'
- pass
-
-
-def p_parameter_list_2(t):
- 'parameter_list : parameter_list COMMA parameter_declaration'
- pass
-
-# parameter-declaration:
-
-
-def p_parameter_declaration_1(t):
- 'parameter_declaration : declaration_specifiers declarator'
- pass
-
-
-def p_parameter_declaration_2(t):
- 'parameter_declaration : declaration_specifiers abstract_declarator_opt'
- pass
-
-# identifier-list:
-
-
-def p_identifier_list_1(t):
- 'identifier_list : ID'
- pass
-
-
-def p_identifier_list_2(t):
- 'identifier_list : identifier_list COMMA ID'
- pass
-
-# initializer:
-
-
-def p_initializer_1(t):
- 'initializer : assignment_expression'
- pass
-
-
-def p_initializer_2(t):
- '''initializer : LBRACE initializer_list RBRACE
- | LBRACE initializer_list COMMA RBRACE'''
- pass
-
-# initializer-list:
-
-
-def p_initializer_list_1(t):
- 'initializer_list : initializer'
- pass
-
-
-def p_initializer_list_2(t):
- 'initializer_list : initializer_list COMMA initializer'
- pass
-
-# type-name:
-
-
-def p_type_name(t):
- 'type_name : specifier_qualifier_list abstract_declarator_opt'
- pass
-
-
-def p_abstract_declarator_opt_1(t):
- 'abstract_declarator_opt : empty'
- pass
-
-
-def p_abstract_declarator_opt_2(t):
- 'abstract_declarator_opt : abstract_declarator'
- pass
-
-# abstract-declarator:
-
-
-def p_abstract_declarator_1(t):
- 'abstract_declarator : pointer '
- pass
-
-
-def p_abstract_declarator_2(t):
- 'abstract_declarator : pointer direct_abstract_declarator'
- pass
-
-
-def p_abstract_declarator_3(t):
- 'abstract_declarator : direct_abstract_declarator'
- pass
-
-# direct-abstract-declarator:
-
-
-def p_direct_abstract_declarator_1(t):
- 'direct_abstract_declarator : LPAREN abstract_declarator RPAREN'
- pass
-
-
-def p_direct_abstract_declarator_2(t):
- 'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET'
- pass
-
-
-def p_direct_abstract_declarator_3(t):
- 'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET'
- pass
-
-
-def p_direct_abstract_declarator_4(t):
- 'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN'
- pass
-
-
-def p_direct_abstract_declarator_5(t):
- 'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN'
- pass
-
-# Optional fields in abstract declarators
-
-
-def p_constant_expression_opt_1(t):
- 'constant_expression_opt : empty'
- pass
-
-
-def p_constant_expression_opt_2(t):
- 'constant_expression_opt : constant_expression'
- pass
-
-
-def p_parameter_type_list_opt_1(t):
- 'parameter_type_list_opt : empty'
- pass
-
-
-def p_parameter_type_list_opt_2(t):
- 'parameter_type_list_opt : parameter_type_list'
- pass
-
-# statement:
-
-
-def p_statement(t):
- '''
- statement : labeled_statement
- | expression_statement
- | compound_statement
- | selection_statement
- | iteration_statement
- | jump_statement
- '''
- pass
-
-# labeled-statement:
-
-
-def p_labeled_statement_1(t):
- 'labeled_statement : ID COLON statement'
- pass
-
-
-def p_labeled_statement_2(t):
- 'labeled_statement : CASE constant_expression COLON statement'
- pass
-
-
-def p_labeled_statement_3(t):
- 'labeled_statement : DEFAULT COLON statement'
- pass
-
-# expression-statement:
-
-
-def p_expression_statement(t):
- 'expression_statement : expression_opt SEMI'
- pass
-
-# compound-statement:
-
-
-def p_compound_statement_1(t):
- 'compound_statement : LBRACE declaration_list statement_list RBRACE'
- pass
-
-
-def p_compound_statement_2(t):
- 'compound_statement : LBRACE statement_list RBRACE'
- pass
-
-
-def p_compound_statement_3(t):
- 'compound_statement : LBRACE declaration_list RBRACE'
- pass
-
-
-def p_compound_statement_4(t):
- 'compound_statement : LBRACE RBRACE'
- pass
-
-# statement-list:
-
-
-def p_statement_list_1(t):
- 'statement_list : statement'
- pass
-
-
-def p_statement_list_2(t):
- 'statement_list : statement_list statement'
- pass
-
-# selection-statement
-
-
-def p_selection_statement_1(t):
- 'selection_statement : IF LPAREN expression RPAREN statement'
- pass
-
-
-def p_selection_statement_2(t):
- 'selection_statement : IF LPAREN expression RPAREN statement ELSE statement '
- pass
-
-
-def p_selection_statement_3(t):
- 'selection_statement : SWITCH LPAREN expression RPAREN statement '
- pass
-
-# iteration_statement:
-
-
-def p_iteration_statement_1(t):
- 'iteration_statement : WHILE LPAREN expression RPAREN statement'
- pass
-
-
-def p_iteration_statement_2(t):
- 'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement '
- pass
-
-
-def p_iteration_statement_3(t):
- 'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI'
- pass
-
-# jump_statement:
-
-
-def p_jump_statement_1(t):
- 'jump_statement : GOTO ID SEMI'
- pass
-
-
-def p_jump_statement_2(t):
- 'jump_statement : CONTINUE SEMI'
- pass
-
-
-def p_jump_statement_3(t):
- 'jump_statement : BREAK SEMI'
- pass
-
-
-def p_jump_statement_4(t):
- 'jump_statement : RETURN expression_opt SEMI'
- pass
-
-
-def p_expression_opt_1(t):
- 'expression_opt : empty'
- pass
-
-
-def p_expression_opt_2(t):
- 'expression_opt : expression'
- pass
-
-# expression:
-
-
-def p_expression_1(t):
- 'expression : assignment_expression'
- pass
-
-
-def p_expression_2(t):
- 'expression : expression COMMA assignment_expression'
- pass
-
-# assignment_expression:
-
-
-def p_assignment_expression_1(t):
- 'assignment_expression : conditional_expression'
- pass
-
-
-def p_assignment_expression_2(t):
- 'assignment_expression : unary_expression assignment_operator assignment_expression'
- pass
-
-# assignment_operator:
-
-
-def p_assignment_operator(t):
- '''
- assignment_operator : EQUALS
- | TIMESEQUAL
- | DIVEQUAL
- | MODEQUAL
- | PLUSEQUAL
- | MINUSEQUAL
- | LSHIFTEQUAL
- | RSHIFTEQUAL
- | ANDEQUAL
- | OREQUAL
- | XOREQUAL
- '''
- pass
-
-# conditional-expression
-
-
-def p_conditional_expression_1(t):
- 'conditional_expression : logical_or_expression'
- pass
-
-
-def p_conditional_expression_2(t):
- 'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression '
- pass
-
-# constant-expression
-
-
-def p_constant_expression(t):
- 'constant_expression : conditional_expression'
- pass
-
-# logical-or-expression
-
-
-def p_logical_or_expression_1(t):
- 'logical_or_expression : logical_and_expression'
- pass
-
-
-def p_logical_or_expression_2(t):
- 'logical_or_expression : logical_or_expression LOR logical_and_expression'
- pass
-
-# logical-and-expression
-
-
-def p_logical_and_expression_1(t):
- 'logical_and_expression : inclusive_or_expression'
- pass
-
-
-def p_logical_and_expression_2(t):
- 'logical_and_expression : logical_and_expression LAND inclusive_or_expression'
- pass
-
-# inclusive-or-expression:
-
-
-def p_inclusive_or_expression_1(t):
- 'inclusive_or_expression : exclusive_or_expression'
- pass
-
-
-def p_inclusive_or_expression_2(t):
- 'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression'
- pass
-
-# exclusive-or-expression:
-
-
-def p_exclusive_or_expression_1(t):
- 'exclusive_or_expression : and_expression'
- pass
-
-
-def p_exclusive_or_expression_2(t):
- 'exclusive_or_expression : exclusive_or_expression XOR and_expression'
- pass
-
-# AND-expression
-
-
-def p_and_expression_1(t):
- 'and_expression : equality_expression'
- pass
-
-
-def p_and_expression_2(t):
- 'and_expression : and_expression AND equality_expression'
- pass
-
-
-# equality-expression:
-def p_equality_expression_1(t):
- 'equality_expression : relational_expression'
- pass
-
-
-def p_equality_expression_2(t):
- 'equality_expression : equality_expression EQ relational_expression'
- pass
-
-
-def p_equality_expression_3(t):
- 'equality_expression : equality_expression NE relational_expression'
- pass
-
-
-# relational-expression:
-def p_relational_expression_1(t):
- 'relational_expression : shift_expression'
- pass
-
-
-def p_relational_expression_2(t):
- 'relational_expression : relational_expression LT shift_expression'
- pass
-
-
-def p_relational_expression_3(t):
- 'relational_expression : relational_expression GT shift_expression'
- pass
-
-
-def p_relational_expression_4(t):
- 'relational_expression : relational_expression LE shift_expression'
- pass
-
-
-def p_relational_expression_5(t):
- 'relational_expression : relational_expression GE shift_expression'
- pass
-
-# shift-expression
-
-
-def p_shift_expression_1(t):
- 'shift_expression : additive_expression'
- pass
-
-
-def p_shift_expression_2(t):
- 'shift_expression : shift_expression LSHIFT additive_expression'
- pass
-
-
-def p_shift_expression_3(t):
- 'shift_expression : shift_expression RSHIFT additive_expression'
- pass
-
-# additive-expression
-
-
-def p_additive_expression_1(t):
- 'additive_expression : multiplicative_expression'
- pass
-
-
-def p_additive_expression_2(t):
- 'additive_expression : additive_expression PLUS multiplicative_expression'
- pass
-
-
-def p_additive_expression_3(t):
- 'additive_expression : additive_expression MINUS multiplicative_expression'
- pass
-
-# multiplicative-expression
-
-
-def p_multiplicative_expression_1(t):
- 'multiplicative_expression : cast_expression'
- pass
-
-
-def p_multiplicative_expression_2(t):
- 'multiplicative_expression : multiplicative_expression TIMES cast_expression'
- pass
-
-
-def p_multiplicative_expression_3(t):
- 'multiplicative_expression : multiplicative_expression DIVIDE cast_expression'
- pass
-
-
-def p_multiplicative_expression_4(t):
- 'multiplicative_expression : multiplicative_expression MOD cast_expression'
- pass
-
-# cast-expression:
-
-
-def p_cast_expression_1(t):
- 'cast_expression : unary_expression'
- pass
-
-
-def p_cast_expression_2(t):
- 'cast_expression : LPAREN type_name RPAREN cast_expression'
- pass
-
-# unary-expression:
-
-
-def p_unary_expression_1(t):
- 'unary_expression : postfix_expression'
- pass
-
-
-def p_unary_expression_2(t):
- 'unary_expression : PLUSPLUS unary_expression'
- pass
-
-
-def p_unary_expression_3(t):
- 'unary_expression : MINUSMINUS unary_expression'
- pass
-
-
-def p_unary_expression_4(t):
- 'unary_expression : unary_operator cast_expression'
- pass
-
-
-def p_unary_expression_5(t):
- 'unary_expression : SIZEOF unary_expression'
- pass
-
-
-def p_unary_expression_6(t):
- 'unary_expression : SIZEOF LPAREN type_name RPAREN'
- pass
-
-# unary-operator
-
-
-def p_unary_operator(t):
- '''unary_operator : AND
- | TIMES
- | PLUS
- | MINUS
- | NOT
- | LNOT '''
- pass
-
-# postfix-expression:
-
-
-def p_postfix_expression_1(t):
- 'postfix_expression : primary_expression'
- pass
-
-
-def p_postfix_expression_2(t):
- 'postfix_expression : postfix_expression LBRACKET expression RBRACKET'
- pass
-
-
-def p_postfix_expression_3(t):
- 'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN'
- pass
-
-
-def p_postfix_expression_4(t):
- 'postfix_expression : postfix_expression LPAREN RPAREN'
- pass
-
-
-def p_postfix_expression_5(t):
- 'postfix_expression : postfix_expression PERIOD ID'
- pass
-
-
-def p_postfix_expression_6(t):
- 'postfix_expression : postfix_expression ARROW ID'
- pass
-
-
-def p_postfix_expression_7(t):
- 'postfix_expression : postfix_expression PLUSPLUS'
- pass
-
-
-def p_postfix_expression_8(t):
- 'postfix_expression : postfix_expression MINUSMINUS'
- pass
-
-# primary-expression:
-
-
-def p_primary_expression(t):
- '''primary_expression : ID
- | constant
- | SCONST
- | LPAREN expression RPAREN'''
- pass
-
-# argument-expression-list:
-
-
-def p_argument_expression_list(t):
- '''argument_expression_list : assignment_expression
- | argument_expression_list COMMA assignment_expression'''
- pass
-
-# constant:
-
-
-def p_constant(t):
- '''constant : ICONST
- | FCONST
- | CCONST'''
- pass
-
-
-def p_empty(t):
- 'empty : '
- pass
-
-
-def p_error(t):
- print("Whoa. We're hosed")
-
-import profile
-# Build the grammar
-
-yacc.yacc()
-#yacc.yacc(method='LALR',write_tables=False,debug=False)
-
-#profile.run("yacc.yacc(method='LALR')")
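
Every action above is just pass, so the parser only recognizes the input without building anything. To produce a parse tree, each action would assign to t[0]; a hedged sketch of the style (simple tuple nodes, not part of the original file):

    def p_additive_expression_2(t):
        'additive_expression : additive_expression PLUS multiplicative_expression'
        t[0] = ('+', t[1], t[3])     # combine the two sub-expressions

    def p_constant(t):
        '''constant : ICONST
                    | FCONST
                    | CCONST'''
        t[0] = ('const', t[1])
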
diff --git a/components/script/dom/bindings/codegen/ply/example/calc/calc.py b/components/script/dom/bindings/codegen/ply/example/calc/calc.py
deleted file mode 100644
index 824c3d7d0a2..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/calc/calc.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-tokens = (
- 'NAME', 'NUMBER',
-)
-
-literals = ['=', '+', '-', '*', '/', '(', ')']
-
-# Tokens
-
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
-t_ignore = " \t"
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Parsing rules
-
-precedence = (
- ('left', '+', '-'),
- ('left', '*', '/'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(p):
- 'statement : NAME "=" expression'
- names[p[1]] = p[3]
-
-
-def p_statement_expr(p):
- 'statement : expression'
- print(p[1])
-
-
-def p_expression_binop(p):
- '''expression : expression '+' expression
- | expression '-' expression
- | expression '*' expression
- | expression '/' expression'''
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
-
-
-def p_expression_uminus(p):
- "expression : '-' expression %prec UMINUS"
- p[0] = -p[2]
-
-
-def p_expression_group(p):
- "expression : '(' expression ')'"
- p[0] = p[2]
-
-
-def p_expression_number(p):
- "expression : NUMBER"
- p[0] = p[1]
-
-
-def p_expression_name(p):
- "expression : NAME"
- try:
- p[0] = names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
-
-def p_error(p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-import ply.yacc as yacc
-yacc.yacc()
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s)
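
Note that this grammar leans on PLY's literals feature: single-character tokens such as '+' and '=' are declared once in the literals list and then written directly in quotes inside the rules, so no t_PLUS/t_EQUALS definitions are needed. The same parser also works non-interactively; a small sketch with made-up input lines:

    for line in ["x = 3", "y = x * (2 + 4)", "y / 2"]:
        yacc.parse(line)   # assignments update `names`, bare expressions print
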
diff --git a/components/script/dom/bindings/codegen/ply/example/calcdebug/calc.py b/components/script/dom/bindings/codegen/ply/example/calcdebug/calc.py
deleted file mode 100644
index 06831e2ca56..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/calcdebug/calc.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# This example shows how to run the parser in a debugging mode
-# with output routed to a logging object.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-tokens = (
- 'NAME', 'NUMBER',
-)
-
-literals = ['=', '+', '-', '*', '/', '(', ')']
-
-# Tokens
-
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
-t_ignore = " \t"
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Parsing rules
-
-precedence = (
- ('left', '+', '-'),
- ('left', '*', '/'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(p):
- 'statement : NAME "=" expression'
- names[p[1]] = p[3]
-
-
-def p_statement_expr(p):
- 'statement : expression'
- print(p[1])
-
-
-def p_expression_binop(p):
- '''expression : expression '+' expression
- | expression '-' expression
- | expression '*' expression
- | expression '/' expression'''
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
-
-
-def p_expression_uminus(p):
- "expression : '-' expression %prec UMINUS"
- p[0] = -p[2]
-
-
-def p_expression_group(p):
- "expression : '(' expression ')'"
- p[0] = p[2]
-
-
-def p_expression_number(p):
- "expression : NUMBER"
- p[0] = p[1]
-
-
-def p_expression_name(p):
- "expression : NAME"
- try:
- p[0] = names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
-
-def p_error(p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-import ply.yacc as yacc
-yacc.yacc()
-
-import logging
-logging.basicConfig(
- level=logging.INFO,
- filename="parselog.txt"
-)
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s, debug=logging.getLogger())
diff --git a/components/script/dom/bindings/codegen/ply/example/calceof/calc.py b/components/script/dom/bindings/codegen/ply/example/calceof/calc.py
deleted file mode 100644
index 22b39a41a86..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/calceof/calc.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. Asks the user for more input and
-# demonstrates the use of the t_eof() rule.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-tokens = (
- 'NAME', 'NUMBER',
-)
-
-literals = ['=', '+', '-', '*', '/', '(', ')']
-
-# Tokens
-
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
-t_ignore = " \t"
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_eof(t):
- more = raw_input('... ')
- if more:
- t.lexer.input(more + '\n')
- return t.lexer.token()
- else:
- return None
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Parsing rules
-
-precedence = (
- ('left', '+', '-'),
- ('left', '*', '/'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(p):
- 'statement : NAME "=" expression'
- names[p[1]] = p[3]
-
-
-def p_statement_expr(p):
- 'statement : expression'
- print(p[1])
-
-
-def p_expression_binop(p):
- '''expression : expression '+' expression
- | expression '-' expression
- | expression '*' expression
- | expression '/' expression'''
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
-
-
-def p_expression_uminus(p):
- "expression : '-' expression %prec UMINUS"
- p[0] = -p[2]
-
-
-def p_expression_group(p):
- "expression : '(' expression ')'"
- p[0] = p[2]
-
-
-def p_expression_number(p):
- "expression : NUMBER"
- p[0] = p[1]
-
-
-def p_expression_name(p):
- "expression : NAME"
- try:
- p[0] = names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
-
-def p_error(p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-import ply.yacc as yacc
-yacc.yacc()
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s + '\n')
diff --git a/components/script/dom/bindings/codegen/ply/example/classcalc/calc.py b/components/script/dom/bindings/codegen/ply/example/classcalc/calc.py
deleted file mode 100755
index ada4afd426c..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/classcalc/calc.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-#
-# Class-based example contributed to PLY by David McNab
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-import ply.lex as lex
-import ply.yacc as yacc
-import os
-
-
-class Parser:
- """
- Base class for a lexer/parser that has the rules defined as methods
- """
- tokens = ()
- precedence = ()
-
- def __init__(self, **kw):
- self.debug = kw.get('debug', 0)
- self.names = {}
- try:
- modname = os.path.split(os.path.splitext(__file__)[0])[
- 1] + "_" + self.__class__.__name__
- except:
- modname = "parser" + "_" + self.__class__.__name__
- self.debugfile = modname + ".dbg"
- self.tabmodule = modname + "_" + "parsetab"
- # print self.debugfile, self.tabmodule
-
- # Build the lexer and parser
- lex.lex(module=self, debug=self.debug)
- yacc.yacc(module=self,
- debug=self.debug,
- debugfile=self.debugfile,
- tabmodule=self.tabmodule)
-
- def run(self):
- while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s)
-
-
-class Calc(Parser):
-
- tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
- )
-
- # Tokens
-
- t_PLUS = r'\+'
- t_MINUS = r'-'
- t_EXP = r'\*\*'
- t_TIMES = r'\*'
- t_DIVIDE = r'/'
- t_EQUALS = r'='
- t_LPAREN = r'\('
- t_RPAREN = r'\)'
- t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
- def t_NUMBER(self, t):
- r'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print("Integer value too large %s" % t.value)
- t.value = 0
- # print "parsed number %s" % repr(t.value)
- return t
-
- t_ignore = " \t"
-
- def t_newline(self, t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
- def t_error(self, t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
- # Parsing rules
-
- precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('left', 'EXP'),
- ('right', 'UMINUS'),
- )
-
- def p_statement_assign(self, p):
- 'statement : NAME EQUALS expression'
- self.names[p[1]] = p[3]
-
- def p_statement_expr(self, p):
- 'statement : expression'
- print(p[1])
-
- def p_expression_binop(self, p):
- """
- expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression
- | expression EXP expression
- """
- # print [repr(p[i]) for i in range(0,4)]
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
- elif p[2] == '**':
- p[0] = p[1] ** p[3]
-
- def p_expression_uminus(self, p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
- def p_expression_group(self, p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
- def p_expression_number(self, p):
- 'expression : NUMBER'
- p[0] = p[1]
-
- def p_expression_name(self, p):
- 'expression : NAME'
- try:
- p[0] = self.names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
- def p_error(self, p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-if __name__ == '__main__':
- calc = Calc()
- calc.run()
diff --git a/components/script/dom/bindings/codegen/ply/example/cleanup.sh b/components/script/dom/bindings/codegen/ply/example/cleanup.sh
deleted file mode 100755
index 3e115f41c42..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/cleanup.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-rm -f */*.pyc */parsetab.py */parser.out */*~ */*.class
diff --git a/components/script/dom/bindings/codegen/ply/example/closurecalc/calc.py b/components/script/dom/bindings/codegen/ply/example/closurecalc/calc.py
deleted file mode 100644
index 6031b058130..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/closurecalc/calc.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A calculator parser that makes use of closures. The function make_calculator()
-# returns a function that accepts an input string and returns a result. All
-# lexing rules, parsing rules, and internal state are held inside the function.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-# Make a calculator function
-
-
-def make_calculator():
- import ply.lex as lex
- import ply.yacc as yacc
-
- # ------- Internal calculator state
-
- variables = {} # Dictionary of stored variables
-
- # ------- Calculator tokenizing rules
-
- tokens = (
- 'NAME', 'NUMBER',
- )
-
- literals = ['=', '+', '-', '*', '/', '(', ')']
-
- t_ignore = " \t"
-
- t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
- def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
- def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
- def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
- # Build the lexer
- lexer = lex.lex()
-
- # ------- Calculator parsing rules
-
- precedence = (
- ('left', '+', '-'),
- ('left', '*', '/'),
- ('right', 'UMINUS'),
- )
-
- def p_statement_assign(p):
- 'statement : NAME "=" expression'
- variables[p[1]] = p[3]
- p[0] = None
-
- def p_statement_expr(p):
- 'statement : expression'
- p[0] = p[1]
-
- def p_expression_binop(p):
- '''expression : expression '+' expression
- | expression '-' expression
- | expression '*' expression
- | expression '/' expression'''
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
-
- def p_expression_uminus(p):
- "expression : '-' expression %prec UMINUS"
- p[0] = -p[2]
-
- def p_expression_group(p):
- "expression : '(' expression ')'"
- p[0] = p[2]
-
- def p_expression_number(p):
- "expression : NUMBER"
- p[0] = p[1]
-
- def p_expression_name(p):
- "expression : NAME"
- try:
- p[0] = variables[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
- def p_error(p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
- # Build the parser
- parser = yacc.yacc()
-
- # ------- Input function
-
- def input(text):
- result = parser.parse(text, lexer=lexer)
- return result
-
- return input
-
-# Make a calculator object and use it
-calc = make_calculator()
-
-while True:
- try:
- s = raw_input("calc > ")
- except EOFError:
- break
- r = calc(s)
- if r:
- print(r)
diff --git a/components/script/dom/bindings/codegen/ply/example/hedit/hedit.py b/components/script/dom/bindings/codegen/ply/example/hedit/hedit.py
deleted file mode 100644
index 32da745677c..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/hedit/hedit.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -----------------------------------------------------------------------------
-# hedit.py
-#
-# Parsing of Fortran H Edit descriptors (Contributed by Pearu Peterson)
-#
-# These tokens can't be easily tokenized because they are of the following
-# form:
-#
-# nHc1...cn
-#
-# where n is a positive integer and c1 ... cn are characters.
-#
-# This example shows how to modify the state of the lexer to parse
-# such tokens
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-
-tokens = (
- 'H_EDIT_DESCRIPTOR',
-)
-
-# Tokens
-t_ignore = " \t\n"
-
-
-def t_H_EDIT_DESCRIPTOR(t):
- r"\d+H.*" # This grabs all of the remaining text
- i = t.value.index('H')
- n = eval(t.value[:i])
-
- # Adjust the tokenizing position
- t.lexer.lexpos -= len(t.value) - (i + 1 + n)
-
- t.value = t.value[i + 1:i + 1 + n]
- return t
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-lex.runmain()
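
The key move above is the lexpos rewind: the regular expression deliberately over-matches to the end of the line, the rule computes how many characters actually belong to the descriptor, and hands the rest back to the lexer. A minimal hypothetical run, assuming lex.runmain() is replaced by driving the lexer directly:

    lexer = lex.lex()                  # rebuilt from the rules above
    lexer.input("5Hhello3Hfoo")
    for tok in lexer:
        print(tok.type, repr(tok.value))
    # prints: H_EDIT_DESCRIPTOR 'hello'
    #         H_EDIT_DESCRIPTOR 'foo'
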
diff --git a/components/script/dom/bindings/codegen/ply/example/newclasscalc/calc.py b/components/script/dom/bindings/codegen/ply/example/newclasscalc/calc.py
deleted file mode 100755
index 43c9506a8aa..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/newclasscalc/calc.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-#
-# Class-based example contributed to PLY by David McNab.
-#
-# Modified to use new-style classes. Test case.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-import ply.lex as lex
-import ply.yacc as yacc
-import os
-
-
-class Parser(object):
- """
- Base class for a lexer/parser that has the rules defined as methods
- """
- tokens = ()
- precedence = ()
-
- def __init__(self, **kw):
- self.debug = kw.get('debug', 0)
- self.names = {}
- try:
- modname = os.path.split(os.path.splitext(__file__)[0])[
- 1] + "_" + self.__class__.__name__
- except:
- modname = "parser" + "_" + self.__class__.__name__
- self.debugfile = modname + ".dbg"
- self.tabmodule = modname + "_" + "parsetab"
- # print self.debugfile, self.tabmodule
-
- # Build the lexer and parser
- lex.lex(module=self, debug=self.debug)
- yacc.yacc(module=self,
- debug=self.debug,
- debugfile=self.debugfile,
- tabmodule=self.tabmodule)
-
- def run(self):
- while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s)
-
-
-class Calc(Parser):
-
- tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
- )
-
- # Tokens
-
- t_PLUS = r'\+'
- t_MINUS = r'-'
- t_EXP = r'\*\*'
- t_TIMES = r'\*'
- t_DIVIDE = r'/'
- t_EQUALS = r'='
- t_LPAREN = r'\('
- t_RPAREN = r'\)'
- t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
- def t_NUMBER(self, t):
- r'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print("Integer value too large %s" % t.value)
- t.value = 0
- # print "parsed number %s" % repr(t.value)
- return t
-
- t_ignore = " \t"
-
- def t_newline(self, t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
- def t_error(self, t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
- # Parsing rules
-
- precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('left', 'EXP'),
- ('right', 'UMINUS'),
- )
-
- def p_statement_assign(self, p):
- 'statement : NAME EQUALS expression'
- self.names[p[1]] = p[3]
-
- def p_statement_expr(self, p):
- 'statement : expression'
- print(p[1])
-
- def p_expression_binop(self, p):
- """
- expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression
- | expression EXP expression
- """
- # print [repr(p[i]) for i in range(0,4)]
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
- elif p[2] == '**':
- p[0] = p[1] ** p[3]
-
- def p_expression_uminus(self, p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
- def p_expression_group(self, p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
- def p_expression_number(self, p):
- 'expression : NUMBER'
- p[0] = p[1]
-
- def p_expression_name(self, p):
- 'expression : NAME'
- try:
- p[0] = self.names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
- def p_error(self, p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-if __name__ == '__main__':
- calc = Calc()
- calc.run()
diff --git a/components/script/dom/bindings/codegen/ply/example/optcalc/README b/components/script/dom/bindings/codegen/ply/example/optcalc/README
deleted file mode 100644
index 53dd5fcd559..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/optcalc/README
+++ /dev/null
@@ -1,9 +0,0 @@
-An example showing how to use Python optimized mode.
-To run:
-
- - First run 'python calc.py'
-
- - Then run 'python -OO calc.py'
-
-If working correctly, the second version should run the
-same way.
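
The reason this matters: python -OO strips docstrings, and PLY normally reads token regexes and grammar rules out of the docstrings of the t_* and p_* functions. With optimize=1, the first normal run writes out the generated tables (lextab.py, parsetab.py), and later -OO runs load those instead of re-deriving everything from docstrings. In the calc.py that follows, this shows up as:

    lex.lex(optimize=1)     # writes/reuses lextab.py
    yacc.yacc(optimize=1)   # writes/reuses parsetab.py
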
diff --git a/components/script/dom/bindings/codegen/ply/example/optcalc/calc.py b/components/script/dom/bindings/codegen/ply/example/optcalc/calc.py
deleted file mode 100644
index 0c223e59949..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/optcalc/calc.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
-)
-
-# Tokens
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_DIVIDE = r'/'
-t_EQUALS = r'='
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- r'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print("Integer value too large %s" % t.value)
- t.value = 0
- return t
-
-t_ignore = " \t"
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex(optimize=1)
-
-# Parsing rules
-
-precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(t):
- 'statement : NAME EQUALS expression'
- names[t[1]] = t[3]
-
-
-def p_statement_expr(t):
- 'statement : expression'
- print(t[1])
-
-
-def p_expression_binop(t):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if t[2] == '+':
- t[0] = t[1] + t[3]
- elif t[2] == '-':
- t[0] = t[1] - t[3]
- elif t[2] == '*':
- t[0] = t[1] * t[3]
- elif t[2] == '/':
- t[0] = t[1] / t[3]
- elif t[2] == '<':
- t[0] = t[1] < t[3]
-
-
-def p_expression_uminus(t):
- 'expression : MINUS expression %prec UMINUS'
- t[0] = -t[2]
-
-
-def p_expression_group(t):
- 'expression : LPAREN expression RPAREN'
- t[0] = t[2]
-
-
-def p_expression_number(t):
- 'expression : NUMBER'
- t[0] = t[1]
-
-
-def p_expression_name(t):
- 'expression : NAME'
- try:
- t[0] = names[t[1]]
- except LookupError:
- print("Undefined name '%s'" % t[1])
- t[0] = 0
-
-
-def p_error(t):
- if t:
- print("Syntax error at '%s'" % t.value)
- else:
- print("Syntax error at EOF")
-
-import ply.yacc as yacc
-yacc.yacc(optimize=1)
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- yacc.parse(s)
diff --git a/components/script/dom/bindings/codegen/ply/example/unicalc/calc.py b/components/script/dom/bindings/codegen/ply/example/unicalc/calc.py
deleted file mode 100644
index 901c4b9d761..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/unicalc/calc.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-#
-# This example uses unicode strings for tokens, docstrings, and input.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
-)
-
-# Tokens
-
-t_PLUS = ur'\+'
-t_MINUS = ur'-'
-t_TIMES = ur'\*'
-t_DIVIDE = ur'/'
-t_EQUALS = ur'='
-t_LPAREN = ur'\('
-t_RPAREN = ur'\)'
-t_NAME = ur'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- ur'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print "Integer value too large", t.value
- t.value = 0
- return t
-
-t_ignore = u" \t"
-
-
-def t_newline(t):
- ur'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print "Illegal character '%s'" % t.value[0]
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Parsing rules
-
-precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(p):
- 'statement : NAME EQUALS expression'
- names[p[1]] = p[3]
-
-
-def p_statement_expr(p):
- 'statement : expression'
- print p[1]
-
-
-def p_expression_binop(p):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if p[2] == u'+':
- p[0] = p[1] + p[3]
- elif p[2] == u'-':
- p[0] = p[1] - p[3]
- elif p[2] == u'*':
- p[0] = p[1] * p[3]
- elif p[2] == u'/':
- p[0] = p[1] / p[3]
-
-
-def p_expression_uminus(p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
-
-def p_expression_group(p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
-
-def p_expression_number(p):
- 'expression : NUMBER'
- p[0] = p[1]
-
-
-def p_expression_name(p):
- 'expression : NAME'
- try:
- p[0] = names[p[1]]
- except LookupError:
- print "Undefined name '%s'" % p[1]
- p[0] = 0
-
-
-def p_error(p):
- if p:
- print "Syntax error at '%s'" % p.value
- else:
- print "Syntax error at EOF"
-
-import ply.yacc as yacc
-yacc.yacc()
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(unicode(s))
diff --git a/components/script/dom/bindings/codegen/ply/example/yply/README b/components/script/dom/bindings/codegen/ply/example/yply/README
deleted file mode 100644
index bfadf36436f..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/yply/README
+++ /dev/null
@@ -1,41 +0,0 @@
-yply.py
-
-This example implements a program yply.py that converts a UNIX-yacc
-specification file into a PLY-compatible program. To use, simply
-run it like this:
-
- % python yply.py [-nocode] inputfile.y >myparser.py
-
-The output of this program is Python code. In the output,
-any C code in the original file is included, but is commented out.
-If you use the -nocode option, then all of the C code in the
-original file is just discarded.
-
-To use the resulting grammar with PLY, you'll need to edit the
-myparser.py file. Within this file, some stub code is included that
-can be used to test the construction of the parsing tables. However,
-you'll need to do more editing to make a workable parser.
-
-Disclaimer: This is just an example I threw together in an afternoon.
-It might have some bugs. However, it worked when I tried it on
-a yacc-specified C++ parser containing 442 rules and 855 parsing
-states.
-
-Comments:
-
-1. This example does not parse specification files meant for lex/flex.
- You'll need to specify the tokenizer on your own.
-
-2. This example shows a number of interesting PLY features including
-
- - Parsing of literal text delimited by nested parentheses
- - Some interaction between the parser and the lexer.
- - Use of literals in the grammar specification
- - One pass compilation. The program just emits the result,
- there is no intermediate parse tree.
-
-3. This program could probably be cleaned up and enhanced a lot.
- It would be great if someone wanted to work on this (hint).
-
--Dave
-
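For illustration only, the generated myparser.py is roughly shaped like this (the token names and the rule are invented; the original C action survives only as a comment):

    tokens = ['NUMBER', 'NAME']

    precedence = []

    # -------------- RULES ----------------

    def p_expr_1(p):
        '''expr : expr '+' expr'''
        # { $$ = $1 + $3; }

    # -------------- RULES END ----------------

    if __name__ == '__main__':
        from ply import *
        yacc.yacc()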
diff --git a/components/script/dom/bindings/codegen/ply/example/yply/ylex.py b/components/script/dom/bindings/codegen/ply/example/yply/ylex.py
deleted file mode 100644
index 16410e250ee..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/yply/ylex.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# lexer for yacc-grammars
-#
-# Author: David Beazley (dave@dabeaz.com)
-# Date : October 2, 2006
-
-import sys
-sys.path.append("../..")
-
-from ply import *
-
-tokens = (
- 'LITERAL', 'SECTION', 'TOKEN', 'LEFT', 'RIGHT', 'PREC', 'START', 'TYPE', 'NONASSOC', 'UNION', 'CODE',
- 'ID', 'QLITERAL', 'NUMBER',
-)
-
-states = (('code', 'exclusive'),)
-
-literals = [';', ',', '<', '>', '|', ':']
-t_ignore = ' \t'
-
-t_TOKEN = r'%token'
-t_LEFT = r'%left'
-t_RIGHT = r'%right'
-t_NONASSOC = r'%nonassoc'
-t_PREC = r'%prec'
-t_START = r'%start'
-t_TYPE = r'%type'
-t_UNION = r'%union'
-t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
-t_QLITERAL = r'''(?P<quote>['"]).*?(?P=quote)'''
-t_NUMBER = r'\d+'
-
-
-def t_SECTION(t):
- r'%%'
- if getattr(t.lexer, "lastsection", 0):
- t.value = t.lexer.lexdata[t.lexpos + 2:]
- t.lexer.lexpos = len(t.lexer.lexdata)
- else:
- t.lexer.lastsection = 0
- return t
-
-# Comments
-
-
-def t_ccomment(t):
- r'/\*(.|\n)*?\*/'
- t.lexer.lineno += t.value.count('\n')
-
-t_ignore_cppcomment = r'//.*'
-
-
-def t_LITERAL(t):
- r'%\{(.|\n)*?%\}'
- t.lexer.lineno += t.value.count("\n")
- return t
-
-
-def t_NEWLINE(t):
- r'\n'
- t.lexer.lineno += 1
-
-
-def t_code(t):
- r'\{'
- t.lexer.codestart = t.lexpos
- t.lexer.level = 1
- t.lexer.begin('code')
-
-
-def t_code_ignore_string(t):
- r'\"([^\\\n]|(\\.))*?\"'
-
-
-def t_code_ignore_char(t):
- r'\'([^\\\n]|(\\.))*?\''
-
-
-def t_code_ignore_comment(t):
- r'/\*(.|\n)*?\*/'
-
-
-def t_code_ignore_cppcom(t):
- r'//.*'
-
-
-def t_code_lbrace(t):
- r'\{'
- t.lexer.level += 1
-
-
-def t_code_rbrace(t):
- r'\}'
- t.lexer.level -= 1
- if t.lexer.level == 0:
- t.type = 'CODE'
- t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos + 1]
- t.lexer.begin('INITIAL')
- t.lexer.lineno += t.value.count('\n')
- return t
-
-t_code_ignore_nonspace = r'[^\s\}\'\"\{]+'
-t_code_ignore_whitespace = r'\s+'
-t_code_ignore = ""
-
-
-def t_code_error(t):
- raise RuntimeError
-
-
-def t_error(t):
- print("%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0]))
- print(t.value)
- t.lexer.skip(1)
-
-lex.lex()
-
-if __name__ == '__main__':
- lex.runmain()
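The exclusive 'code' state above is the nested-brace trick the README refers to: an opening brace switches the lexer into the 'code' state, a counter tracks nesting, and the whole block comes back as a single CODE token. A self-contained sketch of the same technique, with illustrative token and attribute names, might look like this:

    import ply.lex as lex

    tokens = ('CODE',)
    states = (('code', 'exclusive'),)

    t_ignore = ' \t\n'

    def t_code(t):
        r'\{'
        t.lexer.codestart = t.lexpos   # remember where the block starts
        t.lexer.level = 1              # current brace nesting depth
        t.lexer.begin('code')          # switch to the exclusive state

    def t_code_lbrace(t):
        r'\{'
        t.lexer.level += 1

    def t_code_rbrace(t):
        r'\}'
        t.lexer.level -= 1
        if t.lexer.level == 0:
            t.type = 'CODE'
            t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos + 1]
            t.lexer.begin('INITIAL')   # back to the default state
            return t

    t_code_ignore_body = r'[^{}]+'     # swallow everything that is not a brace
    t_code_ignore = ''

    def t_code_error(t):
        t.lexer.skip(1)

    def t_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input('{ a { b } c }')
    print(lexer.token())               # one CODE token holding the whole block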
diff --git a/components/script/dom/bindings/codegen/ply/example/yply/yparse.py b/components/script/dom/bindings/codegen/ply/example/yply/yparse.py
deleted file mode 100644
index 1f2e8d0922c..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/yply/yparse.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# parser for Unix yacc-based grammars
-#
-# Author: David Beazley (dave@dabeaz.com)
-# Date : October 2, 2006
-
-import ylex
-tokens = ylex.tokens
-
-from ply import *
-
-tokenlist = []
-preclist = []
-
-emit_code = 1
-
-
-def p_yacc(p):
- '''yacc : defsection rulesection'''
-
-
-def p_defsection(p):
- '''defsection : definitions SECTION
- | SECTION'''
- p.lexer.lastsection = 1
- print("tokens = ", repr(tokenlist))
- print()
- print("precedence = ", repr(preclist))
- print()
- print("# -------------- RULES ----------------")
- print()
-
-
-def p_rulesection(p):
- '''rulesection : rules SECTION'''
-
- print("# -------------- RULES END ----------------")
- print_code(p[2], 0)
-
-
-def p_definitions(p):
- '''definitions : definitions definition
- | definition'''
-
-
-def p_definition_literal(p):
- '''definition : LITERAL'''
- print_code(p[1], 0)
-
-
-def p_definition_start(p):
- '''definition : START ID'''
- print("start = '%s'" % p[2])
-
-
-def p_definition_token(p):
- '''definition : toktype opttype idlist optsemi '''
- for i in p[3]:
- if i[0] not in "'\"":
- tokenlist.append(i)
- if p[1] == '%left':
- preclist.append(('left',) + tuple(p[3]))
- elif p[1] == '%right':
- preclist.append(('right',) + tuple(p[3]))
- elif p[1] == '%nonassoc':
- preclist.append(('nonassoc',) + tuple(p[3]))
-
-
-def p_toktype(p):
- '''toktype : TOKEN
- | LEFT
- | RIGHT
- | NONASSOC'''
- p[0] = p[1]
-
-
-def p_opttype(p):
- '''opttype : '<' ID '>'
- | empty'''
-
-
-def p_idlist(p):
- '''idlist : idlist optcomma tokenid
- | tokenid'''
- if len(p) == 2:
- p[0] = [p[1]]
- else:
- p[0] = p[1]
- p[1].append(p[3])
-
-
-def p_tokenid(p):
- '''tokenid : ID
- | ID NUMBER
- | QLITERAL
- | QLITERAL NUMBER'''
- p[0] = p[1]
-
-
-def p_optsemi(p):
- '''optsemi : ';'
- | empty'''
-
-
-def p_optcomma(p):
- '''optcomma : ','
- | empty'''
-
-
-def p_definition_type(p):
- '''definition : TYPE '<' ID '>' namelist optsemi'''
- # type declarations are ignored
-
-
-def p_namelist(p):
- '''namelist : namelist optcomma ID
- | ID'''
-
-
-def p_definition_union(p):
- '''definition : UNION CODE optsemi'''
- # Union declarations are ignored
-
-
-def p_rules(p):
- '''rules : rules rule
- | rule'''
- if len(p) == 2:
- rule = p[1]
- else:
- rule = p[2]
-
- # Print out a Python equivalent of this rule
-
- embedded = [] # Embedded actions (a mess)
- embed_count = 0
-
- rulename = rule[0]
- rulecount = 1
- for r in rule[1]:
- # r contains one of the rule possibilities
- print("def p_%s_%d(p):" % (rulename, rulecount))
- prod = []
- prodcode = ""
- for i in range(len(r)):
- item = r[i]
- if item[0] == '{': # A code block
- if i == len(r) - 1:
- prodcode = item
- break
- else:
- # an embedded action
- embed_name = "_embed%d_%s" % (embed_count, rulename)
- prod.append(embed_name)
- embedded.append((embed_name, item))
- embed_count += 1
- else:
- prod.append(item)
- print(" '''%s : %s'''" % (rulename, " ".join(prod)))
- # Emit code
- print_code(prodcode, 4)
- print()
- rulecount += 1
-
- for e, code in embedded:
- print("def p_%s(p):" % e)
- print(" '''%s : '''" % e)
- print_code(code, 4)
- print()
-
-
-def p_rule(p):
- '''rule : ID ':' rulelist ';' '''
- p[0] = (p[1], [p[3]])
-
-
-def p_rule2(p):
- '''rule : ID ':' rulelist morerules ';' '''
- p[4].insert(0, p[3])
- p[0] = (p[1], p[4])
-
-
-def p_rule_empty(p):
- '''rule : ID ':' ';' '''
- p[0] = (p[1], [[]])
-
-
-def p_rule_empty2(p):
- '''rule : ID ':' morerules ';' '''
-
- p[3].insert(0, [])
- p[0] = (p[1], p[3])
-
-
-def p_morerules(p):
- '''morerules : morerules '|' rulelist
- | '|' rulelist
- | '|' '''
-
- if len(p) == 2:
- p[0] = [[]]
- elif len(p) == 3:
- p[0] = [p[2]]
- else:
- p[0] = p[1]
- p[0].append(p[3])
-
-# print("morerules", len(p), p[0])
-
-
-def p_rulelist(p):
- '''rulelist : rulelist ruleitem
- | ruleitem'''
-
- if len(p) == 2:
- p[0] = [p[1]]
- else:
- p[0] = p[1]
- p[1].append(p[2])
-
-
-def p_ruleitem(p):
- '''ruleitem : ID
- | QLITERAL
- | CODE
- | PREC'''
- p[0] = p[1]
-
-
-def p_empty(p):
- '''empty : '''
-
-
-def p_error(p):
- pass
-
-yacc.yacc(debug=0)
-
-
-def print_code(code, indent):
- if not emit_code:
- return
- codelines = code.splitlines()
- for c in codelines:
- print("%s# %s" % (" " * indent, c))
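As a rough illustration of what p_rules and print_code emit (the rule and the C action are invented), a yacc rule with an embedded mid-rule action such as

    stmt : IF expr { begin_if(); } stmt ;

comes out approximately as

    def p_stmt_1(p):
        '''stmt : IF expr _embed0_stmt stmt'''

    def p__embed0_stmt(p):
        '''_embed0_stmt : '''
        # { begin_if(); }

The embedded action is lifted into its own empty production so it still fires at the same point in the parse, and its C code is kept only as a comment.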
diff --git a/components/script/dom/bindings/codegen/ply/example/yply/yply.py b/components/script/dom/bindings/codegen/ply/example/yply/yply.py
deleted file mode 100755
index e24616c831c..00000000000
--- a/components/script/dom/bindings/codegen/ply/example/yply/yply.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/local/bin/python
-# yply.py
-#
-# Author: David Beazley (dave@dabeaz.com)
-# Date : October 2, 2006
-#
-# Converts a UNIX-yacc specification file into a PLY-compatible
-# specification. To use, simply do this:
-#
-# % python yply.py [-nocode] inputfile.y >myparser.py
-#
-# The output of this program is Python code. In the output,
-# any C code in the original file is included, but is commented out.
-# If you use the -nocode option, then all of the C code in the
-# original file is discarded.
-#
-# Disclaimer: This is just an example I threw together in an afternoon.
-# It might have some bugs. However, it worked when I tried it on
-# a yacc-specified C++ parser containing 442 rules and 855 parsing
-# states.
-#
-
-import sys
-sys.path.insert(0, "../..")
-
-import ylex
-import yparse
-
-from ply import *
-
-if len(sys.argv) == 1:
- print("usage : yply.py [-nocode] inputfile")
- raise SystemExit
-
-if len(sys.argv) == 3:
- if sys.argv[1] == '-nocode':
- yparse.emit_code = 0
- else:
- print("Unknown option '%s'" % sys.argv[1])
- raise SystemExit
- filename = sys.argv[2]
-else:
- filename = sys.argv[1]
-
-yacc.parse(open(filename).read())
-
-print("""
-if __name__ == '__main__':
- from ply import *
- yacc.yacc()
-""")
diff --git a/components/script/dom/bindings/codegen/ply/ply.egg-info/PKG-INFO b/components/script/dom/bindings/codegen/ply/ply.egg-info/PKG-INFO
deleted file mode 100644
index 6eedf425953..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply.egg-info/PKG-INFO
+++ /dev/null
@@ -1,22 +0,0 @@
-Metadata-Version: 1.1
-Name: ply
-Version: 3.10
-Summary: Python Lex & Yacc
-Home-page: http://www.dabeaz.com/ply/
-Author: David Beazley
-Author-email: dave@dabeaz.com
-License: BSD
-Description:
- PLY is yet another implementation of lex and yacc for Python. Some notable
- features include the fact that it's implemented entirely in Python and it
- uses LALR(1) parsing which is efficient and well suited for larger grammars.
-
- PLY provides most of the standard lex/yacc features including support for empty
- productions, precedence rules, error recovery, and support for ambiguous grammars.
-
- PLY is extremely easy to use and provides very extensive error checking.
- It is compatible with both Python 2 and Python 3.
-
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 2
diff --git a/components/script/dom/bindings/codegen/ply/ply.egg-info/SOURCES.txt b/components/script/dom/bindings/codegen/ply/ply.egg-info/SOURCES.txt
deleted file mode 100644
index 2dff7dd29b8..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-ANNOUNCE
-CHANGES
-MANIFEST.in
-README.md
-TODO
-setup.cfg
-setup.py
-doc/internal.html
-doc/makedoc.py
-doc/ply.html
-example/README
-example/cleanup.sh
-example/BASIC/README
-example/BASIC/basic.py
-example/BASIC/basiclex.py
-example/BASIC/basiclog.py
-example/BASIC/basinterp.py
-example/BASIC/basparse.py
-example/BASIC/dim.bas
-example/BASIC/func.bas
-example/BASIC/gcd.bas
-example/BASIC/gosub.bas
-example/BASIC/hello.bas
-example/BASIC/linear.bas
-example/BASIC/maxsin.bas
-example/BASIC/powers.bas
-example/BASIC/rand.bas
-example/BASIC/sales.bas
-example/BASIC/sears.bas
-example/BASIC/sqrt1.bas
-example/BASIC/sqrt2.bas
-example/GardenSnake/GardenSnake.py
-example/GardenSnake/README
-example/ansic/README
-example/ansic/clex.py
-example/ansic/cparse.py
-example/calc/calc.py
-example/calcdebug/calc.py
-example/calceof/calc.py
-example/classcalc/calc.py
-example/closurecalc/calc.py
-example/hedit/hedit.py
-example/newclasscalc/calc.py
-example/optcalc/README
-example/optcalc/calc.py
-example/unicalc/calc.py
-example/yply/README
-example/yply/ylex.py
-example/yply/yparse.py
-example/yply/yply.py
-ply/__init__.py
-ply/cpp.py
-ply/ctokens.py
-ply/lex.py
-ply/yacc.py
-ply/ygen.py
-ply.egg-info/PKG-INFO
-ply.egg-info/SOURCES.txt
-ply.egg-info/dependency_links.txt
-ply.egg-info/top_level.txt
-test/README
-test/calclex.py
-test/cleanup.sh
-test/lex_closure.py
-test/lex_doc1.py
-test/lex_dup1.py
-test/lex_dup2.py
-test/lex_dup3.py
-test/lex_empty.py
-test/lex_error1.py
-test/lex_error2.py
-test/lex_error3.py
-test/lex_error4.py
-test/lex_hedit.py
-test/lex_ignore.py
-test/lex_ignore2.py
-test/lex_literal1.py
-test/lex_literal2.py
-test/lex_literal3.py
-test/lex_many_tokens.py
-test/lex_module.py
-test/lex_module_import.py
-test/lex_object.py
-test/lex_opt_alias.py
-test/lex_optimize.py
-test/lex_optimize2.py
-test/lex_optimize3.py
-test/lex_re1.py
-test/lex_re2.py
-test/lex_re3.py
-test/lex_rule1.py
-test/lex_rule2.py
-test/lex_rule3.py
-test/lex_state1.py
-test/lex_state2.py
-test/lex_state3.py
-test/lex_state4.py
-test/lex_state5.py
-test/lex_state_noerror.py
-test/lex_state_norule.py
-test/lex_state_try.py
-test/lex_token1.py
-test/lex_token2.py
-test/lex_token3.py
-test/lex_token4.py
-test/lex_token5.py
-test/lex_token_dup.py
-test/testlex.py
-test/testyacc.py
-test/yacc_badargs.py
-test/yacc_badid.py
-test/yacc_badprec.py
-test/yacc_badprec2.py
-test/yacc_badprec3.py
-test/yacc_badrule.py
-test/yacc_badtok.py
-test/yacc_dup.py
-test/yacc_error1.py
-test/yacc_error2.py
-test/yacc_error3.py
-test/yacc_error4.py
-test/yacc_error5.py
-test/yacc_error6.py
-test/yacc_error7.py
-test/yacc_inf.py
-test/yacc_literal.py
-test/yacc_misplaced.py
-test/yacc_missing1.py
-test/yacc_nested.py
-test/yacc_nodoc.py
-test/yacc_noerror.py
-test/yacc_nop.py
-test/yacc_notfunc.py
-test/yacc_notok.py
-test/yacc_prec1.py
-test/yacc_rr.py
-test/yacc_rr_unused.py
-test/yacc_simple.py
-test/yacc_sr.py
-test/yacc_term1.py
-test/yacc_unicode_literals.py
-test/yacc_unused.py
-test/yacc_unused_rule.py
-test/yacc_uprec.py
-test/yacc_uprec2.py
-test/pkg_test1/__init__.py
-test/pkg_test1/parsing/__init__.py
-test/pkg_test1/parsing/calclex.py
-test/pkg_test1/parsing/calcparse.py
-test/pkg_test2/__init__.py
-test/pkg_test2/parsing/__init__.py
-test/pkg_test2/parsing/calclex.py
-test/pkg_test2/parsing/calcparse.py
-test/pkg_test3/__init__.py
-test/pkg_test3/generated/__init__.py
-test/pkg_test3/parsing/__init__.py
-test/pkg_test3/parsing/calclex.py
-test/pkg_test3/parsing/calcparse.py
-test/pkg_test4/__init__.py
-test/pkg_test4/parsing/__init__.py
-test/pkg_test4/parsing/calclex.py
-test/pkg_test4/parsing/calcparse.py
-test/pkg_test5/__init__.py
-test/pkg_test5/parsing/__init__.py
-test/pkg_test5/parsing/calclex.py
-test/pkg_test5/parsing/calcparse.py
-test/pkg_test6/__init__.py
-test/pkg_test6/parsing/__init__.py
-test/pkg_test6/parsing/calclex.py
-test/pkg_test6/parsing/calcparse.py
-test/pkg_test6/parsing/expression.py
-test/pkg_test6/parsing/statement.py \ No newline at end of file
diff --git a/components/script/dom/bindings/codegen/ply/ply.egg-info/dependency_links.txt b/components/script/dom/bindings/codegen/ply/ply.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891791..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/components/script/dom/bindings/codegen/ply/ply.egg-info/top_level.txt b/components/script/dom/bindings/codegen/ply/ply.egg-info/top_level.txt
deleted file mode 100644
index 90412f06833..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-ply
diff --git a/components/script/dom/bindings/codegen/ply/ply/__init__.py b/components/script/dom/bindings/codegen/ply/ply/__init__.py
deleted file mode 100644
index 6e53cddcf67..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# PLY package
-# Author: David Beazley (dave@dabeaz.com)
-
-__version__ = '3.9'
-__all__ = ['lex','yacc']
diff --git a/components/script/dom/bindings/codegen/ply/ply/cpp.py b/components/script/dom/bindings/codegen/ply/ply/cpp.py
deleted file mode 100644
index b6bfc69614b..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/cpp.py
+++ /dev/null
@@ -1,918 +0,0 @@
-# -----------------------------------------------------------------------------
-# cpp.py
-#
-# Author: David Beazley (http://www.dabeaz.com)
-# Copyright (C) 2007
-# All rights reserved
-#
-# This module implements an ANSI-C style lexical preprocessor for PLY.
-# -----------------------------------------------------------------------------
-from __future__ import generators
-
-import sys
-
-# Some Python 3 compatibility shims
-if sys.version_info.major < 3:
- STRING_TYPES = (str, unicode)
-else:
- STRING_TYPES = str
- xrange = range
-
-# -----------------------------------------------------------------------------
-# Default preprocessor lexer definitions. These tokens are enough to get
-# a basic preprocessor working. Other modules may import these if they want
-# -----------------------------------------------------------------------------
-
-tokens = (
- 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
-)
-
-literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
-
-# Whitespace
-def t_CPP_WS(t):
- r'\s+'
- t.lexer.lineno += t.value.count("\n")
- return t
-
-t_CPP_POUND = r'\#'
-t_CPP_DPOUND = r'\#\#'
-
-# Identifier
-t_CPP_ID = r'[A-Za-z_][\w_]*'
-
-# Integer literal
-def CPP_INTEGER(t):
- r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
- return t
-
-t_CPP_INTEGER = CPP_INTEGER
-
-# Floating literal
-t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
-
-# String literal
-def t_CPP_STRING(t):
- r'\"([^\\\n]|(\\(.|\n)))*?\"'
- t.lexer.lineno += t.value.count("\n")
- return t
-
-# Character constant 'c' or L'c'
-def t_CPP_CHAR(t):
- r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
- t.lexer.lineno += t.value.count("\n")
- return t
-
-# Comment
-def t_CPP_COMMENT1(t):
- r'(/\*(.|\n)*?\*/)'
- ncr = t.value.count("\n")
- t.lexer.lineno += ncr
- # replace with one space or a number of '\n'
- t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
- return t
-
-# Line comment
-def t_CPP_COMMENT2(t):
- r'(//.*?(\n|$))'
- # replace with '\n'
- t.type = 'CPP_WS'; t.value = '\n'
- return t
-
-def t_error(t):
- t.type = t.value[0]
- t.value = t.value[0]
- t.lexer.skip(1)
- return t
-
-import re
-import copy
-import time
-import os.path
-
-# -----------------------------------------------------------------------------
-# trigraph()
-#
-# Given an input string, this function replaces all trigraph sequences.
-# The following mapping is used:
-#
-# ??= #
-# ??/ \
-# ??' ^
-# ??( [
-# ??) ]
-# ??! |
-# ??< {
-# ??> }
-# ??- ~
-# -----------------------------------------------------------------------------
-
-_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
-_trigraph_rep = {
- '=':'#',
- '/':'\\',
- "'":'^',
- '(':'[',
- ')':']',
- '!':'|',
- '<':'{',
- '>':'}',
- '-':'~'
-}
-
-def trigraph(input):
- return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
-
-# ------------------------------------------------------------------
-# Macro object
-#
-# This object holds information about preprocessor macros
-#
-# .name - Macro name (string)
-# .value - Macro value (a list of tokens)
-# .arglist - List of argument names
-# .variadic - Boolean indicating whether or not variadic macro
-# .vararg - Name of the variadic parameter
-#
-# When a macro is created, the macro replacement token sequence is
-# pre-scanned and used to create patch lists that are later used
-# during macro expansion
-# ------------------------------------------------------------------
-
-class Macro(object):
- def __init__(self,name,value,arglist=None,variadic=False):
- self.name = name
- self.value = value
- self.arglist = arglist
- self.variadic = variadic
- if variadic:
- self.vararg = arglist[-1]
- self.source = None
-
-# ------------------------------------------------------------------
-# Preprocessor object
-#
-# Object representing a preprocessor. Contains macro definitions,
-# include directories, and other information
-# ------------------------------------------------------------------
-
-class Preprocessor(object):
- def __init__(self,lexer=None):
- if lexer is None:
- lexer = lex.lexer
- self.lexer = lexer
- self.macros = { }
- self.path = []
- self.temp_path = []
-
- # Probe the lexer for selected tokens
- self.lexprobe()
-
- tm = time.localtime()
- self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
- self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
- self.parser = None
-
- # -----------------------------------------------------------------------------
- # tokenize()
- #
- # Utility function. Given a string of text, tokenize into a list of tokens
- # -----------------------------------------------------------------------------
-
- def tokenize(self,text):
- tokens = []
- self.lexer.input(text)
- while True:
- tok = self.lexer.token()
- if not tok: break
- tokens.append(tok)
- return tokens
-
- # ---------------------------------------------------------------------
- # error()
- #
- # Report a preprocessor error/warning of some kind
- # ----------------------------------------------------------------------
-
- def error(self,file,line,msg):
- print("%s:%d %s" % (file,line,msg))
-
- # ----------------------------------------------------------------------
- # lexprobe()
- #
- # This method probes the preprocessor lexer object to discover
- # the token types of symbols that are important to the preprocessor.
- # If this works right, the preprocessor will simply "work"
- # with any suitable lexer regardless of how tokens have been named.
- # ----------------------------------------------------------------------
-
- def lexprobe(self):
-
- # Determine the token type for identifiers
- self.lexer.input("identifier")
- tok = self.lexer.token()
- if not tok or tok.value != "identifier":
- print("Couldn't determine identifier type")
- else:
- self.t_ID = tok.type
-
- # Determine the token type for integers
- self.lexer.input("12345")
- tok = self.lexer.token()
- if not tok or int(tok.value) != 12345:
- print("Couldn't determine integer type")
- else:
- self.t_INTEGER = tok.type
- self.t_INTEGER_TYPE = type(tok.value)
-
- # Determine the token type for strings enclosed in double quotes
- self.lexer.input("\"filename\"")
- tok = self.lexer.token()
- if not tok or tok.value != "\"filename\"":
- print("Couldn't determine string type")
- else:
- self.t_STRING = tok.type
-
- # Determine the token type for whitespace--if any
- self.lexer.input(" ")
- tok = self.lexer.token()
- if not tok or tok.value != " ":
- self.t_SPACE = None
- else:
- self.t_SPACE = tok.type
-
- # Determine the token type for newlines
- self.lexer.input("\n")
- tok = self.lexer.token()
- if not tok or tok.value != "\n":
- self.t_NEWLINE = None
- print("Couldn't determine token for newlines")
- else:
- self.t_NEWLINE = tok.type
-
- self.t_WS = (self.t_SPACE, self.t_NEWLINE)
-
- # Check for other characters used by the preprocessor
- chars = [ '<','>','#','##','\\','(',')',',','.']
- for c in chars:
- self.lexer.input(c)
- tok = self.lexer.token()
- if not tok or tok.value != c:
- print("Unable to lex '%s' required for preprocessor" % c)
-
- # ----------------------------------------------------------------------
- # add_path()
- #
- # Adds a search path to the preprocessor.
- # ----------------------------------------------------------------------
-
- def add_path(self,path):
- self.path.append(path)
-
- # ----------------------------------------------------------------------
- # group_lines()
- #
- # Given an input string, this function splits it into lines. Trailing whitespace
- # is removed. Any line ending with \ is grouped with the next line. This
- # function forms the lowest level of the preprocessor---grouping the text into
- # a line-by-line format.
- # ----------------------------------------------------------------------
-
- def group_lines(self,input):
- lex = self.lexer.clone()
- lines = [x.rstrip() for x in input.splitlines()]
- for i in xrange(len(lines)):
- j = i+1
- while lines[i].endswith('\\') and (j < len(lines)):
- lines[i] = lines[i][:-1]+lines[j]
- lines[j] = ""
- j += 1
-
- input = "\n".join(lines)
- lex.input(input)
- lex.lineno = 1
-
- current_line = []
- while True:
- tok = lex.token()
- if not tok:
- break
- current_line.append(tok)
- if tok.type in self.t_WS and '\n' in tok.value:
- yield current_line
- current_line = []
-
- if current_line:
- yield current_line
-
- # ----------------------------------------------------------------------
- # tokenstrip()
- #
- # Remove leading/trailing whitespace tokens from a token list
- # ----------------------------------------------------------------------
-
- def tokenstrip(self,tokens):
- i = 0
- while i < len(tokens) and tokens[i].type in self.t_WS:
- i += 1
- del tokens[:i]
- i = len(tokens)-1
- while i >= 0 and tokens[i].type in self.t_WS:
- i -= 1
- del tokens[i+1:]
- return tokens
-
-
- # ----------------------------------------------------------------------
- # collect_args()
- #
- # Collects comma separated arguments from a list of tokens. The arguments
- # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
- # where tokencount is the number of tokens consumed, args is a list of arguments,
- # and positions is a list of integers containing the starting index of each
- # argument. Each argument is represented by a list of tokens.
- #
- # When collecting arguments, leading and trailing whitespace is removed
- # from each argument.
- #
- # This function properly handles nested parentheses and commas---these do not
- # define new arguments.
- # ----------------------------------------------------------------------
-
- def collect_args(self,tokenlist):
- args = []
- positions = []
- current_arg = []
- nesting = 1
- tokenlen = len(tokenlist)
-
- # Search for the opening '('.
- i = 0
- while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
- i += 1
-
- if (i < tokenlen) and (tokenlist[i].value == '('):
- positions.append(i+1)
- else:
- self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
- return 0, [], []
-
- i += 1
-
- while i < tokenlen:
- t = tokenlist[i]
- if t.value == '(':
- current_arg.append(t)
- nesting += 1
- elif t.value == ')':
- nesting -= 1
- if nesting == 0:
- if current_arg:
- args.append(self.tokenstrip(current_arg))
- positions.append(i)
- return i+1,args,positions
- current_arg.append(t)
- elif t.value == ',' and nesting == 1:
- args.append(self.tokenstrip(current_arg))
- positions.append(i+1)
- current_arg = []
- else:
- current_arg.append(t)
- i += 1
-
- # Missing end argument
- self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
- return 0, [],[]
-
- # ----------------------------------------------------------------------
- # macro_prescan()
- #
- # Examine the macro value (token sequence) and identify patch points
- # This is used to speed up macro expansion later on---we'll know
- # right away where to apply patches to the value to form the expansion
- # ----------------------------------------------------------------------
-
- def macro_prescan(self,macro):
- macro.patch = [] # Standard macro arguments
- macro.str_patch = [] # String conversion expansion
- macro.var_comma_patch = [] # Variadic macro comma patch
- i = 0
- while i < len(macro.value):
- if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
- argnum = macro.arglist.index(macro.value[i].value)
- # Conversion of argument to a string
- if i > 0 and macro.value[i-1].value == '#':
- macro.value[i] = copy.copy(macro.value[i])
- macro.value[i].type = self.t_STRING
- del macro.value[i-1]
- macro.str_patch.append((argnum,i-1))
- continue
- # Concatenation
- elif (i > 0 and macro.value[i-1].value == '##'):
- macro.patch.append(('c',argnum,i-1))
- del macro.value[i-1]
- continue
- elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
- macro.patch.append(('c',argnum,i))
- i += 1
- continue
- # Standard expansion
- else:
- macro.patch.append(('e',argnum,i))
- elif macro.value[i].value == '##':
- if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
- ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
- (macro.value[i+1].value == macro.vararg):
- macro.var_comma_patch.append(i-1)
- i += 1
- macro.patch.sort(key=lambda x: x[2],reverse=True)
-
- # ----------------------------------------------------------------------
- # macro_expand_args()
- #
- # Given a Macro and list of arguments (each a token list), this method
- # returns an expanded version of a macro. The return value is a token sequence
- # representing the replacement macro tokens
- # ----------------------------------------------------------------------
-
- def macro_expand_args(self,macro,args):
- # Make a copy of the macro token sequence
- rep = [copy.copy(_x) for _x in macro.value]
-
- # Make string expansion patches. These do not alter the length of the replacement sequence
-
- str_expansion = {}
- for argnum, i in macro.str_patch:
- if argnum not in str_expansion:
- str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
- rep[i] = copy.copy(rep[i])
- rep[i].value = str_expansion[argnum]
-
- # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma before it
- comma_patch = False
- if macro.variadic and not args[-1]:
- for i in macro.var_comma_patch:
- rep[i] = None
- comma_patch = True
-
- # Make all other patches. The order of these matters. It is assumed that the patch list
- # has been sorted in reverse order of patch location since replacements will cause the
- # size of the replacement sequence to expand from the patch point.
-
- expanded = { }
- for ptype, argnum, i in macro.patch:
- # Concatenation. Argument is left unexpanded
- if ptype == 'c':
- rep[i:i+1] = args[argnum]
- # Normal expansion. Argument is macro expanded first
- elif ptype == 'e':
- if argnum not in expanded:
- expanded[argnum] = self.expand_macros(args[argnum])
- rep[i:i+1] = expanded[argnum]
-
- # Get rid of removed comma if necessary
- if comma_patch:
- rep = [_i for _i in rep if _i]
-
- return rep
-
-
- # ----------------------------------------------------------------------
- # expand_macros()
- #
- # Given a list of tokens, this function performs macro expansion.
- # The expanded argument is a dictionary that contains macros already
- # expanded. This is used to prevent infinite recursion.
- # ----------------------------------------------------------------------
-
- def expand_macros(self,tokens,expanded=None):
- if expanded is None:
- expanded = {}
- i = 0
- while i < len(tokens):
- t = tokens[i]
- if t.type == self.t_ID:
- if t.value in self.macros and t.value not in expanded:
- # Yes, we found a macro match
- expanded[t.value] = True
-
- m = self.macros[t.value]
- if not m.arglist:
- # A simple macro
- ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
- for e in ex:
- e.lineno = t.lineno
- tokens[i:i+1] = ex
- i += len(ex)
- else:
- # A macro with arguments
- j = i + 1
- while j < len(tokens) and tokens[j].type in self.t_WS:
- j += 1
- if tokens[j].value == '(':
- tokcount,args,positions = self.collect_args(tokens[j:])
- if not m.variadic and len(args) != len(m.arglist):
- self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
- i = j + tokcount
- elif m.variadic and len(args) < len(m.arglist)-1:
- if len(m.arglist) > 2:
- self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
- else:
- self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
- i = j + tokcount
- else:
- if m.variadic:
- if len(args) == len(m.arglist)-1:
- args.append([])
- else:
- args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
- del args[len(m.arglist):]
-
- # Get macro replacement text
- rep = self.macro_expand_args(m,args)
- rep = self.expand_macros(rep,expanded)
- for r in rep:
- r.lineno = t.lineno
- tokens[i:j+tokcount] = rep
- i += len(rep)
- del expanded[t.value]
- continue
- elif t.value == '__LINE__':
- t.type = self.t_INTEGER
- t.value = self.t_INTEGER_TYPE(t.lineno)
-
- i += 1
- return tokens
-
- # ----------------------------------------------------------------------
- # evalexpr()
- #
- # Evaluate an expression token sequence for the purposes of evaluating
- # integral expressions.
- # ----------------------------------------------------------------------
-
- def evalexpr(self,tokens):
- # tokens = tokenize(line)
- # Search for defined macros
- i = 0
- while i < len(tokens):
- if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
- j = i + 1
- needparen = False
- result = "0L"
- while j < len(tokens):
- if tokens[j].type in self.t_WS:
- j += 1
- continue
- elif tokens[j].type == self.t_ID:
- if tokens[j].value in self.macros:
- result = "1L"
- else:
- result = "0L"
- if not needparen: break
- elif tokens[j].value == '(':
- needparen = True
- elif tokens[j].value == ')':
- break
- else:
- self.error(self.source,tokens[i].lineno,"Malformed defined()")
- j += 1
- tokens[i].type = self.t_INTEGER
- tokens[i].value = self.t_INTEGER_TYPE(result)
- del tokens[i+1:j+1]
- i += 1
- tokens = self.expand_macros(tokens)
- for i,t in enumerate(tokens):
- if t.type == self.t_ID:
- tokens[i] = copy.copy(t)
- tokens[i].type = self.t_INTEGER
- tokens[i].value = self.t_INTEGER_TYPE("0L")
- elif t.type == self.t_INTEGER:
- tokens[i] = copy.copy(t)
- # Strip off any trailing suffixes
- tokens[i].value = str(tokens[i].value)
- while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
- tokens[i].value = tokens[i].value[:-1]
-
- expr = "".join([str(x.value) for x in tokens])
- expr = expr.replace("&&"," and ")
- expr = expr.replace("||"," or ")
- expr = expr.replace("!"," not ")
- try:
- result = eval(expr)
- except Exception:
- self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
- result = 0
- return result
-
- # ----------------------------------------------------------------------
- # parsegen()
- #
- # Parse an input string.
- # ----------------------------------------------------------------------
- def parsegen(self,input,source=None):
-
- # Replace trigraph sequences
- t = trigraph(input)
- lines = self.group_lines(t)
-
- if not source:
- source = ""
-
- self.define("__FILE__ \"%s\"" % source)
-
- self.source = source
- chunk = []
- enable = True
- iftrigger = False
- ifstack = []
-
- for x in lines:
- for i,tok in enumerate(x):
- if tok.type not in self.t_WS: break
- if tok.value == '#':
- # Preprocessor directive
-
- # insert necessary whitespace instead of eaten tokens
- for tok in x:
- if tok.type in self.t_WS and '\n' in tok.value:
- chunk.append(tok)
-
- dirtokens = self.tokenstrip(x[i+1:])
- if dirtokens:
- name = dirtokens[0].value
- args = self.tokenstrip(dirtokens[1:])
- else:
- name = ""
- args = []
-
- if name == 'define':
- if enable:
- for tok in self.expand_macros(chunk):
- yield tok
- chunk = []
- self.define(args)
- elif name == 'include':
- if enable:
- for tok in self.expand_macros(chunk):
- yield tok
- chunk = []
- oldfile = self.macros['__FILE__']
- for tok in self.include(args):
- yield tok
- self.macros['__FILE__'] = oldfile
- self.source = source
- elif name == 'undef':
- if enable:
- for tok in self.expand_macros(chunk):
- yield tok
- chunk = []
- self.undef(args)
- elif name == 'ifdef':
- ifstack.append((enable,iftrigger))
- if enable:
- if not args[0].value in self.macros:
- enable = False
- iftrigger = False
- else:
- iftrigger = True
- elif name == 'ifndef':
- ifstack.append((enable,iftrigger))
- if enable:
- if args[0].value in self.macros:
- enable = False
- iftrigger = False
- else:
- iftrigger = True
- elif name == 'if':
- ifstack.append((enable,iftrigger))
- if enable:
- result = self.evalexpr(args)
- if not result:
- enable = False
- iftrigger = False
- else:
- iftrigger = True
- elif name == 'elif':
- if ifstack:
- if ifstack[-1][0]: # We only pay attention if outer "if" allows this
- if enable: # If already true, we flip enable False
- enable = False
- elif not iftrigger: # If False, but not triggered yet, we'll check expression
- result = self.evalexpr(args)
- if result:
- enable = True
- iftrigger = True
- else:
- self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
-
- elif name == 'else':
- if ifstack:
- if ifstack[-1][0]:
- if enable:
- enable = False
- elif not iftrigger:
- enable = True
- iftrigger = True
- else:
- self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
-
- elif name == 'endif':
- if ifstack:
- enable,iftrigger = ifstack.pop()
- else:
- self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
- else:
- # Unknown preprocessor directive
- pass
-
- else:
- # Normal text
- if enable:
- chunk.extend(x)
-
- for tok in self.expand_macros(chunk):
- yield tok
- chunk = []
-
- # ----------------------------------------------------------------------
- # include()
- #
- # Implementation of file-inclusion
- # ----------------------------------------------------------------------
-
- def include(self,tokens):
- # Try to extract the filename and then process an include file
- if not tokens:
- return
- if tokens:
- if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
- tokens = self.expand_macros(tokens)
-
- if tokens[0].value == '<':
- # Include <...>
- i = 1
- while i < len(tokens):
- if tokens[i].value == '>':
- break
- i += 1
- else:
- print("Malformed #include <...>")
- return
- filename = "".join([x.value for x in tokens[1:i]])
- path = self.path + [""] + self.temp_path
- elif tokens[0].type == self.t_STRING:
- filename = tokens[0].value[1:-1]
- path = self.temp_path + [""] + self.path
- else:
- print("Malformed #include statement")
- return
- for p in path:
- iname = os.path.join(p,filename)
- try:
- data = open(iname,"r").read()
- dname = os.path.dirname(iname)
- if dname:
- self.temp_path.insert(0,dname)
- for tok in self.parsegen(data,filename):
- yield tok
- if dname:
- del self.temp_path[0]
- break
- except IOError:
- pass
- else:
- print("Couldn't find '%s'" % filename)
-
- # ----------------------------------------------------------------------
- # define()
- #
- # Define a new macro
- # ----------------------------------------------------------------------
-
- def define(self,tokens):
- if isinstance(tokens,STRING_TYPES):
- tokens = self.tokenize(tokens)
-
- linetok = tokens
- try:
- name = linetok[0]
- if len(linetok) > 1:
- mtype = linetok[1]
- else:
- mtype = None
- if not mtype:
- m = Macro(name.value,[])
- self.macros[name.value] = m
- elif mtype.type in self.t_WS:
- # A normal macro
- m = Macro(name.value,self.tokenstrip(linetok[2:]))
- self.macros[name.value] = m
- elif mtype.value == '(':
- # A macro with arguments
- tokcount, args, positions = self.collect_args(linetok[1:])
- variadic = False
- for a in args:
- if variadic:
- print("No more arguments may follow a variadic argument")
- break
- astr = "".join([str(_i.value) for _i in a])
- if astr == "...":
- variadic = True
- a[0].type = self.t_ID
- a[0].value = '__VA_ARGS__'
- variadic = True
- del a[1:]
- continue
- elif astr[-3:] == "..." and a[0].type == self.t_ID:
- variadic = True
- del a[1:]
- # If, for some reason, "." is part of the identifier, strip off the name for the purposes
- # of macro expansion
- if a[0].value[-3:] == '...':
- a[0].value = a[0].value[:-3]
- continue
- if len(a) > 1 or a[0].type != self.t_ID:
- print("Invalid macro argument")
- break
- else:
- mvalue = self.tokenstrip(linetok[1+tokcount:])
- i = 0
- while i < len(mvalue):
- if i+1 < len(mvalue):
- if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
- del mvalue[i]
- continue
- elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
- del mvalue[i+1]
- i += 1
- m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
- self.macro_prescan(m)
- self.macros[name.value] = m
- else:
- print("Bad macro definition")
- except LookupError:
- print("Bad macro definition")
-
- # ----------------------------------------------------------------------
- # undef()
- #
- # Undefine a macro
- # ----------------------------------------------------------------------
-
- def undef(self,tokens):
- id = tokens[0].value
- try:
- del self.macros[id]
- except LookupError:
- pass
-
- # ----------------------------------------------------------------------
- # parse()
- #
- # Parse input text.
- # ----------------------------------------------------------------------
- def parse(self,input,source=None,ignore={}):
- self.ignore = ignore
- self.parser = self.parsegen(input,source)
-
- # ----------------------------------------------------------------------
- # token()
- #
- # Method to return individual tokens
- # ----------------------------------------------------------------------
- def token(self):
- try:
- while True:
- tok = next(self.parser)
- if tok.type not in self.ignore: return tok
- except StopIteration:
- self.parser = None
- return None
-
-if __name__ == '__main__':
- import ply.lex as lex
- lexer = lex.lex()
-
- # Run a preprocessor
- import sys
- f = open(sys.argv[1])
- input = f.read()
-
- p = Preprocessor(lexer)
- p.parse(input,sys.argv[1])
- while True:
- tok = p.token()
- if not tok: break
- print(p.source, tok)
-
-
-
-
-
-
-
-
-
-
-
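The __main__ block above shows the intended way to drive the preprocessor. A minimal programmatic sketch along the same lines (it assumes the package is importable as ply.cpp, as in the SOURCES.txt layout, and Python 3 for the print call):

    import ply.lex as lex
    from ply import cpp

    lexer = lex.lex(module=cpp)          # build a lexer from cpp.py's token rules
    p = cpp.Preprocessor(lexer)
    p.define('VERSION 42')               # same effect as: #define VERSION 42
    p.parse('int v = VERSION;\n', 'example.c')

    while True:
        tok = p.token()
        if not tok:
            break
        print(tok.value, end='')         # emits the macro-expanded text

Note that parse() only sets up the token generator; macro expansion happens lazily as token() is called.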
diff --git a/components/script/dom/bindings/codegen/ply/ply/ctokens.py b/components/script/dom/bindings/codegen/ply/ply/ctokens.py
deleted file mode 100644
index f6f6952d605..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/ctokens.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# ----------------------------------------------------------------------
-# ctokens.py
-#
-# Token specifications for symbols in ANSI C and C++. This file is
-# meant to be used as a library in other tokenizers.
-# ----------------------------------------------------------------------
-
-# Reserved words
-
-tokens = [
- # Literals (identifier, integer constant, float constant, string constant, char const)
- 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
-
- # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
- 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
- 'LOR', 'LAND', 'LNOT',
- 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
-
- # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
- 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
- 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
-
- # Increment/decrement (++,--)
- 'INCREMENT', 'DECREMENT',
-
- # Structure dereference (->)
- 'ARROW',
-
- # Ternary operator (?)
- 'TERNARY',
-
- # Delimiters ( ) [ ] { } , . ; :
- 'LPAREN', 'RPAREN',
- 'LBRACKET', 'RBRACKET',
- 'LBRACE', 'RBRACE',
- 'COMMA', 'PERIOD', 'SEMI', 'COLON',
-
- # Ellipsis (...)
- 'ELLIPSIS',
-]
-
-# Operators
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_DIVIDE = r'/'
-t_MODULO = r'%'
-t_OR = r'\|'
-t_AND = r'&'
-t_NOT = r'~'
-t_XOR = r'\^'
-t_LSHIFT = r'<<'
-t_RSHIFT = r'>>'
-t_LOR = r'\|\|'
-t_LAND = r'&&'
-t_LNOT = r'!'
-t_LT = r'<'
-t_GT = r'>'
-t_LE = r'<='
-t_GE = r'>='
-t_EQ = r'=='
-t_NE = r'!='
-
-# Assignment operators
-
-t_EQUALS = r'='
-t_TIMESEQUAL = r'\*='
-t_DIVEQUAL = r'/='
-t_MODEQUAL = r'%='
-t_PLUSEQUAL = r'\+='
-t_MINUSEQUAL = r'-='
-t_LSHIFTEQUAL = r'<<='
-t_RSHIFTEQUAL = r'>>='
-t_ANDEQUAL = r'&='
-t_OREQUAL = r'\|='
-t_XOREQUAL = r'\^='
-
-# Increment/decrement
-t_INCREMENT = r'\+\+'
-t_DECREMENT = r'--'
-
-# ->
-t_ARROW = r'->'
-
-# ?
-t_TERNARY = r'\?'
-
-# Delimiters
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_LBRACKET = r'\['
-t_RBRACKET = r'\]'
-t_LBRACE = r'\{'
-t_RBRACE = r'\}'
-t_COMMA = r','
-t_PERIOD = r'\.'
-t_SEMI = r';'
-t_COLON = r':'
-t_ELLIPSIS = r'\.\.\.'
-
-# Identifiers
-t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
-
-# Integer literal
-t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
-
-# Floating literal
-t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
-
-# String literal
-t_STRING = r'\"([^\\\n]|(\\.))*?\"'
-
-# Character constant 'c' or L'c'
-t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
-
-# Comment (C-Style)
-def t_COMMENT(t):
- r'/\*(.|\n)*?\*/'
- t.lexer.lineno += t.value.count('\n')
- return t
-
-# Comment (C++-Style)
-def t_CPPCOMMENT(t):
- r'//.*\n'
- t.lexer.lineno += 1
- return t
-
-
-
-
-
-
diff --git a/components/script/dom/bindings/codegen/ply/ply/lex.py b/components/script/dom/bindings/codegen/ply/ply/lex.py
deleted file mode 100644
index 3e240d1aa20..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/lex.py
+++ /dev/null
@@ -1,1100 +0,0 @@
-# -----------------------------------------------------------------------------
-# ply: lex.py
-#
-# Copyright (C) 2001-2017
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-# endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-
-__version__ = '3.10'
-__tabversion__ = '3.10'
-
-import re
-import sys
-import types
-import copy
-import os
-import inspect
-
-# This tuple contains known string types
-try:
- # Python 2.6
- StringTypes = (types.StringType, types.UnicodeType)
-except AttributeError:
- # Python 3.0
- StringTypes = (str, bytes)
-
-# This regular expression is used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
-
-# Exception thrown when invalid token encountered and no default error
-# handler is defined.
-class LexError(Exception):
- def __init__(self, message, s):
- self.args = (message,)
- self.text = s
-
-
-# Token class. This class is used to represent the tokens produced.
-class LexToken(object):
- def __str__(self):
- return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
-
- def __repr__(self):
- return str(self)
-
-
-# This object is a stand-in for a logging object created by the
-# logging module.
-
-class PlyLogger(object):
- def __init__(self, f):
- self.f = f
-
- def critical(self, msg, *args, **kwargs):
- self.f.write((msg % args) + '\n')
-
- def warning(self, msg, *args, **kwargs):
- self.f.write('WARNING: ' + (msg % args) + '\n')
-
- def error(self, msg, *args, **kwargs):
- self.f.write('ERROR: ' + (msg % args) + '\n')
-
- info = critical
- debug = critical
-
-
-# Null logger is used when no output is generated. Does nothing.
-class NullLogger(object):
- def __getattribute__(self, name):
- return self
-
- def __call__(self, *args, **kwargs):
- return self
-
-
-# -----------------------------------------------------------------------------
-# === Lexing Engine ===
-#
-# The following Lexer class implements the lexer runtime. There are only
-# a few public methods and attributes:
-#
-# input() - Store a new string in the lexer
-# token() - Get the next token
-# clone() - Clone the lexer
-#
-# lineno - Current line number
-# lexpos - Current position in the input string
-# -----------------------------------------------------------------------------
-
-class Lexer:
- def __init__(self):
- self.lexre = None # Master regular expression. This is a list of
- # tuples (re, findex) where re is a compiled
- # regular expression and findex is a list
- # mapping regex group numbers to rules
- self.lexretext = None # Current regular expression strings
- self.lexstatere = {} # Dictionary mapping lexer states to master regexs
- self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
- self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
- self.lexstate = 'INITIAL' # Current lexer state
- self.lexstatestack = [] # Stack of lexer states
- self.lexstateinfo = None # State information
- self.lexstateignore = {} # Dictionary of ignored characters for each state
- self.lexstateerrorf = {} # Dictionary of error functions for each state
- self.lexstateeoff = {} # Dictionary of eof functions for each state
- self.lexreflags = 0 # Optional re compile flags
- self.lexdata = None # Actual input data (as a string)
- self.lexpos = 0 # Current position in input text
- self.lexlen = 0 # Length of the input text
- self.lexerrorf = None # Error rule (if any)
- self.lexeoff = None # EOF rule (if any)
- self.lextokens = None # List of valid tokens
- self.lexignore = '' # Ignored characters
- self.lexliterals = '' # Literal characters that can be passed through
- self.lexmodule = None # Module
- self.lineno = 1 # Current line number
- self.lexoptimize = False # Optimized mode
-
- def clone(self, object=None):
- c = copy.copy(self)
-
- # If the object parameter has been supplied, it means we are attaching the
- # lexer to a new object. In this case, we have to rebind all methods in
- # the lexstatere and lexstateerrorf tables.
-
- if object:
- newtab = {}
- for key, ritem in self.lexstatere.items():
- newre = []
- for cre, findex in ritem:
- newfindex = []
- for f in findex:
- if not f or not f[0]:
- newfindex.append(f)
- continue
- newfindex.append((getattr(object, f[0].__name__), f[1]))
- newre.append((cre, newfindex))
- newtab[key] = newre
- c.lexstatere = newtab
- c.lexstateerrorf = {}
- for key, ef in self.lexstateerrorf.items():
- c.lexstateerrorf[key] = getattr(object, ef.__name__)
- c.lexmodule = object
- return c
-
- # ------------------------------------------------------------
- # writetab() - Write lexer information to a table file
- # ------------------------------------------------------------
- def writetab(self, lextab, outputdir=''):
- if isinstance(lextab, types.ModuleType):
- raise IOError("Won't overwrite existing lextab module")
- basetabmodule = lextab.split('.')[-1]
- filename = os.path.join(outputdir, basetabmodule) + '.py'
- with open(filename, 'w') as tf:
- tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
- tf.write('_tabversion = %s\n' % repr(__tabversion__))
- tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
- tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
- tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
- tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
-
- # Rewrite the lexstatere table, replacing function objects with function names
- tabre = {}
- for statename, lre in self.lexstatere.items():
- titem = []
- for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
- titem.append((retext, _funcs_to_names(func, renames)))
- tabre[statename] = titem
-
- tf.write('_lexstatere = %s\n' % repr(tabre))
- tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
-
- taberr = {}
- for statename, ef in self.lexstateerrorf.items():
- taberr[statename] = ef.__name__ if ef else None
- tf.write('_lexstateerrorf = %s\n' % repr(taberr))
-
- tabeof = {}
- for statename, ef in self.lexstateeoff.items():
- tabeof[statename] = ef.__name__ if ef else None
- tf.write('_lexstateeoff = %s\n' % repr(tabeof))
-
- # ------------------------------------------------------------
- # readtab() - Read lexer information from a tab file
- # ------------------------------------------------------------
- def readtab(self, tabfile, fdict):
- if isinstance(tabfile, types.ModuleType):
- lextab = tabfile
- else:
- exec('import %s' % tabfile)
- lextab = sys.modules[tabfile]
-
- if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
- raise ImportError('Inconsistent PLY version')
-
- self.lextokens = lextab._lextokens
- self.lexreflags = lextab._lexreflags
- self.lexliterals = lextab._lexliterals
- self.lextokens_all = self.lextokens | set(self.lexliterals)
- self.lexstateinfo = lextab._lexstateinfo
- self.lexstateignore = lextab._lexstateignore
- self.lexstatere = {}
- self.lexstateretext = {}
- for statename, lre in lextab._lexstatere.items():
- titem = []
- txtitem = []
- for pat, func_name in lre:
- titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
-
- self.lexstatere[statename] = titem
- self.lexstateretext[statename] = txtitem
-
- self.lexstateerrorf = {}
- for statename, ef in lextab._lexstateerrorf.items():
- self.lexstateerrorf[statename] = fdict[ef]
-
- self.lexstateeoff = {}
- for statename, ef in lextab._lexstateeoff.items():
- self.lexstateeoff[statename] = fdict[ef]
-
- self.begin('INITIAL')
-
- # ------------------------------------------------------------
- # input() - Push a new string into the lexer
- # ------------------------------------------------------------
- def input(self, s):
- # Pull off the first character to see if s looks like a string
- c = s[:1]
- if not isinstance(c, StringTypes):
- raise ValueError('Expected a string')
- self.lexdata = s
- self.lexpos = 0
- self.lexlen = len(s)
-
- # ------------------------------------------------------------
- # begin() - Changes the lexing state
- # ------------------------------------------------------------
- def begin(self, state):
- if state not in self.lexstatere:
- raise ValueError('Undefined state')
- self.lexre = self.lexstatere[state]
- self.lexretext = self.lexstateretext[state]
- self.lexignore = self.lexstateignore.get(state, '')
- self.lexerrorf = self.lexstateerrorf.get(state, None)
- self.lexeoff = self.lexstateeoff.get(state, None)
- self.lexstate = state
-
- # ------------------------------------------------------------
- # push_state() - Changes the lexing state and saves old on stack
- # ------------------------------------------------------------
- def push_state(self, state):
- self.lexstatestack.append(self.lexstate)
- self.begin(state)
-
- # ------------------------------------------------------------
- # pop_state() - Restores the previous state
- # ------------------------------------------------------------
- def pop_state(self):
- self.begin(self.lexstatestack.pop())
-
- # ------------------------------------------------------------
- # current_state() - Returns the current lexing state
- # ------------------------------------------------------------
- def current_state(self):
- return self.lexstate
-
- # ------------------------------------------------------------
- # skip() - Skip ahead n characters
- # ------------------------------------------------------------
- def skip(self, n):
- self.lexpos += n
-
- # ------------------------------------------------------------
- # opttoken() - Return the next token from the Lexer
- #
- # Note: This function has been carefully implemented to be as fast
- # as possible. Don't make changes unless you really know what
- # you are doing
- # ------------------------------------------------------------
- def token(self):
- # Make local copies of frequently referenced attributes
- lexpos = self.lexpos
- lexlen = self.lexlen
- lexignore = self.lexignore
- lexdata = self.lexdata
-
- while lexpos < lexlen:
- # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
- if lexdata[lexpos] in lexignore:
- lexpos += 1
- continue
-
- # Look for a regular expression match
- for lexre, lexindexfunc in self.lexre:
- m = lexre.match(lexdata, lexpos)
- if not m:
- continue
-
- # Create a token for return
- tok = LexToken()
- tok.value = m.group()
- tok.lineno = self.lineno
- tok.lexpos = lexpos
-
- i = m.lastindex
- func, tok.type = lexindexfunc[i]
-
- if not func:
- # If no token type was set, it's an ignored token
- if tok.type:
- self.lexpos = m.end()
- return tok
- else:
- lexpos = m.end()
- break
-
- lexpos = m.end()
-
- # If token is processed by a function, call it
-
- tok.lexer = self # Set additional attributes useful in token rules
- self.lexmatch = m
- self.lexpos = lexpos
-
- newtok = func(tok)
-
- # Every function must return a token. If it returns nothing, we just move on to the next token.
- if not newtok:
- lexpos = self.lexpos # This is here in case user has updated lexpos.
- lexignore = self.lexignore # This is here in case there was a state change
- break
-
- # Verify type of the token. If not in the token map, raise an error
- if not self.lexoptimize:
- if newtok.type not in self.lextokens_all:
- raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
- func.__code__.co_filename, func.__code__.co_firstlineno,
- func.__name__, newtok.type), lexdata[lexpos:])
-
- return newtok
- else:
- # No match, see if in literals
- if lexdata[lexpos] in self.lexliterals:
- tok = LexToken()
- tok.value = lexdata[lexpos]
- tok.lineno = self.lineno
- tok.type = tok.value
- tok.lexpos = lexpos
- self.lexpos = lexpos + 1
- return tok
-
- # No match. Call t_error() if defined.
- if self.lexerrorf:
- tok = LexToken()
- tok.value = self.lexdata[lexpos:]
- tok.lineno = self.lineno
- tok.type = 'error'
- tok.lexer = self
- tok.lexpos = lexpos
- self.lexpos = lexpos
- newtok = self.lexerrorf(tok)
- if lexpos == self.lexpos:
- # Error method didn't change text position at all. This is an error.
- raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
- lexpos = self.lexpos
- if not newtok:
- continue
- return newtok
-
- self.lexpos = lexpos
- raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
-
- if self.lexeoff:
- tok = LexToken()
- tok.type = 'eof'
- tok.value = ''
- tok.lineno = self.lineno
- tok.lexpos = lexpos
- tok.lexer = self
- self.lexpos = lexpos
- newtok = self.lexeoff(tok)
- return newtok
-
- self.lexpos = lexpos + 1
- if self.lexdata is None:
- raise RuntimeError('No input string given with input()')
- return None
-
- # Iterator interface
- def __iter__(self):
- return self
-
- def next(self):
- t = self.token()
- if t is None:
- raise StopIteration
- return t
-
- __next__ = next
-
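# A minimal, self-contained sketch of the tokenizing interface above (input(),
# token(), and the iterator protocol), using a tiny hypothetical rule set; the
# lex() builder that actually constructs the Lexer object is defined further below.
import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)          # skip the offending character and keep going

lexer = lex.lex()
lexer.input('3 + 45')
while True:
    tok = lexer.token()      # returns None when the input is exhausted
    if tok is None:
        break
    print(tok.type, tok.value, tok.lineno, tok.lexpos)

# Equivalently, the __iter__()/next() methods above allow:  for tok in lexer: ...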
-# -----------------------------------------------------------------------------
-# === Lex Builder ===
-#
-# The functions and classes below are used to collect lexing information
-# and build a Lexer object from it.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# _get_regex(func)
-#
-# Returns the regular expression assigned to a function either as a doc string
-# or as a .regex attribute attached by the @TOKEN decorator.
-# -----------------------------------------------------------------------------
-def _get_regex(func):
- return getattr(func, 'regex', func.__doc__)
-
-# -----------------------------------------------------------------------------
-# get_caller_module_dict()
-#
-# This function returns a dictionary containing all of the symbols defined within
-# a caller further down the call stack. This is used to get the environment
-# associated with the lex() call if none was provided.
-# -----------------------------------------------------------------------------
-def get_caller_module_dict(levels):
- f = sys._getframe(levels)
- ldict = f.f_globals.copy()
- if f.f_globals != f.f_locals:
- ldict.update(f.f_locals)
- return ldict
-
-# -----------------------------------------------------------------------------
-# _funcs_to_names()
-#
-# Given a list of regular expression functions, this converts it to a list
-# suitable for output to a table file
-# -----------------------------------------------------------------------------
-def _funcs_to_names(funclist, namelist):
- result = []
- for f, name in zip(funclist, namelist):
- if f and f[0]:
- result.append((name, f[1]))
- else:
- result.append(f)
- return result
-
-# -----------------------------------------------------------------------------
-# _names_to_funcs()
-#
-# Given a list of regular expression function names, this converts it back to
-# functions.
-# -----------------------------------------------------------------------------
-def _names_to_funcs(namelist, fdict):
- result = []
- for n in namelist:
- if n and n[0]:
- result.append((fdict[n[0]], n[1]))
- else:
- result.append(n)
- return result
-
-# -----------------------------------------------------------------------------
-# _form_master_re()
-#
-# This function takes a list of all of the regex components and attempts to
-# form the master regular expression. Given limitations in the Python re
-# module, it may be necessary to break the master regex into separate expressions.
-# -----------------------------------------------------------------------------
-def _form_master_re(relist, reflags, ldict, toknames):
- if not relist:
- return []
- regex = '|'.join(relist)
- try:
- lexre = re.compile(regex, reflags)
-
- # Build the index to function map for the matching engine
- lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
- lexindexnames = lexindexfunc[:]
-
- for f, i in lexre.groupindex.items():
- handle = ldict.get(f, None)
- if type(handle) in (types.FunctionType, types.MethodType):
- lexindexfunc[i] = (handle, toknames[f])
- lexindexnames[i] = f
- elif handle is not None:
- lexindexnames[i] = f
- if f.find('ignore_') > 0:
- lexindexfunc[i] = (None, None)
- else:
- lexindexfunc[i] = (None, toknames[f])
-
- return [(lexre, lexindexfunc)], [regex], [lexindexnames]
- except Exception:
- m = int(len(relist)/2)
- if m == 0:
- m = 1
- llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
- rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
- return (llist+rlist), (lre+rre), (lnames+rnames)
-
-# -----------------------------------------------------------------------------
-# def _statetoken(s,names)
-#
-# Given a declaration name s of the form "t_[states_]TOKENNAME" and a dictionary whose keys are
-# state names, this function returns a tuple (states,tokenname) where states
-# is a tuple of state names and tokenname is the name of the token. For example,
-# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
-# -----------------------------------------------------------------------------
-def _statetoken(s, names):
- nonstate = 1
- parts = s.split('_')
- for i, part in enumerate(parts[1:], 1):
- if part not in names and part != 'ANY':
- break
-
- if i > 1:
- states = tuple(parts[1:i])
- else:
- states = ('INITIAL',)
-
- if 'ANY' in states:
- states = tuple(names)
-
- tokenname = '_'.join(parts[i:])
- return (states, tokenname)
-
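# A quick sketch of the name-splitting rule implemented above. _statetoken() is a
# private helper, imported here only for illustration; the dictionary mimics the
# stateinfo mapping that LexerReflect builds below (keys are state names).
from ply.lex import _statetoken

names = {'INITIAL': 'inclusive', 'foo': 'exclusive', 'bar': 'exclusive'}
print(_statetoken('t_NUMBER', names))        # (('INITIAL',), 'NUMBER')
print(_statetoken('t_foo_bar_SPAM', names))  # (('foo', 'bar'), 'SPAM')
print(_statetoken('t_ANY_error', names))     # (<all state names>, 'error')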
-
-# -----------------------------------------------------------------------------
-# LexerReflect()
-#
-# This class represents information needed to build a lexer as extracted from a
-# user's input file.
-# -----------------------------------------------------------------------------
-class LexerReflect(object):
- def __init__(self, ldict, log=None, reflags=0):
- self.ldict = ldict
- self.error_func = None
- self.tokens = []
- self.reflags = reflags
- self.stateinfo = {'INITIAL': 'inclusive'}
- self.modules = set()
- self.error = False
- self.log = PlyLogger(sys.stderr) if log is None else log
-
- # Get all of the basic information
- def get_all(self):
- self.get_tokens()
- self.get_literals()
- self.get_states()
- self.get_rules()
-
- # Validate all of the information
- def validate_all(self):
- self.validate_tokens()
- self.validate_literals()
- self.validate_rules()
- return self.error
-
- # Get the tokens map
- def get_tokens(self):
- tokens = self.ldict.get('tokens', None)
- if not tokens:
- self.log.error('No token list is defined')
- self.error = True
- return
-
- if not isinstance(tokens, (list, tuple)):
- self.log.error('tokens must be a list or tuple')
- self.error = True
- return
-
- if not tokens:
- self.log.error('tokens is empty')
- self.error = True
- return
-
- self.tokens = tokens
-
- # Validate the tokens
- def validate_tokens(self):
- terminals = {}
- for n in self.tokens:
- if not _is_identifier.match(n):
- self.log.error("Bad token name '%s'", n)
- self.error = True
- if n in terminals:
- self.log.warning("Token '%s' multiply defined", n)
- terminals[n] = 1
-
- # Get the literals specifier
- def get_literals(self):
- self.literals = self.ldict.get('literals', '')
- if not self.literals:
- self.literals = ''
-
- # Validate literals
- def validate_literals(self):
- try:
- for c in self.literals:
- if not isinstance(c, StringTypes) or len(c) > 1:
- self.log.error('Invalid literal %s. Must be a single character', repr(c))
- self.error = True
-
- except TypeError:
- self.log.error('Invalid literals specification. literals must be a sequence of characters')
- self.error = True
-
- def get_states(self):
- self.states = self.ldict.get('states', None)
- # Build statemap
- if self.states:
- if not isinstance(self.states, (tuple, list)):
- self.log.error('states must be defined as a tuple or list')
- self.error = True
- else:
- for s in self.states:
- if not isinstance(s, tuple) or len(s) != 2:
- self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
- self.error = True
- continue
- name, statetype = s
- if not isinstance(name, StringTypes):
- self.log.error('State name %s must be a string', repr(name))
- self.error = True
- continue
- if not (statetype == 'inclusive' or statetype == 'exclusive'):
- self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
- self.error = True
- continue
- if name in self.stateinfo:
- self.log.error("State '%s' already defined", name)
- self.error = True
- continue
- self.stateinfo[name] = statetype
-
- # Get all of the symbols with a t_ prefix and sort them into various
- # categories (functions, strings, error functions, and ignore characters)
-
- def get_rules(self):
- tsymbols = [f for f in self.ldict if f[:2] == 't_']
-
- # Now build up a list of functions and a list of strings
- self.toknames = {} # Mapping of symbols to token names
- self.funcsym = {} # Symbols defined as functions
- self.strsym = {} # Symbols defined as strings
- self.ignore = {} # Ignore strings by state
- self.errorf = {} # Error functions by state
- self.eoff = {} # EOF functions by state
-
- for s in self.stateinfo:
- self.funcsym[s] = []
- self.strsym[s] = []
-
- if len(tsymbols) == 0:
- self.log.error('No rules of the form t_rulename are defined')
- self.error = True
- return
-
- for f in tsymbols:
- t = self.ldict[f]
- states, tokname = _statetoken(f, self.stateinfo)
- self.toknames[f] = tokname
-
- if hasattr(t, '__call__'):
- if tokname == 'error':
- for s in states:
- self.errorf[s] = t
- elif tokname == 'eof':
- for s in states:
- self.eoff[s] = t
- elif tokname == 'ignore':
- line = t.__code__.co_firstlineno
- file = t.__code__.co_filename
- self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
- self.error = True
- else:
- for s in states:
- self.funcsym[s].append((f, t))
- elif isinstance(t, StringTypes):
- if tokname == 'ignore':
- for s in states:
- self.ignore[s] = t
- if '\\' in t:
- self.log.warning("%s contains a literal backslash '\\'", f)
-
- elif tokname == 'error':
- self.log.error("Rule '%s' must be defined as a function", f)
- self.error = True
- else:
- for s in states:
- self.strsym[s].append((f, t))
- else:
- self.log.error('%s not defined as a function or string', f)
- self.error = True
-
- # Sort the functions by line number
- for f in self.funcsym.values():
- f.sort(key=lambda x: x[1].__code__.co_firstlineno)
-
- # Sort the strings by regular expression length
- for s in self.strsym.values():
- s.sort(key=lambda x: len(x[1]), reverse=True)
-
- # Validate all of the t_rules collected
- def validate_rules(self):
- for state in self.stateinfo:
- # Validate all rules defined by functions
-
- for fname, f in self.funcsym[state]:
- line = f.__code__.co_firstlineno
- file = f.__code__.co_filename
- module = inspect.getmodule(f)
- self.modules.add(module)
-
- tokname = self.toknames[fname]
- if isinstance(f, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- nargs = f.__code__.co_argcount
- if nargs > reqargs:
- self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
- self.error = True
- continue
-
- if nargs < reqargs:
- self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
- self.error = True
- continue
-
- if not _get_regex(f):
- self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
- self.error = True
- continue
-
- try:
- c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
- if c.match(''):
- self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
- self.error = True
- except re.error as e:
- self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
- if '#' in _get_regex(f):
- self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
- self.error = True
-
- # Validate all rules defined by strings
- for name, r in self.strsym[state]:
- tokname = self.toknames[name]
- if tokname == 'error':
- self.log.error("Rule '%s' must be defined as a function", name)
- self.error = True
- continue
-
- if tokname not in self.tokens and tokname.find('ignore_') < 0:
- self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
- self.error = True
- continue
-
- try:
- c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
- if (c.match('')):
- self.log.error("Regular expression for rule '%s' matches empty string", name)
- self.error = True
- except re.error as e:
- self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
- if '#' in r:
- self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
- self.error = True
-
- if not self.funcsym[state] and not self.strsym[state]:
- self.log.error("No rules defined for state '%s'", state)
- self.error = True
-
- # Validate the error function
- efunc = self.errorf.get(state, None)
- if efunc:
- f = efunc
- line = f.__code__.co_firstlineno
- file = f.__code__.co_filename
- module = inspect.getmodule(f)
- self.modules.add(module)
-
- if isinstance(f, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- nargs = f.__code__.co_argcount
- if nargs > reqargs:
- self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
- self.error = True
-
- if nargs < reqargs:
- self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
- self.error = True
-
- for module in self.modules:
- self.validate_module(module)
-
- # -----------------------------------------------------------------------------
- # validate_module()
- #
- # This checks to see if there are duplicated t_rulename() functions or strings
- # in the lexer specification. This is done using a simple regular expression
- # match on each line in the source code of the given module.
- # -----------------------------------------------------------------------------
-
- def validate_module(self, module):
- try:
- lines, linen = inspect.getsourcelines(module)
- except IOError:
- return
-
- fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
- sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
-
- counthash = {}
- linen += 1
- for line in lines:
- m = fre.match(line)
- if not m:
- m = sre.match(line)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- filename = inspect.getsourcefile(module)
- self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
- self.error = True
- linen += 1
-
-# -----------------------------------------------------------------------------
-# lex(module)
-#
-# Build all of the regular expression rules from definitions in the supplied module
-# -----------------------------------------------------------------------------
-def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
- reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
-
- if lextab is None:
- lextab = 'lextab'
-
- global lexer
-
- ldict = None
- stateinfo = {'INITIAL': 'inclusive'}
- lexobj = Lexer()
- lexobj.lexoptimize = optimize
- global token, input
-
- if errorlog is None:
- errorlog = PlyLogger(sys.stderr)
-
- if debug:
- if debuglog is None:
- debuglog = PlyLogger(sys.stderr)
-
- # Get the module dictionary used for the lexer
- if object:
- module = object
-
- # Get the module dictionary used for the lexer
- if module:
- _items = [(k, getattr(module, k)) for k in dir(module)]
- ldict = dict(_items)
- # If no __file__ attribute is available, try to obtain it from the __module__ instead
- if '__file__' not in ldict:
- ldict['__file__'] = sys.modules[ldict['__module__']].__file__
- else:
- ldict = get_caller_module_dict(2)
-
- # Determine if the module is part of a package or not.
- # If so, fix the lextab setting so that tables load correctly
- pkg = ldict.get('__package__')
- if pkg and isinstance(lextab, str):
- if '.' not in lextab:
- lextab = pkg + '.' + lextab
-
- # Collect lexer information from the dictionary
- linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
- linfo.get_all()
- if not optimize:
- if linfo.validate_all():
- raise SyntaxError("Can't build lexer")
-
- if optimize and lextab:
- try:
- lexobj.readtab(lextab, ldict)
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
- return lexobj
-
- except ImportError:
- pass
-
- # Dump some basic debugging information
- if debug:
- debuglog.info('lex: tokens = %r', linfo.tokens)
- debuglog.info('lex: literals = %r', linfo.literals)
- debuglog.info('lex: states = %r', linfo.stateinfo)
-
- # Build a dictionary of valid token names
- lexobj.lextokens = set()
- for n in linfo.tokens:
- lexobj.lextokens.add(n)
-
- # Get literals specification
- if isinstance(linfo.literals, (list, tuple)):
- lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
- else:
- lexobj.lexliterals = linfo.literals
-
- lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
-
- # Get the stateinfo dictionary
- stateinfo = linfo.stateinfo
-
- regexs = {}
- # Build the master regular expressions
- for state in stateinfo:
- regex_list = []
-
- # Add rules defined by functions first
- for fname, f in linfo.funcsym[state]:
- line = f.__code__.co_firstlineno
- file = f.__code__.co_filename
- regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
- if debug:
- debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
-
- # Now add all of the simple rules
- for name, r in linfo.strsym[state]:
- regex_list.append('(?P<%s>%s)' % (name, r))
- if debug:
- debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
-
- regexs[state] = regex_list
-
- # Build the master regular expressions
-
- if debug:
- debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
-
- for state in regexs:
- lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
- lexobj.lexstatere[state] = lexre
- lexobj.lexstateretext[state] = re_text
- lexobj.lexstaterenames[state] = re_names
- if debug:
- for i, text in enumerate(re_text):
- debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
-
- # For inclusive states, we need to add the regular expressions from the INITIAL state
- for state, stype in stateinfo.items():
- if state != 'INITIAL' and stype == 'inclusive':
- lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
- lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
- lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
-
- lexobj.lexstateinfo = stateinfo
- lexobj.lexre = lexobj.lexstatere['INITIAL']
- lexobj.lexretext = lexobj.lexstateretext['INITIAL']
- lexobj.lexreflags = reflags
-
- # Set up ignore variables
- lexobj.lexstateignore = linfo.ignore
- lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
-
- # Set up error functions
- lexobj.lexstateerrorf = linfo.errorf
- lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
- if not lexobj.lexerrorf:
- errorlog.warning('No t_error rule is defined')
-
- # Set up eof functions
- lexobj.lexstateeoff = linfo.eoff
- lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
-
- # Check state information for ignore and error rules
- for s, stype in stateinfo.items():
- if stype == 'exclusive':
- if s not in linfo.errorf:
- errorlog.warning("No error rule is defined for exclusive state '%s'", s)
- if s not in linfo.ignore and lexobj.lexignore:
- errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
- elif stype == 'inclusive':
- if s not in linfo.errorf:
- linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
- if s not in linfo.ignore:
- linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
-
- # Create global versions of the token() and input() functions
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
-
- # If in optimize mode, we write the lextab
- if lextab and optimize:
- if outputdir is None:
- # If no output directory is set, the location of the output files
- # is determined according to the following rules:
- # - If lextab specifies a package, files go into that package directory
- # - Otherwise, files go in the same directory as the specifying module
- if isinstance(lextab, types.ModuleType):
- srcfile = lextab.__file__
- else:
- if '.' not in lextab:
- srcfile = ldict['__file__']
- else:
- parts = lextab.split('.')
- pkgname = '.'.join(parts[:-1])
- exec('import %s' % pkgname)
- srcfile = getattr(sys.modules[pkgname], '__file__', '')
- outputdir = os.path.dirname(srcfile)
- try:
- lexobj.writetab(lextab, outputdir)
- except IOError as e:
- errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
-
- return lexobj
-
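# A sketch of the optimize/lextab round trip implemented above. The module name
# 'mylexer' and table name 'mylextab' are hypothetical.
import ply.lex as lex
import mylexer                     # hypothetical module defining tokens and t_ rules

# First run: rules are reflected and validated, and writetab() emits mylextab.py
# next to mylexer (per the outputdir rules above).
lexer = lex.lex(module=mylexer, optimize=True, lextab='mylextab')

# Subsequent runs with the same arguments hit the readtab() fast path instead and
# skip validation entirely, so a stale table must be deleted to force a rebuild.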
-# -----------------------------------------------------------------------------
-# runmain()
-#
-# This runs the lexer as a main program
-# -----------------------------------------------------------------------------
-
-def runmain(lexer=None, data=None):
- if not data:
- try:
- filename = sys.argv[1]
- f = open(filename)
- data = f.read()
- f.close()
- except IndexError:
- sys.stdout.write('Reading from standard input (type EOF to end):\n')
- data = sys.stdin.read()
-
- if lexer:
- _input = lexer.input
- else:
- _input = input
- _input(data)
- if lexer:
- _token = lexer.token
- else:
- _token = token
-
- while True:
- tok = _token()
- if not tok:
- break
- sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
-
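# A common way to invoke the helper above from the bottom of a lexer module
# (sketch): tokenize a file named on the command line, or stdin otherwise.
if __name__ == '__main__':
    import ply.lex as lex
    lex.runmain()        # uses the global token()/input() set up by the last lex() call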
-# -----------------------------------------------------------------------------
-# @TOKEN(regex)
-#
-# This decorator function can be used to attach a regular expression to a rule
-# function when supplying it via the docstring is inconvenient (for example,
-# when the regex is constructed programmatically)
-# -----------------------------------------------------------------------------
-
-def TOKEN(r):
- def set_regex(f):
- if hasattr(r, '__call__'):
- f.regex = _get_regex(r)
- else:
- f.regex = r
- return f
- return set_regex
-
-# Alternative spelling of the TOKEN decorator
-Token = TOKEN
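# A short sketch of the decorator above: build the regular expression separately
# and attach it, instead of using the rule's docstring. Names are illustrative.
from ply.lex import TOKEN

identifier = r'[A-Za-z_][A-Za-z0-9_]*'

@TOKEN(identifier)
def t_ID(t):
    # _get_regex() above will pick up t_ID.regex rather than a docstring
    return t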
-
diff --git a/components/script/dom/bindings/codegen/ply/ply/yacc.py b/components/script/dom/bindings/codegen/ply/ply/yacc.py
deleted file mode 100644
index 03bd86ee078..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/yacc.py
+++ /dev/null
@@ -1,3494 +0,0 @@
-# -----------------------------------------------------------------------------
-# ply: yacc.py
-#
-# Copyright (C) 2001-2017
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-# endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-#
-# This implements an LR parser that is constructed from grammar rules defined
-# as Python functions. The grammar is specified by supplying the BNF inside
-# Python documentation strings. The inspiration for this technique was borrowed
-# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
-# Spark and the GNU bison utility.
-#
-# The current implementation is only somewhat object-oriented. The
-# LR parser itself is defined in terms of an object (which allows multiple
-# parsers to co-exist). However, most of the variables used during table
-# construction are defined in terms of global variables. Users shouldn't
-# notice unless they are trying to define multiple parsers at the same
-# time using threads (in which case they should have their head examined).
-#
-# This implementation supports both SLR and LALR(1) parsing. LALR(1)
-# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
-# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
-# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
-# by the more efficient DeRemer and Pennello algorithm.
-#
-# :::::::: WARNING :::::::
-#
-# Construction of LR parsing tables is fairly complicated and expensive.
-# To make this module run fast, a *LOT* of work has been put into
-# optimization---often at the expense of readability and what some might
-# consider to be good Python "coding style." Modify the code at your
-# own risk!
-# ----------------------------------------------------------------------------
-
-import re
-import types
-import sys
-import os.path
-import inspect
-import base64
-import warnings
-
-__version__ = '3.10'
-__tabversion__ = '3.10'
-
-#-----------------------------------------------------------------------------
-# === User configurable parameters ===
-#
-# Change these to modify the default behavior of yacc (if you wish)
-#-----------------------------------------------------------------------------
-
-yaccdebug = True # Debugging mode. If set, yacc generates a
- # 'parser.out' file in the current directory
-
-debug_file = 'parser.out' # Default name of the debugging file
-tab_module = 'parsetab' # Default name of the table module
-default_lr = 'LALR' # Default LR table generation method
-
-error_count = 3 # Number of symbols that must be shifted to leave recovery mode
-
-yaccdevel = False # Set to True if developing yacc. This turns off optimized
- # implementations of certain functions.
-
-resultlimit = 40 # Size limit of results when running in debug mode.
-
-pickle_protocol = 0 # Protocol to use when writing pickle files
-
-# String type-checking compatibility
-if sys.version_info[0] < 3:
- string_types = basestring
-else:
- string_types = str
-
-MAXINT = sys.maxsize
-
-# This object is a stand-in for a logging object created by the
-# logging module. PLY will use this by default to create things
-# such as the parser.out file. If a user wants more detailed
-# information, they can create their own logging object and pass
-# it into PLY.
-
-class PlyLogger(object):
- def __init__(self, f):
- self.f = f
-
- def debug(self, msg, *args, **kwargs):
- self.f.write((msg % args) + '\n')
-
- info = debug
-
- def warning(self, msg, *args, **kwargs):
- self.f.write('WARNING: ' + (msg % args) + '\n')
-
- def error(self, msg, *args, **kwargs):
- self.f.write('ERROR: ' + (msg % args) + '\n')
-
- critical = debug
-
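# A sketch of swapping in a standard-library logger for PlyLogger, assuming the
# yacc() entry point accepts debuglog/errorlog keyword arguments the same way
# lex() does; logging.Logger provides compatible debug/info/warning/error methods.
import logging
import ply.yacc as yacc

logging.basicConfig(filename='parser.log', level=logging.DEBUG)
log = logging.getLogger('ply')
# parser = yacc.yacc(debug=True, debuglog=log, errorlog=log)   # requires grammar rules in the caller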
-# Null logger is used when no output is generated. Does nothing.
-class NullLogger(object):
- def __getattribute__(self, name):
- return self
-
- def __call__(self, *args, **kwargs):
- return self
-
-# Exception raised for yacc-related errors
-class YaccError(Exception):
- pass
-
-# Format the result message that the parser produces when running in debug mode.
-def format_result(r):
- repr_str = repr(r)
- if '\n' in repr_str:
- repr_str = repr(repr_str)
- if len(repr_str) > resultlimit:
- repr_str = repr_str[:resultlimit] + ' ...'
- result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
- return result
-
-# Format stack entries when the parser is running in debug mode
-def format_stack_entry(r):
- repr_str = repr(r)
- if '\n' in repr_str:
- repr_str = repr(repr_str)
- if len(repr_str) < 16:
- return repr_str
- else:
- return '<%s @ 0x%x>' % (type(r).__name__, id(r))
-
-# Panic mode error recovery support. This feature is being reworked--much of the
-# code here is to offer a deprecation/backwards compatible transition
-
-_errok = None
-_token = None
-_restart = None
-_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
-Instead, invoke the methods on the associated parser instance:
-
- def p_error(p):
- ...
- # Use parser.errok(), parser.token(), parser.restart()
- ...
-
- parser = yacc.yacc()
-'''
-
-def errok():
- warnings.warn(_warnmsg)
- return _errok()
-
-def restart():
- warnings.warn(_warnmsg)
- return _restart()
-
-def token():
- warnings.warn(_warnmsg)
- return _token()
-
-# Utility function to call the p_error() function with some deprecation hacks
-def call_errorfunc(errorfunc, token, parser):
- global _errok, _token, _restart
- _errok = parser.errok
- _token = parser.token
- _restart = parser.restart
- r = errorfunc(token)
- try:
- del _errok, _token, _restart
- except NameError:
- pass
- return r
-
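# A sketch of the non-deprecated error-handler style that call_errorfunc() enables:
# the handler uses the methods on the parser instance instead of the module-level
# errok()/token()/restart() wrappers above. 'SEMI' is a hypothetical token type and
# 'parser' is assumed to be the object returned by yacc.yacc().
def p_error(p):
    if p is None:
        print('Syntax error: unexpected end of input')
        return
    print('Syntax error at %r; resynchronizing' % (p.value,))
    while True:
        tok = parser.token()                 # pull tokens straight from the lexer
        if tok is None or tok.type == 'SEMI':
            break
    parser.errok()                           # clear the error state before resuming
    return tok                               # becomes the next lookahead token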
-#-----------------------------------------------------------------------------
-# === LR Parsing Engine ===
-#
-# The following classes are used for the LR parser itself. These are not
-# used during table construction and are independent of the actual LR
-# table generation algorithm
-#-----------------------------------------------------------------------------
-
-# This class is used to hold non-terminal grammar symbols during parsing.
-# It normally has the following attributes set:
-# .type = Grammar symbol type
-# .value = Symbol value
-# .lineno = Starting line number
-# .endlineno = Ending line number (optional, set automatically)
-# .lexpos = Starting lex position
-# .endlexpos = Ending lex position (optional, set automatically)
-
-class YaccSymbol:
- def __str__(self):
- return self.type
-
- def __repr__(self):
- return str(self)
-
-# This class is a wrapper around the objects actually passed to each
-# grammar rule. Index lookup and assignment actually assign the
-# .value attribute of the underlying YaccSymbol object.
-# The lineno() method returns the line number of a given
-# item (or 0 if not defined). The linespan() method returns
-# a tuple of (startline,endline) representing the range of lines
-# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
-# representing the range of positional information for a symbol.
-
-class YaccProduction:
- def __init__(self, s, stack=None):
- self.slice = s
- self.stack = stack
- self.lexer = None
- self.parser = None
-
- def __getitem__(self, n):
- if isinstance(n, slice):
- return [s.value for s in self.slice[n]]
- elif n >= 0:
- return self.slice[n].value
- else:
- return self.stack[n].value
-
- def __setitem__(self, n, v):
- self.slice[n].value = v
-
- def __getslice__(self, i, j):
- return [s.value for s in self.slice[i:j]]
-
- def __len__(self):
- return len(self.slice)
-
- def lineno(self, n):
- return getattr(self.slice[n], 'lineno', 0)
-
- def set_lineno(self, n, lineno):
- self.slice[n].lineno = lineno
-
- def linespan(self, n):
- startline = getattr(self.slice[n], 'lineno', 0)
- endline = getattr(self.slice[n], 'endlineno', startline)
- return startline, endline
-
- def lexpos(self, n):
- return getattr(self.slice[n], 'lexpos', 0)
-
- def lexspan(self, n):
- startpos = getattr(self.slice[n], 'lexpos', 0)
- endpos = getattr(self.slice[n], 'endlexpos', startpos)
- return startpos, endpos
-
- def error(self):
- raise SyntaxError
-
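# A sketch of how the wrapper above appears inside a grammar rule: indexing reads
# and writes the .value of the underlying symbols, and the helper methods expose
# position tracking. Rule and token names here are illustrative.
def p_expression_plus(p):
    'expression : expression PLUS term'
    p[0] = p[1] + p[3]                       # __setitem__ stores into slice[0].value
    first_line, last_line = p.linespan(1)    # line range covered by the left operand
    plus_pos = p.lexpos(2)                   # lexing position of the PLUS token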
-# -----------------------------------------------------------------------------
-# == LRParser ==
-#
-# The LR Parsing engine.
-# -----------------------------------------------------------------------------
-
-class LRParser:
- def __init__(self, lrtab, errorf):
- self.productions = lrtab.lr_productions
- self.action = lrtab.lr_action
- self.goto = lrtab.lr_goto
- self.errorfunc = errorf
- self.set_defaulted_states()
- self.errorok = True
-
- def errok(self):
- self.errorok = True
-
- def restart(self):
- del self.statestack[:]
- del self.symstack[:]
- sym = YaccSymbol()
- sym.type = '$end'
- self.symstack.append(sym)
- self.statestack.append(0)
-
- # Defaulted state support.
- # This method identifies parser states where there is only one possible reduction action.
- # For such states, the parser can choose to make a rule reduction without consuming
- # the next look-ahead token. This delayed invocation of the tokenizer can be useful in
- # certain kinds of advanced parsing situations where the lexer and parser interact with
- # each other or change states (e.g., manipulation of scope, lexer states, etc.).
- #
- # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
- def set_defaulted_states(self):
- self.defaulted_states = {}
- for state, actions in self.action.items():
- rules = list(actions.values())
- if len(rules) == 1 and rules[0] < 0:
- self.defaulted_states[state] = rules[0]
-
- def disable_defaulted_states(self):
- self.defaulted_states = {}
-
- def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
- if debug or yaccdevel:
- if isinstance(debug, int):
- debug = PlyLogger(sys.stderr)
- return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
- elif tracking:
- return self.parseopt(input, lexer, debug, tracking, tokenfunc)
- else:
- return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
-
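    # A sketch of how the dispatch above chooses an engine, assuming 'parser' is the
    # object returned by yacc.yacc() and 'data' is the text to parse:
    #
    #   parser.parse(data)                    # default fast path -> parseopt_notrack()
    #   parser.parse(data, tracking=True)     # keep line/position info -> parseopt()
    #   parser.parse(data, debug=True)        # log each step to stderr -> parsedebug()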
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parsedebug().
- #
- # This is the debugging enabled version of parse(). All changes made to the
- # parsing engine should be made here. Optimized versions of this function
- # are automatically created by the ply/ygen.py script. This script cuts out
- # sections enclosed in markers such as this:
- #
- # #--! DEBUG
- # statements
- # #--! DEBUG
- #
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
- #--! parsedebug-start
- lookahead = None # Current lookahead symbol
- lookaheadstack = [] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- defaulted_states = self.defaulted_states # Local reference to defaulted states
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
- #--! DEBUG
- debug.info('PLY: PARSE DEBUG START')
- #--! DEBUG
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- from . import lex
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set the parser() token method (sometimes used in error recovery)
- self.token = get_token
-
- # Set up the state and symbol stacks
-
- statestack = [] # Stack of parsing states
- self.statestack = statestack
- symstack = [] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
- state = 0
- while True:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
- #--! DEBUG
- debug.debug('')
- debug.debug('State : %s', state)
- #--! DEBUG
-
- if state not in defaulted_states:
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
- else:
- t = defaulted_states[state]
- #--! DEBUG
- debug.debug('Defaulted state %s: Reduce using %d', state, -t)
- #--! DEBUG
-
- #--! DEBUG
- debug.debug('Stack : %s',
- ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
- #--! DEBUG
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
- #--! DEBUG
- debug.debug('Action : Shift and goto state %s', t)
- #--! DEBUG
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount:
- errorcount -= 1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
- #--! DEBUG
- if plen:
- debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
- '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
- goto[statestack[-1-plen]][pname])
- else:
- debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
- goto[statestack[-1]][pname])
-
- #--! DEBUG
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
- #--! TRACKING
- if tracking:
- t1 = targ[1]
- sym.lineno = t1.lineno
- sym.lexpos = t1.lexpos
- t1 = targ[-1]
- sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
- sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
- #--! TRACKING
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- self.state = state
- p.callable(pslice)
- del statestack[-plen:]
- #--! DEBUG
- debug.info('Result : %s', format_result(pslice[0]))
- #--! DEBUG
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
- #--! TRACKING
- if tracking:
- sym.lineno = lexer.lineno
- sym.lexpos = lexer.lexpos
- #--! TRACKING
-
- targ = [sym]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- self.state = state
- p.callable(pslice)
- #--! DEBUG
- debug.info('Result : %s', format_result(pslice[0]))
- #--! DEBUG
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- result = getattr(n, 'value', None)
- #--! DEBUG
- debug.info('Done : Returning %s', format_result(result))
- debug.info('PLY: PARSE DEBUG END')
- #--! DEBUG
- return result
-
- if t is None:
-
- #--! DEBUG
- debug.error('Error : %s',
- ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
- #--! DEBUG
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
- # In addition to pushing the error token, we call
- # the user defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = False
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- if errtoken and not hasattr(errtoken, 'lexer'):
- errtoken.lexer = lexer
- self.state = state
- tok = call_errorfunc(self.errorfunc, errtoken, self)
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken, 'lineno'):
- lineno = lookahead.lineno
- else:
- lineno = 0
- if lineno:
- sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
- else:
- sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
- else:
- sys.stderr.write('yacc: Parse error in input. EOF\n')
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- #--! TRACKING
- if tracking:
- sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
- sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
- #--! TRACKING
- lookahead = None
- continue
-
- # Create the error symbol for the first time and make it the new lookahead symbol
- t = YaccSymbol()
- t.type = 'error'
-
- if hasattr(lookahead, 'lineno'):
- t.lineno = t.endlineno = lookahead.lineno
- if hasattr(lookahead, 'lexpos'):
- t.lexpos = t.endlexpos = lookahead.lexpos
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- sym = symstack.pop()
- #--! TRACKING
- if tracking:
- lookahead.lineno = sym.lineno
- lookahead.lexpos = sym.lexpos
- #--! TRACKING
- statestack.pop()
- state = statestack[-1]
-
- continue
-
- # Call an error function here
- raise RuntimeError('yacc: internal parser error!!!\n')
-
- #--! parsedebug-end
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parseopt().
- #
- # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
- # This code is automatically generated by the ply/ygen.py script. Make
- # changes to the parsedebug() method instead.
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
- #--! parseopt-start
- lookahead = None # Current lookahead symbol
- lookaheadstack = [] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- defaulted_states = self.defaulted_states # Local reference to defaulted states
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- from . import lex
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set the parser() token method (sometimes used in error recovery)
- self.token = get_token
-
- # Set up the state and symbol stacks
-
- statestack = [] # Stack of parsing states
- self.statestack = statestack
- symstack = [] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
- state = 0
- while True:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
-
- if state not in defaulted_states:
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
- else:
- t = defaulted_states[state]
-
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount:
- errorcount -= 1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
- #--! TRACKING
- if tracking:
- t1 = targ[1]
- sym.lineno = t1.lineno
- sym.lexpos = t1.lexpos
- t1 = targ[-1]
- sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
- sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
- #--! TRACKING
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- self.state = state
- p.callable(pslice)
- del statestack[-plen:]
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
- #--! TRACKING
- if tracking:
- sym.lineno = lexer.lineno
- sym.lexpos = lexer.lexpos
- #--! TRACKING
-
- targ = [sym]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- self.state = state
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- result = getattr(n, 'value', None)
- return result
-
- if t is None:
-
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
- # In addition to pushing the error token, we call
- # the user defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = False
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- if errtoken and not hasattr(errtoken, 'lexer'):
- errtoken.lexer = lexer
- self.state = state
- tok = call_errorfunc(self.errorfunc, errtoken, self)
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken, 'lineno'):
- lineno = lookahead.lineno
- else:
- lineno = 0
- if lineno:
- sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
- else:
- sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
- else:
- sys.stderr.write('yacc: Parse error in input. EOF\n')
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- #--! TRACKING
- if tracking:
- sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
- sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
- #--! TRACKING
- lookahead = None
- continue
-
- # Create the error symbol for the first time and make it the new lookahead symbol
- t = YaccSymbol()
- t.type = 'error'
-
- if hasattr(lookahead, 'lineno'):
- t.lineno = t.endlineno = lookahead.lineno
- if hasattr(lookahead, 'lexpos'):
- t.lexpos = t.endlexpos = lookahead.lexpos
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- sym = symstack.pop()
- #--! TRACKING
- if tracking:
- lookahead.lineno = sym.lineno
- lookahead.lexpos = sym.lexpos
- #--! TRACKING
- statestack.pop()
- state = statestack[-1]
-
- continue
-
- # Call an error function here
- raise RuntimeError('yacc: internal parser error!!!\n')
-
- #--! parseopt-end
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parseopt_notrack().
- #
- # Optimized version of parseopt() with line number tracking removed.
- # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
- # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
- #--! parseopt-notrack-start
- lookahead = None # Current lookahead symbol
- lookaheadstack = [] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- defaulted_states = self.defaulted_states # Local reference to defaulted states
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- from . import lex
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set the parser() token method (sometimes used in error recovery)
- self.token = get_token
-
- # Set up the state and symbol stacks
-
- statestack = [] # Stack of parsing states
- self.statestack = statestack
- symstack = [] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
- state = 0
- while True:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
-
- if state not in defaulted_states:
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
- else:
- t = defaulted_states[state]
-
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount:
- errorcount -= 1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- self.state = state
- p.callable(pslice)
- del statestack[-plen:]
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
-
- targ = [sym]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- self.state = state
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
- # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
- state = statestack[-1]
- sym.type = 'error'
- sym.value = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = False
-
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- result = getattr(n, 'value', None)
- return result
-
- if t is None:
-
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the lookahead stack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
- # In addition to pushing the error token, we call the
- # user defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = False
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- if errtoken and not hasattr(errtoken, 'lexer'):
- errtoken.lexer = lexer
- self.state = state
- tok = call_errorfunc(self.errorfunc, errtoken, self)
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken, 'lineno'):
- lineno = lookahead.lineno
- else:
- lineno = 0
- if lineno:
- sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
- else:
- sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
- else:
- sys.stderr.write('yacc: Parse error in input. EOF\n')
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- lookahead = None
- continue
-
- # Create the error symbol for the first time and make it the new lookahead symbol
- t = YaccSymbol()
- t.type = 'error'
-
- if hasattr(lookahead, 'lineno'):
- t.lineno = t.endlineno = lookahead.lineno
- if hasattr(lookahead, 'lexpos'):
- t.lexpos = t.endlexpos = lookahead.lexpos
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- sym = symstack.pop()
- statestack.pop()
- state = statestack[-1]
-
- continue
-
- # Call an error function here
- raise RuntimeError('yacc: internal parser error!!!\n')
-
- #--! parseopt-notrack-end
-
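-# Illustrative sketch (not part of this file): the shape of a user-supplied
-# p_error() handler that the error-recovery path above invokes through
-# call_errorfunc(). The messages and recovery strategy below are assumptions
-# for the example only.
-#
-#     def p_error(p):
-#         if p is None:
-#             print("Syntax error at end of input")
-#         else:
-#             print("Syntax error at token", p.type)
-#         # Calling parser.errok() clears the error state; any token
-#         # returned from p_error() becomes the new lookahead.
-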
-# -----------------------------------------------------------------------------
-# === Grammar Representation ===
-#
-# The following functions, classes, and variables are used to represent and
-# manipulate the rules that make up a grammar.
-# -----------------------------------------------------------------------------
-
-# regex matching identifiers
-_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
-
-# -----------------------------------------------------------------------------
-# class Production:
-#
-# This class stores the raw information about a single production or grammar rule.
-# A grammar rule refers to a specification such as this:
-#
-# expr : expr PLUS term
-#
-# Here are the basic attributes defined on all productions
-#
-# name - Name of the production. For example 'expr'
-# prod - A list of symbols on the right side ['expr','PLUS','term']
-# prec - Production precedence level
-# number - Production number.
-# func - Function that executes on reduce
-# file - File where production function is defined
-# lineno - Line number where production function is defined
-#
-# The following attributes are also defined:
-#
-# len - Length of the production (number of symbols on right hand side)
-# usyms - Set of unique symbols found in the production
-# -----------------------------------------------------------------------------
-
-class Production(object):
- reduced = 0
- def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
- self.name = name
- self.prod = tuple(prod)
- self.number = number
- self.func = func
- self.callable = None
- self.file = file
- self.line = line
- self.prec = precedence
-
- # Internal settings used during table construction
-
- self.len = len(self.prod) # Length of the production
-
- # Create a list of unique production symbols used in the production
- self.usyms = []
- for s in self.prod:
- if s not in self.usyms:
- self.usyms.append(s)
-
- # List of all LR items for the production
- self.lr_items = []
- self.lr_next = None
-
- # Create a string representation
- if self.prod:
- self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
- else:
- self.str = '%s -> <empty>' % self.name
-
- def __str__(self):
- return self.str
-
- def __repr__(self):
- return 'Production(' + str(self) + ')'
-
- def __len__(self):
- return len(self.prod)
-
- def __nonzero__(self):
- return 1
-
- def __getitem__(self, index):
- return self.prod[index]
-
- # Return the nth lr_item from the production (or None if at the end)
- def lr_item(self, n):
- if n > len(self.prod):
- return None
- p = LRItem(self, n)
- # Precompute the list of productions immediately following.
- try:
- p.lr_after = Prodnames[p.prod[n+1]]
- except (IndexError, KeyError):
- p.lr_after = []
- try:
- p.lr_before = p.prod[n-1]
- except IndexError:
- p.lr_before = None
- return p
-
- # Bind the production function name to a callable
- def bind(self, pdict):
- if self.func:
- self.callable = pdict[self.func]
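-
-# Illustrative sketch (not part of this file): the Production built for a
-# rule such as 'expr : expr PLUS term'. The production number is an
-# assumption for the example only.
-#
-#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
-#     str(p)      # 'expr -> expr PLUS term'
-#     p.len       # 3
-#     p.usyms     # ['expr', 'PLUS', 'term']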
-
-# This class serves as a minimal standin for Production objects when
-# reading table data from files. It only contains information
-# actually used by the LR parsing engine, plus some additional
-# debugging information.
-class MiniProduction(object):
- def __init__(self, str, name, len, func, file, line):
- self.name = name
- self.len = len
- self.func = func
- self.callable = None
- self.file = file
- self.line = line
- self.str = str
-
- def __str__(self):
- return self.str
-
- def __repr__(self):
- return 'MiniProduction(%s)' % self.str
-
- # Bind the production function name to a callable
- def bind(self, pdict):
- if self.func:
- self.callable = pdict[self.func]
-
-
-# -----------------------------------------------------------------------------
-# class LRItem
-#
-# This class represents a specific stage of parsing a production rule. For
-# example:
-#
-# expr : expr . PLUS term
-#
-# In the above, the "." represents the current location of the parse. Here are
-# the basic attributes:
-#
-# name - Name of the production. For example 'expr'
-# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
-# number - Production number.
-#
-# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
-# then lr_next refers to 'expr -> expr PLUS . term'
-# lr_index - LR item index (location of the ".") in the prod list.
-# lookaheads - LALR lookahead symbols for this item
-# len - Length of the production (number of symbols on right hand side)
-# lr_after - List of all productions that immediately follow
-# lr_before - Grammar symbol immediately before
-# -----------------------------------------------------------------------------
-
-class LRItem(object):
- def __init__(self, p, n):
- self.name = p.name
- self.prod = list(p.prod)
- self.number = p.number
- self.lr_index = n
- self.lookaheads = {}
- self.prod.insert(n, '.')
- self.prod = tuple(self.prod)
- self.len = len(self.prod)
- self.usyms = p.usyms
-
- def __str__(self):
- if self.prod:
- s = '%s -> %s' % (self.name, ' '.join(self.prod))
- else:
- s = '%s -> <empty>' % self.name
- return s
-
- def __repr__(self):
- return 'LRItem(' + str(self) + ')'
-
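-# Illustrative sketch (not part of this file): the LR item obtained by
-# placing the dot at position 1 of 'expr -> expr PLUS term'.
-#
-#     item = LRItem(Production(1, 'expr', ['expr', 'PLUS', 'term']), 1)
-#     str(item)         # 'expr -> expr . PLUS term'
-#     item.lr_index     # 1
-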
-# -----------------------------------------------------------------------------
-# rightmost_terminal()
-#
-# Return the rightmost terminal from a list of symbols. Used in add_production()
-# -----------------------------------------------------------------------------
-def rightmost_terminal(symbols, terminals):
- i = len(symbols) - 1
- while i >= 0:
- if symbols[i] in terminals:
- return symbols[i]
- i -= 1
- return None
-
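-# Illustrative sketch (not part of this file): with PLUS declared as a
-# terminal, the rightmost terminal of ['expr', 'PLUS', 'term'] is PLUS.
-#
-#     rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS': []})   # -> 'PLUS'
-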
-# -----------------------------------------------------------------------------
-# === GRAMMAR CLASS ===
-#
-# The following class represents the contents of the specified grammar along
-# with various computed properties such as first sets, follow sets, LR items, etc.
-# This data is used for critical parts of the table generation process later.
-# -----------------------------------------------------------------------------
-
-class GrammarError(YaccError):
- pass
-
-class Grammar(object):
- def __init__(self, terminals):
- self.Productions = [None] # A list of all of the productions. The first
- # entry is always reserved for the purpose of
- # building an augmented grammar
-
- self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
- # productions of that nonterminal.
-
- self.Prodmap = {} # A dictionary that is only used to detect duplicate
- # productions.
-
- self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
- # list of the rules where they are used.
-
- for term in terminals:
- self.Terminals[term] = []
-
- self.Terminals['error'] = []
-
- self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
- # of rule numbers where they are used.
-
- self.First = {} # A dictionary of precomputed FIRST(x) symbols
-
- self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
-
- self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
- # form ('right',level) or ('nonassoc', level) or ('left',level)
-
- self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
- # This is only used to provide error checking and to generate
- # a warning about unused precedence rules.
-
- self.Start = None # Starting symbol for the grammar
-
-
- def __len__(self):
- return len(self.Productions)
-
- def __getitem__(self, index):
- return self.Productions[index]
-
- # -----------------------------------------------------------------------------
- # set_precedence()
- #
- # Sets the precedence for a given terminal. assoc is the associativity such as
- # 'left','right', or 'nonassoc'. level is a numeric level.
- #
- # -----------------------------------------------------------------------------
-
- def set_precedence(self, term, assoc, level):
- assert self.Productions == [None], 'Must call set_precedence() before add_production()'
- if term in self.Precedence:
- raise GrammarError('Precedence already specified for terminal %r' % term)
- if assoc not in ['left', 'right', 'nonassoc']:
- raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
- self.Precedence[term] = (assoc, level)
-
- # -----------------------------------------------------------------------------
- # add_production()
- #
- # Given an action function, this function assembles a production rule and
- # computes its precedence level.
- #
- # The production rule is supplied as a list of symbols. For example,
- # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
- # symbols ['expr','PLUS','term'].
- #
- # Precedence is determined by the precedence of the right-most terminal
- # or the precedence of a terminal specified by %prec.
- #
- # A variety of error checks are performed to make sure production symbols
- # are valid and that %prec is used correctly.
- # -----------------------------------------------------------------------------
-
- def add_production(self, prodname, syms, func=None, file='', line=0):
-
- if prodname in self.Terminals:
- raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
- if prodname == 'error':
- raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
- if not _is_identifier.match(prodname):
- raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
-
- # Look for literal tokens
- for n, s in enumerate(syms):
- if s[0] in "'\"":
- try:
- c = eval(s)
- if (len(c) > 1):
- raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
- (file, line, s, prodname))
- if c not in self.Terminals:
- self.Terminals[c] = []
- syms[n] = c
- continue
- except SyntaxError:
- pass
- if not _is_identifier.match(s) and s != '%prec':
- raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
-
- # Determine the precedence level
- if '%prec' in syms:
- if syms[-1] == '%prec':
- raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
- if syms[-2] != '%prec':
- raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
- (file, line))
- precname = syms[-1]
- prodprec = self.Precedence.get(precname)
- if not prodprec:
- raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
- else:
- self.UsedPrecedence.add(precname)
- del syms[-2:] # Drop %prec from the rule
- else:
- # If no %prec, precedence is determined by the rightmost terminal symbol
- precname = rightmost_terminal(syms, self.Terminals)
- prodprec = self.Precedence.get(precname, ('right', 0))
-
- # See if the rule is already in the rulemap
- map = '%s -> %s' % (prodname, syms)
- if map in self.Prodmap:
- m = self.Prodmap[map]
- raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
- 'Previous definition at %s:%d' % (m.file, m.line))
-
- # From this point on, everything is valid. Create a new Production instance
- pnumber = len(self.Productions)
- if prodname not in self.Nonterminals:
- self.Nonterminals[prodname] = []
-
- # Add the production number to Terminals and Nonterminals
- for t in syms:
- if t in self.Terminals:
- self.Terminals[t].append(pnumber)
- else:
- if t not in self.Nonterminals:
- self.Nonterminals[t] = []
- self.Nonterminals[t].append(pnumber)
-
- # Create a production and add it to the list of productions
- p = Production(pnumber, prodname, syms, prodprec, func, file, line)
- self.Productions.append(p)
- self.Prodmap[map] = p
-
- # Add to the global productions list
- try:
- self.Prodnames[prodname].append(p)
- except KeyError:
- self.Prodnames[prodname] = [p]
-
- # -----------------------------------------------------------------------------
- # set_start()
- #
- # Sets the starting symbol and creates the augmented grammar. Production
- # rule 0 is S' -> start where start is the start symbol.
- # -----------------------------------------------------------------------------
-
- def set_start(self, start=None):
- if not start:
- start = self.Productions[1].name
- if start not in self.Nonterminals:
- raise GrammarError('start symbol %s undefined' % start)
- self.Productions[0] = Production(0, "S'", [start])
- self.Nonterminals[start].append(0)
- self.Start = start
-
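- # Illustrative sketch (not part of this file): assembling a small grammar
- # by hand with the methods above. The token and rule names are assumptions
- # for the example only.
- #
- #     g = Grammar(['NUMBER', 'PLUS'])
- #     g.add_production('expr', ['expr', 'PLUS', 'term'])
- #     g.add_production('expr', ['term'])
- #     g.add_production('term', ['NUMBER'])
- #     g.set_start('expr')       # installs production 0: S' -> expr
-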
- # -----------------------------------------------------------------------------
- # find_unreachable()
- #
- # Find all of the nonterminal symbols that can't be reached from the starting
- # symbol. Returns a list of nonterminals that can't be reached.
- # -----------------------------------------------------------------------------
-
- def find_unreachable(self):
-
- # Mark all symbols that are reachable from a symbol s
- def mark_reachable_from(s):
- if s in reachable:
- return
- reachable.add(s)
- for p in self.Prodnames.get(s, []):
- for r in p.prod:
- mark_reachable_from(r)
-
- reachable = set()
- mark_reachable_from(self.Productions[0].prod[0])
- return [s for s in self.Nonterminals if s not in reachable]
-
- # -----------------------------------------------------------------------------
- # infinite_cycles()
- #
- # This function looks at the various parsing rules and tries to detect
- # infinite recursion cycles (grammar rules where there is no possible way
- # to derive a string of only terminals).
- # -----------------------------------------------------------------------------
-
- def infinite_cycles(self):
- terminates = {}
-
- # Terminals:
- for t in self.Terminals:
- terminates[t] = True
-
- terminates['$end'] = True
-
- # Nonterminals:
-
- # Initialize to false:
- for n in self.Nonterminals:
- terminates[n] = False
-
- # Then propagate termination until no change:
- while True:
- some_change = False
- for (n, pl) in self.Prodnames.items():
- # Nonterminal n terminates iff any of its productions terminates.
- for p in pl:
- # Production p terminates iff all of its rhs symbols terminate.
- for s in p.prod:
- if not terminates[s]:
- # The symbol s does not terminate,
- # so production p does not terminate.
- p_terminates = False
- break
- else:
- # didn't break from the loop,
- # so every symbol s terminates
- # so production p terminates.
- p_terminates = True
-
- if p_terminates:
- # symbol n terminates!
- if not terminates[n]:
- terminates[n] = True
- some_change = True
- # Don't need to consider any more productions for this n.
- break
-
- if not some_change:
- break
-
- infinite = []
- for (s, term) in terminates.items():
- if not term:
- if s not in self.Prodnames and s not in self.Terminals and s != 'error':
- # s is used-but-not-defined, and we've already warned of that,
- # so it would be overkill to say that it's also non-terminating.
- pass
- else:
- infinite.append(s)
-
- return infinite
-
- # -----------------------------------------------------------------------------
- # undefined_symbols()
- #
- # Find all symbols that were used in the grammar, but not defined as tokens or
- # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
- # and prod is the production where the symbol was used.
- # -----------------------------------------------------------------------------
- def undefined_symbols(self):
- result = []
- for p in self.Productions:
- if not p:
- continue
-
- for s in p.prod:
- if s not in self.Prodnames and s not in self.Terminals and s != 'error':
- result.append((s, p))
- return result
-
- # -----------------------------------------------------------------------------
- # unused_terminals()
- #
- # Find all terminals that were defined, but not used by the grammar. Returns
- # a list of the unused terminal names.
- # -----------------------------------------------------------------------------
- def unused_terminals(self):
- unused_tok = []
- for s, v in self.Terminals.items():
- if s != 'error' and not v:
- unused_tok.append(s)
-
- return unused_tok
-
- # ------------------------------------------------------------------------------
- # unused_rules()
- #
- # Find all grammar rules that were defined, but not used (maybe not reachable)
- # Returns a list of productions.
- # ------------------------------------------------------------------------------
-
- def unused_rules(self):
- unused_prod = []
- for s, v in self.Nonterminals.items():
- if not v:
- p = self.Prodnames[s][0]
- unused_prod.append(p)
- return unused_prod
-
- # -----------------------------------------------------------------------------
- # unused_precedence()
- #
- # Returns a list of tuples (term,precedence) corresponding to precedence
- # rules that were never used by the grammar. term is the name of the terminal
- # on which precedence was applied and precedence is a string such as 'left' or
- # 'right' corresponding to the type of precedence.
- # -----------------------------------------------------------------------------
-
- def unused_precedence(self):
- unused = []
- for termname in self.Precedence:
- if not (termname in self.Terminals or termname in self.UsedPrecedence):
- unused.append((termname, self.Precedence[termname][0]))
-
- return unused
-
- # -------------------------------------------------------------------------
- # _first()
- #
- # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
- #
- # During execution of compute_first(), the result may be incomplete.
- # Afterward (e.g., when called from compute_follow()), it will be complete.
- # -------------------------------------------------------------------------
- def _first(self, beta):
-
- # We are computing First(x1,x2,x3,...,xn)
- result = []
- for x in beta:
- x_produces_empty = False
-
- # Add all the non-<empty> symbols of First[x] to the result.
- for f in self.First[x]:
- if f == '<empty>':
- x_produces_empty = True
- else:
- if f not in result:
- result.append(f)
-
- if x_produces_empty:
- # We have to consider the next x in beta,
- # i.e. stay in the loop.
- pass
- else:
- # We don't have to consider any further symbols in beta.
- break
- else:
- # There was no 'break' from the loop,
- # so x_produces_empty was true for all x in beta,
- # so beta produces empty as well.
- result.append('<empty>')
-
- return result
-
- # -------------------------------------------------------------------------
- # compute_first()
- #
- # Compute the value of FIRST1(X) for all symbols
- # -------------------------------------------------------------------------
- def compute_first(self):
- if self.First:
- return self.First
-
- # Terminals:
- for t in self.Terminals:
- self.First[t] = [t]
-
- self.First['$end'] = ['$end']
-
- # Nonterminals:
-
- # Initialize to the empty set:
- for n in self.Nonterminals:
- self.First[n] = []
-
- # Then propagate symbols until no change:
- while True:
- some_change = False
- for n in self.Nonterminals:
- for p in self.Prodnames[n]:
- for f in self._first(p.prod):
- if f not in self.First[n]:
- self.First[n].append(f)
- some_change = True
- if not some_change:
- break
-
- return self.First
-
- # ---------------------------------------------------------------------
- # compute_follow()
- #
- # Computes all of the follow sets for every non-terminal symbol. The
- # follow set is the set of all symbols that might follow a given
- # non-terminal. See the Dragon book, 2nd Ed. p. 189.
- # ---------------------------------------------------------------------
- def compute_follow(self, start=None):
- # If already computed, return the result
- if self.Follow:
- return self.Follow
-
- # If first sets not computed yet, do that first.
- if not self.First:
- self.compute_first()
-
- # Add '$end' to the follow list of the start symbol
- for k in self.Nonterminals:
- self.Follow[k] = []
-
- if not start:
- start = self.Productions[1].name
-
- self.Follow[start] = ['$end']
-
- while True:
- didadd = False
- for p in self.Productions[1:]:
- # Here is the production set
- for i, B in enumerate(p.prod):
- if B in self.Nonterminals:
- # Okay. We got a non-terminal in a production
- fst = self._first(p.prod[i+1:])
- hasempty = False
- for f in fst:
- if f != '<empty>' and f not in self.Follow[B]:
- self.Follow[B].append(f)
- didadd = True
- if f == '<empty>':
- hasempty = True
- if hasempty or i == (len(p.prod)-1):
- # Add elements of Follow(p.name) to Follow(B)
- for f in self.Follow[p.name]:
- if f not in self.Follow[B]:
- self.Follow[B].append(f)
- didadd = True
- if not didadd:
- break
- return self.Follow
-
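- # Illustrative sketch (not part of this file): for the small grammar used in
- # the earlier example (expr : expr PLUS term | term ; term : NUMBER, start
- # symbol expr), the computed sets are:
- #
- #     compute_first()    FIRST(expr) = FIRST(term) = {'NUMBER'}
- #     compute_follow()   FOLLOW(expr) = FOLLOW(term) = {'$end', 'PLUS'}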
-
- # -----------------------------------------------------------------------------
- # build_lritems()
- #
- # This function walks the list of productions and builds a complete set of the
- # LR items. The LR items are stored in two ways: first, each production keeps
- # its complete list of items in its lr_items attribute. Second, the items are
- # chained together through the lr_next attributes. For example:
- #
- # E -> E PLUS E
- #
- # Creates the list
- #
- # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
- # -----------------------------------------------------------------------------
-
- def build_lritems(self):
- for p in self.Productions:
- lastlri = p
- i = 0
- lr_items = []
- while True:
- if i > len(p):
- lri = None
- else:
- lri = LRItem(p, i)
- # Precompute the list of productions immediately following
- try:
- lri.lr_after = self.Prodnames[lri.prod[i+1]]
- except (IndexError, KeyError):
- lri.lr_after = []
- try:
- lri.lr_before = lri.prod[i-1]
- except IndexError:
- lri.lr_before = None
-
- lastlri.lr_next = lri
- if not lri:
- break
- lr_items.append(lri)
- lastlri = lri
- i += 1
- p.lr_items = lr_items
-
-# -----------------------------------------------------------------------------
-# == Class LRTable ==
-#
-# This class represents a basic table of LR parsing information.
-# Methods for generating the tables are not defined here. They are defined
-# in the derived class LRGeneratedTable.
-# -----------------------------------------------------------------------------
-
-class VersionError(YaccError):
- pass
-
-class LRTable(object):
- def __init__(self):
- self.lr_action = None
- self.lr_goto = None
- self.lr_productions = None
- self.lr_method = None
-
- def read_table(self, module):
- if isinstance(module, types.ModuleType):
- parsetab = module
- else:
- exec('import %s' % module)
- parsetab = sys.modules[module]
-
- if parsetab._tabversion != __tabversion__:
- raise VersionError('yacc table file version is out of date')
-
- self.lr_action = parsetab._lr_action
- self.lr_goto = parsetab._lr_goto
-
- self.lr_productions = []
- for p in parsetab._lr_productions:
- self.lr_productions.append(MiniProduction(*p))
-
- self.lr_method = parsetab._lr_method
- return parsetab._lr_signature
-
- def read_pickle(self, filename):
- try:
- import cPickle as pickle
- except ImportError:
- import pickle
-
- if not os.path.exists(filename):
- raise ImportError
-
- in_f = open(filename, 'rb')
-
- tabversion = pickle.load(in_f)
- if tabversion != __tabversion__:
- raise VersionError('yacc table file version is out of date')
- self.lr_method = pickle.load(in_f)
- signature = pickle.load(in_f)
- self.lr_action = pickle.load(in_f)
- self.lr_goto = pickle.load(in_f)
- productions = pickle.load(in_f)
-
- self.lr_productions = []
- for p in productions:
- self.lr_productions.append(MiniProduction(*p))
-
- in_f.close()
- return signature
-
- # Bind all production function names to callable objects in pdict
- def bind_callables(self, pdict):
- for p in self.lr_productions:
- p.bind(pdict)
-
-
-# -----------------------------------------------------------------------------
-# === LR Generator ===
-#
-# The following classes and functions are used to generate LR parsing tables on
-# a grammar.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# digraph()
-# traverse()
-#
-# The following two functions are used to compute set valued functions
-# of the form:
-#
-# F(x) = F'(x) U U{F(y) | x R y}
-#
-# This is used to compute the values of Read() sets as well as FOLLOW sets
-# in LALR(1) generation.
-#
-# Inputs: X - An input set
-# R - A relation
-# FP - Set-valued function
-# ------------------------------------------------------------------------------
-
-def digraph(X, R, FP):
- N = {}
- for x in X:
- N[x] = 0
- stack = []
- F = {}
- for x in X:
- if N[x] == 0:
- traverse(x, N, stack, F, X, R, FP)
- return F
-
-def traverse(x, N, stack, F, X, R, FP):
- stack.append(x)
- d = len(stack)
- N[x] = d
- F[x] = FP(x) # F(X) <- F'(x)
-
- rel = R(x) # Get y's related to x
- for y in rel:
- if N[y] == 0:
- traverse(y, N, stack, F, X, R, FP)
- N[x] = min(N[x], N[y])
- for a in F.get(y, []):
- if a not in F[x]:
- F[x].append(a)
- if N[x] == d:
- N[stack[-1]] = MAXINT
- F[stack[-1]] = F[x]
- element = stack.pop()
- while element != x:
- N[stack[-1]] = MAXINT
- F[stack[-1]] = F[x]
- element = stack.pop()
-
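-# Illustrative sketch (not part of this file): digraph() applied to a tiny
-# relation. X, R and FP below are throwaway values chosen only to show the
-# shape of the computation F(x) = F'(x) U U{ F(y) | x R y }.
-#
-#     X  = [1, 2]
-#     R  = lambda x: [2] if x == 1 else []          # 1 R 2
-#     FP = lambda x: ['a'] if x == 1 else ['b']     # F'(1)={a}, F'(2)={b}
-#
-#     digraph(X, R, FP)     # -> {1: ['a', 'b'], 2: ['b']}
-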
-class LALRError(YaccError):
- pass
-
-# -----------------------------------------------------------------------------
-# == LRGeneratedTable ==
-#
-# This class implements the LR table generation algorithm. There are no
-# public methods except for write()
-# -----------------------------------------------------------------------------
-
-class LRGeneratedTable(LRTable):
- def __init__(self, grammar, method='LALR', log=None):
- if method not in ['SLR', 'LALR']:
- raise LALRError('Unsupported method %s' % method)
-
- self.grammar = grammar
- self.lr_method = method
-
- # Set up the logger
- if not log:
- log = NullLogger()
- self.log = log
-
- # Internal attributes
- self.lr_action = {} # Action table
- self.lr_goto = {} # Goto table
- self.lr_productions = grammar.Productions # Copy of grammar Production array
- self.lr_goto_cache = {} # Cache of computed gotos
- self.lr0_cidhash = {} # Cache of closures
-
- self._add_count = 0 # Internal counter used to detect cycles
-
- # Diagnostic information filled in by the table generator
- self.sr_conflict = 0
- self.rr_conflict = 0
- self.conflicts = [] # List of conflicts
-
- self.sr_conflicts = []
- self.rr_conflicts = []
-
- # Build the tables
- self.grammar.build_lritems()
- self.grammar.compute_first()
- self.grammar.compute_follow()
- self.lr_parse_table()
-
- # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
-
- def lr0_closure(self, I):
- self._add_count += 1
-
- # Add everything in I to J
- J = I[:]
- didadd = True
- while didadd:
- didadd = False
- for j in J:
- for x in j.lr_after:
- if getattr(x, 'lr0_added', 0) == self._add_count:
- continue
- # Add B --> .G to J
- J.append(x.lr_next)
- x.lr0_added = self._add_count
- didadd = True
-
- return J
-
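- # Illustrative sketch (not part of this file): for the grammar
- # expr : expr PLUS term | term ; term : NUMBER, the closure of the single
- # kernel item [S' -> . expr] pulls in every item with the dot at the start
- # of an expr or term production:
- #
- #     S' -> . expr
- #     expr -> . expr PLUS term
- #     expr -> . term
- #     term -> . NUMBER
-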
- # Compute the LR(0) goto function goto(I,X) where I is a set
- # of LR(0) items and X is a grammar symbol. This function is written
- # in a way that guarantees uniqueness of the generated goto sets
- # (i.e. the same goto set will never be returned as two different Python
- # objects). With uniqueness, we can later do fast set comparisons using
- # id(obj) instead of element-wise comparison.
-
- def lr0_goto(self, I, x):
- # First we look for a previously cached entry
- g = self.lr_goto_cache.get((id(I), x))
- if g:
- return g
-
- # Now we generate the goto set in a way that guarantees uniqueness
- # of the result
-
- s = self.lr_goto_cache.get(x)
- if not s:
- s = {}
- self.lr_goto_cache[x] = s
-
- gs = []
- for p in I:
- n = p.lr_next
- if n and n.lr_before == x:
- s1 = s.get(id(n))
- if not s1:
- s1 = {}
- s[id(n)] = s1
- gs.append(n)
- s = s1
- g = s.get('$end')
- if not g:
- if gs:
- g = self.lr0_closure(gs)
- s['$end'] = g
- else:
- s['$end'] = gs
- self.lr_goto_cache[(id(I), x)] = g
- return g
-
- # Compute the LR(0) sets of item function
- def lr0_items(self):
- C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
- i = 0
- for I in C:
- self.lr0_cidhash[id(I)] = i
- i += 1
-
- # Loop over the items in C and each grammar symbol
- i = 0
- while i < len(C):
- I = C[i]
- i += 1
-
- # Collect all of the symbols that could possibly be in the goto(I,X) sets
- asyms = {}
- for ii in I:
- for s in ii.usyms:
- asyms[s] = None
-
- for x in asyms:
- g = self.lr0_goto(I, x)
- if not g or id(g) in self.lr0_cidhash:
- continue
- self.lr0_cidhash[id(g)] = len(C)
- C.append(g)
-
- return C
-
- # -----------------------------------------------------------------------------
- # ==== LALR(1) Parsing ====
- #
- # LALR(1) parsing is almost exactly the same as SLR except that instead of
- # relying upon Follow() sets when performing reductions, a more selective
- # lookahead set that incorporates the state of the LR(0) machine is utilized.
- # Thus, we mainly just have to focus on calculating the lookahead sets.
- #
- # The method used here is due to DeRemer and Pennello (1982).
- #
- # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
- # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
- # Vol. 4, No. 4, Oct. 1982, pp. 615-649
- #
- # Further details can also be found in:
- #
- # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
- # McGraw-Hill Book Company, (1985).
- #
- # -----------------------------------------------------------------------------
-
- # -----------------------------------------------------------------------------
- # compute_nullable_nonterminals()
- #
- # Creates a dictionary containing all of the non-terminals that might produce
- # an empty production.
- # -----------------------------------------------------------------------------
-
- def compute_nullable_nonterminals(self):
- nullable = set()
- num_nullable = 0
- while True:
- for p in self.grammar.Productions[1:]:
- if p.len == 0:
- nullable.add(p.name)
- continue
- for t in p.prod:
- if t not in nullable:
- break
- else:
- nullable.add(p.name)
- if len(nullable) == num_nullable:
- break
- num_nullable = len(nullable)
- return nullable
-
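- # Illustrative sketch (not part of this file): with assumed rules
- #
- #     opt_semi :                  (an empty production)
- #              | SEMI
- #     block    : opt_semi
- #
- # 'opt_semi' is added first (it has an empty right-hand side), and 'block'
- # follows once every symbol on its right-hand side is known to be nullable.
-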
- # -----------------------------------------------------------------------------
- # find_nonterminal_transitions(C)
- #
- # Given a set of LR(0) items, this function finds all of the non-terminal
- # transitions. These are transitions in which a dot appears immediately before
- # a non-terminal. Returns a list of tuples of the form (state,N) where state
- # is the state number and N is the nonterminal symbol.
- #
- # The input C is the set of LR(0) items.
- # -----------------------------------------------------------------------------
-
- def find_nonterminal_transitions(self, C):
- trans = []
- for stateno, state in enumerate(C):
- for p in state:
- if p.lr_index < p.len - 1:
- t = (stateno, p.prod[p.lr_index+1])
- if t[1] in self.grammar.Nonterminals:
- if t not in trans:
- trans.append(t)
- return trans
-
- # -----------------------------------------------------------------------------
- # dr_relation()
- #
- # Computes the DR(p,A) relationships for non-terminal transitions. The input
- # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
- #
- # Returns a list of terminals.
- # -----------------------------------------------------------------------------
-
- def dr_relation(self, C, trans, nullable):
- dr_set = {}
- state, N = trans
- terms = []
-
- g = self.lr0_goto(C[state], N)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index+1]
- if a in self.grammar.Terminals:
- if a not in terms:
- terms.append(a)
-
- # This extra bit is to handle the start state
- if state == 0 and N == self.grammar.Productions[0].prod[0]:
- terms.append('$end')
-
- return terms
-
- # -----------------------------------------------------------------------------
- # reads_relation()
- #
- # Computes the READS() relation (p,A) READS (t,C).
- # -----------------------------------------------------------------------------
-
- def reads_relation(self, C, trans, empty):
- # Look for empty transitions
- rel = []
- state, N = trans
-
- g = self.lr0_goto(C[state], N)
- j = self.lr0_cidhash.get(id(g), -1)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index + 1]
- if a in empty:
- rel.append((j, a))
-
- return rel
-
- # -----------------------------------------------------------------------------
- # compute_lookback_includes()
- #
- # Determines the lookback and includes relations
- #
- # LOOKBACK:
- #
- # This relation is determined by running the LR(0) state machine forward.
- # For example, starting with a production "N : . A B C", we run it forward
- # to obtain "N : A B C ." We then build a relationship between this final
- # state and the starting state. These relationships are stored in a dictionary
- # lookdict.
- #
- # INCLUDES:
- #
- # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
- #
- # This relation is used to determine non-terminal transitions that occur
- # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
- # if the following holds:
- #
- # B -> LAT, where T -> epsilon and p' -L-> p
- #
- # L is essentially a prefix (which may be empty), T is a suffix that must be
- # able to derive an empty string. State p' must lead to state p with the string L.
- #
- # -----------------------------------------------------------------------------
-
- def compute_lookback_includes(self, C, trans, nullable):
- lookdict = {} # Dictionary of lookback relations
- includedict = {} # Dictionary of include relations
-
- # Make a dictionary of non-terminal transitions
- dtrans = {}
- for t in trans:
- dtrans[t] = 1
-
- # Loop over all transitions and compute lookbacks and includes
- for state, N in trans:
- lookb = []
- includes = []
- for p in C[state]:
- if p.name != N:
- continue
-
- # Okay, we have a name match. We now follow the production all the way
- # through the state machine until we get the . on the right hand side
-
- lr_index = p.lr_index
- j = state
- while lr_index < p.len - 1:
- lr_index = lr_index + 1
- t = p.prod[lr_index]
-
- # Check to see if this symbol and state are a non-terminal transition
- if (j, t) in dtrans:
- # Yes. Okay, there is some chance that this is an includes relation
- # the only way to know for certain is whether the rest of the
- # production derives empty
-
- li = lr_index + 1
- while li < p.len:
- if p.prod[li] in self.grammar.Terminals:
- break # No, forget it
- if p.prod[li] not in nullable:
- break
- li = li + 1
- else:
- # Appears to be a relation between (j,t) and (state,N)
- includes.append((j, t))
-
- g = self.lr0_goto(C[j], t) # Go to next set
- j = self.lr0_cidhash.get(id(g), -1) # Go to next state
-
- # When we get here, j is the final state, now we have to locate the production
- for r in C[j]:
- if r.name != p.name:
- continue
- if r.len != p.len:
- continue
- i = 0
- # This loop is comparing a production ". A B C" with "A B C ."
- while i < r.lr_index:
- if r.prod[i] != p.prod[i+1]:
- break
- i = i + 1
- else:
- lookb.append((j, r))
- for i in includes:
- if i not in includedict:
- includedict[i] = []
- includedict[i].append((state, N))
- lookdict[(state, N)] = lookb
-
- return lookdict, includedict
-
- # -----------------------------------------------------------------------------
- # compute_read_sets()
- #
- # Given a set of LR(0) items, this function computes the read sets.
- #
- # Inputs: C = Set of LR(0) items
- # ntrans = Set of nonterminal transitions
- # nullable = Set of empty transitions
- #
- # Returns a set containing the read sets
- # -----------------------------------------------------------------------------
-
- def compute_read_sets(self, C, ntrans, nullable):
- FP = lambda x: self.dr_relation(C, x, nullable)
- R = lambda x: self.reads_relation(C, x, nullable)
- F = digraph(ntrans, R, FP)
- return F
-
- # -----------------------------------------------------------------------------
- # compute_follow_sets()
- #
- # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
- # and an include set, this function computes the follow sets
- #
- # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
- #
- # Inputs:
- # ntrans = Set of nonterminal transitions
- # readsets = Readset (previously computed)
- # inclsets = Include sets (previously computed)
- #
- # Returns a set containing the follow sets
- # -----------------------------------------------------------------------------
-
- def compute_follow_sets(self, ntrans, readsets, inclsets):
- FP = lambda x: readsets[x]
- R = lambda x: inclsets.get(x, [])
- F = digraph(ntrans, R, FP)
- return F
-
- # -----------------------------------------------------------------------------
- # add_lookaheads()
- #
- # Attaches the lookahead symbols to grammar rules.
- #
- # Inputs: lookbacks - Set of lookback relations
- # followset - Computed follow set
- #
- # This function directly attaches the lookaheads to productions contained
- # in the lookbacks set
- # -----------------------------------------------------------------------------
-
- def add_lookaheads(self, lookbacks, followset):
- for trans, lb in lookbacks.items():
- # Loop over productions in lookback
- for state, p in lb:
- if state not in p.lookaheads:
- p.lookaheads[state] = []
- f = followset.get(trans, [])
- for a in f:
- if a not in p.lookaheads[state]:
- p.lookaheads[state].append(a)
-
- # -----------------------------------------------------------------------------
- # add_lalr_lookaheads()
- #
- # This function does all of the work of adding lookahead information for use
- # with LALR parsing
- # -----------------------------------------------------------------------------
-
- def add_lalr_lookaheads(self, C):
- # Determine all of the nullable nonterminals
- nullable = self.compute_nullable_nonterminals()
-
- # Find all non-terminal transitions
- trans = self.find_nonterminal_transitions(C)
-
- # Compute read sets
- readsets = self.compute_read_sets(C, trans, nullable)
-
- # Compute lookback/includes relations
- lookd, included = self.compute_lookback_includes(C, trans, nullable)
-
- # Compute LALR FOLLOW sets
- followsets = self.compute_follow_sets(trans, readsets, included)
-
- # Add all of the lookaheads
- self.add_lookaheads(lookd, followsets)
-
- # -----------------------------------------------------------------------------
- # lr_parse_table()
- #
- # This function constructs the parse tables for SLR or LALR
- # -----------------------------------------------------------------------------
- def lr_parse_table(self):
- Productions = self.grammar.Productions
- Precedence = self.grammar.Precedence
- goto = self.lr_goto # Goto array
- action = self.lr_action # Action array
- log = self.log # Logger for output
-
- actionp = {} # Action production array (temporary)
-
- log.info('Parsing method: %s', self.lr_method)
-
- # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
- # This determines the number of states
-
- C = self.lr0_items()
-
- if self.lr_method == 'LALR':
- self.add_lalr_lookaheads(C)
-
- # Build the parser table, state by state
- st = 0
- for I in C:
- # Loop over each production in I
- actlist = [] # List of actions
- st_action = {}
- st_actionp = {}
- st_goto = {}
- log.info('')
- log.info('state %d', st)
- log.info('')
- for p in I:
- log.info(' (%d) %s', p.number, p)
- log.info('')
-
- for p in I:
- if p.len == p.lr_index + 1:
- if p.name == "S'":
- # Start symbol. Accept!
- st_action['$end'] = 0
- st_actionp['$end'] = p
- else:
- # We are at the end of a production. Reduce!
- if self.lr_method == 'LALR':
- laheads = p.lookaheads[st]
- else:
- laheads = self.grammar.Follow[p.name]
- for a in laheads:
- actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
- r = st_action.get(a)
- if r is not None:
- # Whoa. Have a shift/reduce or reduce/reduce conflict
- if r > 0:
- # Need to decide on shift or reduce here
- # By default we favor shifting. Need to add
- # some precedence rules here.
-
- # Shift precedence comes from the token
- sprec, slevel = Precedence.get(a, ('right', 0))
-
- # Reduce precedence comes from rule being reduced (p)
- rprec, rlevel = Productions[p.number].prec
-
- if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
- # We really need to reduce here.
- st_action[a] = -p.number
- st_actionp[a] = p
- if not slevel and not rlevel:
- log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
- self.sr_conflicts.append((st, a, 'reduce'))
- Productions[p.number].reduced += 1
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
- else:
- # Hmmm. Guess we'll keep the shift
- if not rlevel:
- log.info(' ! shift/reduce conflict for %s resolved as shift', a)
- self.sr_conflicts.append((st, a, 'shift'))
- elif r < 0:
- # Reduce/reduce conflict. In this case, we favor the rule
- # that was defined first in the grammar file
- oldp = Productions[-r]
- pp = Productions[p.number]
- if oldp.line > pp.line:
- st_action[a] = -p.number
- st_actionp[a] = p
- chosenp, rejectp = pp, oldp
- Productions[p.number].reduced += 1
- Productions[oldp.number].reduced -= 1
- else:
- chosenp, rejectp = oldp, pp
- self.rr_conflicts.append((st, chosenp, rejectp))
- log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
- a, st_actionp[a].number, st_actionp[a])
- else:
- raise LALRError('Unknown conflict in state %d' % st)
- else:
- st_action[a] = -p.number
- st_actionp[a] = p
- Productions[p.number].reduced += 1
- else:
- i = p.lr_index
- a = p.prod[i+1] # Get symbol right after the "."
- if a in self.grammar.Terminals:
- g = self.lr0_goto(I, a)
- j = self.lr0_cidhash.get(id(g), -1)
- if j >= 0:
- # We are in a shift state
- actlist.append((a, p, 'shift and go to state %d' % j))
- r = st_action.get(a)
- if r is not None:
- # Whoa. Have a shift/reduce or shift/shift conflict
- if r > 0:
- if r != j:
- raise LALRError('Shift/shift conflict in state %d' % st)
- elif r < 0:
- # Do a precedence check.
- # - if precedence of reduce rule is higher, we reduce.
- # - if precedence of reduce is same and left assoc, we reduce.
- # - otherwise we shift
-
- # Shift precedence comes from the token
- sprec, slevel = Precedence.get(a, ('right', 0))
-
- # Reduce precedence comes from the rule that could have been reduced
- rprec, rlevel = Productions[st_actionp[a].number].prec
-
- if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
- # We decide to shift here... highest precedence to shift
- Productions[st_actionp[a].number].reduced -= 1
- st_action[a] = j
- st_actionp[a] = p
- if not rlevel:
- log.info(' ! shift/reduce conflict for %s resolved as shift', a)
- self.sr_conflicts.append((st, a, 'shift'))
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
- else:
- # Hmmm. Guess we'll keep the reduce
- if not slevel and not rlevel:
- log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
- self.sr_conflicts.append((st, a, 'reduce'))
-
- else:
- raise LALRError('Unknown conflict in state %d' % st)
- else:
- st_action[a] = j
- st_actionp[a] = p
-
- # Print the actions associated with each terminal
- _actprint = {}
- for a, p, m in actlist:
- if a in st_action:
- if p is st_actionp[a]:
- log.info(' %-15s %s', a, m)
- _actprint[(a, m)] = 1
- log.info('')
- # Print the actions that were not used. (debugging)
- not_used = 0
- for a, p, m in actlist:
- if a in st_action:
- if p is not st_actionp[a]:
- if not (a, m) in _actprint:
- log.debug(' ! %-15s [ %s ]', a, m)
- not_used = 1
- _actprint[(a, m)] = 1
- if not_used:
- log.debug('')
-
- # Construct the goto table for this state
-
- nkeys = {}
- for ii in I:
- for s in ii.usyms:
- if s in self.grammar.Nonterminals:
- nkeys[s] = None
- for n in nkeys:
- g = self.lr0_goto(I, n)
- j = self.lr0_cidhash.get(id(g), -1)
- if j >= 0:
- st_goto[n] = j
- log.info(' %-30s shift and go to state %d', n, j)
-
- action[st] = st_action
- actionp[st] = st_actionp
- goto[st] = st_goto
- st += 1
-
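- # Illustrative sketch (not part of this file): for an assumed rule
- # 'expr : expr PLUS expr' with precedence ('left', 'PLUS'), the state that
- # contains both 'expr -> expr PLUS expr .' and 'expr -> expr . PLUS expr'
- # has a shift/reduce conflict on PLUS. The shift and reduce precedences are
- # both ('left', 1), so the logic above resolves it as a reduce, which is
- # what makes PLUS left-associative.
-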
- # -----------------------------------------------------------------------------
- # write()
- #
- # This function writes the LR parsing tables to a file
- # -----------------------------------------------------------------------------
-
- def write_table(self, tabmodule, outputdir='', signature=''):
- if isinstance(tabmodule, types.ModuleType):
- raise IOError("Won't overwrite existing tabmodule")
-
- basemodulename = tabmodule.split('.')[-1]
- filename = os.path.join(outputdir, basemodulename) + '.py'
- try:
- f = open(filename, 'w')
-
- f.write('''
-# %s
-# This file is automatically generated. Do not edit.
-_tabversion = %r
-
-_lr_method = %r
-
-_lr_signature = %r
- ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
-
- # Change smaller to 0 to go back to original tables
- smaller = 1
-
- # Factor out names to try and make smaller
- if smaller:
- items = {}
-
- for s, nd in self.lr_action.items():
- for name, v in nd.items():
- i = items.get(name)
- if not i:
- i = ([], [])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
-
- f.write('\n_lr_action_items = {')
- for k, v in items.items():
- f.write('%r:([' % k)
- for i in v[0]:
- f.write('%r,' % i)
- f.write('],[')
- for i in v[1]:
- f.write('%r,' % i)
-
- f.write(']),')
- f.write('}\n')
-
- f.write('''
-_lr_action = {}
-for _k, _v in _lr_action_items.items():
- for _x,_y in zip(_v[0],_v[1]):
- if not _x in _lr_action: _lr_action[_x] = {}
- _lr_action[_x][_k] = _y
-del _lr_action_items
-''')
-
- else:
- f.write('\n_lr_action = { ')
- for k, v in self.lr_action.items():
- f.write('(%r,%r):%r,' % (k[0], k[1], v))
- f.write('}\n')
-
- if smaller:
- # Factor out names to try and make smaller
- items = {}
-
- for s, nd in self.lr_goto.items():
- for name, v in nd.items():
- i = items.get(name)
- if not i:
- i = ([], [])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
-
- f.write('\n_lr_goto_items = {')
- for k, v in items.items():
- f.write('%r:([' % k)
- for i in v[0]:
- f.write('%r,' % i)
- f.write('],[')
- for i in v[1]:
- f.write('%r,' % i)
-
- f.write(']),')
- f.write('}\n')
-
- f.write('''
-_lr_goto = {}
-for _k, _v in _lr_goto_items.items():
- for _x, _y in zip(_v[0], _v[1]):
- if not _x in _lr_goto: _lr_goto[_x] = {}
- _lr_goto[_x][_k] = _y
-del _lr_goto_items
-''')
- else:
- f.write('\n_lr_goto = { ')
- for k, v in self.lr_goto.items():
- f.write('(%r,%r):%r,' % (k[0], k[1], v))
- f.write('}\n')
-
- # Write production table
- f.write('_lr_productions = [\n')
- for p in self.lr_productions:
- if p.func:
- f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
- p.func, os.path.basename(p.file), p.line))
- else:
- f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
- f.write(']\n')
- f.close()
-
- except IOError as e:
- raise
-
-
- # -----------------------------------------------------------------------------
- # pickle_table()
- #
- # This function pickles the LR parsing tables to a supplied file object
- # -----------------------------------------------------------------------------
-
- def pickle_table(self, filename, signature=''):
- try:
- import cPickle as pickle
- except ImportError:
- import pickle
- with open(filename, 'wb') as outf:
- pickle.dump(__tabversion__, outf, pickle_protocol)
- pickle.dump(self.lr_method, outf, pickle_protocol)
- pickle.dump(signature, outf, pickle_protocol)
- pickle.dump(self.lr_action, outf, pickle_protocol)
- pickle.dump(self.lr_goto, outf, pickle_protocol)
-
- outp = []
- for p in self.lr_productions:
- if p.func:
- outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
- else:
- outp.append((str(p), p.name, p.len, None, None, None))
- pickle.dump(outp, outf, pickle_protocol)
-
-# -----------------------------------------------------------------------------
-# === INTROSPECTION ===
-#
-# The following functions and classes are used to implement the PLY
-# introspection features followed by the yacc() function itself.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# get_caller_module_dict()
-#
-# This function returns a dictionary containing all of the symbols defined within
-# a caller further down the call stack. This is used to get the environment
-# associated with the yacc() call if none was provided.
-# -----------------------------------------------------------------------------
-
-def get_caller_module_dict(levels):
- f = sys._getframe(levels)
- ldict = f.f_globals.copy()
- if f.f_globals != f.f_locals:
- ldict.update(f.f_locals)
- return ldict
-
-# -----------------------------------------------------------------------------
-# parse_grammar()
-#
-# This takes a raw grammar rule string and parses it into production data
-# -----------------------------------------------------------------------------
-def parse_grammar(doc, file, line):
- grammar = []
- # Split the doc string into lines
- pstrings = doc.splitlines()
- lastp = None
- dline = line
- for ps in pstrings:
- dline += 1
- p = ps.split()
- if not p:
- continue
- try:
- if p[0] == '|':
- # This is a continuation of a previous rule
- if not lastp:
- raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
- prodname = lastp
- syms = p[1:]
- else:
- prodname = p[0]
- lastp = prodname
- syms = p[2:]
- assign = p[1]
- if assign != ':' and assign != '::=':
- raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
-
- grammar.append((file, dline, prodname, syms))
- except SyntaxError:
- raise
- except Exception:
- raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
-
- return grammar
-
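-# Illustrative sketch (not part of this file): a rule docstring such as
-#
-#     expression : expression PLUS term
-#                | term
-#
-# yields one (file, line, prodname, syms) tuple per alternative, e.g.
-# (file, n, 'expression', ['expression', 'PLUS', 'term']) followed by
-# (file, n+1, 'expression', ['term']), where n follows from the line
-# number passed in.
-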
-# -----------------------------------------------------------------------------
-# ParserReflect()
-#
-# This class represents information extracted for building a parser including
-# start symbol, error function, tokens, precedence list, action functions,
-# etc.
-# -----------------------------------------------------------------------------
-class ParserReflect(object):
- def __init__(self, pdict, log=None):
- self.pdict = pdict
- self.start = None
- self.error_func = None
- self.tokens = None
- self.modules = set()
- self.grammar = []
- self.error = False
-
- if log is None:
- self.log = PlyLogger(sys.stderr)
- else:
- self.log = log
-
- # Get all of the basic information
- def get_all(self):
- self.get_start()
- self.get_error_func()
- self.get_tokens()
- self.get_precedence()
- self.get_pfunctions()
-
- # Validate all of the information
- def validate_all(self):
- self.validate_start()
- self.validate_error_func()
- self.validate_tokens()
- self.validate_precedence()
- self.validate_pfunctions()
- self.validate_modules()
- return self.error
-
- # Compute a signature over the grammar
- def signature(self):
- parts = []
- try:
- if self.start:
- parts.append(self.start)
- if self.prec:
- parts.append(''.join([''.join(p) for p in self.prec]))
- if self.tokens:
- parts.append(' '.join(self.tokens))
- for f in self.pfuncs:
- if f[3]:
- parts.append(f[3])
- except (TypeError, ValueError):
- pass
- return ''.join(parts)
-
- # -----------------------------------------------------------------------------
- # validate_modules()
- #
- # This method checks to see if there are duplicated p_rulename() functions
- # in the parser module file. Without this function, it is really easy for
- # users to make mistakes by cutting and pasting code fragments (and it's a real
- # bugger to try and figure out why the resulting parser doesn't work). Therefore,
- # we just do a little regular expression pattern matching of def statements
- # to try and detect duplicates.
- # -----------------------------------------------------------------------------
-
- def validate_modules(self):
- # Match def p_funcname(
- fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
-
- for module in self.modules:
- try:
- lines, linen = inspect.getsourcelines(module)
- except IOError:
- continue
-
- counthash = {}
- for linen, line in enumerate(lines):
- linen += 1
- m = fre.match(line)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- filename = inspect.getsourcefile(module)
- self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
- filename, linen, name, prev)
-
- # Get the start symbol
- def get_start(self):
- self.start = self.pdict.get('start')
-
- # Validate the start symbol
- def validate_start(self):
- if self.start is not None:
- if not isinstance(self.start, string_types):
- self.log.error("'start' must be a string")
-
- # Look for error handler
- def get_error_func(self):
- self.error_func = self.pdict.get('p_error')
-
- # Validate the error function
- def validate_error_func(self):
- if self.error_func:
- if isinstance(self.error_func, types.FunctionType):
- ismethod = 0
- elif isinstance(self.error_func, types.MethodType):
- ismethod = 1
- else:
- self.log.error("'p_error' defined, but is not a function or method")
- self.error = True
- return
-
- eline = self.error_func.__code__.co_firstlineno
- efile = self.error_func.__code__.co_filename
- module = inspect.getmodule(self.error_func)
- self.modules.add(module)
-
- argcount = self.error_func.__code__.co_argcount - ismethod
- if argcount != 1:
- self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
- self.error = True
-
- # Get the tokens map
- def get_tokens(self):
- tokens = self.pdict.get('tokens')
- if not tokens:
- self.log.error('No token list is defined')
- self.error = True
- return
-
- if not isinstance(tokens, (list, tuple)):
- self.log.error('tokens must be a list or tuple')
- self.error = True
- return
-
- if not tokens:
- self.log.error('tokens is empty')
- self.error = True
- return
-
- self.tokens = tokens
-
- # Validate the tokens
- def validate_tokens(self):
- # Validate the tokens.
- if 'error' in self.tokens:
- self.log.error("Illegal token name 'error'. Is a reserved word")
- self.error = True
- return
-
- terminals = set()
- for n in self.tokens:
- if n in terminals:
- self.log.warning('Token %r multiply defined', n)
- terminals.add(n)
-
- # Get the precedence map (if any)
- def get_precedence(self):
- self.prec = self.pdict.get('precedence')
-
- # Validate and parse the precedence map
- def validate_precedence(self):
- preclist = []
- if self.prec:
- if not isinstance(self.prec, (list, tuple)):
- self.log.error('precedence must be a list or tuple')
- self.error = True
- return
- for level, p in enumerate(self.prec):
- if not isinstance(p, (list, tuple)):
- self.log.error('Bad precedence table')
- self.error = True
- return
-
- if len(p) < 2:
- self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
- self.error = True
- return
- assoc = p[0]
- if not isinstance(assoc, string_types):
- self.log.error('precedence associativity must be a string')
- self.error = True
- return
- for term in p[1:]:
- if not isinstance(term, string_types):
- self.log.error('precedence items must be strings')
- self.error = True
- return
- preclist.append((term, assoc, level+1))
- self.preclist = preclist
-
- # Get all p_functions from the grammar
- def get_pfunctions(self):
- p_functions = []
- for name, item in self.pdict.items():
- if not name.startswith('p_') or name == 'p_error':
- continue
- if isinstance(item, (types.FunctionType, types.MethodType)):
- line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
- module = inspect.getmodule(item)
- p_functions.append((line, module, name, item.__doc__))
-
- # Sort all of the actions by line number; make sure to stringify
- # modules to make them sortable, since `line` may not uniquely sort all
- # p functions
- p_functions.sort(key=lambda p_function: (
- p_function[0],
- str(p_function[1]),
- p_function[2],
- p_function[3]))
- self.pfuncs = p_functions
-
- # Validate all of the p_functions
- def validate_pfunctions(self):
- grammar = []
- # Check for non-empty symbols
- if len(self.pfuncs) == 0:
- self.log.error('no rules of the form p_rulename are defined')
- self.error = True
- return
-
- for line, module, name, doc in self.pfuncs:
- file = inspect.getsourcefile(module)
- func = self.pdict[name]
- if isinstance(func, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- if func.__code__.co_argcount > reqargs:
- self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
- self.error = True
- elif func.__code__.co_argcount < reqargs:
- self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
- self.error = True
- elif not func.__doc__:
- self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
- file, line, func.__name__)
- else:
- try:
- parsed_g = parse_grammar(doc, file, line)
- for g in parsed_g:
- grammar.append((name, g))
- except SyntaxError as e:
- self.log.error(str(e))
- self.error = True
-
- # Looks like a valid grammar rule
- # Mark the file in which defined.
- self.modules.add(module)
-
- # Secondary validation step that looks for p_ definitions that are not functions
- # or functions that look like they might be grammar rules.
-
- for n, v in self.pdict.items():
- if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
- continue
- if n.startswith('t_'):
- continue
- if n.startswith('p_') and n != 'p_error':
- self.log.warning('%r not defined as a function', n)
- if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
- (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
- if v.__doc__:
- try:
- doc = v.__doc__.split(' ')
- if doc[1] == ':':
- self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
- v.__code__.co_filename, v.__code__.co_firstlineno, n)
- except IndexError:
- pass
-
- self.grammar = grammar
-
-# -----------------------------------------------------------------------------
-# yacc(module)
-#
-# Build a parser
-# -----------------------------------------------------------------------------
-
-def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
- check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
- outputdir=None, debuglog=None, errorlog=None, picklefile=None):
-
- if tabmodule is None:
- tabmodule = tab_module
-
- # Reference to the parsing method of the last built parser
- global parse
-
- # If pickling is enabled, table files are not created
- if picklefile:
- write_tables = False
-
- if errorlog is None:
- errorlog = PlyLogger(sys.stderr)
-
- # Get the module dictionary used for the parser
- if module:
- _items = [(k, getattr(module, k)) for k in dir(module)]
- pdict = dict(_items)
- # If no __file__ attribute is available, try to obtain it from the __module__ instead
- if '__file__' not in pdict:
- pdict['__file__'] = sys.modules[pdict['__module__']].__file__
- else:
- pdict = get_caller_module_dict(2)
-
- if outputdir is None:
- # If no output directory is set, the location of the output files
- # is determined according to the following rules:
- # - If tabmodule specifies a package, files go into that package directory
- # - Otherwise, files go in the same directory as the specifying module
- if isinstance(tabmodule, types.ModuleType):
- srcfile = tabmodule.__file__
- else:
- if '.' not in tabmodule:
- srcfile = pdict['__file__']
- else:
- parts = tabmodule.split('.')
- pkgname = '.'.join(parts[:-1])
- exec('import %s' % pkgname)
- srcfile = getattr(sys.modules[pkgname], '__file__', '')
- outputdir = os.path.dirname(srcfile)
-
- # Determine whether the module is part of a package.
- # If so, fix the tabmodule setting so that tables load correctly
- pkg = pdict.get('__package__')
- if pkg and isinstance(tabmodule, str):
- if '.' not in tabmodule:
- tabmodule = pkg + '.' + tabmodule
-
-
-
- # Set start symbol if it's specified directly using an argument
- if start is not None:
- pdict['start'] = start
-
- # Collect parser information from the dictionary
- pinfo = ParserReflect(pdict, log=errorlog)
- pinfo.get_all()
-
- if pinfo.error:
- raise YaccError('Unable to build parser')
-
- # Check signature against table files (if any)
- signature = pinfo.signature()
-
- # Read the tables
- try:
- lr = LRTable()
- if picklefile:
- read_signature = lr.read_pickle(picklefile)
- else:
- read_signature = lr.read_table(tabmodule)
- if optimize or (read_signature == signature):
- try:
- lr.bind_callables(pinfo.pdict)
- parser = LRParser(lr, pinfo.error_func)
- parse = parser.parse
- return parser
- except Exception as e:
- errorlog.warning('There was a problem loading the table file: %r', e)
- except VersionError as e:
- errorlog.warning(str(e))
- except ImportError:
- pass
-
- if debuglog is None:
- if debug:
- try:
- debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
- except IOError as e:
- errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
- debuglog = NullLogger()
- else:
- debuglog = NullLogger()
-
- debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
-
- errors = False
-
- # Validate the parser information
- if pinfo.validate_all():
- raise YaccError('Unable to build parser')
-
- if not pinfo.error_func:
- errorlog.warning('no p_error() function is defined')
-
- # Create a grammar object
- grammar = Grammar(pinfo.tokens)
-
- # Set precedence level for terminals
- for term, assoc, level in pinfo.preclist:
- try:
- grammar.set_precedence(term, assoc, level)
- except GrammarError as e:
- errorlog.warning('%s', e)
-
- # Add productions to the grammar
- for funcname, gram in pinfo.grammar:
- file, line, prodname, syms = gram
- try:
- grammar.add_production(prodname, syms, funcname, file, line)
- except GrammarError as e:
- errorlog.error('%s', e)
- errors = True
-
- # Set the grammar start symbols
- try:
- if start is None:
- grammar.set_start(pinfo.start)
- else:
- grammar.set_start(start)
- except GrammarError as e:
- errorlog.error(str(e))
- errors = True
-
- if errors:
- raise YaccError('Unable to build parser')
-
- # Verify the grammar structure
- undefined_symbols = grammar.undefined_symbols()
- for sym, prod in undefined_symbols:
- errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
- errors = True
-
- unused_terminals = grammar.unused_terminals()
- if unused_terminals:
- debuglog.info('')
- debuglog.info('Unused terminals:')
- debuglog.info('')
- for term in unused_terminals:
- errorlog.warning('Token %r defined, but not used', term)
- debuglog.info(' %s', term)
-
- # Print out all productions to the debug log
- if debug:
- debuglog.info('')
- debuglog.info('Grammar')
- debuglog.info('')
- for n, p in enumerate(grammar.Productions):
- debuglog.info('Rule %-5d %s', n, p)
-
- # Find unused non-terminals
- unused_rules = grammar.unused_rules()
- for prod in unused_rules:
- errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
-
- if len(unused_terminals) == 1:
- errorlog.warning('There is 1 unused token')
- if len(unused_terminals) > 1:
- errorlog.warning('There are %d unused tokens', len(unused_terminals))
-
- if len(unused_rules) == 1:
- errorlog.warning('There is 1 unused rule')
- if len(unused_rules) > 1:
- errorlog.warning('There are %d unused rules', len(unused_rules))
-
- if debug:
- debuglog.info('')
- debuglog.info('Terminals, with rules where they appear')
- debuglog.info('')
- terms = list(grammar.Terminals)
- terms.sort()
- for term in terms:
- debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
-
- debuglog.info('')
- debuglog.info('Nonterminals, with rules where they appear')
- debuglog.info('')
- nonterms = list(grammar.Nonterminals)
- nonterms.sort()
- for nonterm in nonterms:
- debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
- debuglog.info('')
-
- if check_recursion:
- unreachable = grammar.find_unreachable()
- for u in unreachable:
- errorlog.warning('Symbol %r is unreachable', u)
-
- infinite = grammar.infinite_cycles()
- for inf in infinite:
- errorlog.error('Infinite recursion detected for symbol %r', inf)
- errors = True
-
- unused_prec = grammar.unused_precedence()
- for term, assoc in unused_prec:
- errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
- errors = True
-
- if errors:
- raise YaccError('Unable to build parser')
-
- # Run the LRGeneratedTable on the grammar
- if debug:
- errorlog.debug('Generating %s tables', method)
-
- lr = LRGeneratedTable(grammar, method, debuglog)
-
- if debug:
- num_sr = len(lr.sr_conflicts)
-
- # Report shift/reduce and reduce/reduce conflicts
- if num_sr == 1:
- errorlog.warning('1 shift/reduce conflict')
- elif num_sr > 1:
- errorlog.warning('%d shift/reduce conflicts', num_sr)
-
- num_rr = len(lr.rr_conflicts)
- if num_rr == 1:
- errorlog.warning('1 reduce/reduce conflict')
- elif num_rr > 1:
- errorlog.warning('%d reduce/reduce conflicts', num_rr)
-
- # Write out conflicts to the output file
- if debug and (lr.sr_conflicts or lr.rr_conflicts):
- debuglog.warning('')
- debuglog.warning('Conflicts:')
- debuglog.warning('')
-
- for state, tok, resolution in lr.sr_conflicts:
- debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
-
- already_reported = set()
- for state, rule, rejected in lr.rr_conflicts:
- if (state, id(rule), id(rejected)) in already_reported:
- continue
- debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
- debuglog.warning('rejected rule (%s) in state %d', rejected, state)
- errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
- errorlog.warning('rejected rule (%s) in state %d', rejected, state)
- already_reported.add((state, id(rule), id(rejected)))
-
- warned_never = []
- for state, rule, rejected in lr.rr_conflicts:
- if not rejected.reduced and (rejected not in warned_never):
- debuglog.warning('Rule (%s) is never reduced', rejected)
- errorlog.warning('Rule (%s) is never reduced', rejected)
- warned_never.append(rejected)
-
- # Write the table file if requested
- if write_tables:
- try:
- lr.write_table(tabmodule, outputdir, signature)
- except IOError as e:
- errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
-
- # Write a pickled version of the tables
- if picklefile:
- try:
- lr.pickle_table(picklefile, signature)
- except IOError as e:
- errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
-
- # Build the parser
- lr.bind_callables(pinfo.pdict)
- parser = LRParser(lr, pinfo.error_func)
-
- parse = parser.parse
- return parser
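For orientation, here is a minimal sketch of the kind of module yacc() reflects over; the token set and rule names below are illustrative and not taken from this tree. yacc() looks up tokens, precedence, p_error, and every p_* function whose docstring carries a grammar rule, exactly as ParserReflect.get_all() above collects them:

    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS', 'TIMES')        # picked up by get_tokens()

    precedence = (                              # (assoc, term, ..., term) per level,
        ('left', 'PLUS'),                       # as validate_precedence() expects
        ('left', 'TIMES'),
    )

    def p_expression_binop(p):
        '''expression : expression PLUS expression
                      | expression TIMES expression'''
        p[0] = (p[2], p[1], p[3])

    def p_expression_number(p):
        'expression : NUMBER'
        p[0] = p[1]

    def p_error(p):                             # found via get_error_func(); exactly one argument
        print('Syntax error at', p)

    # Builds (or reloads) the LALR tables and returns an LRParser bound to the
    # rule functions above.
    parser = yacc.yacc(start='expression')

If a previously written table module exists and its stored signature still matches pinfo.signature(), the table-generation step is skipped and the cached tables are reused.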
diff --git a/components/script/dom/bindings/codegen/ply/ply/ygen.py b/components/script/dom/bindings/codegen/ply/ply/ygen.py
deleted file mode 100644
index acf5ca1a37b..00000000000
--- a/components/script/dom/bindings/codegen/ply/ply/ygen.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# ply: ygen.py
-#
-# This is a support program that auto-generates stripped-down versions of the YACC
-# parsing function, with debugging and tracking features removed to improve performance.
-#
-# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
-# for that method is then used to create the other methods. See the comments in
-# yacc.py for further details.
-
-import os.path
-import shutil
-
-def get_source_range(lines, tag):
- srclines = enumerate(lines)
- start_tag = '#--! %s-start' % tag
- end_tag = '#--! %s-end' % tag
-
- for start_index, line in srclines:
- if line.strip().startswith(start_tag):
- break
-
- for end_index, line in srclines:
- if line.strip().endswith(end_tag):
- break
-
- return (start_index + 1, end_index)
-
-def filter_section(lines, tag):
- filtered_lines = []
- include = True
- tag_text = '#--! %s' % tag
- for line in lines:
- if line.strip().startswith(tag_text):
- include = not include
- elif include:
- filtered_lines.append(line)
- return filtered_lines
-
-def main():
- dirname = os.path.dirname(__file__)
- shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
- with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
- lines = f.readlines()
-
- parse_start, parse_end = get_source_range(lines, 'parsedebug')
- parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
- parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
-
- # Get the original source
- orig_lines = lines[parse_start:parse_end]
-
- # Filter the DEBUG sections out
- parseopt_lines = filter_section(orig_lines, 'DEBUG')
-
- # Filter the TRACKING sections out
- parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
-
- # Replace the parser source sections with updated versions
- lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
- lines[parseopt_start:parseopt_end] = parseopt_lines
-
- lines = [line.rstrip()+'\n' for line in lines]
- with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
- f.writelines(lines)
-
- print('Updated yacc.py')
-
-if __name__ == '__main__':
- main()
-
-
-
-
-
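A hedged, self-contained demo of the tag convention ygen.py relies on. The marked-up snippet is invented for illustration (the real tags sit around LRParser.parsedebug() in yacc.py), and the import assumes ygen.py is importable as ply.ygen:

    from ply.ygen import get_source_range, filter_section

    source = [
        '#--! parsedebug-start\n',
        'def parsedebug(self):\n',
        '    #--! DEBUG\n',
        "    log.info('debug-only line')\n",
        '    #--! DEBUG\n',
        '    do_work()\n',
        '#--! parsedebug-end\n',
    ]

    start, end = get_source_range(source, 'parsedebug')   # span strictly between the markers
    print(''.join(filter_section(source[start:end], 'DEBUG')))
    # Prints the body with the DEBUG-tagged region removed:
    #   def parsedebug(self):
    #       do_work()

main() applies this filtering twice, first for DEBUG and then for TRACKING, and splices the results back between the parseopt and parseopt-notrack markers in yacc.py.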
diff --git a/components/script/dom/bindings/codegen/ply/setup.cfg b/components/script/dom/bindings/codegen/ply/setup.cfg
deleted file mode 100644
index 4ec8a167da9..00000000000
--- a/components/script/dom/bindings/codegen/ply/setup.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-[bdist_wheel]
-universal = 1
-
-[metadata]
-description-file = README.md
-
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/components/script/dom/bindings/codegen/ply/setup.py b/components/script/dom/bindings/codegen/ply/setup.py
deleted file mode 100644
index ee8ccd0ccf5..00000000000
--- a/components/script/dom/bindings/codegen/ply/setup.py
+++ /dev/null
@@ -1,31 +0,0 @@
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
-
-setup(name = "ply",
- description="Python Lex & Yacc",
- long_description = """
-PLY is yet another implementation of lex and yacc for Python. Notable
-features include the fact that it is implemented entirely in Python and
-uses LALR(1) parsing, which is efficient and well suited for larger grammars.
-
-PLY provides most of the standard lex/yacc features including support for empty
-productions, precedence rules, error recovery, and support for ambiguous grammars.
-
-PLY is extremely easy to use and provides very extensive error checking.
-It is compatible with both Python 2 and Python 3.
-""",
- license="""BSD""",
- version = "3.10",
- author = "David Beazley",
- author_email = "dave@dabeaz.com",
- maintainer = "David Beazley",
- maintainer_email = "dave@dabeaz.com",
- url = "http://www.dabeaz.com/ply/",
- packages = ['ply'],
- classifiers = [
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 2',
- ]
- )