source: coopr.pyomo/stable/2.3/coopr/pyomo/data/parse_datacmds.py @ 2433

Last change on this file since 2433 was 2433, checked in by wehart, 11 years ago

Merged revisions 2388-2432 via svnmerge from
https://software.sandia.gov/svn/public/coopr/coopr.pyomo/trunk

........

r2399 | wehart | 2010-02-24 11:36:56 -0700 (Wed, 24 Feb 2010) | 2 lines


Adding a work-around when pstats cannot be imported.

........

r2400 | wehart | 2010-02-24 14:51:08 -0700 (Wed, 24 Feb 2010) | 2 lines


Misc documentation updates.

........

r2408 | wehart | 2010-02-28 05:41:04 -0700 (Sun, 28 Feb 2010) | 3 lines


Allow an earlier version of Python. I'm still not sure this is a good idea,
but it's necessary for Jython.

........

r2409 | wehart | 2010-02-28 05:42:33 -0700 (Sun, 28 Feb 2010) | 7 lines


Rework of profiling imports.


Refinement of Pyomo command-line parsing, which is more
specific now.


Adding automatic import of pyomo to package.

........

r2415 | jwatson | 2010-03-08 20:53:13 -0700 (Mon, 08 Mar 2010) | 3 lines


Significantly improved performance of the PyomoModel _clear_attribute method, mainly by eliminating unnecessary calls to it through _setattr_exec_.

........

r2416 | jwatson | 2010-03-09 16:45:22 -0700 (Tue, 09 Mar 2010) | 5 lines


Modified the AMPL dat file parser to instantiate the lexer and yaccer objects only once, for the lifetime of the module.


They were being inadvertently created at each invocation, which pyomo users wouldn't notice. But PySP users, who can be creating 1K or more instances, do!

........

r2422 | wehart | 2010-03-11 16:01:09 -0700 (Thu, 11 Mar 2010) | 3 lines


Rework of unit tests to (a) import pyutilib.th as 'unittest' and
(b) employ test skipping.

........

r2430 | wehart | 2010-03-11 23:38:22 -0700 (Thu, 11 Mar 2010) | 2 lines


Reworking class decorators.

........

File size: 9.8 KB
Line 
1
2#  _________________________________________________________________________
3#
4#  Coopr: A COmmon Optimization Python Repository
5#  Copyright (c) 2008 Sandia Corporation.
6#  This software is distributed under the BSD License.
7#  Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
8#  the U.S. Government retains certain rights in this software.
9#  For more information, see the Coopr README.txt file.
10#  _________________________________________________________________________
11
# Explicit public API: only the top-level parsing entry point is exported.
__all__ = ['parse_data_commands']
13
14import re
15import os
16import os.path
17import ply.lex as lex
18import ply.yacc as yacc
19from pyutilib.misc import flatten
20from pyutilib.ply import t_newline, t_ignore, _find_column, p_error, ply_init
21
22## -----------------------------------------------------------
23##
24## Lexer definitions for tokenizing the input
25##
26## -----------------------------------------------------------
27
# Module-level parser state: _parse_info accumulates the parsed data
# commands (reset on each call to parse_data_commands); debugging flags
# whether parser diagnostics were requested.
_parse_info = None
debugging = False
30
# Keywords of the data-command language.  Each keyword maps to its token
# name, which is simply the keyword upper-cased.
reserved = dict((kw, kw.upper()) for kw in
                ('data', 'set', 'param', 'end', 'import', 'include'))
39
# Token names recognized by the lexer.  Commented-out entries are tokens
# that are not currently part of the grammar.  The reserved-word token
# names are appended at the end (dict.values() returns a list under
# Python 2, which this module targets).
tokens = [
    "COMMA",
#    "LBRACE",
#    "RBRACE",
#    "NUMBER",
    "SEMICOLON",
    "COLON",
    "COLONEQ",
    "LBRACKET",
    "RBRACKET",
    "LPAREN",
    "RPAREN",
#    "RANGE",
    "WORD",
    "WORDWITHINDEX",
    "STRING",
    "QUOTEDSTRING",
    "FILENAME",
    "EQ",
    "TR",
    "ASTERISK",
    "NONWORD",
] + reserved.values()
64
# Regular expression rules for simple one-pattern tokens.  PLY reads
# these t_* string values directly as the token regexes, so the strings
# themselves are the lexer specification.  Commented-out rules are kept
# as a record of tokens that were once defined.
t_COMMA     = r","
t_LBRACKET  = r"\["
t_RBRACKET  = r"\]"
#t_NUMBER    = r"[0-9]+(\.[0-9]+){0,1}"
t_SEMICOLON = r";"
t_COLON     = r":"
t_COLONEQ   = r":="
t_EQ        = r"="
t_TR        = r"\(tr\)"         # the AMPL "(tr)" transpose marker
#t_LT        = r"<"
#t_GT        = r">"
#t_LBRACE    = r"{"
#t_RBRACE    = r"}"
t_LPAREN    = r"\("
t_RPAREN    = r"\)"
t_ASTERISK  = r"\*"
82
# Discard comments
def t_COMMENT(t):
    r'\#[^\n]*'
    # Returning nothing makes PLY drop the matched text, so '#' comments
    # never reach the parser.  The commented-out lines are the remains of
    # an earlier mechanism that collected comment text.
    #global _comment_list
    #_comment_list.append(t.value)
88
def t_WORDWITHINDEX(t):
    # NOTE: the raw string below is the PLY token regex, not documentation.
    # It matches an identifier immediately followed by a bracketed index
    # list, e.g. x[a,b] or p[1,*].
    r'[a-zA-Z_0-9][a-zA-Z_0-9\.\-]*\[[a-zA-Z_0-9\.\-,\*]*\]'
    # The reserved lookup cannot match here (no keyword contains '[');
    # it is kept for uniformity with the other token rules.
    t.type = reserved.get(t.value,'WORDWITHINDEX')    # Check for reserved words
    return t
93
def t_WORD(t):
    # NOTE: the raw string below is the PLY token regex, not documentation.
    # It matches identifier-like words (letters, digits, '_', '.', '-').
    r'[a-zA-Z_0-9][a-zA-Z_0-9\.\-]*'
    # Retype the token if the word is a language keyword (set, param, ...).
    t.type = reserved.get(t.value,'WORD')    # Check for reserved words
    return t
98
def t_STRING(t):
    # NOTE: the raw string below is the PLY token regex, not documentation.
    # Like t_WORD but additionally allows '+' and a leading '.', '+' or '-'.
    r'[a-zA-Z_0-9\.+\-]+'
    t.type = reserved.get(t.value,'STRING')    # Check for reserved words
    return t
103
def t_QUOTEDSTRING(t):
    # NOTE: the raw string below is the PLY token regex, not documentation.
    # Matches a double- or single-quoted string; the quotes are retained
    # in t.value.
    r'"([^"]|\"\")*"|\'([^\']|\'\')*\''
    # The reserved lookup cannot match a quoted value; kept for uniformity.
    t.type = reserved.get(t.value,'QUOTEDSTRING')    # Check for reserved words
    return t
108
def t_FILENAME(t):
    # NOTE: the raw string below is the PLY token regex, not documentation.
    # Matches a path-like word: it must contain at least one '/' or '\'.
    r'[a-zA-Z_0-9\./\\]*(/|\\)[a-zA-Z_0-9\./\\]*'
    # The reserved lookup cannot match a path; kept for uniformity.
    t.type = reserved.get(t.value,'FILENAME')    # Check for reserved words
    return t
113
# Catch-all token: any run of characters that cannot begin one of the
# tokens defined above.
t_NONWORD   = r"[^\.A-Za-z0-9,;:=<>\*\(\)\#{}\[\] \n\t\r]+"
115
# Error handling rule: abort tokenization on any character no rule matched.
def t_error(t):             #pragma:nocover
    # Raise immediately with the offending character.  The original code
    # also called t.lexer.skip(1), but that statement was unreachable
    # after the raise and has been removed.  The raise uses the call form,
    # which is valid in both Python 2 and Python 3.
    raise IOError("Illegal character '%s'" % t.value[0])
120
121
122## -----------------------------------------------------------
123##
124## Yacc grammar for data commands
125##
126## -----------------------------------------------------------
127
def p_expr(p):
    # Start symbol: an input is a (possibly empty) sequence of statements.
    # The docstring below is the PLY grammar rule; no action is needed
    # because p_statement records results in the module-global _parse_info.
    '''expr : statements
            | '''
131
def p_statements(p):
    # Right-recursive list of one or more statements.  The docstring is
    # the PLY grammar rule; no action is needed here.
    '''statements : statement statements
                  | statement '''
135
def p_statement(p):
    '''statement : SET WORD COLONEQ setdecl SEMICOLON
                 | SET WORD COLONEQ SEMICOLON
                 | SET WORD COLON items COLONEQ setdecl SEMICOLON
                 | SET WORD COLON items COLONEQ SEMICOLON
                 | SET WORDWITHINDEX COLONEQ setdecl SEMICOLON
                 | SET WORDWITHINDEX COLONEQ SEMICOLON
                 | PARAM items COLONEQ paramdecl SEMICOLON
                 | IMPORT importdecl SEMICOLON
                 | INCLUDE WORD SEMICOLON
                 | INCLUDE QUOTEDSTRING SEMICOLON
                 | DATA SEMICOLON
                 | END SEMICOLON
    '''
    # Record each recognized statement in the module-global _parse_info
    # list.  The trailing SEMICOLON is dropped from every record; 'data'
    # and 'end' statements are intentionally not recorded.
    global _parse_info
    #print "STATEMENT",len(p), p[1:]
    if p[1] in ['set','param']:
        # Flatten the nested item lists into one flat token list.
        _parse_info.append( flatten(p[1:-1]) )
    elif p[1] in ['include']:
        _parse_info.append( p[1:-1] )
    elif p[1] in ['import']:
        # p[2] is the [options, index-spec, variable-map] list that
        # p_importdecl constructed.
        _parse_info.append( [p[1]]+ p[2] )
        #_parse_info.append( [p[1], p[2][0], p[1:-1] )
159
def p_setdecl(p):
    '''setdecl : items'''
    # A set declaration body is just its item list.
    p[0] = p[1]
163
def p_paramdecl(p):
    '''paramdecl : items'''
    # A param declaration body is just its item list.
    p[0] = p[1]
167
def p_importdecl(p):
    '''importdecl : filename import_options
                  | filename
                  | filename import_options COLON WORD EQ indices variable_options
                  | filename COLON WORD EQ indices variable_options
                  | filename import_options COLON indices variable_options
                  | filename COLON indices variable_options
                  | filename import_options COLON variable_options
                  | filename COLON variable_options
    '''
    # Normalize every alternative to a three-element list:
    #   [ option-dict (always containing 'filename'),
    #     (set-name-or-None, index-list),
    #     variable-name map ]
    # The alternatives are distinguished by their symbol count, plus one
    # len==5 ambiguity that is resolved by testing whether p[2] is ':'.
    tmp = {'filename':p[1]}
    if len(p) == 2:
        # filename
        p[0] = [tmp, (None,[]), {}]
    elif len(p) == 3:
        # filename import_options
        tmp.update(p[2])
        p[0] = [tmp, (None,[]), {}]
    elif len(p) == 4:
        # filename COLON variable_options
        p[0] = [tmp, (None,[]), p[3]]
    elif len(p) == 5:
        if p[2] == ':':
            # filename COLON indices variable_options
            p[0] = [tmp, (None,p[3]), p[4]]
        else:
            # filename import_options COLON variable_options
            tmp.update(p[2])
            p[0] = [tmp, (None,[]), p[4]]
    elif len(p) == 6:
        # filename import_options COLON indices variable_options
        tmp.update(p[2])
        p[0] = [tmp, (None,p[4]), p[5]]
    elif len(p) == 7:
        # filename COLON WORD EQ indices variable_options
        p[0] = [tmp, (p[3],p[5]), p[6]]
    elif len(p) == 8:
        # filename import_options COLON WORD EQ indices variable_options
        tmp.update(p[2])
        p[0] = [tmp, (p[4],p[6]), p[7]]
    else:
        # The grammar guarantees 2 <= len(p) <= 8; anything else is a
        # parser bug.  Raise with the call form (valid in Python 2 and 3;
        # the original used the Python-2-only statement form).
        raise IOError("Unexpected condition")
202
def p_import_options(p):
    '''import_options : WORD EQ STRING import_options
                      | WORD EQ STRING
                      | WORD EQ QUOTEDSTRING import_options
                      | WORD EQ QUOTEDSTRING
                      | WORD EQ WORD import_options
                      | WORD EQ WORD
                      | WORD EQ PARAM import_options
                      | WORD EQ PARAM
                      | WORD EQ SET import_options
                      | WORD EQ SET
    '''
    # Fold the "name = value" pairs into a dict.  When a trailing
    # import_options nonterminal is present (len(p) == 5), it already
    # holds the dict for the remaining pairs; merge it in.
    opts = {p[1]: p[3]}
    if len(p) > 4:
        opts.update(p[4])
    p[0] = opts
221
def p_variable_options(p):
    '''variable_options : variable variable_options
                        | variable
    '''
    # Merge the chain of single-variable dicts into one mapping.  The
    # first variable's dict is reused as the accumulator, exactly as in
    # the original implementation.
    merged = p[1]
    if len(p) > 2:
        merged.update(p[2])
    p[0] = merged
231
def p_variable(p):
    '''variable : WORD
                | WORD EQ WORD
    '''
    # A bare WORD maps a name to itself; "WORD EQ WORD" maps the
    # right-hand name (p[3]) to the left-hand name (p[1]).
    key = p[1] if len(p) == 2 else p[3]
    p[0] = {key: p[1]}
240
def p_indices(p):
    '''indices : LBRACKET WORD index_list RBRACKET
               | LBRACKET WORD RBRACKET
    '''
    # Produce the bracketed index names as a flat list; the optional
    # index_list (present when len(p) == 5) supplies the tail.
    tail = p[3] if len(p) == 5 else []
    p[0] = [p[2]] + tail
249
def p_index_list(p):
    '''index_list : COMMA WORD index_list
                  | COMMA ASTERISK index_list
                  | COMMA WORD
                  | COMMA ASTERISK
    '''
    # Collect the comma-separated names (or '*' wildcards) into a list;
    # a trailing index_list (len(p) == 4) supplies the remaining entries.
    tail = p[3] if len(p) == 4 else []
    p[0] = [p[2]] + tail
260
def p_template(p):
    '''template : LPAREN WORD index_list RPAREN
                | LPAREN ASTERISK index_list RPAREN
                | LPAREN WORD RPAREN
                | LPAREN ASTERISK RPAREN
    '''
    # Reassemble the template into a single "(a,b,...)" string.
    if len(p) == 4:
        # Single entry: just parenthesize it.
        p[0] = p[1] + p[2] + p[3]
    else:
        entries = [p[2]] + p[3]
        p[0] = p[1] + ",".join(entries) + p[4]
271
def p_items(p):
    '''items : item items
             | item'''
    # Build a flat list of items; a trailing items nonterminal
    # (len(p) == 3) holds the already-collected tail.
    tail = p[2] if len(p) == 3 else []
    p[0] = [p[1]] + tail
279
def p_item(p):
    '''item : WORD
            | WORDWITHINDEX
            | NONWORD
            | STRING
            | QUOTEDSTRING
            | COMMA
            | COLON
            | LBRACKET
            | RBRACKET
            | TR
            | LPAREN
            | RPAREN
            | ASTERISK
            | template
    '''
    # Pass the token text (or the assembled template string) through
    # unchanged; p_statement flattens these into the statement record.
    p[0] = p[1]
297
def p_filename(p):
    '''filename : WORD
                | STRING
                | QUOTEDSTRING
                | FILENAME
                | WORD COLON FILENAME
    '''
    # A "WORD COLON FILENAME" triple (e.g. a drive-letter path) is
    # rejoined into one string; every other alternative passes through.
    p[0] = p[1] if len(p) == 2 else p[1] + p[2] + p[3]
309
#
# the ampl dat file lexer and yaccer only need to be
# created once, so have the corresponding objects
# accessible at module scope.
#

# Name of the generated parser-table module, and the directory (this
# package's directory) where PLY writes it.
tabmodule = 'parse_table_datacmds'
outputdir = os.path.dirname(os.path.abspath(__file__))+os.sep

# Lazily-created singletons, built on the first call to
# parse_data_commands() and reused for the lifetime of the module.
ampl_dat_lexer = None
ampl_dat_yaccer = None
321
#
# The function that performs the parsing
#
def parse_data_commands(data=None, filename=None, debug=0):
    """Parse AMPL data commands and return the list of parsed statements.

    Exactly one of `data` (a string of data commands) or `filename`
    (path to a file of data commands) should be supplied; if neither is
    given, None is returned.  The result is the module-global
    _parse_info list as built by the p_* grammar actions.  `debug` is
    passed through to the PLY lexer/parser; a positive value also
    removes cached parsetab files (first call only) so PLY regenerates
    its diagnostic output.
    """

    global debugging
    global ampl_dat_lexer
    global ampl_dat_yaccer

    # if the lexer/yaccer haven't been initialized, do so.
    if ampl_dat_lexer is None:
       #
       # Always remove the parser.out file, which is generated to create debugging
       #
       if os.path.exists("parser.out"):        #pragma:nocover
           os.remove("parser.out")
       if debug > 0:                           #pragma:nocover
           #
           # Remove the parsetab.py* files.  These apparently need to be removed
           # to ensure the creation of a parser.out file.
           #
           if os.path.exists("parsetab.py"):
              os.remove("parsetab.py")
           if os.path.exists("parsetab.pyc"):
              os.remove("parsetab.pyc")
           debugging=True

       # Build the singletons once; lex/yacc pick up the t_*/p_* rules
       # defined at module scope in this file.
       ampl_dat_lexer = lex.lex()
       ampl_dat_yaccer = yacc.yacc(debug=debug, tabmodule=tabmodule, outputdir=outputdir)

    #
    # Initialize parse object
    #
    global _parse_info
    _parse_info = []

    #
    # Parse the file
    #
    global _parsedata
    if not data is None:
        _parsedata=data
        ply_init(_parsedata)
        ampl_dat_yaccer.parse(data, lexer=ampl_dat_lexer, debug=debug)
    elif not filename is None:
        # Read the whole file into memory before parsing.
        f = open(filename)
        data = f.read()
        f.close()
        _parsedata=data
        ply_init(_parsedata)
        ampl_dat_yaccer.parse(data, lexer=ampl_dat_lexer, debug=debug)
    else:
        # Neither input supplied: signal "nothing parsed" with None.
        _parse_info = None
    #
    # Disable parsing I/O
    #
    debugging=False
    return _parse_info
380
Note: See TracBrowser for help on using the repository browser.