10 |
|
__all__ = ['error', 'lcompare', 'llcompare', |
11 |
|
'split_headers', 'split_rad', 'split_radfile'] |
12 |
|
|
13 |
+ |
import re |
14 |
+ |
import shlex |
15 |
|
# Py2.7/3.x compatibility |
16 |
|
try: from itertools import (izip_longest as zip_longest, chain, |
17 |
|
ifilter as filter, izip as zip) |
20 |
|
|
21 |
|
class error(Exception):
    '''Raised when a comparison performed by this module fails.'''
22 |
|
|
23 |
+ |
_strtypes = (type(b''), type(u'')) |
24 |
+ |
|
25 |
|
# internal functions |
26 |
|
def _icompare(itest, iref): |
27 |
|
'''compare ints (not public)''' |
28 |
< |
if type(itest) == str: |
28 |
> |
if isinstance(itest, _strtypes): |
29 |
|
iftest = int(itest) |
30 |
|
else: iftest = itest |
31 |
< |
if iftest == iref: return 1 |
32 |
< |
return 0 |
31 |
> |
if iftest == iref: return True |
32 |
> |
return False |
33 |
|
|
34 |
|
def _fcompare(ftest, fref): |
35 |
|
'''compare floats (not public)''' |
36 |
|
FUZZ = 0.0000001 # XXX heuristically determined (quite low?) |
37 |
< |
if type(ftest) == str: |
37 |
> |
if isinstance(ftest, _strtypes): |
38 |
|
fftest = float(ftest) |
39 |
|
else: fftest = ftest |
40 |
|
if (fftest < (fref + FUZZ)) and (fftest > (fref - FUZZ)): |
65 |
|
if tref is False: |
66 |
|
raise error('List comparision failed: More tokens than expected' |
67 |
|
' (>= %d != %d)' % (i+1, i)) |
68 |
< |
if type(tref) == str and tref != ttest: |
65 |
< |
print(tref, ttest) |
68 |
> |
if isinstance(tref, _strtypes) and tref != ttest: |
69 |
|
raise error('String token comparison failed: "%s" != "%s"' |
70 |
|
% (ttest, tref)) |
71 |
|
elif type(tref) == int and not _icompare(ttest, tref): |
69 |
– |
print((ttest, tref)) |
72 |
|
raise error('Int token comparison failed: %s != %s' % (ttest, tref)) |
73 |
|
elif type(tref) == float and not _fcompare(ttest, tref): |
74 |
|
raise error('Float token comparison failed: %s != %s' |
94 |
|
if lref is False: |
95 |
|
raise error('List comparision failed: More entries than expected' |
96 |
|
' (>= %d != %d)' % (i+1, i)) |
97 |
< |
if lref and not isinstance(lref, str): |
97 |
> |
if lref and not isinstance(lref, _strtypes): |
98 |
> |
|
99 |
|
if hasattr(lref, '__getitem__'): |
100 |
|
rfirst = lref[0] |
101 |
|
elif hasattr(lref, 'next') or hasattr(lref, '__next__'): |
102 |
|
rfirst = next(lref) # "peek" at first |
103 |
|
lref = chain([rfirst], lref) # "push" back |
104 |
|
else: rfirst = None |
105 |
< |
if isinstance(rfirst, str): |
105 |
> |
if isinstance(rfirst, _strtypes): |
106 |
|
rfirst = None |
107 |
|
if hasattr(rfirst, '__iter__') or isinstance(rfirst, (list, tuple)): |
108 |
< |
return llcompare(ltest, lref, |
108 |
> |
llcompare(ltest, lref, |
109 |
|
_recurse=_recurse + [i], ignore_empty=ignore_empty) |
110 |
|
try: lcompare(ltest, lref) |
111 |
< |
except TypeError: |
112 |
< |
print(ltest, lref) |
113 |
< |
raise |
111 |
> |
# except TypeError: |
112 |
> |
# print(ltest, lref) |
113 |
> |
# raise |
114 |
|
except error as e: |
115 |
|
if _recurse: |
116 |
|
raise error('%s (line %s)' % (str(e), _recurse + [i + 1])) |
117 |
|
else: raise error('%s (line %d)' % (str(e), i + 1)) |
118 |
|
|
119 |
+ |
# Header lines are either "<indent>name = value" or "<indent>free text".
# Groups: 1 = leading indent, 2 = name and 3 = value for assignment lines,
# 4 = the whole remainder for free-form lines.
# NOTE: raw string — the pattern contains \s escapes, which trigger
# invalid-escape-sequence warnings in a plain string literal.
_HLPATS = r'(\s*)(?:([^=\s]*)\s*=\s*(.*)\s*|(.*)\s*)'
_hlpat = re.compile(_HLPATS)

def split_headers(s):
    '''split Radiance file headers (eg. the output of getinfo).

    Each "name = value" line becomes [indent, name, '=', [value tokens]];
    any other line becomes [indent, [tokens]].  Tokens are split with
    shlex.split() and passed through _typify_token().
    Return a list of lists of tokens suitable for llcompare().'''
    nll = []
    for line in s.split('\n'):
        groups = _hlpat.match(line).groups()
        indent = groups[0]
        if groups[1]:
            # assignment-style header line: "name = value"
            value = [_typify_token(tok) for tok in shlex.split(groups[2])]
            nll.append([indent, groups[1], '=', value])
        else:
            # free-form header line
            tokens = [_typify_token(tok) for tok in shlex.split(groups[3])]
            nll.append([indent, tokens])
    return nll
138 |
|
|
139 |
|
def split_rad(s): |