''' Text comparison functions for Radiance unit testing.

	This allows differences in whitespace, which is why the text
	corpora are split into tokens first.
	Tokens are then converted into an appropriate data type, so
	that floating point items will still be considered correct
	even if they are slightly different, e.g. as a consequence of
	binary rounding errors.
'''

class error(Exception): pass |

# internal functions
def _icompare(itest, iref):
	'''compare ints (not public)'''
	if type(itest) == str:
		iftest = int(itest)
	else: iftest = itest
	if iftest == iref: return 1
	return 0

def _fcompare(ftest, fref):
	'''compare floats (not public)'''
	FUZZ = 0.0000001 # XXX heuristically determined
	if type(ftest) == str:
		fftest = float(ftest)
	else: fftest = ftest
	if (fftest < (fref + FUZZ)) and (fftest > (fref - FUZZ)):
		return 1
	return 0
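
# Illustrative sketch (not part of the original module): shows how the FUZZ
# tolerance in _fcompare() accepts binary rounding noise but rejects real
# differences, while _icompare() converts string tokens before comparing.
# Define-only; call by hand if desired.
def _example_compare():
	'''illustrative only (assumed example values)'''
	assert _fcompare('0.1000000001', 0.1) == 1  # within FUZZ: accepted
	assert _fcompare(0.11, 0.1) == 0            # outside FUZZ: rejected
	assert _icompare('42', 42) == 1             # string token converted to int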

def _typify_token(t):
	'''return the token as an int or a float if possible (not public)'''
	try: return int(t)
	except ValueError: pass
	try: return float(t)
	except ValueError: pass
	return t
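
# Illustrative sketch (assumed example data, not part of the original module):
# _typify_token() turns the raw string tokens of a split line into typed
# values, so that numeric fields can later be compared with tolerance.
def _example_typify():
	'''illustrative only'''
	tokens = map(_typify_token, 'foo 4 0 1.5'.split())
	assert tokens == ['foo', 4, 0, 1.5]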


# public comparison functions

def lcompare(ltest, lref):
	'''compare a list of tokens
		raise an error if there are intolerable differences
		the reference tokens in lref should already be of the correct type.
	'''
	if len(ltest) != len(lref):
		raise error, ('List comparison failed: Different number of tokens'
				' (%d, %d)' % (len(ltest), len(lref)))
	for i in range(len(lref)):
		tref = lref[i]
		ttest = ltest[i]
		if type(tref) == str and tref != ttest:
			raise error, 'Token comparison failed: "%s" != "%s"' % (ttest, tref)
		elif type(tref) == int and not _icompare(ttest, tref):
			raise error, 'Token comparison failed: %s != %s' % (ttest, tref)
		elif type(tref) == float and not _fcompare(ttest, tref):
			raise error, 'Token comparison failed: %s != %s' % (ttest, tref)
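
# Illustrative usage sketch (assumed example data, not part of the original
# module): the reference list is already typed, while the test list may still
# consist of raw strings from a split line.
def _example_lcompare():
	'''illustrative only; lcompare() raises error on a mismatch'''
	ref = ['-vp', 0.0, 0.0, 1.0]
	test = '-vp 0.00000001 0 1'.split()
	lcompare(test, ref)  # passes: the floats differ only below FUZZ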

def llcompare(lltest, llref, ignore_empty=0, recurse=[]):
	'''compare a list of lists of tokens recursively
		raise an error if there are intolerable differences
		the reference tokens in llref should already be of the correct type.
		if ignore_empty is true, empty lines are not included in the comparison
		the recurse argument is only used internally
	'''
	if ignore_empty:
		lltest = filter(None, lltest)
		llref = filter(None, llref)
	if len(lltest) != len(llref):
		raise error, 'Comparison failed: Different number of lines (%d, %d)' %(
				len(lltest), len(llref))
	for i in range(len(llref)):
		if llref[i]:
			rtype = type(llref[i][0])
			if rtype == list or rtype == tuple:
				# recurse into nested lists, then go on with the next line
				# (list.append() returns None, so the path is built with "+")
				llcompare(lltest[i], llref[i],
						recurse=recurse + [i], ignore_empty=ignore_empty)
				continue
		try: lcompare(lltest[i], llref[i])
		except error, e:
			if recurse:
				raise error, '%s (line %s)' % (str(e), recurse + [i + 1])
			else: raise error, '%s (line %d)' % (str(e), i + 1)
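
# Illustrative sketch (assumed example data, not part of the original module):
# llcompare() walks a list of token lists line by line; with ignore_empty=1,
# blank lines in either input are skipped before comparing.
def _example_llcompare():
	'''illustrative only'''
	ref = [['oconv', '-f'], [], [0.0, 1.0, 2.0]]
	test = [['oconv', '-f'], ['0.00000001', '1', '2']]
	llcompare(test, ref, ignore_empty=1)  # passes once empty lines are dropped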

def split_headers(s):
	'''split Radiance file headers
		return a list of lists of tokens suitable for llcompare()
		this is useful to check the output of getinfo'''
	ll = [ss.strip() for ss in s.split('\n')]
	nll = []
	for l in ll:
		parts = l.split('=', 1)
		if len(parts) == 2:
			left = map(_typify_token, parts[0].split())
			right = map(_typify_token, parts[1].split())
			nll.append(left + ['='] + right)
		else: nll.append(map(_typify_token, l.split()))
	return nll
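
# Illustrative sketch (assumed example header text, not part of the original
# module): split_headers() keeps the '=' as a token of its own, so key and
# value tokens can be compared separately.
def _example_split_headers():
	'''illustrative only'''
	hdr = 'EXPOSURE=2.5\nVIEW= -vp 0 0 1'
	assert split_headers(hdr) == [
			['EXPOSURE', '=', 2.5],
			['VIEW', '=', '-vp', 0, 0, 1]]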

def split_rad(s):
	'''Split the contents of a scene description string
		return a list of lists of tokens suitable for llcompare()'''
	ll = [ss.strip() for ss in s.split('\n')]
	nll = []
	for l in ll:
		nll.append(map(_typify_token, l.split()))
	return nll
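
# Illustrative end-to-end sketch (assumed example scene text, not part of the
# original module): split two scene description strings and compare them with
# float tolerance.
def _example_split_rad():
	'''illustrative only'''
	ref = split_rad('void plastic red\n0\n0\n5 0.5 0.1 0.1 0 0')
	test = split_rad('void plastic red\n0\n0\n5 0.50000001 0.1 0.1 0 0')
	llcompare(test, ref)  # passes: the difference is below FUZZ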

def split_radfile(fn):
	'''Split the contents of a file containing a scene description
		return a list of lists of tokens suitable for llcompare()'''
	f = open(fn, 'r')
	ll = split_rad(f.read())
	f.close()
	return ll
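
# Illustrative usage sketch (hypothetical file names, not part of the original
# module): a typical check compares a freshly generated scene file against a
# stored reference file.
def _example_split_radfile(testfn='out.rad', reffn='ref/out.rad'):
	'''illustrative only; both arguments are hypothetical file names'''
	llcompare(split_radfile(testfn), split_radfile(reffn))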