root/radiance/ray/test/py_tests/unit_tools/lcompare.py
Revision: 1.2
Committed: Thu Mar 10 01:49:56 2016 UTC by schorsch
Content type: text/x-python
Branch: MAIN
CVS Tags: rad5R4, rad5R2, rad5R1, rad5R3, HEAD
Changes since 1.1: +8 -10 lines
Log Message:
SCons build system learns about platform architecture

File Contents

''' Text comparison functions for Radiance unit testing.

This allows differences in whitespace, which is why the text
corpora are split into tokens first.
Tokens are then converted into an appropriate data type, so
that floating point items will still be considered correct
even if they are slightly different, e.g. as a consequence of
binary rounding errors.
'''
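
# Example (illustrative sketch of the intended use): split two text blobs
# into typed tokens and compare them, tolerating small float deviations:
#     llcompare(split_rad(test_output), split_rad(reference_text))
# The names test_output and reference_text are placeholders, not module API.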


class error(Exception): pass

# internal functions
def _icompare(itest, iref):
    '''compare ints (not public)'''
    if type(itest) == str:
        iftest = int(itest)
    else: iftest = itest
    if iftest == iref: return 1
    return 0

def _fcompare(ftest, fref):
    '''compare floats (not public)'''
    FUZZ = 0.0000001 # XXX heuristically determined
    if type(ftest) == str:
        fftest = float(ftest)
    else: fftest = ftest
    if (fftest < (fref + FUZZ)) and (fftest > (fref - FUZZ)):
        return 1
    return 0
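
# With FUZZ = 1e-7, for example, a test value of 0.50000005 is accepted
# against a reference of 0.5, while 0.5001 is rejected (illustrative values).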

def _typify_token(t):
    '''return the token as an int or a float if possible (not public)'''
    try: return int(t)
    except ValueError: pass
    try: return float(t)
    except ValueError: pass
    return t
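
# E.g. _typify_token('42') -> 42 and _typify_token('0.5') -> 0.5, while a
# non-numeric token such as 'sphere' is returned unchanged as a string
# (illustrative values).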


# public comparison functions

def lcompare(ltest, lref):
    '''compare a list of tokens
    raise an error if there are intolerable differences
    the reference tokens in lref should already be of the correct type.
    '''
    if len(ltest) != len(lref):
        raise error('List comparison failed: Different number of tokens'
            ' (%d, %d)' % (len(ltest), len(lref)))
    for i in range(len(lref)):
        tref = lref[i]
        ttest = ltest[i]
        if type(tref) == str and tref != ttest:
            raise error('Token comparison failed: "%s" != "%s"' % (ttest, tref))
        elif type(tref) == int and not _icompare(ttest, tref):
            raise error('Token comparison failed: %s != %s' % (ttest, tref))
        elif type(tref) == float and not _fcompare(ttest, tref):
            raise error('Token comparison failed: %s != %s' % (ttest, tref))

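# For instance (illustrative): lcompare(['0.50000001', 'red'], [0.5, 'red'])
# passes because the float difference is below FUZZ, while
# lcompare(['0.6'], [0.5]) raises an error.
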
def llcompare(lltest, llref, ignore_empty=0, recurse=[]):
    '''compare a list of lists of tokens recursively
    raise an error if there are intolerable differences
    the reference tokens in llref should already be of the correct type.
    if ignore_empty is true, empty lines are not included in the comparison
    the recurse argument is only used internally
    '''
    if ignore_empty:
        lltest = filter(None, lltest)
        llref = filter(None, llref)
    if len(lltest) != len(llref):
        raise error('Comparison failed: Different number of lines (%d, %d)' % (
            len(lltest), len(llref)))
    for i in range(len(llref)):
        if llref[i]:
            rtype = type(llref[i][0])
            if rtype == list or rtype == tuple:
                # descend into the nested lists, then continue with the next line
                llcompare(lltest[i], llref[i],
                    recurse=recurse + [i], ignore_empty=ignore_empty)
                continue
        try: lcompare(lltest[i], llref[i])
        except error as e:
            if recurse:
                raise error('%s (line %s)' % (str(e), recurse + [i + 1]))
            else: raise error('%s (line %d)' % (str(e), i + 1))

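# For instance (illustrative), comparing two lines of typed tokens:
#     llcompare([['void', 'plastic', 'red'], [0, 0, 5]],
#               [['void', 'plastic', 'red'], [0, 0, 5]])
# passes, while a mismatch in any token raises an error naming the line.
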
def split_headers(s):
    '''split Radiance file headers
    return a list of lists of tokens suitable for llcompare()
    this is useful to check the output of getinfo'''
    ll = [ss.strip() for ss in s.split('\n')]
    nll = []
    for l in ll:
        parts = l.split('=', 1)
        if len(parts) == 2:
            left = map(_typify_token, parts[0].split())
            right = map(_typify_token, parts[1].split())
            nll.append(left + ['='] + right)
        else: nll.append(map(_typify_token, l.split()))
    return nll
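
# For instance (illustrative), a header line like 'EXPOSURE= 2.5' is split
# into ['EXPOSURE', '=', 2.5], with the value converted to a float.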

def split_rad(s):
    '''Split the contents of a scene description string
    return a list of lists of tokens suitable for llcompare()'''
    ll = [ss.strip() for ss in s.split('\n')]
    nll = []
    for l in ll:
        nll.append(map(_typify_token, l.split()))
    return nll

def split_radfile(fn):
    '''Split the contents of a scene description file given by name
    return a list of lists of tokens suitable for llcompare()'''
    f = open(fn, 'r')
    ll = split_rad(f.read())
    f.close()
    return ll

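# A minimal usage sketch (assuming a reference scene file 'ref.rad' and a
# string 'result' produced by the program under test; both names are
# placeholders, not part of this module):
#
#     expected = split_radfile('ref.rad')
#     actual = split_rad(result)
#     llcompare(actual, expected, ignore_empty=1)
#
# Any intolerable difference raises lcompare.error with the offending token
# and line number in the message.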