root/radiance/ray/test/py_tests/unit_tools/lcompare.py
Revision: 1.1
Committed: Sun Dec 7 20:40:51 2003 UTC by schorsch
Content type: text/x-python
Branch: MAIN
CVS Tags: rad4R2P2, rad5R0, rad3R7P2, rad3R7P1, rad4R2, rad4R1, rad4R0, rad3R6, rad3R6P1, rad3R8, rad3R9, rad4R2P1
Log Message:
First attempt at testing framework.

File Contents

''' Text comparison functions for Radiance unit testing.

This allows differences in whitespace, which is why the text
corpora are split into tokens first.
Tokens are then converted into an appropriate data type, so
that floating point items will still be considered correct
even if they are slightly different, e.g. as a consequence of
binary rounding errors.
'''

import string
import types

class error(Exception): pass

# internal functions
def _icompare(itest, iref):
    '''compare ints (not public)'''
    if type(itest) == str:
        iftest = int(itest)
    else: iftest = itest
    if iftest == iref: return 1
    return 0

def _fcompare(ftest, fref):
    '''compare floats (not public)'''
    FUZZ = 0.0000001 # XXX heuristically determined
    if type(ftest) == str:
        fftest = float(ftest)
    else: fftest = ftest
    if (fftest < (fref + FUZZ)) and (fftest > (fref - FUZZ)):
        return 1
    return 0

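# Illustrative note (not part of the original revision): the FUZZ window
# above means a test token only has to match the reference float to within
# 1e-7, so ordinary binary rounding noise is tolerated, e.g.
#   _fcompare(0.1 + 0.2, 0.3)        # -> 1
#   _fcompare('0.30000001', 0.3)     # -> 1
#   _fcompare('0.31', 0.3)           # -> 0
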
def _typify_token(t):
    '''convert the token to int or float if possible (not public)'''
    try: return int(t)
    except ValueError: pass
    try: return float(t)
    except ValueError: pass
    return t


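# Illustrative note (not part of the original revision): each raw token is
# promoted to the most specific type that parses, which is what lets the
# comparison functions below pick int, float or string semantics, e.g.
#   _typify_token('42')      # -> 42 (int)
#   _typify_token('.05')     # -> 0.05 (float)
#   _typify_token('sphere')  # -> 'sphere' (str)
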
# public comparison functions

def lcompare(ltest, lref):
    '''compare a list of tokens
    raise an error if there are intolerable differences
    the reference tokens in lref should already be of the correct type.
    '''
    if len(ltest) != len(lref):
        raise error, ('List comparison failed: Different number of tokens'
            ' (%d, %d)' % (len(ltest), len(lref)))
    for i in range(len(lref)):
        tref = lref[i]
        ttest = ltest[i]
        if type(tref) == str and tref != ttest:
            raise error, 'Token comparison failed: "%s" != "%s"' % (ttest, tref)
        elif type(tref) == int and not _icompare(ttest, tref):
            raise error, 'Token comparison failed: %s != %s' % (ttest, tref)
        elif type(tref) == float and not _fcompare(ttest, tref):
            raise error, 'Token comparison failed: %s != %s' % (ttest, tref)

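# Example (illustrative, not part of the original revision): the test tokens
# may still be raw strings, while the reference tokens must already be typed;
# numeric rounding noise passes, real mismatches raise lcompare.error:
#   lcompare(['3', '0.30000001', 'ok'], [3, 0.3, 'ok'])  # passes silently
#   lcompare([3, 0.4, 'ok'], [3, 0.3, 'ok'])             # raises error
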
def llcompare(lltest, llref, ignore_empty=0, recurse=[]):
    '''compare a list of lists of tokens recursively
    raise an error if there are intolerable differences
    the reference tokens in llref should already be of the correct type.
    if ignore_empty is true, empty lines are not included in the comparison
    the recurse argument is only used internally
    '''
    if ignore_empty:
        lltest = filter(None, lltest)
        llref = filter(None, llref)
    if len(lltest) != len(llref):
        raise error, 'Comparison failed: Different number of lines (%d,%d)' % (
            len(lltest), len(llref))
    for i in range(len(llref)):
        if llref[i]:
            rtype = type(llref[i][0])
            if rtype == list or rtype == tuple:
                # recurse into nested lists, remembering where we are
                llcompare(lltest[i], llref[i],
                    recurse=recurse + [i], ignore_empty=ignore_empty)
                continue
        try: lcompare(lltest[i], llref[i])
        except error, e:
            if recurse:
                raise error, '%s (line %s)' % (str(e), recurse + [i + 1])
            else: raise error, '%s (line %d)' % (str(e), i + 1)

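# Example (illustrative, not part of the original revision): line-by-line
# comparison of tokenized output, with blank lines skipped on both sides:
#   ref  = [['void', 'plastic', 'red'], [0], [5, 0.7, 0.05, 0.05, 0, 0]]
#   test = [['void', 'plastic', 'red'], ['0'],
#           ['5', '.70000001', '.05', '.05', '0', '0'], []]
#   llcompare(test, ref, ignore_empty=1)  # passes; a real difference raises
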
def split_headers(s):
    '''split Radiance file headers
    return a list of lists of tokens suitable for llcompare()
    this is useful to check the output of getinfo'''
    ll = map(string.strip, string.split(s, '\n'))
    nll = []
    for l in ll:
        parts = string.split(l, '=', 1)
        if len(parts) == 2:
            left = map(_typify_token, string.split(parts[0]))
            right = map(_typify_token, string.split(parts[1]))
            nll.append(left + ['='] + right)
        else: nll.append(map(_typify_token, string.split(l)))
    return nll

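# Example (illustrative, not part of the original revision): header lines are
# split around '=' so the key, the separator and the (typified) values can be
# compared individually:
#   split_headers('FORMAT=32-bit_rle_rgbe\nEXPOSURE= 2.5\n')
#   # -> [['FORMAT', '=', '32-bit_rle_rgbe'], ['EXPOSURE', '=', 2.5], []]
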
def split_rad(s):
    '''Split the contents of a scene description string
    return a list of lists of tokens suitable for llcompare()'''
    ll = map(string.strip, string.split(s, '\n'))
    nll = []
    for l in ll:
        nll.append(map(_typify_token, string.split(l)))
    return nll

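# Example (illustrative, not part of the original revision): every line of a
# scene description becomes one list of typed tokens:
#   split_rad('void plastic red\n0\n0\n5 .7 .05 .05 0 0\n')
#   # -> [['void', 'plastic', 'red'], [0], [0],
#   #     [5, 0.7, 0.05, 0.05, 0, 0], []]
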
def split_radfile(fn):
    '''Split the contents of the file named fn containing a scene description
    return a list of lists of tokens suitable for llcompare()'''
    f = open(fn, 'r')
    ll = split_rad(f.read())
    f.close()
    return ll

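# Illustrative self-test sketch (not part of the original revision 1.1); the
# scene strings below are made-up examples, not actual Radiance output. It
# shows how the pieces fit together: tokenize both corpora, then compare them
# line by line, tolerating whitespace and float rounding differences.
if __name__ == '__main__':
    _ref = split_rad('void plastic red\n0\n0\n5 .7 .05 .05 0 0\n')
    _test = split_rad('void  plastic  red\n0\n0\n5 0.70000001 .05 .05 0 0\n')
    try:
        llcompare(_test, _ref, ignore_empty=1)
        print('lcompare self-test passed')
    except error, e:
        print('lcompare self-test failed: %s' % str(e))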