ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/radiance/ray/src/hd/rholo3.c
Revision: 3.1
Committed: Fri Oct 31 10:23:29 1997 UTC (26 years, 5 months ago) by gregl
Content type: text/plain
Branch: MAIN
Log Message:
Initial revision

File Contents

# User Rev Content
1 gregl 3.1 /* Copyright (c) 1997 Silicon Graphics, Inc. */
2    
3     #ifndef lint
4     static char SCCSid[] = "$SunId$ SGI";
5     #endif
6    
7     /*
8     * Routines for tracking beam computations
9     */
10    
#include "rholo.h"

#include <string.h>
12    
13    
/* NOTE: both macros evaluate their argument more than once --
 * do not pass expressions with side effects (e.g. x++).
 */
#define abs(x)		((x) > 0 ? (x) : -(x))
#define sgn(x)		((x) > 0 ? 1 : (x) < 0 ? -1 : 0)


static PACKHEAD	*complist;	/* list of beams to compute, kept in
				 * descending priority order (see beamcmp) */
static int	complen;	/* length of complist */
static int	listpos;	/* current list position for next_packet */
static int	lastin;		/* last position still known to be in sorted
				 * order; -1 forces a full qsort of the list */
22    
23    
24     int
25     weightf(hp, x0, x1, x2) /* voxel weighting function */
26     register HOLO *hp;
27     register int x0, x1, x2;
28     {
29     switch (vlet(OCCUPANCY)) {
30     case 'U': /* uniform weighting */
31     return(1);
32     case 'C': /* center weighting (crude) */
33     x0 += x0 - hp->grid[0] + 1;
34     x0 = abs(x0)*hp->grid[1]*hp->grid[2];
35     x1 += x1 - hp->grid[1] + 1;
36     x1 = abs(x1)*hp->grid[0]*hp->grid[2];
37     x2 += x2 - hp->grid[2] + 1;
38     x2 = abs(x2)*hp->grid[0]*hp->grid[1];
39     return(hp->grid[0]*hp->grid[1]*hp->grid[2] -
40     (x0+x1+x2)/3);
41     default:
42     badvalue(OCCUPANCY);
43     }
44     }
45    
46    
/* The following is by Daniel Cohen, taken from Graphics Gems IV, p. 368 */
long
lineweight(hp, x, y, z, dx, dy, dz)	/* compute weights along a line */
HOLO	*hp;
int	x, y, z, dx, dy, dz;
{
	/*
	 * Walk the 6-connected voxel line from (x,y,z) along the vector
	 * (dx,dy,dz) using Cohen's integer 3-D DDA, summing weightf()
	 * over every voxel visited.  Returns the total weight, used as
	 * the sampling priority for the corresponding beam.
	 */
	long	wres = 0;
	int	n, sx, sy, sz, exy, exz, ezy, ax, ay, az, bx, by, bz;

	sx = sgn(dx);  sy = sgn(dy);  sz = sgn(dz);	/* step direction per axis */
	ax = abs(dx);  ay = abs(dy);  az = abs(dz);	/* absolute deltas */
	bx = 2*ax;  by = 2*ay;  bz = 2*az;		/* doubled deltas for error terms */
	exy = ay-ax;  exz = az-ax;  ezy = ay-az;	/* pairwise DDA error terms */
	n = ax+ay+az + 1;		/* added increment to visit last */
	while (n--) {
		wres += weightf(hp, x, y, z);
		/* step along exactly one axis per iteration; the error
		 * terms decide which axis is most behind (order of the
		 * updates below is significant -- do not rearrange) */
		if (exy < 0) {
			if (exz < 0) {
				x += sx;
				exy += by;  exz += bz;
			} else {
				z += sz;
				exz -= bx;  ezy += by;
			}
		} else {
			if (ezy < 0) {
				z += sz;
				exz -= bx;  ezy += by;
			} else {
				y += sy;
				exy -= bx;  ezy -= bz;
			}
		}
	}
	return(wres);
}
83    
84    
85     int
86     beamcmp(b0, b1) /* comparison for descending compute order */
87     register PACKHEAD *b0, *b1;
88     {
89     return( b1->nr*(bnrays(hdlist[b0->hd],b0->bi)+1) -
90     b0->nr*(bnrays(hdlist[b1->hd],b1->bi)+1) );
91     }
92    
93    
94     init_global() /* initialize global ray computation */
95     {
96     long wtotal = 0;
97     int i, j;
98     int lseg[2][3];
99     double frac;
100     register int k;
101     /* allocate beam list */
102     complen = 0;
103     for (j = 0; hdlist[j] != NULL; j++)
104     complen += nbeams(hdlist[j]);
105     complist = (PACKHEAD *)malloc(complen*sizeof(PACKHEAD));
106     if (complist == NULL)
107     error(SYSTEM, "out of memory in init_global");
108     /* compute beam weights */
109     k = 0;
110     for (j = 0; hdlist[j] != NULL; j++)
111     for (i = nbeams(hdlist[j]); i > 0; i--) {
112     hdlseg(lseg, hdlist[j], i);
113     complist[k].hd = j;
114     complist[k].bi = i;
115     complist[k].nr = lineweight( hdlist[j],
116     lseg[0][0], lseg[0][1], lseg[0][2],
117     lseg[1][0] - lseg[0][0],
118     lseg[1][1] - lseg[0][1],
119     lseg[1][2] - lseg[0][2] );
120     wtotal += complist[k++].nr;
121     }
122     /* adjust weights */
123     if (vdef(DISKSPACE)) {
124     frac = 1024.*1024.*vflt(DISKSPACE) / (wtotal*sizeof(RAYVAL));
125     if (frac < 0.95 | frac > 1.05)
126     while (k--)
127     complist[k].nr = frac * complist[k].nr;
128     }
129     listpos = 0; lastin = -1;
130     }
131    
132    
133     mergeclists(cdest, cl1, n1, cl2, n2) /* merge two sorted lists */
134     PACKHEAD *cdest;
135     PACKHEAD *cl1, *cl2;
136     int n1, n2;
137     {
138     int cmp;
139    
140     while (n1 | n2) {
141     if (!n1) cmp = 1;
142     else if (!n2) cmp = -1;
143     else cmp = beamcmp(cl1, cl2);
144     if (cmp > 0) {
145     copystruct(cdest, cl2);
146     cl2++; n2--;
147     } else {
148     copystruct(cdest, cl1);
149     cl1++; n1--;
150     }
151     cdest++;
152     }
153     }
154    
155    
156     sortcomplist() /* fix our list order */
157     {
158     PACKHEAD *list2;
159     /* empty queue */
160     done_packets(flush_queue());
161     if (lastin < 0) /* flag to sort entire list */
162     qsort((char *)complist, complen, sizeof(PACKHEAD), beamcmp);
163     else if (listpos) { /* else sort and merge sublist */
164     list2 = (PACKHEAD *)malloc(listpos*sizeof(PACKHEAD));
165     if (list2 == NULL)
166     error(SYSTEM, "out of memory in sortcomplist");
167     bcopy((char *)complist,(char *)list2,listpos*sizeof(PACKHEAD));
168     qsort((char *)list2, listpos, sizeof(PACKHEAD), beamcmp);
169     mergeclists(complist, list2, listpos,
170     complist+listpos, complen-listpos);
171     free((char *)list2);
172     }
173     listpos = 0; lastin = complen-1;
174     }
175    
176    
177     /*
178     * The following routine works on the assumption that the bundle weights are
179     * more or less evenly distributed, such that computing a packet causes
180     * a given bundle to move way down in the computation order. We keep
181     * track of where the computed bundle with the highest priority would end
182     * up, and if we get further in our compute list than this, we resort the
183     * list and start again from the beginning. We have to flush the queue
184     * each time we sort, to ensure that we are not disturbing the order.
185     * If our major assumption is violated, and we have a very steep
186     * descent in our weights, then we will end up resorting much more often
187     * than necessary, resulting in frequent flushing of the queue. Since
188     * a merge sort is used, the sorting costs will be minimal.
189     */
next_packet(p)			/* prepare packet for computation */
register PACKET	*p;
{
	/*
	 * Fill in p with the highest-priority beam still needing rays.
	 * Returns 1 on success, 0 when there is no more work (empty
	 * list, or the top beam already has all its requested rays).
	 * See the block comment above for the lazy-resort strategy.
	 */
	int	ncomp;
	register int	i;

	if (complen <= 0)
		return(0);
	if (listpos > lastin)		/* time to sort the list */
		sortcomplist();
	p->hd = complist[listpos].hd;
	p->bi = complist[listpos].bi;
	ncomp = bnrays(hdlist[p->hd],p->bi);	/* rays computed so far */
	p->nr = complist[listpos].nr - ncomp;	/* rays still wanted */
	if (p->nr <= 0)
		return(0);		/* top beam done => all done */
	if (p->nr > RPACKSIZ)
		p->nr = RPACKSIZ;	/* clamp to one packet's worth */
	ncomp += p->nr;			/* find where this one would go */
	/* walk lastin back past every entry this beam would now outrank;
	 * the cross-multiplied test is beamcmp()'s ordering with this
	 * beam's post-packet ray count (ncomp) substituted in */
	while (lastin > listpos && complist[listpos].nr *
		(bnrays(hdlist[complist[lastin].hd],complist[lastin].bi)+1)
		> complist[lastin].nr * (ncomp+1))
		lastin--;
	listpos++;
	return(1);
}
215     }