root/radiance/ray/src/hd/rholo3.c
Revision: 3.8
Committed: Thu Nov 20 11:39:57 1997 UTC by gregl
Content type: text/plain
Branch: MAIN
Changes since 3.7: +2 -0 lines
Log Message:
added check for no display process

File Contents

/* Copyright (c) 1997 Silicon Graphics, Inc. */

#ifndef lint
static char SCCSid[] = "$SunId$ SGI";
#endif

/*
 * Routines for tracking beam computations
 */

#include "rholo.h"


#define abs(x)	((x) > 0 ? (x) : -(x))
#define sgn(x)	((x) > 0 ? 1 : (x) < 0 ? -1 : 0)


static PACKHEAD	*complist=NULL;	/* list of beams to compute */
static int	complen=0;	/* length of complist */
static int	listpos=0;	/* current list position for next_packet */
static int	lastin= -1;	/* last ordered position in list */

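/*
 * beamcmp() orders entries by decreasing nr/(rays already computed + 1);
 * the cross-multiplied form avoids integer division.  It is passed to
 * qsort() and used by mergeclists(), and the same test appears inline
 * in next_packet().
 */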
int
beamcmp(b0, b1)			/* comparison for descending compute order */
register PACKHEAD	*b0, *b1;
{
	return( b1->nr*(bnrays(hdlist[b0->hd],b0->bi)+1) -
		b0->nr*(bnrays(hdlist[b1->hd],b1->bi)+1) );
}

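/*
 * bundle_set() maintains the static computation list according to op:
 * BS_NEW replaces the list, BS_ADD merges additional requests into it,
 * and BS_DEL removes or reduces requests.  For BS_NEW and BS_ADD, any
 * rays already stored for the listed beams are then sent to the display
 * process via disp_packet(), provided outdev is set.
 */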
bundle_set(op, clist, nents)	/* bundle set operation */
int	op;
PACKHEAD	*clist;
int	nents;
{
	BEAM	*b;
	PACKHEAD	*p;
	register int	i, n;

	switch (op) {
	case BS_NEW:			/* new computation set */
		if (complen)
			free((char *)complist);
		if (nents <= 0) {
			complist = NULL;
			listpos = complen = 0;
			lastin = -1;
			return;
		}
		complist = (PACKHEAD *)malloc(nents*sizeof(PACKHEAD));
		if (complist == NULL)
			goto memerr;
		bcopy((char *)clist, (char *)complist, nents*sizeof(PACKHEAD));
		complen = nents;
		listpos = 0;
		lastin = -1;		/* flag for initial sort */
		break;
	case BS_ADD:			/* add to computation set */
		if (nents <= 0)
			return;
					/* merge any common members */
		for (i = 0; i < complen; i++)
			for (n = 0; n < nents; n++)
				if (clist[n].bi == complist[i].bi &&
						clist[n].hd == complist[i].hd) {
					complist[i].nr += clist[n].nr;
					clist[n].nr = 0;
					lastin = -1;	/* flag full sort */
					break;
				}
					/* sort updated list */
		sortcomplist();
					/* sort new entries */
		qsort((char *)clist, nents, sizeof(PACKHEAD), beamcmp);
					/* what can't we satisfy? */
		for (n = 0; n < nents && clist[n].nr >
				bnrays(hdlist[clist[n].hd],clist[n].bi); n++)
			;
		if (n) {		/* allocate space for merged list */
			PACKHEAD	*newlist;
			newlist = (PACKHEAD *)malloc(
					(complen+n)*sizeof(PACKHEAD) );
			if (newlist == NULL)
				goto memerr;
					/* merge lists */
			mergeclists(newlist, clist, n, complist, complen);
			if (complen)
				free((char *)complist);
			complist = newlist;
			complen += n;
		}
		listpos = 0;
		lastin = complen-1;	/* list is now sorted */
		break;
	case BS_DEL:			/* delete from computation set */
		if (nents <= 0)
			return;
					/* find each member */
		for (i = 0; i < complen; i++)
			for (n = 0; n < nents; n++)
				if (clist[n].bi == complist[i].bi &&
						clist[n].hd == complist[i].hd) {
					if (clist[n].nr == 0 ||
							clist[n].nr >= complist[i].nr)
						complist[i].nr = 0;
					else
						complist[i].nr -= clist[n].nr;
					lastin = -1;	/* flag full sort */
					break;
				}
		if (lastin < 0)		/* sort updated list */
			sortcomplist();
		return;			/* no display */
	default:
		error(CONSISTENCY, "bundle_set called with unknown operation");
	}
	if (outdev == NULL)
		return;
	n = RPACKSIZ;			/* allocate packet holder */
	for (i = 0; i < nents; i++)
		if (clist[i].nr > n)
			n = clist[i].nr;
	p = (PACKHEAD *)malloc(packsiz(n));
	if (p == NULL)
		goto memerr;
					/* display what we have */
	for (i = 0; i < nents; i++)
		if ((b = hdgetbeam(hdlist[clist[i].hd], clist[i].bi)) != NULL) {
			if (b->nrm > n) {
				n = b->nrm;
				p = (PACKHEAD *)realloc((char *)p, packsiz(n));
				if (p == NULL)
					goto memerr;
			}
			bcopy((char *)hdbray(b), (char *)packra(p),
					(p->nr=b->nrm)*sizeof(RAYVAL));
			p->hd = clist[i].hd;
			p->bi = clist[i].bi;
			disp_packet(p);
		}
	free((char *)p);		/* clean up */
	return;
memerr:
	error(SYSTEM, "out of memory in bundle_set");
}

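/*
 * weightf() gives the relative importance of a grid voxel according to
 * the OCCUPANCY variable: 'U' weights every voxel equally, while 'C'
 * favors voxels near the center of the section grid.
 */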
int
weightf(hp, x0, x1, x2)		/* voxel weighting function */
register HOLO	*hp;
register int	x0, x1, x2;
{
	switch (vlet(OCCUPANCY)) {
	case 'U':			/* uniform weighting */
		return(1);
	case 'C':			/* center weighting (crude) */
		x0 += x0 - hp->grid[0] + 1;
		x0 = abs(x0)*hp->grid[1]*hp->grid[2];
		x1 += x1 - hp->grid[1] + 1;
		x1 = abs(x1)*hp->grid[0]*hp->grid[2];
		x2 += x2 - hp->grid[2] + 1;
		x2 = abs(x2)*hp->grid[0]*hp->grid[1];
		return(hp->grid[0]*hp->grid[1]*hp->grid[2] -
				(x0+x1+x2)/3);
	default:
		badvalue(OCCUPANCY);
	}
}


/* The following is by Daniel Cohen, taken from Graphics Gems IV, p. 368 */
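/*
 * The loop below performs an integer walk through the grid cells that
 * the segment from (x,y,z) along (dx,dy,dz) passes through, summing
 * weightf() at each cell visited.  init_global() uses the result as the
 * initial (unscaled) request weight for the corresponding beam.
 */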
long
lineweight(hp, x, y, z, dx, dy, dz)	/* compute weights along a line */
HOLO	*hp;
int	x, y, z, dx, dy, dz;
{
	long	wres = 0;
	int	n, sx, sy, sz, exy, exz, ezy, ax, ay, az, bx, by, bz;

	sx = sgn(dx);  sy = sgn(dy);  sz = sgn(dz);
	ax = abs(dx);  ay = abs(dy);  az = abs(dz);
	bx = 2*ax;  by = 2*ay;  bz = 2*az;
	exy = ay-ax;  exz = az-ax;  ezy = ay-az;
	n = ax+ay+az + 1;		/* added increment to visit last */
	while (n--) {
		wres += weightf(hp, x, y, z);
		if (exy < 0) {
			if (exz < 0) {
				x += sx;
				exy += by;  exz += bz;
			} else {
				z += sz;
				exz -= bx;  ezy += by;
			}
		} else {
			if (ezy < 0) {
				z += sz;
				exz -= bx;  ezy += by;
			} else {
				y += sy;
				exy -= bx;  ezy -= bz;
			}
		}
	}
	return(wres);
}

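/*
 * init_global() builds a computation list covering every beam of every
 * holodeck section, weighting each beam with lineweight() along its
 * line segment.  If DISKSPACE is set, the weights are rescaled so the
 * total expected ray storage roughly matches that limit.
 */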
init_global()			/* initialize global ray computation */
{
	long	wtotal = 0;
	int	i, j;
	int	lseg[2][3];
	double	frac;
	register int	k;
					/* free old list */
	if (complen > 0)
		free((char *)complist);
					/* allocate beam list */
	complen = 0;
	for (j = 0; hdlist[j] != NULL; j++)
		complen += nbeams(hdlist[j]);
	complist = (PACKHEAD *)malloc(complen*sizeof(PACKHEAD));
	if (complist == NULL)
		error(SYSTEM, "out of memory in init_global");
					/* compute beam weights */
	k = 0;
	for (j = 0; hdlist[j] != NULL; j++)
		for (i = nbeams(hdlist[j]); i > 0; i--) {
			hdlseg(lseg, hdlist[j], i);
			complist[k].hd = j;
			complist[k].bi = i;
			complist[k].nr = lineweight( hdlist[j],
					lseg[0][0], lseg[0][1], lseg[0][2],
					lseg[1][0] - lseg[0][0],
					lseg[1][1] - lseg[0][1],
					lseg[1][2] - lseg[0][2] );
			wtotal += complist[k++].nr;
		}
					/* adjust weights */
	if (vdef(DISKSPACE)) {
		frac = 1024.*1024.*vflt(DISKSPACE) / (wtotal*sizeof(RAYVAL));
		if (frac < 0.95 | frac > 1.05)
			while (k--)
				complist[k].nr = frac * complist[k].nr;
	}
	listpos = 0;  lastin = -1;	/* flag initial sort */
}

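/* mergeclists() merges two lists already sorted by beamcmp() into cdest */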
mergeclists(cdest, cl1, n1, cl2, n2)	/* merge two sorted lists */
PACKHEAD	*cdest;
PACKHEAD	*cl1, *cl2;
int	n1, n2;
{
	int	cmp;

	while (n1 | n2) {
		if (!n1) cmp = 1;
		else if (!n2) cmp = -1;
		else cmp = beamcmp(cl1, cl2);
		if (cmp > 0) {
			copystruct(cdest, cl2);
			cl2++;  n2--;
		} else {
			copystruct(cdest, cl1);
			cl1++;  n1--;
		}
		cdest++;
	}
}

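/*
 * sortcomplist() flushes the outstanding packet queue, then restores
 * sorted order: a full qsort() if the list was never ordered or mostly
 * consumed, otherwise a sort of the consumed prefix merged back into
 * the still-sorted remainder.  Satisfied requests are trimmed from the
 * tail, and listpos/lastin are reset for next_packet().
 */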
sortcomplist()			/* fix our list order */
{
	PACKHEAD	*list2;
	register int	i;

					/* empty queue */
	done_packets(flush_queue());
	if (complen <= 0)	/* check to see if there is even a list */
		return;
	if (lastin < 0 || listpos*4 >= complen*3)
		qsort((char *)complist, complen, sizeof(PACKHEAD), beamcmp);
	else if (listpos) {	/* else sort and merge sublist */
		list2 = (PACKHEAD *)malloc(listpos*sizeof(PACKHEAD));
		if (list2 == NULL)
			error(SYSTEM, "out of memory in sortcomplist");
		bcopy((char *)complist,(char *)list2,listpos*sizeof(PACKHEAD));
		qsort((char *)list2, listpos, sizeof(PACKHEAD), beamcmp);
		mergeclists(complist, list2, listpos,
				complist+listpos, complen-listpos);
		free((char *)list2);
	}
					/* check for all finished */
	if (complist[0].nr <= bnrays(hdlist[complist[0].hd],complist[0].bi)) {
		free((char *)complist);
		complist = NULL;
		complen = 0;
	}
					/* drop satisfied requests */
	for (i = complen; i-- && complist[i].nr <=
			bnrays(hdlist[complist[i].hd],complist[i].bi); )
		;
	if (i < 0) {
		free((char *)complist);
		complist = NULL;
		complen = 0;
	} else if (i < complen-1) {
		list2 = (PACKHEAD *)realloc((char *)complist,
				(i+1)*sizeof(PACKHEAD));
		if (list2 != NULL) {
			complist = list2;
			complen = i+1;
		}
	}
	listpos = 0;  lastin = i;
}


/*
 * The following routine works on the assumption that the bundle weights are
 * more or less evenly distributed, such that computing a packet causes
 * a given bundle to move way down in the computation order.  We keep
 * track of where the computed bundle with the highest priority would end
 * up, and if we get further in our compute list than this, we resort the
 * list and start again from the beginning.  We have to flush the queue
 * each time we sort, to ensure that we are not disturbing the order.
 * If our major assumption is violated, and we have a very steep
 * descent in our weights, then we will end up resorting much more often
 * than necessary, resulting in frequent flushing of the queue.  Since
 * a merge sort is used, the sorting costs will be minimal.
 */
next_packet(p)			/* prepare packet for computation */
register PACKET	*p;
{
	int	ncomp;
	register int	i;

	if (complen <= 0)
		return(0);
	if (listpos > lastin)		/* time to sort the list */
		sortcomplist();
	p->hd = complist[listpos].hd;
	p->bi = complist[listpos].bi;
	ncomp = bnrays(hdlist[p->hd],p->bi);
	p->nr = complist[listpos].nr - ncomp;
	if (p->nr <= 0)
		return(0);
	if (p->nr > RPACKSIZ)
		p->nr = RPACKSIZ;
	ncomp += p->nr;			/* find where this one would go */
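					/* the cross-multiplied test below
					 * mirrors beamcmp() with ncomp in
					 * place of this beam's current count,
					 * so lastin settles roughly where this
					 * bundle would sort once the packet
					 * has been computed */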
	while (lastin > listpos && complist[listpos].nr *
		(bnrays(hdlist[complist[lastin].hd],complist[lastin].bi)+1)
			> complist[lastin].nr * (ncomp+1))
		lastin--;
	listpos++;
	return(1);
}