 10
 11	#include "rholo.h"
 12
 13 -
 13	#define abs(x) ((x) > 0 ? (x) : -(x))
 14	#define sgn(x) ((x) > 0 ? 1 : (x) < 0 ? -1 : 0)
 15
 17 -
 16	static PACKHEAD *complist=NULL; /* list of beams to compute */
 17	static int complen=0; /* length of complist */
 18	static int listpos=0; /* current list position for next_packet */

 23	beamcmp(b0, b1) /* comparison for descending compute order */
 24	register PACKHEAD *b0, *b1;
 25	{
 26 <		return( b1->nr*(bnrays(hdlist[b0->hd],b0->bi)+1) -
 29 <			b0->nr*(bnrays(hdlist[b1->hd],b1->bi)+1) );
 26 >		return( b1->nr*(b0->nc+1) - b0->nr*(b1->nc+1) );
 27	}
 28
 29
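The rewritten beamcmp() keeps the same descending priority, effectively the ratio nr/(nc+1) compared by cross multiplication, but it reads the computed-ray count from the cached nc member instead of calling bnrays() twice on the beam directory for every comparison. For reference while reading the rest of the diff, the PACKHEAD fields assumed throughout are sketched below; the authoritative definition is in rholo.h, and the field comments here are inferred from how this diff uses them.

typedef struct packhead {
	int	hd;		/* holodeck section index */
	int	bi;		/* beam index */
	int	nr;		/* number of rays requested */
	int	nc;		/* number of rays already computed (cached) */
} PACKHEAD;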
 55			lastin = -1; /* flag for initial sort */
 56			break;
 57		case BS_ADD: /* add to computation set */
 58 +	case BS_ADJ: /* adjust set quantities */
 59			if (nents <= 0)
 60				return;
 61						/* merge any common members */
 62 <		for (i = 0; i < complen; i++)
 62 >		for (i = 0; i < complen; i++) {
 63				for (n = 0; n < nents; n++)
 64					if (clist[n].bi == complist[i].bi &&
 65							clist[n].hd == complist[i].hd) {
 66 <					complist[i].nr += clist[n].nr;
 66 >					if (op == BS_ADD)
 67 >						complist[i].nr += clist[n].nr;
 68 >					else /* op == BS_ADJ */
 69 >						complist[i].nr = clist[n].nr;
 70						clist[n].nr = 0;
 71 +					clist[n].nc = 1;
 72						lastin = -1; /* flag full sort */
 73						break;
 74					}
 75 +			if (n >= nents)
 76 +				clist[i].nc = bnrays(hdlist[clist[i].hd],
 77 +						clist[i].bi);
 78 +		}
 79						/* sort updated list */
 80			sortcomplist();
 81						/* sort new entries */
 82			qsort((char *)clist, nents, sizeof(PACKHEAD), beamcmp);
 83						/* what can't we satisfy? */
 84 <		for (n = 0; n < nents && clist[n].nr >
 79 <				bnrays(hdlist[clist[n].hd],clist[n].bi); n++)
 84 >		for (n = 0; n < nents && clist[n].nr > clist[n].nc; n++)
 85				;
 86 +		if (op == BS_ADJ)
 87 +			nents = n;
 88			if (n) { /* allocate space for merged list */
 89				PACKHEAD *newlist;
 90				newlist = (PACKHEAD *)malloc(
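The "what can't we satisfy?" scan above may stop at the first satisfied entry because, under beamcmp()'s nr/(nc+1) ordering, an unsatisfied entry (nr >= nc+1) has a ratio of at least 1 while a satisfied one (nr <= nc) has a ratio below 1, so unsatisfied entries always form a prefix of the qsorted array. Below is a minimal standalone check of that property, using a stand-in struct and comparator rather than the real PACKHEAD and beamcmp().

#include <assert.h>

typedef struct { int hd, bi, nr, nc; } PH;	/* stand-in for PACKHEAD */

static int
precedes(PH *a, PH *b)	/* would a sort ahead of b under beamcmp()? */
{
	return(b->nr*(a->nc+1) - a->nr*(b->nc+1) < 0);
}

int
main(void)
{
	PH	unsat = { 0, 1, 3, 1 };		/* nr > nc: ratio >= 1 */
	PH	sat = { 0, 2, 100, 200 };	/* nr <= nc: ratio < 1 */

	assert(precedes(&unsat, &sat));
	return(0);
}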
123		}
124		if (outdev == NULL)
125			return;
126 <	n = 8*RPACKSIZ; /* allocate packet holder */
126 >	n = 32*RPACKSIZ; /* allocate packet holder */
127		p = (PACKHEAD *)malloc(packsiz(n));
128		if (p == NULL)
129			goto memerr;

137						goto memerr;
138				}
139				bcopy((char *)hdbray(b), (char *)packra(p),
140 <					(p->nr=b->nrm)*sizeof(RAYVAL));
140 >					b->nrm*sizeof(RAYVAL));
141				p->hd = clist[i].hd;
142				p->bi = clist[i].bi;
143 +			p->nr = p->nc = b->nrm;
144				disp_packet(p);
145			}
146		free((char *)p); /* clean up */
147 +	if (op == BS_NEW) {
148 +		done_packets(flush_queue()); /* empty queue, so we can... */
149 +		for (i = 0; i < complen; i++) /* ...get number computed */
150 +			complist[i].nc = bnrays(hdlist[complist[i].hd],
151 +					complist[i].bi);
152 +	}
153		return;
154	memerr:
155		error(SYSTEM, "out of memory in bundle_set");

287		PACKHEAD *list2;
288		register int i;
289
276 -				/* empty queue */
277 -	done_packets(flush_queue());
290		if (complen <= 0) /* check to see if there is even a list */
291			return;
292		if (lastin < 0 || listpos*4 >= complen*3)

302			free((char *)list2);
303		}
304					/* drop satisfied requests */
305 <	for (i = complen; i-- && complist[i].nr <=
294 <			bnrays(hdlist[complist[i].hd],complist[i].bi); )
305 >	for (i = complen; i-- && complist[i].nr <= complist[i].nc; )
306			;
307		if (i < 0) {
308			free((char *)complist);

326	 * a given bundle to move way down in the computation order. We keep
327	 * track of where the computed bundle with the highest priority would end
328	 * up, and if we get further in our compute list than this, we resort the
329 <	 * list and start again from the beginning. We have to flush the queue
330 <	 * each time we sort, to ensure that we are not disturbing the order.
320 <	 * If our major assumption is violated, and we have a very steep
321 <	 * descent in our weights, then we will end up resorting much more often
322 <	 * than necessary, resulting in frequent flushing of the queue. Since
323 <	 * a merge sort is used, the sorting costs will be minimal.
329 >	 * list and start again from the beginning. Since
330 >	 * a merge sort is used, the sorting costs are minimal.
331	 */
332	next_packet(p) /* prepare packet for computation */
333	register PACKET *p;
334	{
328 -		int ncomp;
335		register int i;
336
337		if (listpos > lastin) /* time to sort the list */

340			return(0);
341		p->hd = complist[listpos].hd;
342		p->bi = complist[listpos].bi;
343 <	ncomp = bnrays(hdlist[p->hd],p->bi);
344 <	p->nr = complist[listpos].nr - ncomp;
343 >	p->nc = complist[listpos].nc;
344 >	p->nr = complist[listpos].nr - p->nc;
345		if (p->nr <= 0)
346			return(0);
347		if (p->nr > RPACKSIZ)
348			p->nr = RPACKSIZ;
349 <	ncomp += p->nr; /* find where this one would go */
350 <	while (lastin > listpos && complist[listpos].nr *
351 <			(bnrays(hdlist[complist[lastin].hd],complist[lastin].bi)+1)
346 <			> complist[lastin].nr * (ncomp+1))
349 >	complist[listpos].nc += p->nr; /* find where this one would go */
350 >	while (lastin > listpos &&
351 >			beamcmp(complist+lastin, complist+listpos) > 0)
352			lastin--;
353		listpos++;
354		return(1);
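To make the descending priority that the comment above next_packet() relies on concrete, here is a small self-contained example; the struct and comparator are stand-ins that mirror the new beamcmp(), not code from this file. A beam with many outstanding requests and few computed rays is scheduled first.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int hd, bi, nr, nc; } PH;	/* stand-in for PACKHEAD */

static int
cmp(const void *p0, const void *p1)	/* mirrors the new beamcmp() */
{
	const PH	*b0 = (const PH *)p0, *b1 = (const PH *)p1;

	return(b1->nr*(b0->nc+1) - b0->nr*(b1->nc+1));
}

int
main(void)
{
	PH	set[3] = { {0,1,10,9}, {0,2,10,0}, {0,3,4,1} };
	int	i;

	qsort((void *)set, 3, sizeof(PH), cmp);
	for (i = 0; i < 3; i++)		/* prints beams 2, 3, 1 */
		printf("beam %d: nr=%d nc=%d\n", set[i].bi, set[i].nr, set[i].nc);
	return(0);
}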