216 |
|
return(total_cost); |
217 |
|
} |
218 |
|
|
/* Row sort entry: pairs a source row with its cheapest open destination,
 * plus a per-row discount factor applied when comparing moving prices. */
typedef struct {
	short	s, d;		/* source and destination column index; d < 0 means no destination available */
	float	dc;		/* discount to push inventory (price multiplier; 1.0 = no discount) */
} ROWSENT;			/* row sort entry */
223 |
> |
|
224 |
> |
/* Compare entries by discounted moving price */ |
225 |
|
static int |
226 |
|
rmovcmp(void *b, const void *p1, const void *p2) |
227 |
|
{ |
228 |
|
PRICEMAT *pm = (PRICEMAT *)b; |
229 |
< |
const short *ij1 = (const short *)p1; |
230 |
< |
const short *ij2 = (const short *)p2; |
231 |
< |
float price_diff; |
229 |
> |
const ROWSENT *re1 = (const ROWSENT *)p1; |
230 |
> |
const ROWSENT *re2 = (const ROWSENT *)p2; |
231 |
> |
double price_diff; |
232 |
|
|
233 |
< |
if (ij1[1] < 0) return(ij2[1] >= 0); |
234 |
< |
if (ij2[1] < 0) return(-1); |
235 |
< |
price_diff = pricerow(pm,ij1[0])[ij1[1]] - pricerow(pm,ij2[0])[ij2[1]]; |
233 |
> |
if (re1->d < 0) return(re2->d >= 0); |
234 |
> |
if (re2->d < 0) return(-1); |
235 |
> |
price_diff = re1->dc*pricerow(pm,re1->s)[re1->d] - |
236 |
> |
re2->dc*pricerow(pm,re2->s)[re2->d]; |
237 |
|
if (price_diff > 0) return(1); |
238 |
|
if (price_diff < 0) return(-1); |
239 |
|
return(0); |
247 |
|
const double maxamt = 1./(double)pm->ncols; |
248 |
|
const double minamt = maxamt*1e-4; |
249 |
|
double *src_cost; |
250 |
< |
short (*rord)[2]; |
250 |
> |
ROWSENT *rord; |
251 |
|
struct { |
252 |
|
int s, d; /* source and destination */ |
253 |
< |
double price; /* price estimate per amount moved */ |
253 |
> |
double price; /* cost per amount moved */ |
254 |
|
double amt; /* amount we can move */ |
255 |
|
} cur, best; |
256 |
|
int r2check, i, ri; |
259 |
|
* destination price implies that another source is closer, so |
260 |
|
* we can hold off considering more expensive options until |
261 |
|
* some other (hopefully better) moves have been made. |
262 |
+ |
* A discount based on source remaining is supposed to prioritize |
263 |
+ |
* movement from large lobes, but it doesn't seem to do much, |
264 |
+ |
* so we have it set to 1.0 at the moment. |
265 |
|
*/ |
266 |
+ |
#define discount(qr) 1.0 |
267 |
|
/* most promising row order */ |
268 |
< |
rord = (short (*)[2])malloc(sizeof(short)*2*pm->nrows); |
268 |
> |
rord = (ROWSENT *)malloc(sizeof(ROWSENT)*pm->nrows); |
269 |
|
if (rord == NULL) |
270 |
|
goto memerr; |
271 |
|
for (ri = pm->nrows; ri--; ) { |
272 |
< |
rord[ri][0] = ri; |
273 |
< |
rord[ri][1] = -1; |
272 |
> |
rord[ri].s = ri; |
273 |
> |
rord[ri].d = -1; |
274 |
> |
rord[ri].dc = 1.f; |
275 |
|
if (src_rem[ri] <= minamt) /* enough source material? */ |
276 |
|
continue; |
277 |
|
for (i = 0; i < pm->ncols; i++) |
278 |
< |
if (dst_rem[ rord[ri][1] = psortrow(pm,ri)[i] ] > minamt) |
278 |
> |
if (dst_rem[ rord[ri].d = psortrow(pm,ri)[i] ] > minamt) |
279 |
|
break; |
280 |
|
if (i >= pm->ncols) { /* moved all we can? */ |
281 |
|
free(rord); |
282 |
|
return(.0); |
283 |
|
} |
284 |
+ |
rord[ri].dc = discount(src_rem[ri]); |
285 |
|
} |
286 |
|
if (pm->nrows > max2check) /* sort if too many sources */ |
287 |
< |
qsort_r(rord, pm->nrows, sizeof(short)*2, pm, &rmovcmp); |
287 |
> |
qsort_r(rord, pm->nrows, sizeof(ROWSENT), pm, &rmovcmp); |
288 |
|
/* allocate cost array */ |
289 |
|
src_cost = (double *)malloc(sizeof(double)*pm->nrows); |
290 |
|
if (src_cost == NULL) |
297 |
|
r2check = max2check; /* put a limit on search */ |
298 |
|
for (ri = 0; ri < r2check; ri++) { /* check each source row */ |
299 |
|
double cost_others = 0; |
300 |
< |
cur.s = rord[ri][0]; |
301 |
< |
if ((cur.d = rord[ri][1]) < 0 || |
302 |
< |
(cur.price = pricerow(pm,cur.s)[cur.d]) >= best.price) { |
300 |
> |
cur.s = rord[ri].s; |
301 |
> |
if ((cur.d = rord[ri].d) < 0 || |
302 |
> |
rord[ri].dc*pricerow(pm,cur.s)[cur.d] >= best.price) { |
303 |
|
if (pm->nrows > max2check) break; /* sorted end */ |
304 |
|
continue; /* else skip this one */ |
305 |
|
} |
313 |
|
cost_others += min_cost(src_rem[i], dst_rem, pm, i) |
314 |
|
- src_cost[i]; |
315 |
|
dst_rem[cur.d] += cur.amt; /* undo trial move */ |
316 |
< |
cur.price += cost_others/cur.amt; /* adjust effective price */ |
316 |
> |
/* discount effective price */ |
317 |
> |
cur.price = ( pricerow(pm,cur.s)[cur.d] + cost_others/cur.amt ) * |
318 |
> |
rord[ri].dc; |
319 |
|
if (cur.price < best.price) /* are we better than best? */ |
320 |
|
best = cur; |
321 |
|
} |
331 |
|
memerr: |
332 |
|
fprintf(stderr, "%s: Out of memory in migration_step()\n", progname); |
333 |
|
exit(1); |
334 |
+ |
#undef discount |
335 |
|
} |
336 |
|
|
337 |
|
/* Compute and insert migration along directed edge (may fork child) */ |
499 |
|
} |
500 |
|
} |
501 |
|
} |
502 |
+ |
|
/* Add normal direction if missing.
 *
 * If the recorded DSF list lacks a measurement at (near-)normal incidence,
 * synthesize one: find the recorded incidence closest to normal, mirror it
 * through the origin in X and Y, compute the migration between the mirrored
 * and original nodes, then advect halfway to interpolate a distribution at
 * exact normal incidence and insert it into the list.
 *
 * Must be called before any migrations are built (mig_list must be empty);
 * exits the program on a late call or on allocation failure.
 * NOTE(review): temporarily forces nprocs = 1 so create_migration() runs
 * in-process — presumably to avoid forking here; restored afterwards. */
static void
check_normal_incidence(void)
{
	static const FVECT	norm_vec = {.0, .0, 1.};	/* exact normal direction */
	const int		saved_nprocs = nprocs;
	RBFNODE			*near_rbf, *mir_rbf, *rbf;
	double			bestd;		/* best (largest) cosine to normal so far */
	int			n;

	if (dsf_list == NULL)
		return;				/* XXX should be error? */
	near_rbf = dsf_list;
	bestd = input_orient*near_rbf->invec[2];
	if (single_plane_incident) {		/* ordered plane incidence? */
		if (bestd >= 1.-2.*FTINY)
			return;			/* already have normal */
	} else {
		switch (inp_coverage) {
		case INP_QUAD1:
		case INP_QUAD2:
		case INP_QUAD3:
		case INP_QUAD4:
			break;			/* quadrilateral symmetry? */
		default:
			return;			/* else we can interpolate */
		}
		/* scan all recorded nodes for the one nearest to normal */
		for (rbf = near_rbf->next; rbf != NULL; rbf = rbf->next) {
			const double	d = input_orient*rbf->invec[2];
			if (d >= 1.-2.*FTINY)
				return;		/* seems we have normal */
			if (d > bestd) {
				near_rbf = rbf;
				bestd = d;
			}
		}
	}
	if (mig_list != NULL) {			/* need to be called first */
		fprintf(stderr, "%s: Late call to check_normal_incidence()\n",
				progname);
		exit(1);
	}
#ifdef DEBUG
	fprintf(stderr, "Interpolating normal incidence by mirroring (%.1f,%.1f)\n",
			get_theta180(near_rbf->invec), get_phi360(near_rbf->invec));
#endif
						/* mirror nearest incidence */
	n = sizeof(RBFNODE) + sizeof(RBFVAL)*(near_rbf->nrbf-1);
	mir_rbf = (RBFNODE *)malloc(n);
	if (mir_rbf == NULL)
		goto memerr;
	memcpy(mir_rbf, near_rbf, n);
	mir_rbf->ord = near_rbf->ord - 1;	/* not used, I think */
	mir_rbf->next = NULL;
	rev_rbf_symmetry(mir_rbf, MIRROR_X|MIRROR_Y);
	nprocs = 1;				/* compute migration matrix */
	if (mig_list != create_migration(mir_rbf, near_rbf))
		exit(1);			/* XXX should never happen! */
						/* interpolate normal dist. */
	rbf = e_advect_rbf(mig_list, norm_vec, 2*near_rbf->nrbf);
	nprocs = saved_nprocs;			/* final clean-up */
	free(mir_rbf);
	free(mig_list);
	mig_list = near_rbf->ejl = NULL;	/* detach freed migration from node */
	insert_dsf(rbf);			/* insert interpolated normal */
	return;
memerr:
	fprintf(stderr, "%s: Out of memory in check_normal_incidence()\n",
			progname);
	exit(1);
}
574 |
|
|
575 |
|
/* Build our triangle mesh from recorded RBFs */ |
576 |
|
void |
579 |
|
double best2 = M_PI*M_PI; |
580 |
|
RBFNODE *shrt_edj[2]; |
581 |
|
RBFNODE *rbf0, *rbf1; |
582 |
+ |
/* add normal if needed */ |
583 |
+ |
check_normal_incidence(); |
584 |
|
/* check if isotropic */ |
585 |
|
if (single_plane_incident) { |
586 |
|
for (rbf0 = dsf_list; rbf0 != NULL; rbf0 = rbf0->next) |