51 |
|
struct s_rbfnode *next; /* next in global RBF list */ |
52 |
|
MIGRATION *ejl; /* edge list for this vertex */ |
53 |
|
FVECT invec; /* incident vector direction */ |
54 |
+ |
double vtotal; /* volume for normalization */ |
55 |
|
int nrbf; /* number of RBFs */ |
56 |
|
RBFVAL rbfa[1]; /* RBF array (extends struct) */ |
57 |
|
} RBFLIST; /* RBF representation of DSF @ 1 incidence */ |
63 |
|
/* processed incident DSF measurements */
static RBFLIST		*dsf_list = NULL;

/* RBF-linking matrices (edges) */
static MIGRATION	*mig_list = NULL;

/* Migration matrix dimensions: rows index source RBFs (rbfv[0]),
 * columns index destination RBFs (rbfv[1]) */
#define mtx_nrows(m)	((m)->rbfv[0]->nrbf)
#define mtx_ncols(m)	((m)->rbfv[1]->nrbf)
/* Flat (row-major) index of matrix entry (i,j) */
#define mtx_ndx(m,i,j)	((i)*mtx_ncols(m) + (j))
/* Test whether rbf is the source (resp. destination) vertex of edge m */
#define is_src(rbf,m)	((rbf) == (m)->rbfv[0])
#define is_dest(rbf,m)	((rbf) == (m)->rbfv[1])
/* Next edge in rbf's edge list: enxt[0] chains source-side links,
 * enxt[1] destination-side links */
#define nextedge(rbf,m)	(m)->enxt[is_dest(rbf,m)]
75 |
|
|
76 |
+ |
/* Compute volume associated with Gaussian lobe */ |
77 |
+ |
static double |
78 |
+ |
rbf_volume(const RBFVAL *rbfp) |
79 |
+ |
{ |
80 |
+ |
double rad = R2ANG(rbfp->crad); |
81 |
+ |
|
82 |
+ |
return((2.*M_PI) * rbfp->peak * rad*rad); |
83 |
+ |
} |
84 |
+ |
|
85 |
|
/* Compute outgoing vector from grid position */ |
86 |
|
static void |
87 |
|
vec_from_pos(FVECT vec, int xpos, int ypos) |
149 |
|
/* allocate RBF array */ |
150 |
|
newnode = (RBFLIST *)malloc(sizeof(RBFLIST) + sizeof(RBFVAL)*(nn-1)); |
151 |
|
if (newnode == NULL) { |
152 |
< |
fputs("Out of memory in make_rbfrep\n", stderr); |
152 |
> |
fputs("Out of memory in make_rbfrep()\n", stderr); |
153 |
|
exit(1); |
154 |
|
} |
155 |
|
newnode->next = NULL; |
158 |
|
newnode->invec[0] = cos(M_PI/180.*phi_in_deg)*newnode->invec[2]; |
159 |
|
newnode->invec[1] = sin(M_PI/180.*phi_in_deg)*newnode->invec[2]; |
160 |
|
newnode->invec[2] = sqrt(1. - newnode->invec[2]*newnode->invec[2]); |
161 |
+ |
newnode->vtotal = 0; |
162 |
|
newnode->nrbf = nn; |
163 |
|
nn = 0; /* fill RBF array */ |
164 |
|
for (i = 0; i < GRIDRES; i++) |
195 |
|
*/ |
196 |
|
} while (--niter > 0 && lastVar-thisVar > 0.02*lastVar); |
197 |
|
|
198 |
+ |
nn = 0; /* compute sum for normalization */ |
199 |
+ |
while (nn < newnode->nrbf) |
200 |
+ |
newnode->vtotal += rbf_volume(&newnode->rbfa[nn++]); |
201 |
+ |
|
202 |
|
newnode->next = dsf_list; |
203 |
|
return(dsf_list = newnode); |
204 |
|
} |
404 |
|
} |
405 |
|
} |
406 |
|
|
407 |
+ |
/* Compute (and allocate) migration price matrix for optimization */ |
408 |
+ |
static float * |
409 |
+ |
price_routes(const RBFLIST *from_rbf, const RBFLIST *to_rbf) |
410 |
+ |
{ |
411 |
+ |
float *pmtx = (float *)malloc(sizeof(float) * |
412 |
+ |
from_rbf->nrbf * to_rbf->nrbf); |
413 |
+ |
FVECT *vto = (FVECT *)malloc(sizeof(FVECT) * to_rbf->nrbf); |
414 |
+ |
int i, j; |
415 |
+ |
|
416 |
+ |
if ((pmtx == NULL) | (vto == NULL)) { |
417 |
+ |
fputs("Out of memory in migration_costs()\n", stderr); |
418 |
+ |
exit(1); |
419 |
+ |
} |
420 |
+ |
for (j = to_rbf->nrbf; j--; ) /* save repetitive ops. */ |
421 |
+ |
vec_from_pos(vto[j], to_rbf->rbfa[j].gx, to_rbf->rbfa[j].gy); |
422 |
+ |
|
423 |
+ |
for (i = from_rbf->nrbf; i--; ) { |
424 |
+ |
const double from_ang = R2ANG(from_rbf->rbfa[i].crad); |
425 |
+ |
FVECT vfrom; |
426 |
+ |
vec_from_pos(vfrom, from_rbf->rbfa[i].gx, from_rbf->rbfa[i].gy); |
427 |
+ |
for (j = to_rbf->nrbf; j--; ) |
428 |
+ |
pmtx[i*to_rbf->nrbf + j] = acos(DOT(vfrom, vto[j])) + |
429 |
+ |
fabs(R2ANG(to_rbf->rbfa[j].crad) - from_ang); |
430 |
+ |
} |
431 |
+ |
free(vto); |
432 |
+ |
return(pmtx); |
433 |
+ |
} |
434 |
+ |
|
435 |
+ |
/* Comparison routine needed for sorting price row */
static const float	*price_arr;	/* row currently being sorted */

/* qsort() callback: order indices by ascending price in price_arr */
static int
msrt_cmp(const void *p1, const void *p2)
{
	const float	diff = price_arr[*(const int *)p1] -
				price_arr[*(const int *)p2];

	return((diff > 0) - (diff < 0));
}
447 |
+ |
|
448 |
+ |
/* Compute minimum (optimistic) cost for moving the given source material */ |
449 |
+ |
static double |
450 |
+ |
min_cost(double amt2move, const double *avail, const float *price, int n) |
451 |
+ |
{ |
452 |
+ |
static int *price_sort = NULL; |
453 |
+ |
static int n_alloc = 0; |
454 |
+ |
double total_cost = 0; |
455 |
+ |
int i; |
456 |
+ |
|
457 |
+ |
if (amt2move <= FTINY) /* pre-emptive check */ |
458 |
+ |
return(0.); |
459 |
+ |
if (n > n_alloc) { /* (re)allocate sort array */ |
460 |
+ |
if (n_alloc) free(price_sort); |
461 |
+ |
price_sort = (int *)malloc(sizeof(int)*n); |
462 |
+ |
if (price_sort == NULL) { |
463 |
+ |
fputs("Out of memory in min_cost()\n", stderr); |
464 |
+ |
exit(1); |
465 |
+ |
} |
466 |
+ |
n_alloc = n; |
467 |
+ |
} |
468 |
+ |
for (i = n; i--; ) |
469 |
+ |
price_sort[i] = i; |
470 |
+ |
price_arr = price; |
471 |
+ |
qsort(price_sort, n, sizeof(int), &msrt_cmp); |
472 |
+ |
/* move cheapest first */ |
473 |
+ |
for (i = 0; i < n && amt2move > FTINY; i++) { |
474 |
+ |
int d = price_sort[i]; |
475 |
+ |
double amt = (amt2move < avail[d]) ? amt2move : avail[d]; |
476 |
+ |
|
477 |
+ |
total_cost += amt * price[d]; |
478 |
+ |
amt2move -= amt; |
479 |
+ |
} |
480 |
+ |
if (amt2move > 1e-5) fprintf(stderr, "%g leftover!\n", amt2move); |
481 |
+ |
return(total_cost); |
482 |
+ |
} |
483 |
+ |
|
484 |
+ |
/* Take a step in migration by choosing optimal bucket to transfer */ |
485 |
+ |
static double |
486 |
+ |
migration_step(MIGRATION *mig, double *src_rem, double *dst_rem, const float *pmtx) |
487 |
+ |
{ |
488 |
+ |
static double *src_cost = NULL; |
489 |
+ |
int n_alloc = 0; |
490 |
+ |
const double maxamt = 0.5/(mtx_nrows(mig)*mtx_ncols(mig)); |
491 |
+ |
double amt = 0; |
492 |
+ |
struct { |
493 |
+ |
int s, d; /* source and destination */ |
494 |
+ |
double price; /* price estimate per amount moved */ |
495 |
+ |
double amt; /* amount we can move */ |
496 |
+ |
} cur, best; |
497 |
+ |
int i; |
498 |
+ |
|
499 |
+ |
if (mtx_nrows(mig) > n_alloc) { /* allocate cost array */ |
500 |
+ |
if (n_alloc) |
501 |
+ |
free(src_cost); |
502 |
+ |
src_cost = (double *)malloc(sizeof(double)*mtx_nrows(mig)); |
503 |
+ |
if (src_cost == NULL) { |
504 |
+ |
fputs("Out of memory in migration_step()\n", stderr); |
505 |
+ |
exit(1); |
506 |
+ |
} |
507 |
+ |
n_alloc = mtx_nrows(mig); |
508 |
+ |
} |
509 |
+ |
for (i = mtx_nrows(mig); i--; ) /* starting costs for diff. */ |
510 |
+ |
src_cost[i] = min_cost(src_rem[i], dst_rem, |
511 |
+ |
pmtx+i*mtx_ncols(mig), mtx_ncols(mig)); |
512 |
+ |
|
513 |
+ |
/* find best source & dest. */ |
514 |
+ |
best.s = best.d = -1; best.price = FHUGE; best.amt = 0; |
515 |
+ |
for (cur.s = mtx_nrows(mig); cur.s--; ) { |
516 |
+ |
const float *price = pmtx + cur.s*mtx_ncols(mig); |
517 |
+ |
double cost_others = 0; |
518 |
+ |
if (src_rem[cur.s] <= FTINY) |
519 |
+ |
continue; |
520 |
+ |
cur.d = -1; /* examine cheapest dest. */ |
521 |
+ |
for (i = mtx_ncols(mig); i--; ) |
522 |
+ |
if (dst_rem[i] > FTINY && |
523 |
+ |
(cur.d < 0 || price[i] < price[cur.d])) |
524 |
+ |
cur.d = i; |
525 |
+ |
if (cur.d < 0) |
526 |
+ |
return(.0); |
527 |
+ |
if ((cur.price = price[cur.d]) >= best.price) |
528 |
+ |
continue; /* no point checking further */ |
529 |
+ |
cur.amt = (src_rem[cur.s] < dst_rem[cur.d]) ? |
530 |
+ |
src_rem[cur.s] : dst_rem[cur.d]; |
531 |
+ |
if (cur.amt > maxamt) cur.amt = maxamt; |
532 |
+ |
dst_rem[cur.d] -= cur.amt; /* add up differential costs */ |
533 |
+ |
for (i = mtx_nrows(mig); i--; ) { |
534 |
+ |
if (i == cur.s) continue; |
535 |
+ |
cost_others += min_cost(src_rem[i], dst_rem, price, mtx_ncols(mig)) |
536 |
+ |
- src_cost[i]; |
537 |
+ |
} |
538 |
+ |
dst_rem[cur.d] += cur.amt; /* undo trial move */ |
539 |
+ |
cur.price += cost_others/cur.amt; /* adjust effective price */ |
540 |
+ |
if (cur.price < best.price) /* are we better than best? */ |
541 |
+ |
best = cur; |
542 |
+ |
} |
543 |
+ |
if ((best.s < 0) | (best.d < 0)) |
544 |
+ |
return(.0); |
545 |
+ |
/* make the actual move */ |
546 |
+ |
mig->mtx[mtx_ndx(mig,best.s,best.d)] += best.amt; |
547 |
+ |
src_rem[best.s] -= best.amt; |
548 |
+ |
dst_rem[best.d] -= best.amt; |
549 |
+ |
return(best.amt); |
550 |
+ |
} |
551 |
+ |
|
552 |
+ |
/* Compute (and insert) migration along directed edge */ |
553 |
+ |
static MIGRATION * |
554 |
+ |
make_migration(RBFLIST *from_rbf, RBFLIST *to_rbf) |
555 |
+ |
{ |
556 |
+ |
const double end_thresh = 0.02/(from_rbf->nrbf*to_rbf->nrbf); |
557 |
+ |
float *pmtx = price_routes(from_rbf, to_rbf); |
558 |
+ |
MIGRATION *newmig = (MIGRATION *)malloc(sizeof(MIGRATION) + |
559 |
+ |
sizeof(float) * |
560 |
+ |
(from_rbf->nrbf*to_rbf->nrbf - 1)); |
561 |
+ |
double *src_rem = (double *)malloc(sizeof(double)*from_rbf->nrbf); |
562 |
+ |
double *dst_rem = (double *)malloc(sizeof(double)*to_rbf->nrbf); |
563 |
+ |
double total_rem = 1.; |
564 |
+ |
int i; |
565 |
+ |
|
566 |
+ |
if ((newmig == NULL) | (src_rem == NULL) | (dst_rem == NULL)) { |
567 |
+ |
fputs("Out of memory in make_migration()\n", stderr); |
568 |
+ |
exit(1); |
569 |
+ |
} |
570 |
+ |
newmig->next = NULL; |
571 |
+ |
newmig->rbfv[0] = from_rbf; |
572 |
+ |
newmig->rbfv[1] = to_rbf; |
573 |
+ |
newmig->enxt[0] = newmig->enxt[1] = NULL; |
574 |
+ |
memset(newmig->mtx, 0, sizeof(float)*from_rbf->nrbf*to_rbf->nrbf); |
575 |
+ |
/* starting quantities */ |
576 |
+ |
for (i = from_rbf->nrbf; i--; ) |
577 |
+ |
src_rem[i] = rbf_volume(&from_rbf->rbfa[i]) / from_rbf->vtotal; |
578 |
+ |
for (i = to_rbf->nrbf; i--; ) |
579 |
+ |
dst_rem[i] = rbf_volume(&to_rbf->rbfa[i]) / to_rbf->vtotal; |
580 |
+ |
/* move a bit at a time */ |
581 |
+ |
while (total_rem > end_thresh) |
582 |
+ |
total_rem -= migration_step(newmig, src_rem, dst_rem, pmtx); |
583 |
+ |
|
584 |
+ |
free(pmtx); /* free working arrays */ |
585 |
+ |
free(src_rem); |
586 |
+ |
free(dst_rem); |
587 |
+ |
for (i = from_rbf->nrbf; i--; ) { /* normalize final matrix */ |
588 |
+ |
float nf = rbf_volume(&from_rbf->rbfa[i]); |
589 |
+ |
int j; |
590 |
+ |
if (nf <= FTINY) continue; |
591 |
+ |
nf = from_rbf->vtotal / nf; |
592 |
+ |
for (j = to_rbf->nrbf; j--; ) |
593 |
+ |
newmig->mtx[mtx_ndx(newmig,i,j)] *= nf; |
594 |
+ |
} |
595 |
+ |
/* insert in edge lists */ |
596 |
+ |
newmig->enxt[0] = from_rbf->ejl; |
597 |
+ |
from_rbf->ejl = newmig; |
598 |
+ |
newmig->enxt[1] = to_rbf->ejl; |
599 |
+ |
to_rbf->ejl = newmig; |
600 |
+ |
newmig->next = mig_list; |
601 |
+ |
return(mig_list = newmig); |
602 |
+ |
} |
603 |
+ |
|
604 |
+ |
#if 0	/* compiled out: advection of lobe values is not implemented yet */
/* Partially advect between the given RBFs to a newly allocated one */
/* NOTE(review): stub -- allocates and initializes the header fields only;
 * rbfa[] entries and vtotal are never filled in before returning */
static RBFLIST *
advect_rbf(const RBFLIST *from_rbf, const RBFLIST *to_rbf,
			const float *mtx, const FVECT invec)
{
	RBFLIST		*rbf;
					/* result sized for to_rbf's lobes */
	if (from_rbf->nrbf > to_rbf->nrbf) {
		fputs("Internal error: source RBF won't fit destination\n",
				stderr);
		exit(1);
	}
	rbf = (RBFLIST *)malloc(sizeof(RBFLIST) + sizeof(RBFVAL)*(to_rbf->nrbf-1));
	if (rbf == NULL) {
		fputs("Out of memory in advect_rbf()\n", stderr);
		exit(1);
	}
	rbf->next = NULL; rbf->ejl = NULL;
	VCOPY(rbf->invec, invec);
	rbf->vtotal = 0;		/* presumably summed later -- TODO */
	rbf->nrbf = to_rbf->nrbf;

	return rbf;
}
#endif
630 |
|
|
631 |
|
#if 1 |
632 |
|
/* Test main produces a Radiance model from the given input file */ |