| 18 |
|
#include <string.h> |
| 19 |
|
#include <math.h> |
| 20 |
|
#include "bsdfrep.h" |
| 21 |
+ |
|
| 22 |
+ |
#ifndef NEIGH_FACT2 |
| 23 |
+ |
#define NEIGH_FACT2 0.2 /* empirical neighborhood distance weight */ |
| 24 |
+ |
#endif |
| 25 |
|
/* number of processes to run */ |
| 26 |
|
int nprocs = 1; |
| 27 |
|
/* number of children (-1 in child) */ |
| 139 |
|
|
| 140 |
|
#endif /* ! _WIN32 */ |
| 141 |
|
|
| 142 |
+ |
/* Compute normalized distribution scattering functions for comparison */ |
| 143 |
+ |
static void |
| 144 |
+ |
compute_nDSFs(const RBFNODE *rbf0, const RBFNODE *rbf1) |
| 145 |
+ |
{ |
| 146 |
+ |
const double nf0 = (GRIDRES*GRIDRES) / rbf0->vtotal; |
| 147 |
+ |
const double nf1 = (GRIDRES*GRIDRES) / rbf1->vtotal; |
| 148 |
+ |
int x, y; |
| 149 |
+ |
FVECT dv; |
| 150 |
+ |
|
| 151 |
+ |
for (x = GRIDRES; x--; ) |
| 152 |
+ |
for (y = GRIDRES; y--; ) { |
| 153 |
+ |
ovec_from_pos(dv, x, y); |
| 154 |
+ |
dsf_grid[x][y].val[0] = nf0 * eval_rbfrep(rbf0, dv); |
| 155 |
+ |
dsf_grid[x][y].val[1] = nf1 * eval_rbfrep(rbf1, dv); |
| 156 |
+ |
} |
| 157 |
+ |
} |
| 158 |
+ |
|
| 159 |
+ |
/* Compute neighborhood distance-squared (dissimilarity) */ |
| 160 |
+ |
static double |
| 161 |
+ |
neighborhood_dist2(int x0, int y0, int x1, int y1) |
| 162 |
+ |
{ |
| 163 |
+ |
int rad = GRIDRES>>5; |
| 164 |
+ |
double sum2 = 0.; |
| 165 |
+ |
double d; |
| 166 |
+ |
int p[4]; |
| 167 |
+ |
int i, j; |
| 168 |
+ |
|
| 169 |
+ |
if ((x0 == x1) & (y0 == y1)) |
| 170 |
+ |
return(0.); |
| 171 |
+ |
/* check radius */ |
| 172 |
+ |
p[0] = x0; p[1] = y0; p[2] = x1; p[3] = y1; |
| 173 |
+ |
for (i = 4; i--; ) { |
| 174 |
+ |
if (p[i] < rad) rad = p[i]; |
| 175 |
+ |
if (GRIDRES-1-p[i] < rad) rad = GRIDRES-1-p[i]; |
| 176 |
+ |
} |
| 177 |
+ |
for (i = -rad; i <= rad; i++) |
| 178 |
+ |
for (j = -rad; j <= rad; j++) { |
| 179 |
+ |
d = dsf_grid[x0+i][y0+j].val[0] - |
| 180 |
+ |
dsf_grid[x1+i][y1+j].val[1]; |
| 181 |
+ |
sum2 += d*d; |
| 182 |
+ |
} |
| 183 |
+ |
return(sum2 / (4*rad*(rad+1) + 1)); |
| 184 |
+ |
} |
| 185 |
+ |
|
| 186 |
|
/* Comparison routine needed for sorting price row */ |
| 187 |
|
static int |
| 188 |
|
msrt_cmp(void *b, const void *p1, const void *p2) |
| 203 |
|
FVECT *vto = (FVECT *)malloc(sizeof(FVECT) * to_rbf->nrbf); |
| 204 |
|
int i, j; |
| 205 |
|
|
| 206 |
+ |
compute_nDSFs(from_rbf, to_rbf); |
| 207 |
|
pm->nrows = from_rbf->nrbf; |
| 208 |
|
pm->ncols = to_rbf->nrbf; |
| 209 |
|
pm->price = (float *)malloc(sizeof(float) * pm->nrows*pm->ncols); |
| 226 |
|
srow = psortrow(pm,i); |
| 227 |
|
for (j = to_rbf->nrbf; j--; ) { |
| 228 |
|
double d; /* quadratic cost function */ |
| 229 |
< |
d = DOT(vfrom, vto[j]); |
| 181 |
< |
d = (d >= 1.) ? .0 : acos(d); |
| 229 |
> |
d = Acos(DOT(vfrom, vto[j])); |
| 230 |
|
pm->prow[j] = d*d; |
| 231 |
|
d = R2ANG(to_rbf->rbfa[j].crad) - from_ang; |
| 232 |
< |
pm->prow[j] += d*d; |
| 232 |
> |
pm->prow[j] += d*d; |
| 233 |
> |
/* neighborhood difference */ |
| 234 |
> |
pm->prow[j] += NEIGH_FACT2 * neighborhood_dist2( |
| 235 |
> |
from_rbf->rbfa[i].gx, from_rbf->rbfa[i].gy, |
| 236 |
> |
to_rbf->rbfa[j].gx, to_rbf->rbfa[j].gy ); |
| 237 |
|
srow[j] = j; |
| 238 |
|
} |
| 239 |
|
qsort_r(srow, pm->ncols, sizeof(short), pm, &msrt_cmp); |
| 268 |
|
return(total_cost); |
| 269 |
|
} |
| 270 |
|
|
| 271 |
< |
/* Compare entries by moving price */ |
| 271 |
> |
/* Row sort entry: one candidate move per source row, ordered for the
 * migration search.  d < 0 marks a row with nothing left to move. */
typedef struct {
	short	s, d;		/* source and destination */
	float	dc;		/* discount to push inventory */
} ROWSENT;		/* row sort entry */
| 275 |
> |
|
| 276 |
> |
/* Compare entries by discounted moving price */ |
| 277 |
|
static int |
| 278 |
|
rmovcmp(void *b, const void *p1, const void *p2) |
| 279 |
|
{ |
| 280 |
|
PRICEMAT *pm = (PRICEMAT *)b; |
| 281 |
< |
const short *ij1 = (const short *)p1; |
| 282 |
< |
const short *ij2 = (const short *)p2; |
| 283 |
< |
float price_diff; |
| 281 |
> |
const ROWSENT *re1 = (const ROWSENT *)p1; |
| 282 |
> |
const ROWSENT *re2 = (const ROWSENT *)p2; |
| 283 |
> |
double price_diff; |
| 284 |
|
|
| 285 |
< |
if (ij1[1] < 0) return(ij2[1] >= 0); |
| 286 |
< |
if (ij2[1] < 0) return(-1); |
| 287 |
< |
price_diff = pricerow(pm,ij1[0])[ij1[1]] - pricerow(pm,ij2[0])[ij2[1]]; |
| 285 |
> |
if (re1->d < 0) return(re2->d >= 0); |
| 286 |
> |
if (re2->d < 0) return(-1); |
| 287 |
> |
price_diff = re1->dc*pricerow(pm,re1->s)[re1->d] - |
| 288 |
> |
re2->dc*pricerow(pm,re2->s)[re2->d]; |
| 289 |
|
if (price_diff > 0) return(1); |
| 290 |
|
if (price_diff < 0) return(-1); |
| 291 |
|
return(0); |
| 299 |
|
const double maxamt = 1./(double)pm->ncols; |
| 300 |
|
const double minamt = maxamt*1e-4; |
| 301 |
|
double *src_cost; |
| 302 |
< |
short (*rord)[2]; |
| 302 |
> |
ROWSENT *rord; |
| 303 |
|
struct { |
| 304 |
|
int s, d; /* source and destination */ |
| 305 |
< |
double price; /* price estimate per amount moved */ |
| 305 |
> |
double price; /* cost per amount moved */ |
| 306 |
|
double amt; /* amount we can move */ |
| 307 |
|
} cur, best; |
| 308 |
|
int r2check, i, ri; |
| 311 |
|
* destination price implies that another source is closer, so |
| 312 |
|
* we can hold off considering more expensive options until |
| 313 |
|
* some other (hopefully better) moves have been made. |
| 314 |
+ |
* A discount based on source remaining is supposed to prioritize |
| 315 |
+ |
* movement from large lobes, but it doesn't seem to do much, |
| 316 |
+ |
* so we have it set to 1.0 at the moment. |
| 317 |
|
*/ |
| 318 |
+ |
#define discount(qr) 1.0 |
| 319 |
|
/* most promising row order */ |
| 320 |
< |
rord = (short (*)[2])malloc(sizeof(short)*2*pm->nrows); |
| 320 |
> |
rord = (ROWSENT *)malloc(sizeof(ROWSENT)*pm->nrows); |
| 321 |
|
if (rord == NULL) |
| 322 |
|
goto memerr; |
| 323 |
|
for (ri = pm->nrows; ri--; ) { |
| 324 |
< |
rord[ri][0] = ri; |
| 325 |
< |
rord[ri][1] = -1; |
| 324 |
> |
rord[ri].s = ri; |
| 325 |
> |
rord[ri].d = -1; |
| 326 |
> |
rord[ri].dc = 1.f; |
| 327 |
|
if (src_rem[ri] <= minamt) /* enough source material? */ |
| 328 |
|
continue; |
| 329 |
|
for (i = 0; i < pm->ncols; i++) |
| 330 |
< |
if (dst_rem[ rord[ri][1] = psortrow(pm,ri)[i] ] > minamt) |
| 330 |
> |
if (dst_rem[ rord[ri].d = psortrow(pm,ri)[i] ] > minamt) |
| 331 |
|
break; |
| 332 |
|
if (i >= pm->ncols) { /* moved all we can? */ |
| 333 |
|
free(rord); |
| 334 |
|
return(.0); |
| 335 |
|
} |
| 336 |
+ |
rord[ri].dc = discount(src_rem[ri]); |
| 337 |
|
} |
| 338 |
|
if (pm->nrows > max2check) /* sort if too many sources */ |
| 339 |
< |
qsort_r(rord, pm->nrows, sizeof(short)*2, pm, &rmovcmp); |
| 339 |
> |
qsort_r(rord, pm->nrows, sizeof(ROWSENT), pm, &rmovcmp); |
| 340 |
|
/* allocate cost array */ |
| 341 |
|
src_cost = (double *)malloc(sizeof(double)*pm->nrows); |
| 342 |
|
if (src_cost == NULL) |
| 349 |
|
r2check = max2check; /* put a limit on search */ |
| 350 |
|
for (ri = 0; ri < r2check; ri++) { /* check each source row */ |
| 351 |
|
double cost_others = 0; |
| 352 |
< |
cur.s = rord[ri][0]; |
| 353 |
< |
if ((cur.d = rord[ri][1]) < 0 || |
| 354 |
< |
(cur.price = pricerow(pm,cur.s)[cur.d]) >= best.price) { |
| 352 |
> |
cur.s = rord[ri].s; |
| 353 |
> |
if ((cur.d = rord[ri].d) < 0 || |
| 354 |
> |
rord[ri].dc*pricerow(pm,cur.s)[cur.d] >= best.price) { |
| 355 |
|
if (pm->nrows > max2check) break; /* sorted end */ |
| 356 |
|
continue; /* else skip this one */ |
| 357 |
|
} |
| 365 |
|
cost_others += min_cost(src_rem[i], dst_rem, pm, i) |
| 366 |
|
- src_cost[i]; |
| 367 |
|
dst_rem[cur.d] += cur.amt; /* undo trial move */ |
| 368 |
< |
cur.price += cost_others/cur.amt; /* adjust effective price */ |
| 368 |
> |
/* discount effective price */ |
| 369 |
> |
cur.price = ( pricerow(pm,cur.s)[cur.d] + cost_others/cur.amt ) * |
| 370 |
> |
rord[ri].dc; |
| 371 |
|
if (cur.price < best.price) /* are we better than best? */ |
| 372 |
|
best = cur; |
| 373 |
|
} |
| 383 |
|
memerr: |
| 384 |
|
fprintf(stderr, "%s: Out of memory in migration_step()\n", progname); |
| 385 |
|
exit(1); |
| 386 |
+ |
#undef discount |
| 387 |
|
} |
| 388 |
|
|
| 389 |
|
/* Compute and insert migration along directed edge (may fork child) */ |
| 551 |
|
} |
| 552 |
|
} |
| 553 |
|
} |
| 554 |
+ |
|
| 555 |
+ |
/* Add normal direction if missing.
 *
 * If no measurement at (or within 2*FTINY of) normal incidence exists,
 * synthesize one: take the nearest-to-normal recorded incidence, mirror
 * it through the normal (MIRROR_X|MIRROR_Y), compute the migration
 * between the mirrored and original lobes, and advect halfway to get an
 * interpolated normal-incidence distribution, which is inserted into
 * dsf_list.  Must be called before any migrations are recorded
 * (mig_list must still be NULL).  Temporarily forces nprocs to 1 so the
 * migration matrix is computed in-process (no fork).
 */
static void
check_normal_incidence(void)
{
	static const FVECT	norm_vec = {.0, .0, 1.};
	const int	saved_nprocs = nprocs;
	RBFNODE		*near_rbf, *mir_rbf, *rbf;
	double		bestd;
	int		n;

	if (dsf_list == NULL)
		return;				/* XXX should be error? */
	near_rbf = dsf_list;
	/* cosine of incident angle; input_orient flips for back side */
	bestd = input_orient*near_rbf->invec[2];
	if (single_plane_incident) {		/* ordered plane incidence? */
		if (bestd >= 1.-2.*FTINY)
			return;			/* already have normal */
	} else {
		switch (inp_coverage) {
		case INP_QUAD1:
		case INP_QUAD2:
		case INP_QUAD3:
		case INP_QUAD4:
			break;			/* quadrilateral symmetry? */
		default:
			return;			/* else we can interpolate */
		}
		/* scan all incidences for one at (or nearest to) normal */
		for (rbf = near_rbf->next; rbf != NULL; rbf = rbf->next) {
			const double	d = input_orient*rbf->invec[2];
			if (d >= 1.-2.*FTINY)
				return;		/* seems we have normal */
			if (d > bestd) {
				near_rbf = rbf;
				bestd = d;
			}
		}
	}
	if (mig_list != NULL) {			/* need to be called first */
		fprintf(stderr, "%s: Late call to check_normal_incidence()\n",
				progname);
		exit(1);
	}
#ifdef DEBUG
	fprintf(stderr, "Interpolating normal incidence by mirroring (%.1f,%.1f)\n",
			get_theta180(near_rbf->invec), get_phi360(near_rbf->invec));
#endif
					/* mirror nearest incidence */
	/* RBFNODE carries nrbf RBFVALs inline; first is in the struct itself */
	n = sizeof(RBFNODE) + sizeof(RBFVAL)*(near_rbf->nrbf-1);
	mir_rbf = (RBFNODE *)malloc(n);
	if (mir_rbf == NULL)
		goto memerr;
	memcpy(mir_rbf, near_rbf, n);
	mir_rbf->ord = near_rbf->ord - 1;	/* not used, I think */
	mir_rbf->next = NULL;
	rev_rbf_symmetry(mir_rbf, MIRROR_X|MIRROR_Y);
	nprocs = 1;			/* compute migration matrix */
	if (mig_list != create_migration(mir_rbf, near_rbf))
		exit(1);		/* XXX should never happen! */
					/* interpolate normal dist. */
	/* 2*nrbf lobes: advected midpoint blends both endpoint lobe sets */
	rbf = e_advect_rbf(mig_list, norm_vec, 2*near_rbf->nrbf);
	nprocs = saved_nprocs;		/* final clean-up */
	free(mir_rbf);
	free(mig_list);
	mig_list = near_rbf->ejl = NULL;
	insert_dsf(rbf);		/* insert interpolated normal */
	return;
memerr:
	fprintf(stderr, "%s: Out of memory in check_normal_incidence()\n",
			progname);
	exit(1);
}
| 626 |
|
|
| 627 |
|
/* Build our triangle mesh from recorded RBFs */ |
| 628 |
|
void |
| 631 |
|
double best2 = M_PI*M_PI; |
| 632 |
|
RBFNODE *shrt_edj[2]; |
| 633 |
|
RBFNODE *rbf0, *rbf1; |
| 634 |
+ |
/* add normal if needed */ |
| 635 |
+ |
check_normal_incidence(); |
| 636 |
|
/* check if isotropic */ |
| 637 |
|
if (single_plane_incident) { |
| 638 |
|
for (rbf0 = dsf_list; rbf0 != NULL; rbf0 = rbf0->next) |