  22 |
  23 |   #define RSCA            2.7     /* radius scaling factor (empirical) */
  24 |
  25 | < #define R2ANG(c)        (((c)+.5)*(M_PI/(1<<16)))
  25 | >                         /* convert to/from coded radians */
  26 |   #define ANG2R(r)        (int)((r)*((1<<16)/M_PI))
  27 | + #define R2ANG(c)        (((c)+.5)*(M_PI/(1<<16)))
  28 |
  29 |   typedef struct {
  30 |           float           vsum;           /* DSF sum */
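
Note on the reordered macros above: ANG2R() truncates an angle in [0,pi)
to a 16-bit bin index, and R2ANG() maps a code back to the *center* of
its bin, which is why it adds .5 before scaling.  A minimal round-trip
sketch (this driver is illustrative, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    #define ANG2R(r)    (int)((r)*((1<<16)/M_PI))
    #define R2ANG(c)    (((c)+.5)*(M_PI/(1<<16)))

    int
    main(void)
    {
            double  r = 1.234567;   /* radians in [0,pi) */
            int     c = ANG2R(r);   /* truncate to bin index */
            double  rb = R2ANG(c);  /* recover the bin center */
                                    /* error is at most half a bin width */
            printf("err = %g (bound %g)\n", fabs(rb - r), .5*M_PI/(1<<16));
            return(0);
    }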
 106 |   static RBFLIST *
 107 |   make_rbfrep(void)
 108 |   {
 109 | <         int     niter = 6;
 109 | >         int     niter = 16;
 110 | >         double  lastVar, thisVar = 100.;
 111 |           int     nn;
 112 |           RBFLIST *newnode;
 113 |           int     i, j;
 115 |           nn = 0;                         /* count selected bins */
 116 |           for (i = 0; i < GRIDRES; i++)
 117 |               for (j = 0; j < GRIDRES; j++)
 118 | <                 nn += (dsf_grid[i][j].nval > 0);
 118 | >                 nn += dsf_grid[i][j].nval;
 119 |                                           /* allocate RBF array */
 120 |           newnode = (RBFLIST *)malloc(sizeof(RBFLIST) + sizeof(RBFVAL)*(nn-1));
 121 |           if (newnode == NULL) {
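
The malloc() above over-allocates past the end of the structure, the
classic pre-C99 "struct hack": RBFLIST evidently ends in a one-element
RBFVAL array, so one entry is already counted in sizeof(RBFLIST), hence
the (nn-1).  A self-contained sketch of the idiom; the member lists and
the helper name below are assumptions, since the real declarations sit
outside this diff:

    #include <stdlib.h>

    typedef struct {
            float           peak;           /* assumed members, */
            unsigned short  crad;           /* for illustration only */
            unsigned short  gx, gy;
    } RBFVAL;

    typedef struct s_rbflist {
            struct s_rbflist        *next;
            RBFVAL                  rbfa[1];        /* grows to nn entries */
    } RBFLIST;

    static RBFLIST *
    alloc_rbflist(int nn)                   /* hypothetical helper */
    {
            return (RBFLIST *)malloc(sizeof(RBFLIST) + sizeof(RBFVAL)*(nn-1));
    }

After this call, rbfa[0] through rbfa[nn-1] are all validly addressable.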
 132 |           for (i = 0; i < GRIDRES; i++)
 133 |               for (j = 0; j < GRIDRES; j++)
 134 |                   if (dsf_grid[i][j].nval) {
 135 | <                         newnode->rbfa[nn].peak =
 136 | <                                 dsf_grid[i][j].vsum /=
 137 | <                                         (double)dsf_grid[i][j].nval;
 138 | <                         dsf_grid[i][j].nval = 1;
 135 | >                         newnode->rbfa[nn].peak = dsf_grid[i][j].vsum;
 136 |                           newnode->rbfa[nn].crad = RSCA*dsf_grid[i][j].crad + .5;
 137 |                           newnode->rbfa[nn].gx = i;
 138 |                           newnode->rbfa[nn].gy = j;
 139 |                           ++nn;
 140 |                   }
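
This hunk is paired with the new "final averaging pass" in cull_values()
at the end of this diff: each occupied grid cell now arrives here with
nval already reduced to 1 and vsum already normalized to a mean DSF
value, so the peak can be copied directly instead of being averaged in
place.  The same invariant is what lets the bin count above use
nn += dsf_grid[i][j].nval.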
 141 | <                         /* iterate for better convergence */
 142 | <         while (niter--) {
 141 | >                         /* iterate to improve interpolation accuracy */
 142 | >         do {
 143 |                   double  dsum = .0, dsum2 = .0;
 144 |                   nn = 0;
 145 |                   for (i = 0; i < GRIDRES; i++)
 146 |                       for (j = 0; j < GRIDRES; j++)
 147 |                           if (dsf_grid[i][j].nval) {
 148 |                                   FVECT   odir;
 149 | <                                 /* double corr; */
 149 | >                                 double  corr;
 150 |                                   vec_from_pos(odir, i, j);
 151 | <                                 newnode->rbfa[nn++].peak *= /* corr = */
 151 | >                                 newnode->rbfa[nn++].peak *= corr =
 152 |                                           dsf_grid[i][j].vsum /
 153 |                                                   eval_rbfrep(newnode, odir);
 155 | – /*
 154 |                                   dsum += corr - 1.;
 155 |                                   dsum2 += (corr-1.)*(corr-1.);
 158 | – */
 156 |                           }
 157 | +                 lastVar = thisVar;
 158 | +                 thisVar = dsum2/(double)nn;
 159 |                   /*
 160 |                   fprintf(stderr, "Avg., RMS error: %.1f%%  %.1f%%\n",
 161 |                                   100.*dsum/(double)nn,
 162 | <                                 100.*sqrt(dsum2/(double)nn));
 162 | >                                 100.*sqrt(thisVar));
 163 |                   */
 164 | <         }
 164 | >         } while (--niter > 0 && lastVar-thisVar > 0.02*lastVar);
 165 | >
 166 |           newnode->next = dsf_list;
 167 |           return(dsf_list = newnode);
 168 |   }
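
The fixed while (niter--) loop becomes a do/while with an early exit:
run at most 16 correction passes, and stop as soon as a pass improves
the error variance (dsum2/nn) by less than 2% relative to the previous
pass.  An isolated sketch of just this stopping rule; the fakeVar table
stands in for the real per-grid-cell computation and is invented for
illustration:

    #include <stdio.h>

    int
    main(void)
    {
                            /* fake per-pass variances, diminishing returns */
            static const double     fakeVar[16] = {41., 9.3, 4.1, 2.8, 2.45,
                                    2.38, 2.36, 2.35, 2.35, 2.35, 2.35, 2.35,
                                    2.35, 2.35, 2.35, 2.35};
            int     niter = 16;
            double  lastVar, thisVar = 100.;
            int     pass = 0;

            do {
                    lastVar = thisVar;
                    thisVar = fakeVar[pass++];      /* stand-in for a pass */
                    printf("pass %d: var = %g\n", pass, thisVar);
            } while (--niter > 0 && lastVar-thisVar > 0.02*lastVar);
            return(0);
    }

With these numbers the loop exits after 7 passes, once the improvement
falls below the 2% threshold, rather than always running the full count.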
 310 |                               }
 311 |                       }
 312 |               }
 313 | <                         /* copy back averaged radii */
 313 | >                         /* copy back blurred radii */
 314 |           for (i = 0; i < GRIDRES; i++)
 315 |               for (j = 0; j < GRIDRES; j++)
 316 |                   if (fill_cnt[i][j])
 317 |                           dsf_grid[i][j].crad = fill_grid[i][j]/fill_cnt[i][j];
 318 |   }
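
The reworded comment is the accurate one: fill_grid[][] and fill_cnt[][]
hold sums and contribution counts gathered from neighboring cells, so
the division above is the normalize step of a blur over the coded radii,
not a per-cell average.  A sketch of the whole accumulate/normalize
pattern; the 3x3 neighborhood and all declarations below are
assumptions, since only the final division appears in this hunk:

    #define GR      64                      /* stand-in grid resolution */

    static unsigned int     fill_grid[GR][GR];      /* summed radii */
    static unsigned short   fill_cnt[GR][GR];       /* contribution counts */
    static unsigned short   crad[GR][GR];           /* coded radii */

    static void
    blur_radii(void)            /* hypothetical; statics start zeroed */
    {
            int     i, j, ii, jj;
                                    /* accumulate over 3x3 neighborhoods */
            for (i = 0; i < GR; i++)
                for (j = 0; j < GR; j++)
                    for (ii = (i>0 ? i-1 : 0); ii <= i+1 && ii < GR; ii++)
                        for (jj = (j>0 ? j-1 : 0); jj <= j+1 && jj < GR; jj++) {
                                fill_grid[i][j] += crad[ii][jj];
                                fill_cnt[i][j]++;
                        }
                                    /* normalize (the copy-back step above) */
            for (i = 0; i < GR; i++)
                for (j = 0; j < GR; j++)
                    if (fill_cnt[i][j])
                            crad[i][j] = fill_grid[i][j]/fill_cnt[i][j];
    }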
 319 |
 320 | < /* Cull points for more uniform distribution */
 320 | > /* Cull points for more uniform distribution, leave all nval 0 or 1 */
 321 |   static void
 322 |   cull_values(void)
 323 |   {
 354 |                           dsf_grid[i][j].vsum += dsf_grid[ii][jj].vsum;
 355 |                           dsf_grid[i][j].nval += dsf_grid[ii][jj].nval;
 356 |                                           /* keep value, though */
 357 | <                         dsf_grid[ii][jj].vsum /= (double)dsf_grid[ii][jj].nval;
 357 | >                         dsf_grid[ii][jj].vsum /= (float)dsf_grid[ii][jj].nval;
 358 |                           dsf_grid[ii][jj].nval = 0;
 359 |                   }
 360 |               }
 361 |           }
 362 | +                         /* final averaging pass */
 363 | +         for (i = 0; i < GRIDRES; i++)
 364 | +             for (j = 0; j < GRIDRES; j++)
 365 | +                 if (dsf_grid[i][j].nval > 1) {
 366 | +                         dsf_grid[i][j].vsum /= (float)dsf_grid[i][j].nval;
 367 | +                         dsf_grid[i][j].nval = 1;
 368 | +                 }
 369 |   }
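
The added pass establishes the invariant promised by the updated header
comment: on return from cull_values(), every cell has nval equal to 0
or 1, and vsum holds a mean rather than a running sum, which is exactly
what make_rbfrep() above now relies on for its bin count and its direct
peak assignment.  A trivial check against the file's own grid globals
(the helper name is invented):

    #include <assert.h>

    static void
    check_cull_invariant(void)              /* hypothetical debugging aid */
    {
            int     i, j;

            for (i = 0; i < GRIDRES; i++)
                for (j = 0; j < GRIDRES; j++)
                    assert(dsf_grid[i][j].nval <= 1);
    }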