 51         if (i) spinvector(tvec, tvec, outvec, phinc);
 52         if (tvec[2] > 0 ^ output_orient > 0)
 53             continue;
 54 <       sum += eval_rbfrep(rbf, tvec) * output_orient*tvec[2];
 54 >       sum += eval_rbfrep(rbf, tvec) * COSF(tvec[2]);
 55         ++n;
 56     }
 57     if (n < 2) /* should never happen! */

 64 est_DSFrad(const RBFNODE *rbf, const FVECT outvec)
 65 {
 66     const double rad_epsilon = 0.03;
 67 <   const double DSFtarget = 0.60653066 * eval_rbfrep(rbf,outvec)
 68 <                   * output_orient*outvec[2];
 67 >   const double DSFtarget = 0.60653066 * eval_rbfrep(rbf,outvec) *
 68 >                   COSF(outvec[2]);
 69     double inside_rad = rad_epsilon;
 70     double outside_rad = 0.5;
 71     double DSFinside = eval_DSFsurround(rbf, outvec, inside_rad);
 72     double DSFoutside = eval_DSFsurround(rbf, outvec, outside_rad);
 73 #define interp_rad  inside_rad + (outside_rad-inside_rad) * \
 74                     (DSFtarget-DSFinside) / (DSFoutside-DSFinside)
 75 <   /* interpolation search */
 76 <   while (outside_rad-inside_rad > rad_epsilon) {
 75 >   /* Newton's method (sort of) */
 76 >   do {
 77         double test_rad = interp_rad;
 78 <       double DSFtest = eval_DSFsurround(rbf, outvec, test_rad);
 79 <       if (DSFtarget < DSFtest) {
 78 >       double DSFtest;
 79 >       if (test_rad >= outside_rad)
 80 >           return(test_rad);
 81 >       if (test_rad <= inside_rad)
 82 >           return(test_rad*(test_rad>0));
 83 >       DSFtest = eval_DSFsurround(rbf, outvec, test_rad);
 84 >       if (DSFtest > DSFtarget) {
 85             inside_rad = test_rad;
 86             DSFinside = DSFtest;
 87         } else {
 88             outside_rad = test_rad;
 89             DSFoutside = DSFtest;
 90         }
 91 <   }
 91 >       if (DSFoutside >= DSFinside)
 92 >           return(test_rad);
 93 >   } while (outside_rad-inside_rad > rad_epsilon);
 94     return(interp_rad);
 95 #undef interp_rad
 96 }
 97
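The interp_rad macro above is a linear interpolation across the current radius bracket, so each pass of the revised do-loop amounts to a regula-falsi step toward the radius where eval_DSFsurround() crosses DSFtarget, with early exits once a step leaves the bracket or the bracket stops improving. Below is a minimal, self-contained sketch of that bracketed interpolation search; measure() is a made-up stand-in for eval_DSFsurround(), and the constants are arbitrary test values, not anything taken from the source.

#include <stdio.h>

/* Stand-in for eval_DSFsurround(): any smooth, monotonically
 * decreasing function of the test radius works for this sketch. */
static double
measure(double rad)
{
    return(1./(1. + 25.*rad*rad));
}

/* Bracketed interpolation search: shrink [inside, outside] until the
 * radius where measure() crosses target is pinned down within eps. */
static double
interp_search(double target, double inside, double outside, double eps)
{
    double  f_in = measure(inside);
    double  f_out = measure(outside);

    do {
        double  test = inside + (outside - inside) *
                    (target - f_in) / (f_out - f_in);
        double  f_test;

        if (test <= inside || test >= outside)
            return(test);           /* interpolation left the bracket */
        f_test = measure(test);
        if (f_test > target) {      /* still above target: move inner edge */
            inside = test;
            f_in = f_test;
        } else {                    /* at or below target: move outer edge */
            outside = test;
            f_out = f_test;
        }
    } while (outside - inside > eps);
                    /* final interpolation inside the converged bracket */
    return(inside + (outside - inside)*(target - f_in)/(f_out - f_in));
}

int
main(void)
{
    printf("radius ~ %.4f\n", interp_search(0.60653066, .03, .5, .001));
    return(0);
}

The guards that return when the interpolated step falls on or outside the bracket are the same protection the revised est_DSFrad() adds before calling eval_DSFsurround().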
 98 +   static int
 99 +   dbl_cmp(const void *p1, const void *p2)
100 +   {
101 +       double d1 = *(const double *)p1;
102 +       double d2 = *(const double *)p2;
103 +
104 +       if (d1 > d2) return(1);
105 +       if (d1 < d2) return(-1);
106 +       return(0);
107 +   }
108 +
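The added dbl_cmp() comparator exists so that qsort() can order a scratch array of incidence cosines; comp_bsdf_spec() below sorts cost_list ascending and reads off roughly its 25th percentile as the grazing-angle cutoff max_cost. A small stand-alone illustration of that pattern, with the comparator copied from the added lines and the sample cosine values invented for the example:

#include <stdio.h>
#include <stdlib.h>

static int
dbl_cmp(const void *p1, const void *p2)
{
    double  d1 = *(const double *)p1;
    double  d2 = *(const double *)p2;

    if (d1 > d2) return(1);
    if (d1 < d2) return(-1);
    return(0);
}

int
main(void)
{
    double  cost_list[] = {.97, .12, .55, .83, .31, .66,
                .09, .74, .48, .91, .22, .60};
    int     n = sizeof(cost_list)/sizeof(cost_list[0]);

    qsort(cost_list, n, sizeof(double), dbl_cmp);
                /* index (n+3)/4 lands near the 25th percentile */
    printf("max_cost = %g\n", cost_list[(n+3)/4]);
    return(0);
}

Indexing the ascending sort at (n+3)/4 keeps the cutoff within the lowest quarter of cosines, i.e. the most grazing incident directions, which is what the averaging loop in comp_bsdf_spec() filters on.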
109     /* Compute average BSDF peak from current DSF's */
110     static void
111     comp_bsdf_spec(void)
112     {
113 <       double peak_sum = 0;
113 >       double vmod_sum = 0;
114         double rad_sum = 0;
115         int n = 0;
116 +       double *cost_list = NULL;
117 +       double max_cost = 1.;
118         RBFNODE *rbf;
119         FVECT sdv;
120 <
121 <       if (dsf_list == NULL) {
122 <           bsdf_spec_peak = 0;
123 <           bsdf_spec_crad = 0;
120 >                   /* grazing 25th percentile */
121 >       for (rbf = dsf_list; rbf != NULL; rbf = rbf->next)
122 >           n++;
123 >       if (n >= 10)
124 >           cost_list = (double *)malloc(sizeof(double)*n);
125 >       if (cost_list == NULL) {
126 >           bsdf_spec_val = 0;
127 >           bsdf_spec_rad = 0;
128             return;
129         }
130 +       n = 0;
131 +       for (rbf = dsf_list; rbf != NULL; rbf = rbf->next)
132 +           cost_list[n++] = rbf->invec[2]*input_orient;
133 +       qsort(cost_list, n, sizeof(double), dbl_cmp);
134 +       max_cost = cost_list[(n+3)/4];
135 +       free(cost_list);
136 +       n = 0;
137         for (rbf = dsf_list; rbf != NULL; rbf = rbf->next) {
138 +           double this_rad, cosfact, vest;
139 +           if (rbf->invec[2]*input_orient > max_cost)
140 +               continue;
141             sdv[0] = -rbf->invec[0];
142             sdv[1] = -rbf->invec[1];
143             sdv[2] = rbf->invec[2]*(2*(input_orient==output_orient) - 1);
144 <           peak_sum += eval_rbfrep(rbf, sdv);
145 <           rad_sum += est_DSFrad(rbf, sdv);
144 >           this_rad = est_DSFrad(rbf, sdv);
145 >           cosfact = COSF(sdv[2]);
146 >           vest = eval_rbfrep(rbf, sdv) * cosfact *
147 >                           (2*M_PI) * this_rad*this_rad;
148 >           if (vest > rbf->vtotal)
149 >               vest = rbf->vtotal;
150 >           vmod_sum += vest / cosfact;
151 >           rad_sum += this_rad;
152             ++n;
153         }
154 <       bsdf_spec_peak = peak_sum/(double)n;
155 <       bsdf_spec_crad = ANG2R( rad_sum/(double)n );
154 >       bsdf_spec_rad = rad_sum/(double)n;
155 >       bsdf_spec_val = vmod_sum/(2.*M_PI*n*bsdf_spec_rad*bsdf_spec_rad);
156     }
157
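The revised totals read as if each DSF is modeled as a roughly Gaussian lobe: DSFtarget in est_DSFrad() is exp(-1/2) ~ 0.60653066 of the peak, so the returned radius plays the role of a one-sigma width, and the volume under a circular Gaussian of peak P and sigma r is 2*pi*P*r*r, which is the form of vest above and of the final bsdf_spec_val division. Assuming that reading is right, here is a tiny sketch of just that arithmetic; lobe_volume() and lobe_peak() are hypothetical helpers for illustration, not functions from the source.

#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI    3.14159265358979323846
#endif

/* Volume under a circular Gaussian lobe with the given peak value and
 * one-sigma radius: integrating peak*exp(-d*d/(2*rad*rad)) over the
 * plane gives 2*pi*peak*rad*rad. */
static double
lobe_volume(double peak, double rad)
{
    return(2.*M_PI * peak * rad*rad);
}

/* Invert the same relation: recover a peak value from a known lobe
 * volume and radius, as the bsdf_spec_val assignment does with the
 * averaged totals. */
static double
lobe_peak(double volume, double rad)
{
    return(volume / (2.*M_PI * rad*rad));
}

int
main(void)
{
    double  peak = 5., rad = .15;
    double  vol = lobe_volume(peak, rad);

    printf("volume = %g, recovered peak = %g\n", vol, lobe_peak(vol, rad));
    return(0);
}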
158     /* Create a new migration holder (sharing memory for multiprocessing) */