51			if (i) spinvector(tvec, tvec, outvec, phinc);
52			if (tvec[2] > 0 ^ output_orient > 0)
53				continue;
54	<		sum += eval_rbfrep(rbf, tvec) * output_orient*tvec[2];
54	>		sum += eval_rbfrep(rbf, tvec) * COSF(tvec[2]);
55			++n;
56		}
57		if (n < 2)	/* should never happen! */
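Note on the change at line 54: the signed projection output_orient*tvec[2] is replaced by COSF(tvec[2]). Since the test at line 52 already skips sample directions whose z-sign disagrees with output_orient, the two expressions should yield the same positive cosine weight; the macro form simply centralizes that weighting. COSF() is presumably defined elsewhere in the BSDF representation code (e.g., bsdfrep.h), not in this diff, so the stand-in below is only an assumption about its intent, not the real definition:

	#include <math.h>	/* for fabs() */

	/* hypothetical stand-in -- see bsdfrep.h for the actual COSF() macro */
	#define COSF(z)		fabs(z)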
64	est_DSFrad(const RBFNODE *rbf, const FVECT outvec)
65	{
66		const double rad_epsilon = 0.03;
67	<	const double DSFtarget = 0.60653066 * eval_rbfrep(rbf,outvec)
68	<					* output_orient*outvec[2];
67	>	const double DSFtarget = 0.60653066 * eval_rbfrep(rbf,outvec) *
68	>						COSF(outvec[2]);
69		double inside_rad = rad_epsilon;
70		double outside_rad = 0.5;
71		double DSFinside = eval_DSFsurround(rbf, outvec, inside_rad);
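The constant 0.60653066 in the new DSFtarget is exp(-1/2), so est_DSFrad() apparently brackets the radius (between inside_rad and outside_rad, via the interp_rad interpolation whose #undef appears in the next hunk) at which the surrounding DSF falls to 1/sqrt(e) of its cosine-weighted peak. For a Gaussian lobe that crossing happens exactly at one standard deviation, which is presumably why this radius serves as the lobe-size estimate. A minimal, self-contained sketch of that relation (the Gaussian model and names here are illustrative assumptions, not code from this file):

	#include <stdio.h>
	#include <math.h>

	/* Gaussian lobe with peak P and width sig, evaluated at radius r */
	static double
	gauss_lobe(double P, double sig, double r)
	{
		return(P * exp(-r*r/(2.*sig*sig)));
	}

	int
	main(void)
	{
			/* at r == sig the lobe is exp(-0.5) ~= 0.60653066 of its peak */
		printf("%f\n", gauss_lobe(1., 0.1, 0.1) / gauss_lobe(1., 0.1, 0.));
		return(0);
	}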
95	#undef interp_rad
96	}
97	
98	+	static int
99	+	dbl_cmp(const void *p1, const void *p2)
100	+	{
101	+		double d1 = *(const double *)p1;
102	+		double d2 = *(const double *)p2;
103	+	
104	+		if (d1 > d2) return(1);
105	+		if (d1 < d2) return(-1);
106	+		return(0);
107	+	}
108	+	
109	/* Compute average BSDF peak from current DSF's */
110	static void
111	comp_bsdf_spec(void)
112	{
113	<		double peak_sum = 0;
113	>		double vmod_sum = 0;
114			double rad_sum = 0;
115			int n = 0;
116	+		double *cost_list = NULL;
117	+		double max_cost = 1.;
118			RBFNODE *rbf;
119			FVECT sdv;
120	<	
121	<		if (dsf_list == NULL) {
122	<			bsdf_spec_peak = 0;
120	>					/* grazing 25th percentile */
121	>		for (rbf = dsf_list; rbf != NULL; rbf = rbf->next)
122	>			n++;
123	>		if (n >= 10)
124	>			cost_list = (double *)malloc(sizeof(double)*n);
125	>		if (cost_list == NULL) {
126	>			bsdf_spec_val = 0;
127				bsdf_spec_rad = 0;
128				return;
129			}
130	+		n = 0;
131	+		for (rbf = dsf_list; rbf != NULL; rbf = rbf->next)
132	+			cost_list[n++] = rbf->invec[2]*input_orient;
133	+		qsort(cost_list, n, sizeof(double), dbl_cmp);
134	+		max_cost = cost_list[(n+3)/4];
135	+		free(cost_list);
136	+		n = 0;
137			for (rbf = dsf_list; rbf != NULL; rbf = rbf->next) {
138	+			double this_rad, cosfact, vest;
139	+			if (rbf->invec[2]*input_orient > max_cost)
140	+				continue;
141				sdv[0] = -rbf->invec[0];
142				sdv[1] = -rbf->invec[1];
143				sdv[2] = rbf->invec[2]*(2*(input_orient==output_orient) - 1);
144	<			peak_sum += eval_rbfrep(rbf, sdv);
145	<			rad_sum += est_DSFrad(rbf, sdv);
144	>			this_rad = est_DSFrad(rbf, sdv);
145	>			cosfact = COSF(sdv[2]);
146	>			vest = eval_rbfrep(rbf, sdv) * cosfact *
147	>					(2*M_PI) * this_rad*this_rad;
148	>			if (vest > rbf->vtotal)
149	>				vest = rbf->vtotal;
150	>			vmod_sum += vest / cosfact;
151	>			rad_sum += this_rad;
152				++n;
153			}
121	-		bsdf_spec_peak = peak_sum/(double)n;
154			bsdf_spec_rad = rad_sum/(double)n;
155	+		bsdf_spec_val = vmod_sum/(2.*M_PI*n*bsdf_spec_rad*bsdf_spec_rad);
156	}
157	
158	/* Create a new migration holder (sharing memory for multiprocessing) */
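The reworked comp_bsdf_spec() above derives its specular estimate from the quarter of the DSFs measured at the most grazing incidence: it gathers cos(theta_in)*input_orient for every node, sorts them with the new dbl_cmp() comparator, and keeps only nodes at or below the 25th-percentile cosine cost_list[(n+3)/4]. For each kept node it then forms vest = eval_rbfrep(rbf,sdv) * cosfact * 2*pi * rad^2 as an approximate lobe volume in the mirror direction, clamps it to the node's recorded vtotal, and finally converts the averaged, cosine-corrected volume back to a BSDF value by dividing by 2*pi*n*rad^2. A minimal, self-contained sketch of the percentile cutoff alone, assuming a plain array of cosines (names and data are illustrative, not from the diff):

	#include <stdio.h>
	#include <stdlib.h>

	static int
	dbl_cmp(const void *p1, const void *p2)		/* same ordering as in the diff */
	{
		double d1 = *(const double *)p1;
		double d2 = *(const double *)p2;

		if (d1 > d2) return(1);
		if (d1 < d2) return(-1);
		return(0);
	}

	int
	main(void)
	{
		double	cosin[12] = {.95,.88,.10,.70,.35,.60,.15,.82,.05,.50,.92,.27};
		int	n = 12, i, kept = 0;
		double	max_cost;
				/* sort ascending, cut off at the 25th percentile */
		qsort(cosin, n, sizeof(double), dbl_cmp);
		max_cost = cosin[(n+3)/4];
		for (i = 0; i < n; i++)		/* keep only the most grazing quarter */
			kept += (cosin[i] <= max_cost);
		printf("cutoff %.2f keeps %d of %d nodes\n", max_cost, kept, n);
		return(0);
	}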