| 13 |  | *  These calls are designed similarly to the ones in raycalls.c, | 
| 14 |  | *  but allow for multiple rendering processes on the same host | 
| 15 |  | *  machine.  There is no sense in specifying more child processes | 
| 16 | < | *  than you have processors, but one child may help by allowing | 
| 16 | > | *  than you have processor cores, but one child may help by allowing | 
| 17 |  | *  asynchronous ray computation in an interactive program, and | 
| 18 |  | *  will protect the caller from fatal rendering errors. | 
| 19 |  | * | 
| 20 | < | *  You should first read and undrstand the header in raycalls.c, | 
| 20 | > | *  You should first read and understand the header in raycalls.c, | 
| 21 |  | *  as some things are explained there that are not repeated here. | 
| 22 |  | * | 
| 23 |  | *  The first step is opening one or more rendering processes | 
| 24 |  | *  with a call to ray_pinit(oct, nproc).  Before calling fork(), | 
| 25 |  | *  ray_pinit() loads the octree and data structures into the | 
| 26 | < | *  caller's memory.  This permits all sorts of queries that | 
| 27 | < | *  wouldn't be possible otherwise, without causing any real | 
| 26 | > | *  caller's memory, and ray_popen() synchronizes the ambient | 
| 27 | > | *  file, if any.  Shared memory permits all sorts of queries | 
| 28 | > | *  that wouldn't be possible otherwise without causing any real | 
| 29 |  | *  memory overhead, since all the static data are shared | 
| 30 | < | *  between processes.  Rays are then traced using a simple | 
| 30 | > | *  between processes.  Rays are traced using a simple | 
| 31 |  | *  queuing mechanism, explained below. | 
| 32 |  | * | 
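As an illustrative sketch only (the octree name and process count below are placeholders, not taken from this file), the overall lifecycle implied by the paragraphs above might look like this:

        ray_pinit("scene.oct", 4);      /* load the octree, fork 4 children */
        /* ... queue rays and collect results as described below ... */
        ray_pdone(1);                   /* reap the children and free all memory */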
| 33 | < | *  The ray queue holds as many rays as there are rendering | 
| 34 | < | *  processes.  Rays are queued and returned by a single | 
| 33 | > | *  The ray queue buffers RAYQLEN rays before sending to | 
| 34 | > | *  children, each of which may internally buffer RAYQLEN rays | 
| 35 | > | *  during evaluation.  Rays are not returned in the order | 
| 36 | > | *  they are sent when multiple processes are open. | 
| 37 | > | * | 
| 38 | > | *  Rays are queued and returned by a single | 
| 39 |  | *  ray_pqueue() call.  A ray_pqueue() return | 
| 40 |  | *  value of 0 indicates that no rays are ready | 
| 41 |  | *  and the queue is not yet full.  A return value of 1 | 
| 48 |  | *      myRay.rorg = ( ray origin point ) | 
| 49 |  | *      myRay.rdir = ( normalized ray direction ) | 
| 50 |  | *      myRay.rmax = ( maximum length, or zero for no limit ) | 
| 51 | < | *      rayorigin(&myRay, NULL, PRIMARY, 1.0); | 
| 51 | > | *      rayorigin(&myRay, PRIMARY, NULL, NULL); | 
| 52 |  | *      myRay.rno = ( my personal ray identifier ) | 
| 53 |  | *      if (ray_pqueue(&myRay) == 1) | 
| 54 |  | *              { do something with results } | 
| 56 |  | *  Note the differences between this and the simpler ray_trace() | 
| 57 |  | *  call.  In particular, the call may or may not return a value | 
| 58 |  | *  in the passed ray structure.  Also, you need to call rayorigin() | 
| 59 | < | *  yourself, which is normally for you by ray_trace().  The | 
| 60 | < | *  great thing is that ray_pqueue() will trace rays faster in | 
| 59 | > | *  yourself, which is normally called for you by ray_trace().  The | 
| 60 | > | *  benefit is that ray_pqueue() will trace rays faster in | 
| 61 |  | *  proportion to the number of CPUs you have available on your | 
| 62 |  | *  system.  If the ray queue is full before the call, ray_pqueue() | 
| 63 |  | *  will block until a result is ready so it can queue this one. | 
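A hedged sketch of a driving loop consistent with the description above; nsamples, the orig/dir sample arrays, and record_result() are hypothetical names supplied for illustration:

        RAY     myRay;
        int     rv;
        long    i;

        for (i = 0; i < nsamples; i++) {
                VCOPY(myRay.rorg, orig[i]);             /* hypothetical sample data */
                VCOPY(myRay.rdir, dir[i]);
                myRay.rmax = 0.0;
                rayorigin(&myRay, PRIMARY, NULL, NULL);
                myRay.rno = i;
                if ((rv = ray_pqueue(&myRay)) < 0)
                        break;                          /* a child process died */
                if (rv > 0)
                        record_result(&myRay);          /* hypothetical callback */
        }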
| 73 |  | *  results aren't ready, but will immediately return 0. | 
| 74 |  | *  If the second argument is 0, the call will block | 
| 75 |  | *  until a value is available, returning 0 only if the | 
| 76 | < | *  queue is completely empty.  A negative return value | 
| 76 | > | *  queue is completely empty.  Setting the second argument | 
| 77 | > | *  to -1 returns 0 unless a ray is ready in the queue and | 
| 78 | > | *  no system calls are needed.  A negative return value | 
| 79 |  | *  indicates that a rendering process died.  If this | 
| 80 | < | *  happens, ray_close(0) is automatically called to close | 
| 80 | > | *  happens, ray_pclose(0) is automatically called to close | 
| 81 |  | *  all child processes, and ray_pnprocs is set to zero. | 
| 82 |  | * | 
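A sketch of the polling modes just described, reusing the hypothetical record_result() helper from the loop above:

        while (ray_presult(&myRay, 1) > 0)      /* poll: returns 0 if nothing ready */
                record_result(&myRay);
                                                /* poll = -1 would avoid system calls */
        while (ray_presult(&myRay, 0) > 0)      /* block until the queue is empty */
                record_result(&myRay);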
| 83 |  | *  If you just want to fill the ray queue without checking for | 
| 88 |  | *              ray_psend(&myRay); | 
| 89 |  | *      } | 
| 90 |  | * | 
| 91 | < | *  The ray_presult() and/or ray_pqueue() functions may then be | 
| 92 | < | *  called to read back the results. | 
| 91 | > | *  Note that it is a mistake to call ray_psend() when | 
| 92 | > | *  ray_pnidle is zero, and nothing will be sent in | 
| 93 | > | *  this case.  Otherwise, the ray_presult() and/or ray_pqueue() | 
| 94 | > | *  functions may be called subsequently to read back the results | 
| 95 | > | *  of rays queued by ray_psend(). | 
| 96 |  | * | 
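Putting the two halves together, a hedged fill-then-drain sketch, where next_ray() is a hypothetical routine that sets up the next ray to trace:

        while (ray_pnidle > 0 && next_ray(&myRay)) {    /* fill idle children */
                if (ray_psend(&myRay) <= 0)
                        break;
        }
        while (ray_presult(&myRay, 1) > 0)              /* read back what is ready */
                record_result(&myRay);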
| 97 |  | *  When you are done, you may call ray_pdone(1) to close | 
| 98 |  | *  all child processes and clean up memory used by Radiance. | 
| 99 |  | *  Any queued ray calculations will be awaited and discarded. | 
| 100 |  | *  As with ray_done(), ray_pdone(0) hangs onto data files | 
| 101 |  | *  and fonts that are likely to be used in subsequent renderings. | 
| 102 | < | *  Whether you want to bother cleaning up memory or not, you | 
| 103 | < | *  should at least call ray_pclose(0) to clean the child processes. | 
| 102 | > | *  Whether you need to clean up memory or not, you should | 
| 103 | > | *  at least call ray_pclose(0) to await the child processes. | 
| 104 | > | *  The caller should define a quit() function that calls | 
| 105 | > | *  ray_pclose(0) if ray_pnprocs > 0. | 
| 106 |  | * | 
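Since this revision drops the library's own quit() stub (removed at the end of this diff), the caller must now supply one; a minimal sketch consistent with the advice above:

        void
        quit(int ec)                    /* called by Radiance on fatal errors */
        {
                if (ray_pnprocs > 0)
                        ray_pclose(0);  /* await the child processes */
                exit(ec);
        }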
| 107 |  | *  Warning:  You cannot affect any of the rendering processes | 
| 108 |  | *  by changing global parameter values once ray_pinit() has | 
| 111 |  | *  If you just want to reap children so that you can alter the | 
| 112 |  | *  rendering parameters without reloading the scene, use the | 
| 113 |  | *  ray_pclose(0) and ray_popen(nproc) calls to close | 
| 114 | < | *  then restart the child processes. | 
| 114 | > | *  then restart the child processes after the changes are made. | 
| 115 |  | * | 
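A brief sketch of that close/alter/reopen cycle; ambounce is used here only as an example of a global rendering parameter:

        ray_pclose(0);          /* reap children, keep the scene in memory */
        ambounce = 2;           /* e.g., change a global rendering parameter */
        ray_popen(nproc);       /* restart children with the new settings */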
| 116 |  | *  Note:  These routines are written to coordinate with the | 
| 117 |  | *  definitions in raycalls.c, and in fact depend on them. | 
| 118 |  | *  If you want to trace a ray and get a result synchronously, | 
| 119 |  | *  use the ray_trace() call to compute it in the parent process. | 
| 120 | + | *  This will not interfere with any subprocess calculations, | 
| 121 | + | *  but beware that a fatal error may end with a call to quit(). | 
| 122 |  | * | 
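A sketch of such a synchronous query in the parent, assuming the ray_trace() interface documented in raycalls.c; eye_point and view_dir are hypothetical vectors:

        RAY     probe;

        VCOPY(probe.rorg, eye_point);   /* hypothetical query origin */
        VCOPY(probe.rdir, view_dir);    /* hypothetical (normalized) direction */
        probe.rmax = 0.0;
        ray_trace(&probe);              /* traced synchronously in the parent */
        /* probe.rcol now holds the computed value */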
| 123 |  | *  Note:  One of the advantages of using separate processes | 
| 124 |  | *  is that it gives the calling program some immunity from | 
| 125 |  | *  fatal rendering errors.  As discussed in raycalls.c, | 
| 126 |  | *  Radiance tends to throw up its hands and exit at the | 
| 127 |  | *  first sign of trouble, calling quit() to return control | 
| 128 | < | *  to the system.  Although you can avoid exit() with | 
| 128 | > | *  to the top level.  Although you can avoid exit() with | 
| 129 |  | *  your own longjmp() in quit(), the cleanup afterwards | 
| 130 |  | *  is always suspect.  Through the use of subprocesses, | 
| 131 |  | *  we avoid this pitfall by closing the processes and | 
| 132 |  | *  returning a negative value from ray_pqueue() or | 
| 133 |  | *  ray_presult().  If you get a negative value from either | 
| 134 |  | *  of these calls, you can assume that the processes have | 
| 135 | < | *  been cleaned up with a call to ray_close(), though you | 
| 135 | > | *  been cleaned up with a call to ray_pclose(), though you | 
| 136 |  | *  will have to call ray_pdone() yourself if you want to | 
| 137 | < | *  free memory.  Obviously, you cannot continue rendering, | 
| 138 | < | *  but otherwise your process should not be compromised. | 
| 137 | > | *  free memory.  Obviously, you cannot continue rendering | 
| 138 | > | *  without risking further errors, but otherwise your | 
| 139 | > | *  process should not be compromised. | 
| 140 |  | */ | 
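Finally, a hedged sketch of reacting to a negative return value, consistent with the error-handling notes above:

        if ((rv = ray_pqueue(&myRay)) < 0) {
                /* children were already reaped via ray_pclose(); ray_pnprocs == 0 */
                ray_pdone(1);           /* free rendering data before giving up */
                return(-1);             /* let the caller decide what to do next */
        }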
| 141 |  |  | 
| 142 | + | #include  "rtprocess.h" | 
| 143 |  | #include  "ray.h" | 
| 144 | < |  | 
| 144 | > | #include  "ambient.h" | 
| 145 | > | #include  <sys/types.h> | 
| 146 | > | #include  <sys/wait.h> | 
| 147 |  | #include  "selcall.h" | 
| 148 |  |  | 
| 149 |  | #ifndef RAYQLEN | 
| 150 | < | #define RAYQLEN         16              /* # rays to send at once */ | 
| 150 | > | #define RAYQLEN         24              /* # rays to send at once */ | 
| 151 |  | #endif | 
| 152 |  |  | 
| 153 |  | #ifndef MAX_RPROCS | 
| 162 |  |  | 
| 163 |  | int             ray_pnprocs = 0;        /* number of child processes */ | 
| 164 |  | int             ray_pnidle = 0;         /* number of idle children */ | 
| 165 | + | int             ray_pnbatch = 0;        /* throughput over responsiveness? */ | 
| 166 |  |  | 
| 167 |  | static struct child_proc { | 
| 168 | < | int     pid;                            /* child process id */ | 
| 168 | > | RT_PID  pid;                            /* child process id */ | 
| 169 |  | int     fd_send;                        /* write to child here */ | 
| 170 |  | int     fd_recv;                        /* read from child here */ | 
| 171 |  | int     npending;                       /* # rays in process */ | 
| 172 | < | unsigned long  rno[RAYQLEN];            /* working on these rays */ | 
| 172 | > | RNUMBER rno[RAYQLEN];                   /* working on these rays */ | 
| 173 |  | } r_proc[MAX_NPROCS];                   /* our child processes */ | 
| 174 |  |  | 
| 175 |  | static RAY      r_queue[2*RAYQLEN];     /* ray i/o buffer */ | 
| 176 | < | static int      r_send_next;            /* next send ray placement */ | 
| 177 | < | static int      r_recv_first;           /* position of first unreported ray */ | 
| 178 | < | static int      r_recv_next;            /* next receive ray placement */ | 
| 176 | > | static int      r_send_next = 0;        /* next send ray placement */ | 
| 177 | > | static int      r_recv_first = RAYQLEN; /* position of first unreported ray */ | 
| 178 | > | static int      r_recv_next = RAYQLEN;  /* next received ray placement */ | 
| 179 |  |  | 
| 180 | + | static int      samplestep = 1;         /* sample step size */ | 
| 181 | + |  | 
| 182 |  | #define sendq_full()    (r_send_next >= RAYQLEN) | 
| 183 |  |  | 
| 184 | + | static int ray_pflush(void); | 
| 185 | + | static void ray_pchild(int fd_in, int fd_out); | 
| 186 |  |  | 
| 187 | + |  | 
| 188 |  | void | 
| 189 | < | ray_pinit(otnm, nproc)          /* initialize ray-tracing processes */ | 
| 190 | < | char    *otnm; | 
| 191 | < | int     nproc; | 
| 189 | > | ray_pinit(              /* initialize ray-tracing processes */ | 
| 190 | > | char    *otnm, | 
| 191 | > | int     nproc | 
| 192 | > | ) | 
| 193 |  | { | 
| 194 |  | if (nobjects > 0)               /* close old calculation */ | 
| 195 |  | ray_pdone(0); | 
| 196 |  |  | 
| 197 |  | ray_init(otnm);                 /* load the shared scene */ | 
| 198 |  |  | 
| 174 | – | preload_objs();                 /* preload auxiliary data */ | 
| 175 | – |  | 
| 176 | – | /* set shared memory boundary */ | 
| 177 | – | shm_boundary = (char *)malloc(16); | 
| 178 | – | strcpy(shm_boundary, "SHM_BOUNDARY"); | 
| 179 | – |  | 
| 180 | – | r_send_next = 0;                /* set up queue */ | 
| 181 | – | r_recv_first = r_recv_next = RAYQLEN; | 
| 182 | – |  | 
| 199 |  | ray_popen(nproc);               /* fork children */ | 
| 200 |  | } | 
| 201 |  |  | 
| 202 |  |  | 
| 203 |  | static int | 
| 204 | < | ray_pflush()                    /* send queued rays to idle children */ | 
| 204 | > | ray_pflush(void)                        /* send queued rays to idle children */ | 
| 205 |  | { | 
| 206 |  | int     nc, n, nw, i, sfirst; | 
| 207 |  |  | 
| 208 | < | if ((ray_pnidle <= 0 | r_send_next <= 0)) | 
| 208 | > | if ((ray_pnidle <= 0) | (r_send_next <= 0)) | 
| 209 |  | return(0);              /* nothing we can send */ | 
| 210 |  |  | 
| 211 |  | sfirst = 0;                     /* divvy up labor */ | 
| 213 |  | for (i = ray_pnprocs; nc && i--; ) { | 
| 214 |  | if (r_proc[i].npending > 0) | 
| 215 |  | continue;       /* child looks busy */ | 
| 216 | < | n = (r_send_next - sfirst)/nc--; | 
| 216 | > | n = r_send_next - sfirst; | 
| 217 | > | if (ray_pnbatch) | 
| 218 | > | nc--;           /* maximize bundling for batch calc */ | 
| 219 | > | else | 
| 220 | > | n /= nc--;      /* distribute work for interactivity */ | 
| 221 |  | if (!n) | 
| 222 |  | continue; | 
| 223 |  | /* smuggle set size in crtype */ | 
| 233 |  | ray_pnidle--;           /* now she's busy */ | 
| 234 |  | } | 
| 235 |  | if (sfirst != r_send_next) | 
| 236 | < | error(CONSISTENCY, "code screwup in ray_pflush"); | 
| 236 | > | error(CONSISTENCY, "code screwup in ray_pflush()"); | 
| 237 |  | r_send_next = 0; | 
| 238 |  | return(sfirst);                 /* return total # sent */ | 
| 239 |  | } | 
| 240 |  |  | 
| 241 |  |  | 
| 242 | < | void | 
| 243 | < | ray_psend(r)                    /* add a ray to our send queue */ | 
| 244 | < | RAY     *r; | 
| 242 | > | int | 
| 243 | > | ray_psend(                      /* add a ray to our send queue */ | 
| 244 | > | RAY     *r | 
| 245 | > | ) | 
| 246 |  | { | 
| 247 | < | if (r == NULL) | 
| 248 | < | return; | 
| 247 | > | int     rv; | 
| 248 | > |  | 
| 249 | > | if ((r == NULL) | (ray_pnidle <= 0)) | 
| 250 | > | return(0); | 
| 251 |  | /* flush output if necessary */ | 
| 252 | < | if (sendq_full() && ray_pflush() <= 0) | 
| 253 | < | error(INTERNAL, "ray_pflush failed in ray_psend"); | 
| 252 | > | if (sendq_full() && (rv = ray_pflush()) <= 0) | 
| 253 | > | return(rv); | 
| 254 |  |  | 
| 255 | < | copystruct(&r_queue[r_send_next], r); | 
| 256 | < | r_send_next++; | 
| 255 | > | r_queue[r_send_next++] = *r; | 
| 256 | > | return(1); | 
| 257 |  | } | 
| 258 |  |  | 
| 259 |  |  | 
| 260 |  | int | 
| 261 | < | ray_pqueue(r)                   /* queue a ray for computation */ | 
| 262 | < | RAY     *r; | 
| 261 | > | ray_pqueue(                     /* queue a ray for computation */ | 
| 262 | > | RAY     *r | 
| 263 | > | ) | 
| 264 |  | { | 
| 265 |  | if (r == NULL) | 
| 266 |  | return(0); | 
| 267 |  | /* check for full send queue */ | 
| 268 |  | if (sendq_full()) { | 
| 269 | < | RAY     mySend; | 
| 246 | < | int     rval; | 
| 247 | < | copystruct(&mySend, r); | 
| 269 | > | RAY     mySend = *r; | 
| 270 |  | /* wait for a result */ | 
| 271 | < | rval = ray_presult(r, 0); | 
| 271 | > | if (ray_presult(r, 0) <= 0) | 
| 272 | > | return(-1); | 
| 273 |  | /* put new ray in queue */ | 
| 274 | < | copystruct(&r_queue[r_send_next], &mySend); | 
| 275 | < | r_send_next++; | 
| 276 | < | return(rval);           /* done */ | 
| 274 | > | r_queue[r_send_next++] = mySend; | 
| 275 | > |  | 
| 276 | > | return(1); | 
| 277 |  | } | 
| 278 | < | /* add ray to send queue */ | 
| 279 | < | copystruct(&r_queue[r_send_next], r); | 
| 257 | < | r_send_next++; | 
| 278 | > | /* else add ray to send queue */ | 
| 279 | > | r_queue[r_send_next++] = *r; | 
| 280 |  | /* check for returned ray... */ | 
| 281 |  | if (r_recv_first >= r_recv_next) | 
| 282 |  | return(0); | 
| 283 |  | /* ...one is sitting in queue */ | 
| 284 | < | copystruct(r, &r_queue[r_recv_first]); | 
| 263 | < | r_recv_first++; | 
| 284 | > | *r = r_queue[r_recv_first++]; | 
| 285 |  | return(1); | 
| 286 |  | } | 
| 287 |  |  | 
| 288 |  |  | 
| 289 |  | int | 
| 290 | < | ray_presult(r, poll)            /* check for a completed ray */ | 
| 291 | < | RAY     *r; | 
| 292 | < | int     poll; | 
| 290 | > | ray_presult(            /* check for a completed ray */ | 
| 291 | > | RAY     *r, | 
| 292 | > | int     poll | 
| 293 | > | ) | 
| 294 |  | { | 
| 295 |  | static struct timeval   tpoll;  /* zero timeval struct */ | 
| 296 |  | static fd_set   readset, errset; | 
| 297 |  | int     n, ok; | 
| 298 | < | register int    pn; | 
| 298 | > | int     pn; | 
| 299 |  |  | 
| 300 |  | if (r == NULL) | 
| 301 |  | return(0); | 
| 302 |  | /* check queued results first */ | 
| 303 |  | if (r_recv_first < r_recv_next) { | 
| 304 | < | copystruct(r, &r_queue[r_recv_first]); | 
| 283 | < | r_recv_first++; | 
| 304 | > | *r = r_queue[r_recv_first++]; | 
| 305 |  | return(1); | 
| 306 |  | } | 
| 307 | + | if (poll < 0)                   /* immediate polling mode? */ | 
| 308 | + | return(0); | 
| 309 | + |  | 
| 310 |  | n = ray_pnprocs - ray_pnidle;   /* pending before flush? */ | 
| 311 |  |  | 
| 312 |  | if (ray_pflush() < 0)           /* send new rays to process */ | 
| 318 |  | n = ray_pnprocs - ray_pnidle; | 
| 319 |  | if (n <= 0)                     /* return if nothing to await */ | 
| 320 |  | return(0); | 
| 321 | + | if (!poll && ray_pnprocs == 1)  /* one process -> skip select() */ | 
| 322 | + | FD_SET(r_proc[0].fd_recv, &readset); | 
| 323 | + |  | 
| 324 |  | getready:                               /* any children waiting for us? */ | 
| 325 |  | for (pn = ray_pnprocs; pn--; ) | 
| 326 |  | if (FD_ISSET(r_proc[pn].fd_recv, &readset) || | 
| 327 |  | FD_ISSET(r_proc[pn].fd_recv, &errset)) | 
| 328 |  | break; | 
| 329 | < | /* call select if we must */ | 
| 329 | > | /* call select() if we must */ | 
| 330 |  | if (pn < 0) { | 
| 331 |  | FD_ZERO(&readset); FD_ZERO(&errset); n = 0; | 
| 332 |  | for (pn = ray_pnprocs; pn--; ) { | 
| 341 |  | poll ? &tpoll : (struct timeval *)NULL)) < 0) | 
| 342 |  | if (errno != EINTR) { | 
| 343 |  | error(WARNING, | 
| 344 | < | "select call failed in ray_presult"); | 
| 344 | > | "select call failed in ray_presult()"); | 
| 345 |  | ray_pclose(0); | 
| 346 |  | return(-1); | 
| 347 |  | } | 
| 373 |  | } | 
| 374 |  | /* preen returned rays */ | 
| 375 |  | for (n = r_recv_next - r_recv_first; n--; ) { | 
| 376 | < | register RAY    *rp = &r_queue[r_recv_first + n]; | 
| 376 | > | RAY     *rp = &r_queue[r_recv_first + n]; | 
| 377 |  | rp->rno = r_proc[pn].rno[n]; | 
| 378 |  | rp->parent = NULL; | 
| 379 |  | rp->newcset = rp->clipset = NULL; | 
| 381 |  | rp->slights = NULL; | 
| 382 |  | } | 
| 383 |  | /* return first ray received */ | 
| 384 | < | copystruct(r, &r_queue[r_recv_first]); | 
| 358 | < | r_recv_first++; | 
| 384 | > | *r = r_queue[r_recv_first++]; | 
| 385 |  | return(1); | 
| 386 |  | } | 
| 387 |  |  | 
| 388 |  |  | 
| 389 |  | void | 
| 390 | < | ray_pdone(freall)               /* reap children and free data */ | 
| 391 | < | int     freall; | 
| 390 | > | ray_pdone(              /* reap children and free data */ | 
| 391 | > | int     freall | 
| 392 | > | ) | 
| 393 |  | { | 
| 394 |  | ray_pclose(0);                  /* close child processes */ | 
| 395 |  |  | 
| 397 |  | free((void *)shm_boundary); | 
| 398 |  | shm_boundary = NULL; | 
| 399 |  | } | 
| 400 | + |  | 
| 401 |  | ray_done(freall);               /* free rendering data */ | 
| 402 |  | } | 
| 403 |  |  | 
| 404 |  |  | 
| 405 |  | static void | 
| 406 | < | ray_pchild(fd_in, fd_out)       /* process rays (never returns) */ | 
| 407 | < | int     fd_in; | 
| 408 | < | int     fd_out; | 
| 406 | > | ray_pchild(     /* process rays (never returns) */ | 
| 407 | > | int     fd_in, | 
| 408 | > | int     fd_out | 
| 409 | > | ) | 
| 410 |  | { | 
| 411 |  | int     n; | 
| 412 | < | register int    i; | 
| 412 | > | int     i; | 
| 413 | > | /* flag child process for quit() */ | 
| 414 | > | ray_pnprocs = -1; | 
| 415 |  | /* read each ray request set */ | 
| 416 |  | while ((n = read(fd_in, (char *)r_queue, sizeof(r_queue))) > 0) { | 
| 417 |  | int     n2; | 
| 418 | < | if (n % sizeof(RAY)) | 
| 418 | > | if (n < sizeof(RAY)) | 
| 419 |  | break; | 
| 389 | – | n /= sizeof(RAY); | 
| 420 |  | /* get smuggled set length */ | 
| 421 | < | n2 = r_queue[0].crtype - n; | 
| 421 | > | n2 = sizeof(RAY)*r_queue[0].crtype - n; | 
| 422 |  | if (n2 < 0) | 
| 423 | < | error(INTERNAL, "buffer over-read in ray_pchild"); | 
| 423 | > | error(INTERNAL, "buffer over-read in ray_pchild()"); | 
| 424 |  | if (n2 > 0) {           /* read the rest of the set */ | 
| 425 | < | i = readbuf(fd_in, (char *)(r_queue+n), | 
| 426 | < | sizeof(RAY)*n2); | 
| 397 | < | if (i != sizeof(RAY)*n2) | 
| 425 | > | i = readbuf(fd_in, (char *)r_queue + n, n2); | 
| 426 | > | if (i != n2) | 
| 427 |  | break; | 
| 428 |  | n += n2; | 
| 429 |  | } | 
| 430 | + | n /= sizeof(RAY); | 
| 431 |  | /* evaluate rays */ | 
| 432 |  | for (i = 0; i < n; i++) { | 
| 433 |  | r_queue[i].crtype = r_queue[i].rtype; | 
| 434 |  | r_queue[i].parent = NULL; | 
| 435 |  | r_queue[i].clipset = NULL; | 
| 436 |  | r_queue[i].slights = NULL; | 
| 437 | < | r_queue[i].revf = raytrace; | 
| 438 | < | samplendx++; | 
| 437 | > | r_queue[i].rlvl = 0; | 
| 438 | > | samplendx += samplestep; | 
| 439 |  | rayclear(&r_queue[i]); | 
| 440 |  | rayvalue(&r_queue[i]); | 
| 441 |  | } | 
| 442 |  | /* write back our results */ | 
| 443 |  | i = writebuf(fd_out, (char *)r_queue, sizeof(RAY)*n); | 
| 444 |  | if (i != sizeof(RAY)*n) | 
| 445 | < | error(SYSTEM, "write error in ray_pchild"); | 
| 445 | > | error(SYSTEM, "write error in ray_pchild()"); | 
| 446 |  | } | 
| 447 |  | if (n) | 
| 448 | < | error(SYSTEM, "read error in ray_pchild"); | 
| 448 | > | error(SYSTEM, "read error in ray_pchild()"); | 
| 449 |  | ambsync(); | 
| 450 |  | quit(0);                        /* normal exit */ | 
| 451 |  | } | 
| 452 |  |  | 
| 453 |  |  | 
| 454 |  | void | 
| 455 | < | ray_popen(nadd)                 /* open the specified # processes */ | 
| 456 | < | int     nadd; | 
| 455 | > | ray_popen(                      /* open the specified # processes */ | 
| 456 | > | int     nadd | 
| 457 | > | ) | 
| 458 |  | { | 
| 459 |  | /* check if our table has room */ | 
| 460 |  | if (ray_pnprocs + nadd > MAX_NPROCS) | 
| 461 |  | nadd = MAX_NPROCS - ray_pnprocs; | 
| 462 |  | if (nadd <= 0) | 
| 463 |  | return; | 
| 464 | < | fflush(stderr);                 /* clear pending output */ | 
| 465 | < | fflush(stdout); | 
| 464 | > | ambsync();                      /* load any new ambient values */ | 
| 465 | > | if (shm_boundary == NULL) {     /* first child process? */ | 
| 466 | > | preload_objs();         /* preload auxiliary data */ | 
| 467 | > | /* set shared memory boundary */ | 
| 468 | > | shm_boundary = (char *)malloc(16); | 
| 469 | > | strcpy(shm_boundary, "SHM_BOUNDARY"); | 
| 470 | > | } | 
| 471 | > | fflush(NULL);                   /* clear pending output */ | 
| 472 | > | samplestep = ray_pnprocs + nadd; | 
| 473 |  | while (nadd--) {                /* fork each new process */ | 
| 474 |  | int     p0[2], p1[2]; | 
| 475 |  | if (pipe(p0) < 0 || pipe(p1) < 0) | 
| 481 |  | close(r_proc[pn].fd_recv); | 
| 482 |  | } | 
| 483 |  | close(p0[0]); close(p1[1]); | 
| 484 | + | close(0);       /* don't share stdin */ | 
| 485 |  | /* following call never returns */ | 
| 486 |  | ray_pchild(p1[0], p0[1]); | 
| 487 |  | } | 
| 488 |  | if (r_proc[ray_pnprocs].pid < 0) | 
| 489 |  | error(SYSTEM, "cannot fork child process"); | 
| 490 |  | close(p1[0]); close(p0[1]); | 
| 491 | + | if (rand_samp)          /* decorrelate random sequence */ | 
| 492 | + | srandom(random()); | 
| 493 | + | else | 
| 494 | + | samplendx++; | 
| 495 | + | /* | 
| 496 | + | * Close write stream on exec to avoid multiprocessing deadlock. | 
| 497 | + | * No use in read stream without it, so set flag there as well. | 
| 498 | + | */ | 
| 499 | + | fcntl(p1[1], F_SETFD, FD_CLOEXEC); | 
| 500 | + | fcntl(p0[0], F_SETFD, FD_CLOEXEC); | 
| 501 |  | r_proc[ray_pnprocs].fd_send = p1[1]; | 
| 502 |  | r_proc[ray_pnprocs].fd_recv = p0[0]; | 
| 503 |  | r_proc[ray_pnprocs].npending = 0; | 
| 508 |  |  | 
| 509 |  |  | 
| 510 |  | void | 
| 511 | < | ray_pclose(nsub)                /* close one or more child processes */ | 
| 512 | < | int     nsub; | 
| 511 | > | ray_pclose(             /* close one or more child processes */ | 
| 512 | > | int     nsub | 
| 513 | > | ) | 
| 514 |  | { | 
| 515 |  | static int      inclose = 0; | 
| 516 | < | RAY     res; | 
| 516 | > | RAY             res; | 
| 517 | > | int             i, status = 0; | 
| 518 |  | /* check recursion */ | 
| 519 |  | if (inclose) | 
| 520 |  | return; | 
| 521 |  | inclose++; | 
| 522 | + | /* check no child / in child */ | 
| 523 | + | if (ray_pnprocs <= 0) | 
| 524 | + | return; | 
| 525 |  | /* check argument */ | 
| 526 | < | if ((nsub <= 0 | nsub > ray_pnprocs)) | 
| 526 | > | if ((nsub <= 0) | (nsub > ray_pnprocs)) | 
| 527 |  | nsub = ray_pnprocs; | 
| 528 |  | /* clear our ray queue */ | 
| 529 |  | while (ray_presult(&res,0) > 0) | 
| 530 |  | ; | 
| 531 | < | /* clean up children */ | 
| 532 | < | while (nsub--) { | 
| 533 | < | int     status; | 
| 534 | < | ray_pnprocs--; | 
| 535 | < | close(r_proc[ray_pnprocs].fd_recv); | 
| 536 | < | close(r_proc[ray_pnprocs].fd_send); | 
| 537 | < | while (wait(&status) != r_proc[ray_pnprocs].pid) | 
| 538 | < | ; | 
| 539 | < | if (status) { | 
| 540 | < | sprintf(errmsg, | 
| 541 | < | "rendering process %d exited with code %d", | 
| 542 | < | r_proc[ray_pnprocs].pid, status>>8); | 
| 543 | < | error(WARNING, errmsg); | 
| 531 | > | r_send_next = 0;                /* hard reset in case of error */ | 
| 532 | > | r_recv_first = r_recv_next = RAYQLEN; | 
| 533 | > | /* close send pipes */ | 
| 534 | > | for (i = ray_pnprocs-nsub; i < ray_pnprocs; i++) | 
| 535 | > | close(r_proc[i].fd_send); | 
| 536 | > |  | 
| 537 | > | if (nsub == 1) {                /* awaiting single process? */ | 
| 538 | > | if (waitpid(r_proc[ray_pnprocs-1].pid, &status, 0) < 0) | 
| 539 | > | status = 127<<8; | 
| 540 | > | close(r_proc[ray_pnprocs-1].fd_recv); | 
| 541 | > | } else                          /* else unordered wait */ | 
| 542 | > | for (i = 0; i < nsub; ) { | 
| 543 | > | int     j, mystatus; | 
| 544 | > | RT_PID  pid = wait(&mystatus); | 
| 545 | > | for (j = ray_pnprocs-nsub; j < ray_pnprocs; j++) | 
| 546 | > | if (r_proc[j].pid == pid) { | 
| 547 | > | if (mystatus) | 
| 548 | > | status = mystatus; | 
| 549 | > | close(r_proc[j].fd_recv); | 
| 550 | > | ++i; | 
| 551 | > | } | 
| 552 |  | } | 
| 553 | < | ray_pnidle--; | 
| 553 | > | ray_pnprocs -= nsub; | 
| 554 | > | ray_pnidle -= nsub; | 
| 555 | > | if (status) { | 
| 556 | > | sprintf(errmsg, "rendering process exited with code %d", status>>8); | 
| 557 | > | error(WARNING, errmsg); | 
| 558 |  | } | 
| 559 |  | inclose--; | 
| 494 | – | } | 
| 495 | – |  | 
| 496 | – |  | 
| 497 | – | void | 
| 498 | – | quit(ec)                        /* make sure exit is called */ | 
| 499 | – | int     ec; | 
| 500 | – | { | 
| 501 | – | exit(ec); | 
| 560 |  | } |