| 1 | greg | 2.1 | #ifndef lint | 
| 2 | greg | 2.35 | static const char       RCSid[] = "$Id: raypcalls.c,v 2.34 2020/06/16 17:58:11 greg Exp $"; | 
| 3 | greg | 2.1 | #endif | 
| 4 |  |  | /* | 
| 5 |  |  | *  raypcalls.c - interface for parallel rendering using Radiance | 
| 6 |  |  | * | 
| 7 |  |  | *  External symbols declared in ray.h | 
| 8 |  |  | */ | 
| 9 |  |  |  | 
| 10 | greg | 2.2 | #include "copyright.h" | 
| 11 | greg | 2.1 |  | 
| 12 |  |  | /* | 
| 13 |  |  | *  These calls are designed similarly to the ones in raycalls.c, | 
| 14 |  |  | *  but allow for multiple rendering processes on the same host | 
| 15 |  |  | *  machine.  There is no sense in specifying more child processes | 
| 16 | greg | 2.21 | *  than you have processor cores, but even one child may help by allowing | 
| 17 | greg | 2.1 | *  asynchronous ray computation in an interactive program, and | 
| 18 |  |  | *  will protect the caller from fatal rendering errors. | 
| 19 |  |  | * | 
| 20 | greg | 2.21 | *  You should first read and understand the header in raycalls.c, | 
| 21 | greg | 2.1 | *  as some things are explained there that are not repeated here. | 
| 22 |  |  | * | 
| 23 |  |  | *  The first step is opening one or more rendering processes | 
| 24 |  |  | *  with a call to ray_pinit(oct, nproc).  Before calling fork(), | 
| 25 |  |  | *  ray_pinit() loads the octree and data structures into the | 
| 26 | greg | 2.13 | *  caller's memory, and ray_popen() synchronizes the ambient | 
| 27 |  |  | *  file, if any.  Shared memory permits all sorts of queries | 
| 28 | greg | 2.21 | *  in the caller that would not otherwise be possible, while | 
| 29 | greg | 2.1 | *  adding no real memory overhead, since all the static data are | 
| 30 | greg | 2.21 | *  shared between processes.  Rays are traced using a simple | 
| 31 | greg | 2.1 | *  queuing mechanism, explained below. | 
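|  |  |  | * | 
|  |  |  | *  For example, a minimal start-up sketch might be (octname and | 
|  |  |  | *  nproc stand in for the caller's own octree path and desired | 
|  |  |  | *  number of child processes): | 
|  |  |  | * | 
|  |  |  | *      ray_pinit(octname, nproc); | 
|  |  |  | *      ( queue and retrieve rays as described below ) | 
|  |  |  | *      ray_pdone(1); | 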
| 32 |  |  | * | 
| 33 | greg | 2.14 | *  The ray queue buffers RAYQLEN rays before sending them to | 
| 34 | greg | 2.21 | *  the children, each of which may internally buffer RAYQLEN rays | 
| 35 |  |  | *  during evaluation.  Rays are not returned in the order | 
| 36 |  |  | *  they are sent when multiple processes are open. | 
| 37 | greg | 2.14 | * | 
| 38 | greg | 2.13 | *  Rays are queued and returned by a single | 
| 39 | greg | 2.1 | *  ray_pqueue() call.  A ray_pqueue() return | 
| 40 |  |  | *  value of 0 indicates that no rays are ready | 
| 41 |  |  | *  and the queue is not yet full.  A return value of 1 | 
| 42 |  |  | *  indicates that a ray was returned, though it is probably | 
| 43 |  |  | *  not the one you just requested.  Rays may be identified by | 
| 44 |  |  | *  the rno member of the RAY struct, which is incremented | 
| 45 |  |  | *  by the rayorigin() call, or may be set explicitly by | 
| 46 |  |  | *  the caller.  Below is an example call sequence: | 
| 47 |  |  | * | 
| 48 |  |  | *      myRay.rorg = ( ray origin point ) | 
| 49 |  |  | *      myRay.rdir = ( normalized ray direction ) | 
| 50 |  |  | *      myRay.rmax = ( maximum length, or zero for no limit ) | 
| 51 | greg | 2.11 | *      rayorigin(&myRay, PRIMARY, NULL, NULL); | 
| 52 | greg | 2.1 | *      myRay.rno = ( my personal ray identifier ) | 
| 53 |  |  | *      if (ray_pqueue(&myRay) == 1) | 
| 54 |  |  | *              { do something with results } | 
| 55 |  |  | * | 
| 56 |  |  | *  Note the differences between this and the simpler ray_trace() | 
| 57 |  |  | *  call.  In particular, the call may or may not return a value | 
| 58 |  |  | *  in the passed ray structure.  Also, you need to call rayorigin() | 
| 59 | greg | 2.7 | *  yourself, which is normally called for you by ray_trace().  The | 
| 60 |  |  | *  benefit is that ray_pqueue() will trace rays faster in | 
| 61 | greg | 2.1 | *  proportion to the number of CPUs you have available on your | 
| 62 |  |  | *  system.  If the ray queue is full before the call, ray_pqueue() | 
| 63 |  |  | *  will block until a result is ready so it can queue this one. | 
| 64 | greg | 2.3 | *  The global int ray_pnidle indicates the number of currently idle | 
| 65 | greg | 2.1 | *  children.  If you want to check for completed rays without blocking, | 
| 66 |  |  | *  or get the results from rays that have been queued without | 
| 67 |  |  | *  queuing any new ones, the ray_presult() call is for you: | 
| 68 |  |  | * | 
| 69 |  |  | *      if (ray_presult(&myRay, 1) == 1) | 
| 70 |  |  | *              { do something with results } | 
| 71 |  |  | * | 
| 72 |  |  | *  If the second argument is 1, the call won't block when | 
| 73 |  |  | *  results aren't ready, but will immediately return 0. | 
| 74 |  |  | *  If the second argument is 0, the call will block | 
| 75 |  |  | *  until a value is available, returning 0 only if the | 
| 76 | greg | 2.26 | *  queue is completely empty.  Setting the second argument | 
| 77 |  |  | *  to -1 returns 0 unless a ray is ready in the queue and | 
| 78 |  |  | *  no system calls are needed.  A negative return value | 
| 79 | greg | 2.1 | *  indicates that a rendering process died.  If this | 
| 80 | greg | 2.21 | *  happens, ray_pclose(0) is automatically called to close | 
| 81 | greg | 2.3 | *  all child processes, and ray_pnprocs is set to zero. | 
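|  |  |  | * | 
|  |  |  | *  Putting these calls together, a typical loop might look like | 
|  |  |  | *  the following sketch, where getNextRay() and useResult() are | 
|  |  |  | *  hypothetical stand-ins for the caller's own routines: | 
|  |  |  | * | 
|  |  |  | *      RAY     myRay; | 
|  |  |  | *      int     rv; | 
|  |  |  | *      while (getNextRay(&myRay)) {    ( sets rorg, rdir, rmax ) | 
|  |  |  | *              rayorigin(&myRay, PRIMARY, NULL, NULL); | 
|  |  |  | *              myRay.rno = ( my personal ray identifier ) | 
|  |  |  | *              if ((rv = ray_pqueue(&myRay)) < 0) | 
|  |  |  | *                      break;          ( a rendering process died ) | 
|  |  |  | *              if (rv == 1) | 
|  |  |  | *                      useResult(&myRay); | 
|  |  |  | *      } | 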
| 82 | greg | 2.1 | * | 
| 83 |  |  | *  If you just want to fill the ray queue without checking for | 
| 84 | greg | 2.3 | *  results, check ray_pnidle and call ray_psend(): | 
| 85 | greg | 2.1 | * | 
| 86 | greg | 2.3 | *      while (ray_pnidle) { | 
| 87 | greg | 2.1 | *              ( set up ray ) | 
| 88 |  |  | *              ray_psend(&myRay); | 
| 89 |  |  | *      } | 
| 90 |  |  | * | 
| 91 | greg | 2.25 | *  Note that it is a mistake to call ray_psend() when | 
| 92 |  |  | *  ray_pnidle is zero, and nothing will be sent in | 
| 93 |  |  | *  this case.  Otherwise, the ray_presult() and/or ray_pqueue() | 
| 94 |  |  | *  functions may be called subsequently to read back the results | 
| 95 |  |  | *  of rays queued by ray_psend(). | 
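|  |  |  | * | 
|  |  |  | *  For example, to await and use everything queued above (with | 
|  |  |  | *  useResult() again standing in for the caller's own code): | 
|  |  |  | * | 
|  |  |  | *      while (ray_presult(&myRay, 0) > 0) | 
|  |  |  | *              useResult(&myRay); | 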
| 96 | greg | 2.1 | * | 
| 97 |  |  | *  When you are done, you may call ray_pdone(1) to close | 
| 98 |  |  | *  all child processes and clean up memory used by Radiance. | 
| 99 |  |  | *  Any queued ray calculations will be awaited and discarded. | 
| 100 |  |  | *  As with ray_done(), ray_pdone(0) hangs onto data files | 
| 101 |  |  | *  and fonts that are likely to be used in subsequent renderings. | 
| 102 | greg | 2.21 | *  Whether you need to clean up memory or not, you should | 
| 103 |  |  | *  at least call ray_pclose(0) to await the child processes. | 
| 104 | greg | 2.23 | *  The caller should define a quit() function that calls | 
| 105 |  |  | *  ray_pclose(0) if ray_pnprocs > 0. | 
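|  |  |  | * | 
|  |  |  | *  A minimal quit() along these lines might look like the | 
|  |  |  | *  following sketch (adapt it to your program's own cleanup): | 
|  |  |  | * | 
|  |  |  | *      void | 
|  |  |  | *      quit(int code) | 
|  |  |  | *      { | 
|  |  |  | *              if (ray_pnprocs > 0) | 
|  |  |  | *                      ray_pclose(0); | 
|  |  |  | *              exit(code); | 
|  |  |  | *      } | 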
| 106 | greg | 2.1 | * | 
| 107 |  |  | *  Warning:  You cannot affect any of the rendering processes | 
| 108 |  |  | *  by changing global parameter values once ray_pinit() has | 
| 109 |  |  | *  been called.  Changing global parameters will have no effect | 
| 110 |  |  | *  until the next call to ray_pinit(), which restarts everything. | 
| 111 |  |  | *  If you just want to reap children so that you can alter the | 
| 112 |  |  | *  rendering parameters without reloading the scene, use the | 
| 113 |  |  | *  ray_pclose(0) and ray_popen(nproc) calls to close and | 
| 114 | greg | 2.7 | *  then restart the child processes after the changes are made. | 
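|  |  |  | * | 
|  |  |  | *  For example (a sketch only; maxdepth stands for whatever | 
|  |  |  | *  global rendering parameter you wish to change): | 
|  |  |  | * | 
|  |  |  | *      ray_pclose(0);          ( reap all child processes ) | 
|  |  |  | *      maxdepth = ( new value ) | 
|  |  |  | *      ray_popen(nproc);       ( fork children with new settings ) | 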
| 115 | greg | 2.1 | * | 
| 116 |  |  | *  Note:  These routines are written to coordinate with the | 
| 117 |  |  | *  definitions in raycalls.c, and in fact depend on them. | 
| 118 |  |  | *  If you want to trace a ray and get a result synchronously, | 
| 119 | greg | 2.13 | *  use the ray_trace() call to compute it in the parent process. | 
| 120 | greg | 2.7 | *  This will not interfere with any subprocess calculations, | 
| 121 |  |  | *  but beware that a fatal error may end with a call to quit(). | 
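|  |  |  | * | 
|  |  |  | *  For example, to trace one ray synchronously in the parent, | 
|  |  |  | *  following the conventions of raycalls.c: | 
|  |  |  | * | 
|  |  |  | *      myRay.rorg = ( ray origin point ) | 
|  |  |  | *      myRay.rdir = ( normalized ray direction ) | 
|  |  |  | *      myRay.rmax = ( maximum length, or zero for no limit ) | 
|  |  |  | *      ray_trace(&myRay);      ( rayorigin() is called for you ) | 
|  |  |  | *      ( myRay now holds the computed result ) | 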
| 122 | greg | 2.1 | * | 
| 123 |  |  | *  Note:  One of the advantages of using separate processes | 
| 124 |  |  | *  is that it gives the calling program some immunity from | 
| 125 |  |  | *  fatal rendering errors.  As discussed in raycalls.c, | 
| 126 |  |  | *  Radiance tends to throw up its hands and exit at the | 
| 127 |  |  | *  first sign of trouble, calling quit() to return control | 
| 128 | greg | 2.7 | *  to the top level.  Although you can avoid exit() with | 
| 129 | greg | 2.1 | *  your own longjmp() in quit(), the cleanup afterwards | 
| 130 |  |  | *  is always suspect.  Through the use of subprocesses, | 
| 131 |  |  | *  we avoid this pitfall by closing the processes and | 
| 132 |  |  | *  returning a negative value from ray_pqueue() or | 
| 133 |  |  | *  ray_presult().  If you get a negative value from either | 
| 134 |  |  | *  of these calls, you can assume that the processes have | 
| 135 | greg | 2.21 | *  been cleaned up with a call to ray_pclose(), though you | 
| 136 | greg | 2.1 | *  will have to call ray_pdone() yourself if you want to | 
| 137 | greg | 2.7 | *  free memory.  Obviously, you cannot continue rendering | 
| 138 |  |  | *  without risking further errors, but otherwise your | 
| 139 |  |  | *  process should not be compromised. | 
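|  |  |  | * | 
|  |  |  | *  One way to handle such a failure (a sketch only): | 
|  |  |  | * | 
|  |  |  | *      if ((rv = ray_pqueue(&myRay)) < 0) { | 
|  |  |  | *              ( children were already closed by ray_pclose(0) ) | 
|  |  |  | *              ray_pdone(0);   ( free scene data, keep files/fonts ) | 
|  |  |  | *              ( report the error, reload, or exit as appropriate ) | 
|  |  |  | *      } | 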
| 140 | greg | 2.1 | */ | 
| 141 |  |  |  | 
| 142 | schorsch | 2.6 | #include  "rtprocess.h" | 
| 143 | greg | 2.1 | #include  "ray.h" | 
| 144 | schorsch | 2.6 | #include  "ambient.h" | 
| 145 | greg | 2.18 | #include  <sys/types.h> | 
| 146 |  |  | #include  <sys/wait.h> | 
| 147 | greg | 2.1 | #include  "selcall.h" | 
| 148 |  |  |  | 
| 149 |  |  | #ifndef RAYQLEN | 
| 150 | greg | 2.32 | #define RAYQLEN         24              /* # rays to send at once */ | 
| 151 | greg | 2.1 | #endif | 
| 152 |  |  |  | 
| 153 |  |  | #ifndef MAX_RPROCS | 
| 154 |  |  | #if (FD_SETSIZE/2-4 < 64) | 
| 155 |  |  | #define MAX_NPROCS      (FD_SETSIZE/2-4) | 
| 156 |  |  | #else | 
| 157 |  |  | #define MAX_NPROCS      64              /* max. # rendering processes */ | 
| 158 |  |  | #endif | 
| 159 |  |  | #endif | 
| 160 |  |  |  | 
| 161 |  |  | extern char     *shm_boundary;          /* boundary of shared memory */ | 
| 162 |  |  |  | 
| 163 | greg | 2.3 | int             ray_pnprocs = 0;        /* number of child processes */ | 
| 164 |  |  | int             ray_pnidle = 0;         /* number of idle children */ | 
| 165 | greg | 2.1 |  | 
| 166 |  |  | static struct child_proc { | 
| 167 | greg | 2.29 | RT_PID  pid;                            /* child process id */ | 
| 168 | greg | 2.1 | int     fd_send;                        /* write to child here */ | 
| 169 |  |  | int     fd_recv;                        /* read from child here */ | 
| 170 |  |  | int     npending;                       /* # rays in process */ | 
| 171 | greg | 2.21 | RNUMBER rno[RAYQLEN];                   /* working on these rays */ | 
| 172 | greg | 2.1 | } r_proc[MAX_NPROCS];                   /* our child processes */ | 
| 173 |  |  |  | 
| 174 |  |  | static RAY      r_queue[2*RAYQLEN];     /* ray i/o buffer */ | 
| 175 | greg | 2.23 | static int      r_send_next = 0;        /* next send ray placement */ | 
| 176 |  |  | static int      r_recv_first = RAYQLEN; /* position of first unreported ray */ | 
| 177 |  |  | static int      r_recv_next = RAYQLEN;  /* next received ray placement */ | 
| 178 | greg | 2.1 |  | 
| 179 | greg | 2.28 | static int      samplestep = 1;         /* sample step size */ | 
| 180 |  |  |  | 
| 181 | greg | 2.1 | #define sendq_full()    (r_send_next >= RAYQLEN) | 
| 182 |  |  |  | 
| 183 | schorsch | 2.6 | static int ray_pflush(void); | 
| 184 | greg | 2.13 | static void ray_pchild(int fd_in, int fd_out); | 
| 185 | greg | 2.1 |  | 
| 186 | schorsch | 2.6 |  | 
| 187 | greg | 2.22 | void | 
| 188 | schorsch | 2.6 | ray_pinit(              /* initialize ray-tracing processes */ | 
| 189 |  |  | char    *otnm, | 
| 190 |  |  | int     nproc | 
| 191 |  |  | ) | 
| 192 | greg | 2.1 | { | 
| 193 |  |  | if (nobjects > 0)               /* close old calculation */ | 
| 194 |  |  | ray_pdone(0); | 
| 195 |  |  |  | 
| 196 |  |  | ray_init(otnm);                 /* load the shared scene */ | 
| 197 |  |  |  | 
| 198 |  |  | ray_popen(nproc);               /* fork children */ | 
| 199 |  |  | } | 
| 200 |  |  |  | 
| 201 |  |  |  | 
| 202 |  |  | static int | 
| 203 | schorsch | 2.6 | ray_pflush(void)                        /* send queued rays to idle children */ | 
| 204 | greg | 2.1 | { | 
| 205 |  |  | int     nc, n, nw, i, sfirst; | 
| 206 |  |  |  | 
| 207 | schorsch | 2.5 | if ((ray_pnidle <= 0) | (r_send_next <= 0)) | 
| 208 | greg | 2.1 | return(0);              /* nothing we can send */ | 
| 209 |  |  |  | 
| 210 |  |  | sfirst = 0;                     /* divvy up labor */ | 
| 211 | greg | 2.3 | nc = ray_pnidle; | 
| 212 |  |  | for (i = ray_pnprocs; nc && i--; ) { | 
| 213 | greg | 2.1 | if (r_proc[i].npending > 0) | 
| 214 |  |  | continue;       /* child looks busy */ | 
| 215 | greg | 2.34 | n = (r_send_next - sfirst) / nc--; | 
| 216 | greg | 2.1 | if (!n) | 
| 217 |  |  | continue; | 
| 218 |  |  | /* smuggle set size in crtype */ | 
| 219 |  |  | r_queue[sfirst].crtype = n; | 
| 220 |  |  | nw = writebuf(r_proc[i].fd_send, (char *)&r_queue[sfirst], | 
| 221 |  |  | sizeof(RAY)*n); | 
| 222 |  |  | if (nw != sizeof(RAY)*n) | 
| 223 |  |  | return(-1);     /* write error */ | 
| 224 |  |  | r_proc[i].npending = n; | 
| 225 |  |  | while (n--)             /* record ray IDs */ | 
| 226 |  |  | r_proc[i].rno[n] = r_queue[sfirst+n].rno; | 
| 227 |  |  | sfirst += r_proc[i].npending; | 
| 228 | greg | 2.3 | ray_pnidle--;           /* now she's busy */ | 
| 229 | greg | 2.1 | } | 
| 230 |  |  | if (sfirst != r_send_next) | 
| 231 | greg | 2.24 | error(CONSISTENCY, "code screwup in ray_pflush()"); | 
| 232 | greg | 2.1 | r_send_next = 0; | 
| 233 |  |  | return(sfirst);                 /* return total # sent */ | 
| 234 |  |  | } | 
| 235 |  |  |  | 
| 236 |  |  |  | 
| 237 | greg | 2.25 | int | 
| 238 | schorsch | 2.6 | ray_psend(                      /* add a ray to our send queue */ | 
| 239 |  |  | RAY     *r | 
| 240 |  |  | ) | 
| 241 | greg | 2.1 | { | 
| 242 | greg | 2.25 | int     rv; | 
| 243 |  |  |  | 
| 244 |  |  | if ((r == NULL) | (ray_pnidle <= 0)) | 
| 245 |  |  | return(0); | 
| 246 | greg | 2.1 | /* flush output if necessary */ | 
| 247 | greg | 2.25 | if (sendq_full() && (rv = ray_pflush()) <= 0) | 
| 248 |  |  | return(rv); | 
| 249 | greg | 2.1 |  | 
| 250 | greg | 2.14 | r_queue[r_send_next++] = *r; | 
| 251 | greg | 2.25 | return(1); | 
| 252 | greg | 2.1 | } | 
| 253 |  |  |  | 
| 254 |  |  |  | 
| 255 | greg | 2.22 | int | 
| 256 | schorsch | 2.6 | ray_pqueue(                     /* queue a ray for computation */ | 
| 257 |  |  | RAY     *r | 
| 258 |  |  | ) | 
| 259 | greg | 2.1 | { | 
| 260 |  |  | if (r == NULL) | 
| 261 |  |  | return(0); | 
| 262 |  |  | /* check for full send queue */ | 
| 263 |  |  | if (sendq_full()) { | 
| 264 | greg | 2.19 | RAY     mySend = *r; | 
| 265 | greg | 2.1 | /* wait for a result */ | 
| 266 | greg | 2.19 | if (ray_presult(r, 0) <= 0) | 
| 267 |  |  | return(-1); | 
| 268 | greg | 2.1 | /* put new ray in queue */ | 
| 269 | greg | 2.14 | r_queue[r_send_next++] = mySend; | 
| 270 | greg | 2.25 |  | 
| 271 | greg | 2.19 | return(1); | 
| 272 | greg | 2.1 | } | 
| 273 | greg | 2.13 | /* else add ray to send queue */ | 
| 274 | greg | 2.14 | r_queue[r_send_next++] = *r; | 
| 275 | greg | 2.1 | /* check for returned ray... */ | 
| 276 |  |  | if (r_recv_first >= r_recv_next) | 
| 277 |  |  | return(0); | 
| 278 |  |  | /* ...one is sitting in queue */ | 
| 279 | greg | 2.14 | *r = r_queue[r_recv_first++]; | 
| 280 | greg | 2.1 | return(1); | 
| 281 |  |  | } | 
| 282 |  |  |  | 
| 283 |  |  |  | 
| 284 | greg | 2.22 | int | 
| 285 | schorsch | 2.6 | ray_presult(            /* check for a completed ray */ | 
| 286 |  |  | RAY     *r, | 
| 287 |  |  | int     poll | 
| 288 |  |  | ) | 
| 289 | greg | 2.1 | { | 
| 290 |  |  | static struct timeval   tpoll;  /* zero timeval struct */ | 
| 291 |  |  | static fd_set   readset, errset; | 
| 292 |  |  | int     n, ok; | 
| 293 | greg | 2.30 | int     pn; | 
| 294 | greg | 2.1 |  | 
| 295 |  |  | if (r == NULL) | 
| 296 |  |  | return(0); | 
| 297 |  |  | /* check queued results first */ | 
| 298 |  |  | if (r_recv_first < r_recv_next) { | 
| 299 | greg | 2.14 | *r = r_queue[r_recv_first++]; | 
| 300 | greg | 2.1 | return(1); | 
| 301 |  |  | } | 
| 302 | greg | 2.23 | if (poll < 0)                   /* immediate polling mode? */ | 
| 303 |  |  | return(0); | 
| 304 |  |  |  | 
| 305 | greg | 2.3 | n = ray_pnprocs - ray_pnidle;   /* pending before flush? */ | 
| 306 | greg | 2.1 |  | 
| 307 |  |  | if (ray_pflush() < 0)           /* send new rays to process */ | 
| 308 |  |  | return(-1); | 
| 309 |  |  | /* reset receive queue */ | 
| 310 |  |  | r_recv_first = r_recv_next = RAYQLEN; | 
| 311 |  |  |  | 
| 312 |  |  | if (!poll)                      /* count newly sent unless polling */ | 
| 313 | greg | 2.3 | n = ray_pnprocs - ray_pnidle; | 
| 314 | greg | 2.1 | if (n <= 0)                     /* return if nothing to await */ | 
| 315 |  |  | return(0); | 
| 316 | greg | 2.16 | if (!poll && ray_pnprocs == 1)  /* one process -> skip select() */ | 
| 317 |  |  | FD_SET(r_proc[0].fd_recv, &readset); | 
| 318 |  |  |  | 
| 319 | greg | 2.1 | getready:                               /* any children waiting for us? */ | 
| 320 | greg | 2.3 | for (pn = ray_pnprocs; pn--; ) | 
| 321 | greg | 2.1 | if (FD_ISSET(r_proc[pn].fd_recv, &readset) || | 
| 322 |  |  | FD_ISSET(r_proc[pn].fd_recv, &errset)) | 
| 323 |  |  | break; | 
| 324 | greg | 2.22 | /* call select() if we must */ | 
| 325 | greg | 2.1 | if (pn < 0) { | 
| 326 |  |  | FD_ZERO(&readset); FD_ZERO(&errset); n = 0; | 
| 327 | greg | 2.3 | for (pn = ray_pnprocs; pn--; ) { | 
| 328 | greg | 2.1 | if (r_proc[pn].npending > 0) | 
| 329 |  |  | FD_SET(r_proc[pn].fd_recv, &readset); | 
| 330 |  |  | FD_SET(r_proc[pn].fd_recv, &errset); | 
| 331 |  |  | if (r_proc[pn].fd_recv >= n) | 
| 332 |  |  | n = r_proc[pn].fd_recv + 1; | 
| 333 |  |  | } | 
| 334 |  |  | /* find out who is ready */ | 
| 335 |  |  | while ((n = select(n, &readset, (fd_set *)NULL, &errset, | 
| 336 |  |  | poll ? &tpoll : (struct timeval *)NULL)) < 0) | 
| 337 |  |  | if (errno != EINTR) { | 
| 338 |  |  | error(WARNING, | 
| 339 | greg | 2.24 | "select call failed in ray_presult()"); | 
| 340 | greg | 2.1 | ray_pclose(0); | 
| 341 |  |  | return(-1); | 
| 342 |  |  | } | 
| 343 |  |  | if (n > 0)              /* go back and get it */ | 
| 344 |  |  | goto getready; | 
| 345 |  |  | return(0);              /* else poll came up empty */ | 
| 346 |  |  | } | 
| 347 |  |  | if (r_recv_next + r_proc[pn].npending > sizeof(r_queue)/sizeof(RAY)) | 
| 348 |  |  | error(CONSISTENCY, "buffer shortage in ray_presult()"); | 
| 349 |  |  |  | 
| 350 |  |  | /* read rendered ray data */ | 
| 351 |  |  | n = readbuf(r_proc[pn].fd_recv, (char *)&r_queue[r_recv_next], | 
| 352 |  |  | sizeof(RAY)*r_proc[pn].npending); | 
| 353 |  |  | if (n > 0) { | 
| 354 |  |  | r_recv_next += n/sizeof(RAY); | 
| 355 |  |  | ok = (n == sizeof(RAY)*r_proc[pn].npending); | 
| 356 |  |  | } else | 
| 357 |  |  | ok = 0; | 
| 358 |  |  | /* reset child's status */ | 
| 359 |  |  | FD_CLR(r_proc[pn].fd_recv, &readset); | 
| 360 |  |  | if (n <= 0) | 
| 361 |  |  | FD_CLR(r_proc[pn].fd_recv, &errset); | 
| 362 |  |  | r_proc[pn].npending = 0; | 
| 363 | greg | 2.3 | ray_pnidle++; | 
| 364 | greg | 2.1 | /* check for rendering errors */ | 
| 365 |  |  | if (!ok) { | 
| 366 |  |  | ray_pclose(0);          /* process died -- clean up */ | 
| 367 |  |  | return(-1); | 
| 368 |  |  | } | 
| 369 |  |  | /* preen returned rays */ | 
| 370 |  |  | for (n = r_recv_next - r_recv_first; n--; ) { | 
| 371 | greg | 2.30 | RAY     *rp = &r_queue[r_recv_first + n]; | 
| 372 | greg | 2.1 | rp->rno = r_proc[pn].rno[n]; | 
| 373 |  |  | rp->parent = NULL; | 
| 374 |  |  | rp->newcset = rp->clipset = NULL; | 
| 375 |  |  | rp->rox = NULL; | 
| 376 |  |  | rp->slights = NULL; | 
| 377 |  |  | } | 
| 378 |  |  | /* return first ray received */ | 
| 379 | greg | 2.13 | *r = r_queue[r_recv_first++]; | 
| 380 | greg | 2.1 | return(1); | 
| 381 |  |  | } | 
| 382 |  |  |  | 
| 383 |  |  |  | 
| 384 | greg | 2.22 | void | 
| 385 | schorsch | 2.6 | ray_pdone(              /* reap children and free data */ | 
| 386 |  |  | int     freall | 
| 387 |  |  | ) | 
| 388 | greg | 2.1 | { | 
| 389 |  |  | ray_pclose(0);                  /* close child processes */ | 
| 390 |  |  |  | 
| 391 |  |  | if (shm_boundary != NULL) {     /* clear shared memory boundary */ | 
| 392 |  |  | free((void *)shm_boundary); | 
| 393 |  |  | shm_boundary = NULL; | 
| 394 |  |  | } | 
| 395 | greg | 2.23 |  | 
| 396 | greg | 2.1 | ray_done(freall);               /* free rendering data */ | 
| 397 |  |  | } | 
| 398 |  |  |  | 
| 399 |  |  |  | 
| 400 |  |  | static void | 
| 401 | schorsch | 2.6 | ray_pchild(     /* process rays (never returns) */ | 
| 402 |  |  | int     fd_in, | 
| 403 |  |  | int     fd_out | 
| 404 |  |  | ) | 
| 405 | greg | 2.1 | { | 
| 406 |  |  | int     n; | 
| 407 | greg | 2.30 | int     i; | 
| 408 | greg | 2.15 | /* flag child process for quit() */ | 
| 409 |  |  | ray_pnprocs = -1; | 
| 410 | greg | 2.1 | /* read each ray request set */ | 
| 411 |  |  | while ((n = read(fd_in, (char *)r_queue, sizeof(r_queue))) > 0) { | 
| 412 |  |  | int     n2; | 
| 413 | greg | 2.12 | if (n < sizeof(RAY)) | 
| 414 | greg | 2.1 | break; | 
| 415 |  |  | /* get smuggled set length */ | 
| 416 | greg | 2.12 | n2 = sizeof(RAY)*r_queue[0].crtype - n; | 
| 417 | greg | 2.1 | if (n2 < 0) | 
| 418 | greg | 2.24 | error(INTERNAL, "buffer over-read in ray_pchild()"); | 
| 419 | greg | 2.1 | if (n2 > 0) {           /* read the rest of the set */ | 
| 420 | greg | 2.12 | i = readbuf(fd_in, (char *)r_queue + n, n2); | 
| 421 |  |  | if (i != n2) | 
| 422 | greg | 2.1 | break; | 
| 423 |  |  | n += n2; | 
| 424 |  |  | } | 
| 425 | greg | 2.12 | n /= sizeof(RAY); | 
| 426 | greg | 2.1 | /* evaluate rays */ | 
| 427 |  |  | for (i = 0; i < n; i++) { | 
| 428 |  |  | r_queue[i].crtype = r_queue[i].rtype; | 
| 429 |  |  | r_queue[i].parent = NULL; | 
| 430 |  |  | r_queue[i].clipset = NULL; | 
| 431 |  |  | r_queue[i].slights = NULL; | 
| 432 | greg | 2.21 | r_queue[i].rlvl = 0; | 
| 433 | greg | 2.28 | samplendx += samplestep; | 
| 434 | greg | 2.1 | rayclear(&r_queue[i]); | 
| 435 |  |  | rayvalue(&r_queue[i]); | 
| 436 |  |  | } | 
| 437 |  |  | /* write back our results */ | 
| 438 |  |  | i = writebuf(fd_out, (char *)r_queue, sizeof(RAY)*n); | 
| 439 |  |  | if (i != sizeof(RAY)*n) | 
| 440 | greg | 2.24 | error(SYSTEM, "write error in ray_pchild()"); | 
| 441 | greg | 2.1 | } | 
| 442 |  |  | if (n) | 
| 443 | greg | 2.24 | error(SYSTEM, "read error in ray_pchild()"); | 
| 444 | greg | 2.1 | ambsync(); | 
| 445 |  |  | quit(0);                        /* normal exit */ | 
| 446 |  |  | } | 
| 447 |  |  |  | 
| 448 |  |  |  | 
| 449 | greg | 2.22 | void | 
| 450 | schorsch | 2.6 | ray_popen(                      /* open the specified # processes */ | 
| 451 |  |  | int     nadd | 
| 452 |  |  | ) | 
| 453 | greg | 2.1 | { | 
| 454 |  |  | /* check if our table has room */ | 
| 455 | greg | 2.3 | if (ray_pnprocs + nadd > MAX_NPROCS) | 
| 456 |  |  | nadd = MAX_NPROCS - ray_pnprocs; | 
| 457 | greg | 2.1 | if (nadd <= 0) | 
| 458 |  |  | return; | 
| 459 | greg | 2.13 | ambsync();                      /* load any new ambient values */ | 
| 460 | greg | 2.20 | if (shm_boundary == NULL) {     /* first child process? */ | 
| 461 |  |  | preload_objs();         /* preload auxiliary data */ | 
| 462 |  |  | /* set shared memory boundary */ | 
| 463 |  |  | shm_boundary = (char *)malloc(16); | 
| 464 |  |  | strcpy(shm_boundary, "SHM_BOUNDARY"); | 
| 465 |  |  | } | 
| 466 | greg | 2.13 | fflush(NULL);                   /* clear pending output */ | 
| 467 | greg | 2.28 | samplestep = ray_pnprocs + nadd; | 
| 468 | greg | 2.1 | while (nadd--) {                /* fork each new process */ | 
| 469 |  |  | int     p0[2], p1[2]; | 
| 470 |  |  | if (pipe(p0) < 0 || pipe(p1) < 0) | 
| 471 |  |  | error(SYSTEM, "cannot create pipe"); | 
| 472 | greg | 2.3 | if ((r_proc[ray_pnprocs].pid = fork()) == 0) { | 
| 473 | greg | 2.1 | int     pn;     /* close others' descriptors */ | 
| 474 | greg | 2.3 | for (pn = ray_pnprocs; pn--; ) { | 
| 475 | greg | 2.1 | close(r_proc[pn].fd_send); | 
| 476 |  |  | close(r_proc[pn].fd_recv); | 
| 477 |  |  | } | 
| 478 |  |  | close(p0[0]); close(p1[1]); | 
| 479 | greg | 2.24 | close(0);       /* don't share stdin */ | 
| 480 | greg | 2.1 | /* following call never returns */ | 
| 481 |  |  | ray_pchild(p1[0], p0[1]); | 
| 482 |  |  | } | 
| 483 | greg | 2.3 | if (r_proc[ray_pnprocs].pid < 0) | 
| 484 | greg | 2.1 | error(SYSTEM, "cannot fork child process"); | 
| 485 |  |  | close(p1[0]); close(p0[1]); | 
| 486 | greg | 2.28 | if (rand_samp)          /* decorrelate random sequence */ | 
| 487 |  |  | srandom(random()); | 
| 488 |  |  | else | 
| 489 |  |  | samplendx++; | 
| 490 | greg | 2.9 | /* | 
| 491 |  |  | * Close write stream on exec to avoid multiprocessing deadlock. | 
| 492 |  |  | * No use in read stream without it, so set flag there as well. | 
| 493 |  |  | */ | 
| 494 |  |  | fcntl(p1[1], F_SETFD, FD_CLOEXEC); | 
| 495 |  |  | fcntl(p0[0], F_SETFD, FD_CLOEXEC); | 
| 496 | greg | 2.3 | r_proc[ray_pnprocs].fd_send = p1[1]; | 
| 497 |  |  | r_proc[ray_pnprocs].fd_recv = p0[0]; | 
| 498 |  |  | r_proc[ray_pnprocs].npending = 0; | 
| 499 |  |  | ray_pnprocs++; | 
| 500 |  |  | ray_pnidle++; | 
| 501 | greg | 2.1 | } | 
| 502 |  |  | } | 
| 503 |  |  |  | 
| 504 |  |  |  | 
| 505 | greg | 2.22 | void | 
| 506 | schorsch | 2.6 | ray_pclose(             /* close one or more child processes */ | 
| 507 |  |  | int     nsub | 
| 508 |  |  | ) | 
| 509 | greg | 2.1 | { | 
| 510 |  |  | static int      inclose = 0; | 
| 511 | greg | 2.29 | RAY             res; | 
| 512 |  |  | int             i, status = 0; | 
| 513 | greg | 2.35 | /* check no child / in child */ | 
| 514 |  |  | if (ray_pnprocs <= 0) | 
| 515 |  |  | return; | 
| 516 | greg | 2.1 | /* check recursion */ | 
| 517 |  |  | if (inclose) | 
| 518 |  |  | return; | 
| 519 |  |  | inclose++; | 
| 520 |  |  | /* check argument */ | 
| 521 | schorsch | 2.5 | if ((nsub <= 0) | (nsub > ray_pnprocs)) | 
| 522 | greg | 2.3 | nsub = ray_pnprocs; | 
| 523 | greg | 2.1 | /* clear our ray queue */ | 
| 524 |  |  | while (ray_presult(&res,0) > 0) | 
| 525 |  |  | ; | 
| 526 | greg | 2.23 | r_send_next = 0;                /* hard reset in case of error */ | 
| 527 |  |  | r_recv_first = r_recv_next = RAYQLEN; | 
| 528 | greg | 2.29 | /* close send pipes */ | 
| 529 |  |  | for (i = ray_pnprocs-nsub; i < ray_pnprocs; i++) | 
| 530 |  |  | close(r_proc[i].fd_send); | 
| 531 |  |  |  | 
| 532 |  |  | if (nsub == 1) {                /* awaiting single process? */ | 
| 533 |  |  | if (waitpid(r_proc[ray_pnprocs-1].pid, &status, 0) < 0) | 
| 534 | greg | 2.8 | status = 127<<8; | 
| 535 | greg | 2.29 | close(r_proc[ray_pnprocs-1].fd_recv); | 
| 536 |  |  | } else                          /* else unordered wait */ | 
| 537 |  |  | for (i = 0; i < nsub; ) { | 
| 538 |  |  | int     j, mystatus; | 
| 539 |  |  | RT_PID  pid = wait(&mystatus); | 
| 540 |  |  | for (j = ray_pnprocs-nsub; j < ray_pnprocs; j++) | 
| 541 |  |  | if (r_proc[j].pid == pid) { | 
| 542 |  |  | if (mystatus) | 
| 543 |  |  | status = mystatus; | 
| 544 |  |  | close(r_proc[j].fd_recv); | 
| 545 |  |  | ++i; | 
| 546 |  |  | } | 
| 547 | greg | 2.1 | } | 
| 548 | greg | 2.29 | ray_pnprocs -= nsub; | 
| 549 |  |  | ray_pnidle -= nsub; | 
| 550 |  |  | if (status) { | 
| 551 |  |  | sprintf(errmsg, "rendering process exited with code %d", status>>8); | 
| 552 |  |  | error(WARNING, errmsg); | 
| 553 | greg | 2.1 | } | 
| 554 |  |  | inclose--; | 
| 555 |  |  | } |