source: branches/working-0711/ccl/lisp-kernel/ppc-exceptions.c @ 7668

Last change on this file since 7668 was 7668, checked in by gb, 12 years ago

openSUSE 10.3 (at least) shipped with a buggy version of bcopy();
see <http://lists.opensuse.org/opensuse-bugs/2007-09/msg14146.html>
Use memmove() instead. (I don't think any of the uses of any of this
stuff care about overlap, but we might as well use something that
checks for it.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.5 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
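
/* A hedged illustration of the two instruction shapes recognized
   above, assuming typical compiled allocation sequences (register
   names and the constant are illustrative, not taken from actual
   compiler output):

     ;; variable-length case: allocptr -= nbytes, nbytes in some GPR
     subf  allocptr, rNBYTES, allocptr
     <ALLOC_TRAP_INSTRUCTION>           ; traps when allocptr < allocbase

     ;; fixed-length case: allocptr -= small constant
     addi  allocptr, allocptr, -16
     <ALLOC_TRAP_INSTRUCTION>
*/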

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
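
/* A hedged sketch of the window finish_allocating_cons() steps
   through, assuming a typical initialization sequence (mnemonics and
   offsets are illustrative only):

     stw rX, car_offset(allocptr)   ; matches STORE_CAR_ALLOCPTR_INSTRUCTION
     stw rY, cdr_offset(allocptr)   ; matches STORE_CDR_ALLOCPTR_INSTRUCTION
     mr  rZ, allocptr               ; result register <- tagged cons
     <UNTAG_ALLOCPTR_INSTRUCTION>   ; clears the tag; emulation stops here
*/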

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr & STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}
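
/* The uvector case differs from the cons case only in the store that
   gets emulated: here the single expected store is of the header word
   (matched against STORE_HEADER_ALLOCPTR_MASK) rather than of the
   car/cdr slots. */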

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
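
/* Worked example (hypothetical numbers): allocation moves allocptr
   downward, so if tcr->last_allocptr was #x30001000 and cur_allocptr
   is now #x30000FE0, the thread has consed #x20 (32) bytes since the
   last update. */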

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0, xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just overflowed its soft limit.
     Signal a stack-overflow condition; the traps that got us here
     will keep firing until all stacks are back under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0, xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}
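
/* Example with concrete (illustrative) numbers: if p->start is #x1000
   and a->softlimit is #x6000, the proposed new limit #x5000 is still
   >= p->start + 16384 (#x5000), so the soft boundary moves down one
   4K page; once a proposal would dip below that floor, the soft area
   stops shrinking. */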

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
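
/* Worked example of the segment arithmetic (hypothetical numbers):
   with a 17-bit allocation quantum (128KB), oldlimit = #x30021000 and
   need = #x5000 give
     newlimit = align(#x30021000,17) + align(#x5000,17)
              = #x30040000 + #x20000 = #x30060000,
   and the thread then allocates downward from allocptr = newlimit
   toward allocbase = oldlimit. */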

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0) {
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    /* No exception context here, so there's no PC to report */
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr);
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

int
gc_like_from_xp(ExceptionInformation *xp,
                int(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}
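
#if 0
/* Hedged usage sketch: a new gc-like entry point would follow the
   same shape as gc_from_xp()/purify_from_xp() below.  "my_walker" is
   hypothetical, not part of this kernel: */
int
my_walk_from_xp(ExceptionInformation *xp, signed_natural param)
{
  extern int my_walker(TCR *, signed_natural);
  return gc_like_from_xp(xp, my_walker, param);
}
#endif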

/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   };

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.)  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to which the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
  }
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  }
  return false;
}
#endif

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but,
  having already taken a fault and committed to an mprotect syscall,
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained), none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
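
/* Hedged example: a store like "stw arg_z,4(arg_y)" whose source
   register holds a freshly consed (ephemeral) tagged pointer
   satisfies (a)-(d) above, so the handler performs the store itself
   rather than just unprotecting the page.  (Register names and the
   displacement are illustrative.) */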

OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l = xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}


int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but typically absent from Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
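  /* "delta" is a byte count; xpPC() is an (opcode *), so step by words. */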
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0; /* defaulted in case the scan finds nothing */
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0, there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp) &= 0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    if (errnum == error_propagate_suspend) {
      status = 0;
    } else {
      status = handle_error(xp, errnum, rb, 0, where);
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1, where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
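
/* Example of form (1) above: if "where" is the 11th instruction word
   of fn's code vector, lisp is handed register_number = fn and
   index = 10 (words past misc_data_offset); form (2) hands it 0 and
   the raw PC.  (The specific numbers are illustrative.) */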

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  sigset_t mask;
  natural  callback_ptr, i;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow
     zone" mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message = (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp, where, (natural) the_trap, 0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Is a trap instruction conditional, i.e. might it not have trapped?  */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI): unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
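
/* Examples of the TO-mask reasoning above (illustrative encodings):
     TO = TO_GT               -> traps only on ">": conditional
     TO = TO_EQ, ra == rb     -> register compared to itself: unconditional
     TO = TO_LT|TO_GT|TO_EQ   -> traps on <, >, and =: unconditional */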
1653
1654OSStatus
1655handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
1656{
1657  LispObj   pname;
1658  LispObj   errdisp = nrs_ERRDISP.vcell;
1659
1660  if ((fulltag_of(errdisp) == fulltag_misc) &&
1661      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1662    /* errdisp is a macptr, we can call back to lisp */
1663    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
1664    return(0);
1665    }
1666
1667  return(-1);
1668}
1669               
1670
1671/*
1672   Current thread has all signals masked.  Before unmasking them,
1673   make it appear that the current thread has been suspended.
1674   (This is to handle the case where another thread is trying
1675   to GC before this thread is able to sieze the exception lock.)
1676*/
1677int
1678prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1679{
1680  int old_valence = tcr->valence;
1681
1682  tcr->pending_exception_context = context;
1683  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1684
1685  ALLOW_EXCEPTIONS(context);
1686  return old_valence;
1687} 
1688
1689void
1690wait_for_exception_lock_in_handler(TCR *tcr, 
1691                                   ExceptionInformation *context,
1692                                   xframe_list *xf)
1693{
1694
1695  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1696#ifdef DEBUG
1697  fprintf(stderr, "0x%x has exception lock\n", tcr);
1698#endif
1699  xf->curr = context;
1700  xf->prev = tcr->xframe;
1701  tcr->xframe =  xf;
1702  tcr->pending_exception_context = NULL;
1703  tcr->valence = TCR_STATE_FOREIGN; 
1704}
1705
1706void
1707unlock_exception_lock_in_handler(TCR *tcr)
1708{
1709  tcr->pending_exception_context = tcr->xframe->curr;
1710  tcr->xframe = tcr->xframe->prev;
1711  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1712#ifdef DEBUG
1713  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
1714#endif
1715  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1716}
1717
1718/*
1719   If an interrupt is pending on exception exit, try to ensure
1720   that the thread sees it as soon as it's able to run.
1721*/
1722void
1723raise_pending_interrupt(TCR *tcr)
1724{
1725  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1726    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1727  }
1728}
1729
1730void
1731exit_signal_handler(TCR *tcr, int old_valence)
1732{
1733  sigset_t mask;
1734  sigfillset(&mask);
1735 
1736  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1737  tcr->valence = old_valence;
1738  tcr->pending_exception_context = NULL;
1739}
1740
1741
1742void
1743signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1744{
1745  xframe_list xframe_link;
1746
1747#ifdef DARWIN
1748  if (running_under_rosetta) {
1749    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
1750  }
1751#endif
1752  if (!use_mach_exception_handling) {
1753   
1754    tcr = (TCR *) get_interrupt_tcr(false);
1755 
1756    /* The signal handler's entered with all signals (notably the
1757       thread_suspend signal) blocked.  Don't allow any other signals
1758       (notably the thread_suspend signal) to preempt us until we've
1759       set the TCR's xframe slot to include the current exception
1760       context.
1761    */
1762   
1763    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1764  }
1765
1766  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1767    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1768    pthread_kill(pthread_self(), thread_suspend_signal);
1769  }
1770
1771 
1772  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1773  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1774    char msg[512];
1775    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1776    if (lisp_Debugger(context, info, signum, false, msg)) {
1777      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1778    }
1779  }
1780
1781  unlock_exception_lock_in_handler(tcr);
1782
1783  /* This thread now looks like a thread that was suspended while
1784     executing lisp code.  If some other thread gets the exception
1785     lock and GCs, the context (this thread's suspend_context) will
1786     be updated.  (That's only of concern if it happens before we
1787     can return to the kernel/to the Mach exception handler).
1788  */
1789  if (!use_mach_exception_handling) {
1790    exit_signal_handler(tcr, old_valence);
1791    raise_pending_interrupt(tcr);
1792  }
1793}
1794
1795/*
1796  If it looks like we're in the middle of an atomic operation, make
1797  it seem as if that operation is either complete or hasn't started
1798  yet.
1799
1800  The cases handled include:
1801
1802  a) storing into a newly-allocated lisp frame on the stack.
1803  b) marking a newly-allocated TSP frame as containing "raw" data.
1804  c) consing: the GC has its own ideas about how this should be
1805     handled, but other callers would be best advised to back
1806     up or move forward, according to whether we're in the middle
1807     of allocating a cons cell or allocating a uvector.
1808  d) a STMW to the vsp
1809  e) EGC write-barrier subprims.
1810*/
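#if 0
/* Illustrative only, mirroring the use in interrupt_handler() below:
   a caller that wants to interrupt a thread first nudges its context
   to a safe point, runs the handler, then undoes any allocptr
   displacement that pc_luser_xp() reported. */
  signed_natural disp = 0;
  pc_luser_xp(context, tcr, &disp);
  /* ... run the handler ... */
  if (disp) {
    xpGPR(context, allocptr) -= disp; /* reinstate the interrupted allocation */
  }
#endif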
1811
1812extern opcode
1813  egc_write_barrier_start,
1814  egc_write_barrier_end, 
1815  egc_store_node_conditional, 
1816  egc_set_hash_key,
1817  egc_gvset,
1818  egc_rplaca,
1819  egc_rplacd;
1820
1821
1822extern opcode ffcall_return_window, ffcall_return_window_end;
1823
1824void
1825pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1826{
1827  pc program_counter = xpPC(xp);
1828  opcode instr = *program_counter;
1829  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1830  LispObj cur_allocptr = xpGPR(xp, allocptr);
1831  int allocptr_tag = fulltag_of(cur_allocptr);
1832 
1833
1834
1835  if ((program_counter < &egc_write_barrier_end) && 
1836      (program_counter >= &egc_write_barrier_start)) {
1837    LispObj *ea = 0, val, root;
1838    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1839    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1840
1841    if (program_counter >= &egc_store_node_conditional) {
1842      if ((program_counter == &egc_store_node_conditional) || ! (xpCCR(xp) & 0x20000000)) {
1843        /* The conditional store either hasn't been attempted yet, or
1844           has failed.  No need to adjust the PC, or do memoization. */
1845        return;
1846      }
1847      val = xpGPR(xp,arg_z);
1848      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1849      xpGPR(xp,arg_z) = t_value;
1850      need_store = false;
1851    } else if (program_counter >= &egc_set_hash_key) {
1852      root = xpGPR(xp,arg_x);
1853      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1854      need_memoize_root = true;
1855    } else if (program_counter >= &egc_gvset) {
1856      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1857      val = xpGPR(xp,arg_z);
1858    } else if (program_counter >= &egc_rplacd) {
1859      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1860      val = xpGPR(xp,arg_z);
1861    } else {                      /* egc_rplaca */
1862      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1863      val = xpGPR(xp,arg_z);
1864    }
1865    if (need_store) {
1866      *ea = val;
1867    }
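    /* Memoize an intergenerational reference: the (LispObj)ea < val
       test identifies a store that makes an oldspace cell point at a
       younger (higher-addressed) object; record it in the refbits
       (and memoize the hash-table root as well, when applicable). */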
1868    if (need_check_memo) {
1869      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1870      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1871          ((LispObj)ea < val)) {
1872        atomic_set_bit(refbits, bitnumber);
1873        if (need_memoize_root) {
1874          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1875          atomic_set_bit(refbits, bitnumber);
1876        }
1877      }
1878    }
1879    set_xpPC(xp, xpLR(xp));
1880    return;
1881  }
1882
1883
1884  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
1885    LispObj tsp_val = xpGPR(xp,tsp);
1886   
1887    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1888    adjust_exception_pc(xp, 4);
1889    return;
1890  }
1891 
1892  if (frame->backlink == (frame+1)) {
1893    if (
1894#ifdef PPC64
1895        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1896        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1897#else
1898        (major_opcode_p(instr, major_opcode_STW)) && 
1899#endif
1900        (RA_field(instr) == sp) &&
1901        /* There are a few places in the runtime that store into
1902           a previously-allocated frame atop the stack when
1903           throwing values around.  We only care about the case
1904           where the frame was newly allocated, in which case
1905           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1906           a few instructions before the current program counter.
1907           (The whole point here is that a newly allocated frame
1908           might contain random values that we don't want the
1909           GC to see; a previously allocated frame should already
1910           be completely initialized.)
1911        */
1912        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1913         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1914         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1915#ifdef PPC64
1916      int disp = DS_field(instr);
1917#else     
1918      int disp = D_field(instr);
1919#endif
1920
1921
1922      if (disp < (4*node_size)) {
1923#if 0
1924        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1925#endif
1926        frame->savevsp = 0;
1927        if (disp < (3*node_size)) {
1928          frame->savelr = 0;
1929          if (disp == node_size) {
1930            frame->savefn = 0;
1931          }
1932        }
1933      }
1934      return;
1935    }
1936  }
1937
1938  if (allocptr_tag != tag_fixnum) {
1939    signed_natural disp = allocptr_displacement(xp);
1940
1941    if (disp) {
1942      /* Being architecturally "at" the alloc trap doesn't tell
1943         us much (in particular, it doesn't tell us whether
1944         or not the thread has committed to taking the trap
1945         and is waiting for the exception lock (or waiting
1946         for the Mach exception thread to tell it how bad
1947         things are) or is about to execute a conditional
1948         trap.)
1949         Regardless of which case applies, we want the
1950         other thread to take (or finish taking) the
1951         trap, and we don't want it to consider its
1952         current allocptr to be valid.
1953         The difference between this case (suspend other
1954         thread for GC) and the previous case (suspend
1955         current thread for interrupt) is solely a
1956         matter of what happens after we leave this
1957         function: some non-current thread will stay
1958         suspended until the GC finishes, then take
1959         (or start processing) the alloc trap.   The
1960         current thread will go off and do PROCESS-INTERRUPT
1961         or something, and may return from the interrupt
1962         and need to finish the allocation that got interrupted.
1963      */
1964
1965      if (alloc_disp) {
1966        *alloc_disp = disp;
1967        xpGPR(xp,allocptr) += disp;
1968        /* Leave the PC at the alloc trap.  When the interrupt
1969           handler returns, it'll decrement allocptr by disp
1970           and the trap may or may not be taken.
1971        */
1972      } else {
1973        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1974        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
1975        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1976      }
1977    } else {
1978#ifdef DEBUG
1979      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
1980#endif
1981      /* If we're already past the alloc_trap, finish allocating
1982         the object. */
1983      if (allocptr_tag == fulltag_cons) {
1984        finish_allocating_cons(xp);
1985#ifdef DEBUG
1986          fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
1987                  tcr);
1988#endif
1989      } else {
1990        if (allocptr_tag == fulltag_misc) {
1991#ifdef DEBUG
1992          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
1993                  tcr);
1994#endif
1995          finish_allocating_uvector(xp);
1996        } else {
1997          Bug(xp, "what's being allocated here ?");
1998        }
1999      }
2000      /* Whatever we finished allocating, reset allocptr/allocbase to
2001         VOID_ALLOCPTR */
2002      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2003    }
2004    return;
2005  }
2006
2007  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2008    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2009    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2010#if 0
2011        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2012#endif
2013
2014    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2015      deref(frame,idx) = 0;
2016    }
2017    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2018    return;
2019  }
2020
2021#ifndef PPC64
2022  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2023      (RA_field(instr) == vsp)) {
2024    int r;
2025    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2026   
2027    for (r = RS_field(instr); r <= 31; r++) {
2028      *vspptr++ = xpGPR(xp,r);
2029    }
2030    adjust_exception_pc(xp, 4);
2031  }
2032#endif
2033}
2034
2035void
2036interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2037{
2038  TCR *tcr = get_interrupt_tcr(false);
2039  if (tcr) {
2040    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2041      tcr->interrupt_pending = 1 << fixnumshift;
2042    } else {
2043      LispObj cmain = nrs_CMAIN.vcell;
2044
2045      if ((fulltag_of(cmain) == fulltag_misc) &&
2046          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2047        /*
2048           This thread can (allegedly) take an interrupt now.
2049           It's tricky to do that if we're executing
2050           foreign code (especially Linuxthreads code, much
2051           of which isn't reentrant.)
2052           If we're unwinding the stack, we also want to defer
2053           the interrupt.
2054        */
2055        if ((tcr->valence != TCR_STATE_LISP) ||
2056            (tcr->unwinding != 0)) {
2057          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2058        } else {
2059          xframe_list xframe_link;
2060          int old_valence;
2061          signed_natural disp=0;
2062         
2063          pc_luser_xp(context, tcr, &disp);
2064          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2065          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2066#ifdef DEBUG
2067          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2068#endif
2069          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2070          if (disp) {
2071            xpGPR(context,allocptr) -= disp;
2072          }
2073          unlock_exception_lock_in_handler(tcr);
2074#ifdef DEBUG
2075          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2076#endif
2077          exit_signal_handler(tcr, old_valence);
2078        }
2079      }
2080    }
2081  }
2082#ifdef DARWIN
2083    DarwinSigReturn(context);
2084#endif
2085}
2086
2087
2088
2089void
2090install_signal_handler(int signo, void *handler)
2091{
2092  struct sigaction sa;
2093 
2094  sa.sa_sigaction = (void *)handler;
2095  sigfillset(&sa.sa_mask);
2096  sa.sa_flags = 
2097    SA_RESTART
2098    | SA_SIGINFO
2099#ifdef DARWIN
2100#ifdef PPC64
2101    | SA_64REGSET
2102#endif
2103#endif
2104    ;
2105
2106  sigaction(signo, &sa, NULL);
2107}
2108
2109void
2110install_pmcl_exception_handlers()
2111{
2112#ifdef DARWIN
2113  extern Boolean use_mach_exception_handling;
2114#endif
2115
2116  Boolean install_signal_handlers_for_exceptions =
2117#ifdef DARWIN
2118    !use_mach_exception_handling
2119#else
2120    true
2121#endif
2122    ;
2123  if (install_signal_handlers_for_exceptions) {
2124    extern int no_sigtrap;
2125    install_signal_handler(SIGILL, (void *)signal_handler);
2126    if (no_sigtrap != 1) {
2127      install_signal_handler(SIGTRAP, (void *)signal_handler);
2128    }
2129    install_signal_handler(SIGBUS,  (void *)signal_handler);
2130    install_signal_handler(SIGSEGV, (void *)signal_handler);
2131    install_signal_handler(SIGFPE, (void *)signal_handler);
2132  }
2133 
2134  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2135                         (void *)interrupt_handler);
2136  signal(SIGPIPE, SIG_IGN);
2137}
2138
2139void
2140quit_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2141{
2142  TCR *tcr = get_tcr(false);
2143  area *a;
2144  sigset_t mask;
2145 
2146  sigemptyset(&mask);
2147
2148  if (tcr) {
2149    tcr->valence = TCR_STATE_FOREIGN;
2150    a = tcr->vs_area;
2151    if (a) {
2152      a->active = a->high;
2153    }
2154    a = tcr->ts_area;
2155    if (a) {
2156      a->active = a->high;
2157    }
2158    a = tcr->cs_area;
2159    if (a) {
2160      a->active = a->high;
2161    }
2162  }
2163 
2164  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2165  pthread_exit(NULL);
2166}
2167
2168void
2169thread_signal_setup()
2170{
2171  thread_suspend_signal = SIG_SUSPEND_THREAD;
2172  thread_resume_signal = SIG_RESUME_THREAD;
2173
2174  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2175  install_signal_handler(thread_resume_signal,  (void *) suspend_resume_handler);
2176  install_signal_handler(SIGQUIT, (void *)quit_handler);
2177}
2178
2179
2180
2181void
2182unprotect_all_areas()
2183{
2184  protected_area_ptr p;
2185
2186  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2187    unprotect_area(p);
2188  }
2189}
2190
2191/*
2192  A binding subprim has just done "twlle limit_regno,idx_regno" and
2193  the trap's been taken.  Extend the tcr's tlb so that the index will
2194  be in bounds and the new limit will be on a page boundary, filling
2195  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2196  the tcr fields and the registers in the xp and return true if this
2197  all works, false otherwise.
2198
2199  Note that the tlb was allocated via malloc, so realloc can do some
2200  of the hard work.
2201*/
2202Boolean
2203extend_tcr_tlb(TCR *tcr, 
2204               ExceptionInformation *xp, 
2205               unsigned limit_regno,
2206               unsigned idx_regno)
2207{
2208  unsigned
2209    index = (unsigned) (xpGPR(xp,idx_regno)),
2210    old_limit = tcr->tlb_limit,
2211    new_limit = align_to_power_of_2(index+1,12),
2212    new_bytes = new_limit-old_limit;
2213  LispObj
2214    *old_tlb = tcr->tlb_pointer,
2215    *new_tlb = realloc(old_tlb, new_limit),
2216    *work;
2217
2218  if (new_tlb == NULL) {
2219    return false;
2220  }
2221 
2222  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2223
2224  while (new_bytes) {
2225    *work++ = no_thread_local_binding_marker;
2226    new_bytes -= sizeof(LispObj);
2227  }
2228  tcr->tlb_pointer = new_tlb;
2229  tcr->tlb_limit = new_limit;
2230  xpGPR(xp, limit_regno) = new_limit;
2231  return true;
2232}
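#if 0
/* Illustrative only (limit_regno/idx_regno are whatever the trapping
   "twlle" instruction encoded): a binding-trap handler could recover
   like this, then skip the trap instruction and resume. */
  if (!extend_tcr_tlb(tcr, xp, limit_regno, idx_regno)) {
    Bug(xp, "can't extend thread-local binding vector");
  }
  adjust_exception_pc(xp, 4);
#endif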
2233
2234
2235
2236void
2237exception_init()
2238{
2239  install_pmcl_exception_handlers();
2240}
2241
2242
2243
2244
2245
2246#ifdef DARWIN
2247
2248
2249#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2250#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2251
2252
2253#if USE_MACH_EXCEPTION_LOCK
2254pthread_mutex_t _mach_exception_lock, *mach_exception_lock;
2255#endif
2256
2257#define LISP_EXCEPTIONS_HANDLED_MASK \
2258 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2259
2260/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2261#define NUM_LISP_EXCEPTIONS_HANDLED 4
2262
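/* Records whatever exception handler(s) foreign code had installed
   before we swapped in our own (see thread_swap_exception_ports() in
   tcr_establish_exception_port() below), so that
   restore_foreign_exception_ports() can reinstate them. */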
2263typedef struct {
2264  int foreign_exception_port_count;
2265  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2266  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2267  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2268  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2269} MACH_foreign_exception_state;
2270
2271
2272
2273
2274/*
2275  Mach's exception mechanism works a little better than its signal
2276  mechanism (and, not incidentally, it gets along with GDB a lot
2277  better.)
2278
2279  Initially, we install an exception handler to handle each native
2280  thread's exceptions.  This process involves creating a distinguished
2281  thread which listens for kernel exception messages on a set of
2282  0 or more thread exception ports.  As threads are created, they're
2283  added to that port set; a thread's exception port is destroyed
2284  (and therefore removed from the port set) when the thread exits.
2285
2286  A few exceptions can be handled directly in the handler thread;
2287  others require that we resume the user thread (and that the
2288  exception thread resumes listening for exceptions.)  The user
2289  thread might eventually want to return to the original context
2290  (possibly modified somewhat.)
2291
2292  As it turns out, the simplest way to force the faulting user
2293  thread to handle its own exceptions is to do pretty much what
2294  signal() does: the exception handling thread sets up a sigcontext
2295  on the user thread's stack and forces the user thread to resume
2296  execution as if a signal handler had been called with that
2297  context as an argument.  We can use a distinguished UUO at a
2298  distinguished address to do something like sigreturn(); that'll
2299  have the effect of resuming the user thread's execution in
2300  the (pseudo-) signal context.
2301
2302  Since:
2303    a) we have miles of code in C and in Lisp that knows how to
2304    deal with Linux sigcontexts
2305    b) Linux sigcontexts contain a little more useful information
2306    (the DAR, DSISR, etc.) than their Darwin counterparts
2307    c) we have to create a sigcontext ourselves when calling out
2308    to the user thread: we aren't really generating a signal, just
2309    leveraging existing signal-handling code.
2310
2311  we create a Linux sigcontext struct.
2312
2313  Simple?  Hopefully, from the outside, it is ...
2314
2315  We want the process of passing a thread's own context to it to
2316  appear to be atomic: in particular, we don't want the GC to suspend
2317  a thread that's had an exception but has not yet had its user-level
2318  exception handler called, and we don't want the thread's exception
2319  context to be modified by a GC while the Mach handler thread is
2320  copying it around.  On Linux (and on Jaguar), we avoid this issue
2321  because (a) the kernel sets up the user-level signal handler and
2322  (b) the signal handler blocks signals (including the signal used
2323  by the GC to suspend threads) until tcr->xframe is set up.
2324
2325  The GC and the Mach server thread therefore contend for the lock
2326  "mach_exception_lock".  The Mach server thread holds the lock
2327  when copying exception information between the kernel and the
2328  user thread; the GC holds this lock during most of its execution
2329  (delaying exception processing until it can be done without
2330  GC interference.)
2331
2332*/
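/* In outline, one exception's round trip looks like this:

   user thread faults
     -> kernel sends an exception_raise message to the thread's port
     -> exception thread (mach_msg_server()/openmcl_exc_server()):
        catch_exception_raise() builds a pseudo-sigcontext on the user
        thread's stack (setup_signal_frame()) and aims its PC at
        pseudo_signal_handler()
     -> exception thread replies; kernel resumes the user thread
     -> user thread runs the (pseudo-)signal handler, then executes the
        pseudo_sigreturn UUO
     -> kernel sends another exception message; do_pseudo_sigreturn()
        restores the saved state and the user thread continues. */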
2333
2334#ifdef PPC64
2335#define C_REDZONE_LEN           320
2336#define C_STK_ALIGN             32
2337#else
2338#define C_REDZONE_LEN           224
2339#define C_STK_ALIGN             16
2340#endif
2341#define C_PARAMSAVE_LEN         64
2342#define C_LINKAGE_LEN           48
2343
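/* Round (a - b) down to a multiple of c; used below to carve aligned,
   non-overlapping pieces out of a downward-growing C stack. */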
2344#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
2345
2346void
2347fatal_mach_error(char *format, ...);
2348
2349#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);} } while (0)
2350
2351
2352void
2353restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2354{
2355  int i, j;
2356  kern_return_t kret;
2357  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2358
2359  /* Set the thread's FP state from the pseudosigcontext */
2360  kret = thread_set_state(thread,
2361                          PPC_FLOAT_STATE,
2362                          (thread_state_t)&(mc->__fs),
2363                          PPC_FLOAT_STATE_COUNT);
2364
2365  MACH_CHECK_ERROR("setting thread FP state", kret);
2366
2367  /* The thread'll be as good as new ... */
2368#ifdef PPC64
2369  kret = thread_set_state(thread,
2370                          PPC_THREAD_STATE64,
2371                          (thread_state_t)&(mc->__ss),
2372                          PPC_THREAD_STATE64_COUNT);
2373#else
2374  kret = thread_set_state(thread, 
2375                          MACHINE_THREAD_STATE,
2376                          (thread_state_t)&(mc->__ss),
2377                          MACHINE_THREAD_STATE_COUNT);
2378#endif
2379  MACH_CHECK_ERROR("setting thread state", kret);
2380} 
2381
2382/* This code runs in the exception handling thread, in response
2383   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2384   in response to a call to pseudo_sigreturn() from the specified
2385   user thread.)
2386   Find that context (the user thread's R3 points to it), then
2387   use that context to set the user thread's state.  When this
2388   function's caller returns, the Mach kernel will resume the
2389   user thread.
2390*/
2391
2392kern_return_t
2393do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2394{
2395  ExceptionInformation *xp;
2396
2397#ifdef DEBUG_MACH_EXCEPTIONS
2398  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2399#endif
2400  xp = tcr->pending_exception_context;
2401  if (xp) {
2402    tcr->pending_exception_context = NULL;
2403    tcr->valence = TCR_STATE_LISP;
2404    restore_mach_thread_state(thread, xp);
2405    raise_pending_interrupt(tcr);
2406  } else {
2407    Bug(NULL, "no xp here!\n");
2408  }
2409#ifdef DEBUG_MACH_EXCEPTIONS
2410  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2411#endif
2412  return KERN_SUCCESS;
2413} 
2414
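/* Copy the suspended thread's register state into a linux-style
   pseudo-sigcontext allocated below the red zone on that thread's own
   C stack.  If new_stack_top is non-NULL, it receives the new top of
   stack, whose first word is a valid backlink to the original frame. */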
2415ExceptionInformation *
2416create_thread_context_frame(mach_port_t thread, 
2417                            natural *new_stack_top)
2418{
2419#ifdef PPC64
2420  ppc_thread_state64_t ts;
2421#else
2422  ppc_thread_state_t ts;
2423#endif
2424  mach_msg_type_number_t thread_state_count;
2425  kern_return_t result;
2426  int i,j;
2427  ExceptionInformation *pseudosigcontext;
2428  MCONTEXT_T mc;
2429  natural stackp, backlink;
2430
2431#ifdef PPC64
2432  thread_state_count = PPC_THREAD_STATE64_COUNT;
2433  result = thread_get_state(thread,
2434                            PPC_THREAD_STATE64,
2435                            (thread_state_t)&ts,
2436                            &thread_state_count);
2437#else
2438  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2439  result = thread_get_state(thread, 
2440                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2441                            (thread_state_t)&ts,
2442                            &thread_state_count);
2443#endif
2444 
2445  if (result != KERN_SUCCESS) {
2446    get_tcr(true);
2447    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2448  }
2449  stackp = ts.__r1;
2450  backlink = stackp;
2451  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2452  stackp -= sizeof(*pseudosigcontext);
2453  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2454
2455  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2456  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2457  memmove(&(mc->__ss),&ts,sizeof(ts));
2458
2459  thread_state_count = PPC_FLOAT_STATE_COUNT;
2460  thread_get_state(thread,
2461                   PPC_FLOAT_STATE,
2462                   (thread_state_t)&(mc->__fs),
2463                   &thread_state_count);
2464
2465
2466#ifdef PPC64
2467  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2468#else
2469  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2470#endif
2471  thread_get_state(thread,
2472#ifdef PPC64
2473                   PPC_EXCEPTION_STATE64,
2474#else
2475                   PPC_EXCEPTION_STATE,
2476#endif
2477                   (thread_state_t)&(mc->__es),
2478                   &thread_state_count);
2479
2480
2481  UC_MCONTEXT(pseudosigcontext) = mc;
2482  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2483  stackp -= C_LINKAGE_LEN;
2484  *(natural *)ptr_from_lispobj(stackp) = backlink;
2485  if (new_stack_top) {
2486    *new_stack_top = stackp;
2487  }
2488  return pseudosigcontext;
2489}
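/* The frame built above, from higher addresses to lower:

     original r1
     [C_REDZONE_LEN bytes: the caller's red zone, left intact]
     ExceptionInformation (pseudosigcontext)
     MCONTEXT_T (mc): the __ss/__fs/__es thread state
     [C_PARAMSAVE_LEN bytes: parameter save area]
     [C_LINKAGE_LEN bytes: linkage area; backlink stored at the new r1] */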
2490
2491/*
2492  This code sets up the user thread so that it executes a "pseudo-signal
2493  handler" function when it resumes.  Create a linux sigcontext struct
2494  on the thread's stack and pass it as an argument to the pseudo-signal
2495  handler.
2496
2497  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2498  which will restore the thread's context.
2499
2500  If the handler invokes code that throws (or otherwise never sigreturn()'s
2501  to the context), that's fine.
2502
2503  Actually, check that: throw (and variants) may need to be careful and
2504  pop the tcr's xframe list until it's younger than any frame being
2505  entered.
2506*/
2507
2508int
2509setup_signal_frame(mach_port_t thread,
2510                   void *handler_address,
2511                   int signum,
2512                   int code,
2513                   TCR *tcr)
2514{
2515#ifdef PPC64
2516  ppc_thread_state64_t ts;
2517#else
2518  ppc_thread_state_t ts;
2519#endif
2520  mach_msg_type_number_t thread_state_count;
2521  ExceptionInformation *pseudosigcontext;
2522  int i, j, old_valence = tcr->valence;
2523  kern_return_t result;
2524  natural stackp;
2525
2526#ifdef DEBUG_MACH_EXCEPTIONS
2527  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2528#endif
2529  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2530  pseudosigcontext->uc_onstack = 0;
2531  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2532  tcr->pending_exception_context = pseudosigcontext;
2533  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2534 
2535
2536  /*
2537     We've now created a sigcontext on the thread's stack.  Set
2538     things up so that the handler is called (with appropriate
2539     args) when the thread's resumed.
2540  */
2541
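  /* r3..r6 become the four arguments to pseudo_signal_handler():
     (signum, context, tcr, old_valence).  LR is aimed at the
     pseudo_sigreturn UUO, so "returning" from the handler re-enters
     the exception machinery above. */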
2542  ts.__srr0 = (natural) handler_address;
2543  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2544  ts.__r1 = stackp;
2545  ts.__r3 = signum;
2546  ts.__r4 = (natural)pseudosigcontext;
2547  ts.__r5 = (natural)tcr;
2548  ts.__r6 = (natural)old_valence;
2549  ts.__lr = (natural)pseudo_sigreturn;
2550
2551
2552#ifdef PPC64
2553  ts.__r13 = xpGPR(pseudosigcontext,13);
2554  thread_set_state(thread,
2555                   PPC_THREAD_STATE64,
2556                   (thread_state_t)&ts,
2557                   PPC_THREAD_STATE64_COUNT);
2558#else
2559  thread_set_state(thread, 
2560                   MACHINE_THREAD_STATE,
2561                   (thread_state_t)&ts,
2562                   MACHINE_THREAD_STATE_COUNT);
2563#endif
2564#ifdef DEBUG_MACH_EXCEPTIONS
2565  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2566#endif
2567  return 0;
2568}
2569
2570
2571void
2572pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2573{
2574  signal_handler(signum, NULL, context, tcr, old_valence);
2575} 
2576
2577
2578int
2579thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2580{
2581#ifdef PPC64
2582  ppc_thread_state64_t ts;
2583#else
2584  ppc_thread_state_t ts;
2585#endif
2586  mach_msg_type_number_t thread_state_count;
2587
2588#ifdef PPC64
2589  thread_state_count = PPC_THREAD_STATE64_COUNT;
2590#else
2591  thread_state_count = PPC_THREAD_STATE_COUNT;
2592#endif
2593  thread_get_state(thread, 
2594#ifdef PPC64
2595                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2596#else
2597                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2598#endif
2599                   (thread_state_t)&ts,
2600                   &thread_state_count);
2601  if (enabled) {
2602    ts.__srr1 |= MSR_FE0_FE1_MASK;
2603  } else {
2604    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2605  }
2606  /*
2607     Hack-o-rama warning (isn't it about time for such a warning?):
2608     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2609     Our handler for lisp's use of pthread_kill() pushes a phony
2610     lisp frame on the stack and forces the context to resume at
2611     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2612     lisp frame contains the -real- address that process_interrupt
2613     should have returned to, and the fact that it's in a lisp
2614     frame should convince the GC to notice that address if it
2615     runs in the tiny time window between returning from our
2616     interrupt handler and ... here.
2617     If the top frame on the stack is a lisp frame, discard it
2618     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2619     just adjust ts.srr0 to skip over the UUO.
2620  */
2621  {
2622    lisp_frame *tos = (lisp_frame *)ts.__r1,
2623      *next_frame = tos->backlink;
2624   
2625    if (tos == (next_frame -1)) {
2626      ts.__srr0 = tos->savelr;
2627      ts.__r1 = (LispObj) next_frame;
2628    } else {
2629      ts.__srr0 += 4;
2630    }
2631  }
2632  thread_set_state(thread, 
2633#ifdef PPC64
2634                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2635#else
2636                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2637#endif
2638                   (thread_state_t)&ts,
2639#ifdef PPC64
2640                   PPC_THREAD_STATE64_COUNT
2641#else
2642                   PPC_THREAD_STATE_COUNT
2643#endif
2644                   );
2645
2646  return 0;
2647}
2648
2649/*
2650  This function runs in the exception handling thread.  It's
2651  called (by this precise name) from openmcl_exc_server() below, our
2652  minimal stand-in for the library function "exc_server()".
2653  (openmcl_exc_server() is called via mach_msg_server(), which waits
2654  for and dispatches on exception messages from the Mach kernel.)
2655
2656  This checks to see if the exception was caused by a pseudo_sigreturn()
2657  UUO; if so, it arranges for the thread to have its state restored
2658  from the specified context.
2659
2660  Otherwise, it tries to map the exception to a signal number and
2661  arranges that the thread run a "pseudo signal handler" to handle
2662  the exception.
2663
2664  Some exceptions could and should be handled here directly.
2665*/
2666
2667kern_return_t
2668catch_exception_raise(mach_port_t exception_port,
2669                      mach_port_t thread,
2670                      mach_port_t task, 
2671                      exception_type_t exception,
2672                      exception_data_t code_vector,
2673                      mach_msg_type_number_t code_count)
2674{
2675  int signum = 0, code = *code_vector, code1;
2676  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2677  kern_return_t kret;
2678
2679#ifdef DEBUG_MACH_EXCEPTIONS
2680  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2681#endif
2682
2683  if (
2684#if USE_MACH_EXCEPTION_LOCK
2685    pthread_mutex_trylock(mach_exception_lock) == 0
2686#else
2687    1
2688#endif
2689    ) {
2690    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2691      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2692    } 
2693    if ((exception == EXC_BAD_INSTRUCTION) &&
2694        (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2695        (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2696         (code1 == (int)enable_fp_exceptions) ||
2697         (code1 == (int)disable_fp_exceptions))) {
2698      if (code1 == (int)pseudo_sigreturn) {
2699        kret = do_pseudo_sigreturn(thread, tcr);
2700#if 0
2701      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2702#endif
2703       
2704      } else if (code1 == (int)enable_fp_exceptions) {
2705        kret = thread_set_fp_exceptions_enabled(thread, true);
2706      } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2707    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2708      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
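      /* Any non-KERN_SUCCESS reply declines the exception, so the
         kernel will offer it to the next (e.g. foreign) handler. */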
2709      kret = 17;
2710    } else {
2711      switch (exception) {
2712      case EXC_BAD_ACCESS:
2713        signum = SIGSEGV;
2714        break;
2715       
2716      case EXC_BAD_INSTRUCTION:
2717        signum = SIGILL;
2718        break;
2719     
2720      case EXC_SOFTWARE:
2721        if (code == EXC_PPC_TRAP) {
2722          signum = SIGTRAP;
2723        }
2724        break;
2725     
2726      case EXC_ARITHMETIC:
2727        signum = SIGFPE;
2728        break;
2729
2730      default:
2731        break;
2732      }
2733      if (signum) {
2734        kret = setup_signal_frame(thread,
2735                                  (void *)pseudo_signal_handler,
2736                                  signum,
2737                                  code,
2738                                  tcr);
2739#if 0
2740      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2741#endif
2742
2743      } else {
2744        kret = 17;
2745      }
2746    }
2747#if USE_MACH_EXCEPTION_LOCK
2748#ifdef DEBUG_MACH_EXCEPTIONS
2749    fprintf(stderr, "releasing Mach exception lock in exception thread\n");
2750#endif
2751    pthread_mutex_unlock(mach_exception_lock);
2752#endif
2753  } else {
2754    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2755#if 0
2756    fprintf(stderr, "deferring pending exception in 0x%x\n", tcr);
2757#endif
2758    kret = 0;
2759    if (tcr == gc_tcr) {
2760      int i;
2761      write(1, "exception in GC thread. Sleeping for 60 seconds\n",sizeof("exception in GC thread.  Sleeping for 60 seconds\n"));
2762      for (i = 0; i < 60; i++) {
2763        sleep(1);
2764      }
2765      _exit(EX_SOFTWARE);
2766    }
2767  }
2768  return kret;
2769}
2770
2771
2772
2773typedef struct {
2774  mach_msg_header_t Head;
2775  /* start of the kernel processed data */
2776  mach_msg_body_t msgh_body;
2777  mach_msg_port_descriptor_t thread;
2778  mach_msg_port_descriptor_t task;
2779  /* end of the kernel processed data */
2780  NDR_record_t NDR;
2781  exception_type_t exception;
2782  mach_msg_type_number_t codeCnt;
2783  integer_t code[2];
2784  mach_msg_trailer_t trailer;
2785} exceptionRequest;
2786
2787
2788boolean_t
2789openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2790{
2791  static NDR_record_t _NDR = {0};
2792  kern_return_t handled;
2793  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2794  exceptionRequest *req = (exceptionRequest *) in;
2795
2796  reply->NDR = _NDR;
2797
2798  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2799  out->msgh_remote_port = in->msgh_remote_port;
2800  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2801  out->msgh_local_port = MACH_PORT_NULL;
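  /* MIG convention: a reply's msgh_id is the request's msgh_id + 100. */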
2802  out->msgh_id = in->msgh_id+100;
2803
2804  /* Could handle other exception flavors in the range 2401-2403 */
2805
2806
2807  if (in->msgh_id != 2401) {
2808    reply->RetCode = MIG_BAD_ID;
2809    return FALSE;
2810  }
2811  handled = catch_exception_raise(req->Head.msgh_local_port,
2812                                  req->thread.name,
2813                                  req->task.name,
2814                                  req->exception,
2815                                  req->code,
2816                                  req->codeCnt);
2817  reply->RetCode = handled;
2818  return TRUE;
2819}
2820
2821/*
2822  The initial function for an exception-handling thread.
2823*/
2824
2825void *
2826exception_handler_proc(void *arg)
2827{
2828  extern boolean_t exc_server();
2829  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2830
2831  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2832  /* Should never return. */
2833  abort();
2834}
2835
2836
2837
2838mach_port_t
2839mach_exception_port_set()
2840{
2841  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2842  kern_return_t kret; 
2843  if (__exception_port_set == MACH_PORT_NULL) {
2844#if USE_MACH_EXCEPTION_LOCK
2845    mach_exception_lock = &_mach_exception_lock;
2846    pthread_mutex_init(mach_exception_lock, NULL);
2847#endif
2848    kret = mach_port_allocate(mach_task_self(),
2849                              MACH_PORT_RIGHT_PORT_SET,
2850                              &__exception_port_set);
2851    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2852    create_system_thread(0,
2853                         NULL,
2854                         exception_handler_proc, 
2855                         (void *)((natural)__exception_port_set));
2856  }
2857  return __exception_port_set;
2858}
2859
2860/*
2861  Set up a new thread to handle those exceptions specified by
2862  the mask "which".  This involves creating a special Mach
2863  message port, telling the Mach kernel to send exception
2864  messages for the calling thread to that port, and setting
2865  up a handler thread which listens for and responds to
2866  those messages.
2867
2868*/
2869
2870/*
2871  Establish the lisp thread's TCR as its exception port, and determine
2872  whether any other ports have been established by foreign code for
2873  exceptions that lisp cares about.
2874
2875  If this happens at all, it should happen on return from foreign
2876  code and on entry to lisp code via a callback.
2877
2878  This is a lot of trouble (and overhead) to support Java, or other
2879  embeddable systems that clobber their caller's thread exception ports.
2880 
2881*/
2882kern_return_t
2883tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2884{
2885  kern_return_t kret;
2886  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2887  int i;
2888  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2889  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2890  exception_mask_t mask = 0;
2891
2892  kret = thread_swap_exception_ports(thread,
2893                                     LISP_EXCEPTIONS_HANDLED_MASK,
2894                                     lisp_port,
2895                                     EXCEPTION_DEFAULT,
2896                                     THREAD_STATE_NONE,
2897                                     fxs->masks,
2898                                     &n,
2899                                     fxs->ports,
2900                                     fxs->behaviors,
2901                                     fxs->flavors);
2902  if (kret == KERN_SUCCESS) {
2903    fxs->foreign_exception_port_count = n;
2904    for (i = 0; i < n; i ++) {
2905      foreign_port = fxs->ports[i];
2906
2907      if ((foreign_port != lisp_port) &&
2908          (foreign_port != MACH_PORT_NULL)) {
2909        mask |= fxs->masks[i];
2910      }
2911    }
2912    tcr->foreign_exception_status = (int) mask;
2913  }
2914  return kret;
2915}
2916
2917kern_return_t
2918tcr_establish_lisp_exception_port(TCR *tcr)
2919{
2920  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2921}
2922
2923/*
2924  Do this when calling out to or returning from foreign code, if
2925  any conflicting foreign exception ports were established when we
2926  last entered lisp code.
2927*/
2928kern_return_t
2929restore_foreign_exception_ports(TCR *tcr)
2930{
2931  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2932 
2933  if (m) {
2934    MACH_foreign_exception_state *fxs  = 
2935      (MACH_foreign_exception_state *) tcr->native_thread_info;
2936    int i, n = fxs->foreign_exception_port_count;
2937    exception_mask_t tm;
2938
2939    for (i = 0; i < n; i++) {
2940      if ((tm = fxs->masks[i]) & m) {
2941        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2942                                   tm,
2943                                   fxs->ports[i],
2944                                   fxs->behaviors[i],
2945                                   fxs->flavors[i]);
2946      }
2947    }
2948  }
2949  return KERN_SUCCESS;
}
2950                                   
2951
2952/*
2953  This assumes that a Mach port (to be used as the thread's exception port) whose
2954  "name" matches the TCR's 32-bit address has already been allocated.
2955*/
2956
2957kern_return_t
2958setup_mach_exception_handling(TCR *tcr)
2959{
2960  mach_port_t
2961    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2962    target_thread = pthread_mach_thread_np((pthread_t)ptr_from_lispobj(tcr->osid)),
2963    task_self = mach_task_self();
2964  kern_return_t kret;
2965
2966  kret = mach_port_insert_right(task_self,
2967                                thread_exception_port,
2968                                thread_exception_port,
2969                                MACH_MSG_TYPE_MAKE_SEND);
2970  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2971
2972  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2973  if (kret == KERN_SUCCESS) {
2974    mach_port_t exception_port_set = mach_exception_port_set();
2975
2976    kret = mach_port_move_member(task_self,
2977                                 thread_exception_port,
2978                                 exception_port_set);
2979  }
2980  return kret;
2981}
2982
2983void
2984darwin_exception_init(TCR *tcr)
2985{
2986  void tcr_monitor_exception_handling(TCR*, Boolean);
2987  kern_return_t kret;
2988  MACH_foreign_exception_state *fxs = 
2989    calloc(1, sizeof(MACH_foreign_exception_state));
2990 
2991  tcr->native_thread_info = (void *) fxs;
2992
2993  if ((kret = setup_mach_exception_handling(tcr))
2994      != KERN_SUCCESS) {
2995    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
2996    terminate_lisp();
2997  }
2998  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
2999  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
3000}
3001
3002/*
3003  The tcr is the "name" of the corresponding thread's exception port.
3004  Destroying the port should remove it from all port sets of which it's
3005  a member (notably, the exception port set.)
3006*/
3007void
3008darwin_exception_cleanup(TCR *tcr)
3009{
3010  void *fxs = tcr->native_thread_info;
3011  extern Boolean use_mach_exception_handling;
3012
3013  if (fxs) {
3014    tcr->native_thread_info = NULL;
3015    free(fxs);
3016  }
3017  if (use_mach_exception_handling) {
3018    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3019    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3020  }
3021}
3022
3023
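/* Suspend the thread, then use thread_abort_safely() to back it out of
   any interruptible wait, so that its user-mode register state is
   consistent when we read it.  If the abort can't be done safely,
   resume the thread and try again. */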
3024Boolean
3025suspend_mach_thread(mach_port_t mach_thread)
3026{
3027  kern_return_t status;
3028  Boolean aborted = false;
3029 
3030  do {
3031    aborted = false;
3032    status = thread_suspend(mach_thread);
3033    if (status == KERN_SUCCESS) {
3034      status = thread_abort_safely(mach_thread);
3035      if (status == KERN_SUCCESS) {
3036        aborted = true;
3037      } else {
3038        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3039        thread_resume(mach_thread);
3040      }
3041    } else {
3042      return false;
3043    }
3044  } while (! aborted);
3045  return true;
3046}
3047
3048/*
3049  Only do this if pthread_kill indicated that the pthread isn't
3050  listening to signals anymore, as can happen as soon as pthread_exit()
3051  is called on Darwin.  The thread could still call out to lisp as it
3052  is exiting, so we need another way to suspend it in this case.
3053*/
3054Boolean
3055mach_suspend_tcr(TCR *tcr)
3056{
3057  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3058  ExceptionInformation *pseudosigcontext;
3059  Boolean result = false;
3060 
3061  result = suspend_mach_thread(mach_thread);
3062  if (result) {
3063    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3064    pseudosigcontext->uc_onstack = 0;
3065    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3066    tcr->suspend_context = pseudosigcontext;
3067  }
3068  return result;
3069}
3070
3071void
3072mach_resume_tcr(TCR *tcr)
3073{
3074  ExceptionInformation *xp;
3075  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3076 
3077  xp = tcr->suspend_context;
3078#ifdef DEBUG_MACH_EXCEPTIONS
3079  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3080          tcr, tcr->pending_exception_context);
3081#endif
3082  tcr->suspend_context = NULL;
3083  restore_mach_thread_state(mach_thread, xp);
3084#ifdef DEBUG_MACH_EXCEPTIONS
3085  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3086          tcr, tcr->pending_exception_context);
3087#endif
3088  thread_resume(mach_thread);
3089}
3090
3091void
3092fatal_mach_error(char *format, ...)
3093{
3094  va_list args;
3095  char s[512];
3096 
3097
3098  va_start(args, format);
3099  vsnprintf(s, sizeof(s),format, args);
3100  va_end(args);
3101
3102  Fatal("Mach error", s);
3103}
3104
3105void
3106pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3107{
3108  interrupt_handler(signum, NULL, context);
3109}
3110
3111int
3112mach_raise_thread_interrupt(TCR *target)
3113{
3114  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3115  kern_return_t kret;
3116  Boolean result = false;
3117  TCR *current = get_tcr(false);
3118  thread_basic_info_data_t info; 
3119  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3120
3121  LOCK(lisp_global(TCR_AREA_LOCK), current);
3122#if USE_MACH_EXCEPTION_LOCK
3123  pthread_mutex_lock(mach_exception_lock);
3124#endif
3125
3126  if (suspend_mach_thread(mach_thread)) {
3127    if (thread_info(mach_thread,
3128                    THREAD_BASIC_INFO,
3129                    (thread_info_t)&info,
3130                    &info_count) == KERN_SUCCESS) {
3131      if (info.suspend_count == 1) {
3132        if ((target->valence == TCR_STATE_LISP) &&
3133            (!target->unwinding) &&
3134            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3135          kret = setup_signal_frame(mach_thread,
3136                                    (void *)pseudo_interrupt_handler,
3137                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3138                                    0,
3139                                    target);
3140          if (kret == KERN_SUCCESS) {
3141            result = true;
3142          }
3143        }
3144      }
3145    }
3146    if (! result) {
3147      target->interrupt_pending = 1 << fixnumshift;
3148    }
3149    thread_resume(mach_thread);
3150   
3151  }
3152#if USE_MACH_EXCEPTION_LOCK
3153  pthread_mutex_unlock(mach_exception_lock);
3154#endif
3155  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3156  return 0;
3157}
3158
3159#endif