source: trunk/ccl/lisp-kernel/lisp-exceptions.c @ 870

Last change on this file since 870 was 870, checked in by gb, 16 years ago

mach_resume_tcr: it would be a good idea to actually resume the thread, not
just do the bookkeeping (a call to thread_resume() was missing.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 74.1 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformationPowerPC *);
#endif


#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
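/* MSR[FE0] is bit 20 and MSR[FE1] is bit 23 of the 32-bit MSR in IBM
   (MSB = bit 0) numbering, hence the masks above are formed by shifting
   0x80000000 right by 20 and 23. */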
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern unsigned lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(unsigned);


/* top 6 bits will be zero, subtag will be subtag_code_vector */
#define CV_HDR_MASK     (OP_MASK | subtagmask)
#define CV_HDR_VALUE    subtag_code_vector
#define codevec_hdr_p(value)    (((value) & CV_HDR_MASK) == CV_HDR_VALUE)
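/* Since a code vector's header word can't be mistaken for an instruction,
   the backward scans below (handle_fpux_binop, scan_for_instr) use
   codevec_hdr_p to stop safely at the start of the containing code vector. */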



void
allocation_failure(Boolean pointerp, unsigned size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %d bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(unsigned size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(unsigned size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

int
ProtectMemory(LogicalAddress addr, int nbytes)
{
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %d bytes at %x, errno = %d", nbytes, addr, status);
  }
  return status;
}

int
UnProtectMemory(LogicalAddress addr, int nbytes)
{
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  unsigned nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}



int
page_size = 4096;

extern void
zero_cache_lines(BytePtr, size_t, size_t);

void
zero_page(BytePtr start)
{
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
}



/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
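/* Illustratively (the exact encodings live in lisp-exceptions.h, so take
   this as a sketch rather than the literal code), the sequence being
   decoded looks something like:

     subf allocptr,nbytes,allocptr   ; or:  addi allocptr,allocptr,-N
     <ALLOC_TRAP_INSTRUCTION>        ; traps when allocptr passes allocbase

   so the size of the attempted allocation is recoverable from the
   instruction at PC-1. */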
int
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((int) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (int) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
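/* A sketch of the sort of consing sequence being emulated (illustrative
   only; the actual code is emitted by the lisp compiler):

     <allocation trap>                  ; allocptr now tagged fulltag_cons
     stw rY,cons.cdr(allocptr)          ; initialize the cdr
     stw rX,cons.car(allocptr)          ; initialize the car
     mr  result,allocptr                ; result register gets tagged cons
     <UNTAG_ALLOCPTR_INSTRUCTION>       ; clear allocptr's tag bits

   An interrupt can arrive anywhere in that window; this function simply
   replays the remaining steps against the saved context. */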
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean free_segments_zero_filled_by_OS = true;

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, unsigned need, Boolean extend, TCR *tcr)
{
  area *a;
  unsigned newlimit, oldlimit;


  a  = active_dynamic_area;
  oldlimit = (unsigned) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_heap_segment_size) +
              align_to_power_of_2(need, log2_heap_segment_size));
  if (newlimit > (unsigned) (a->high)) {
    if (extend) {
      resize_dynamic_heap(a->active, (newlimit-oldlimit)+lisp_heap_gc_threshold);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  if (!free_segments_zero_filled_by_OS) {
    BytePtr
      pageptr = (BytePtr)align_to_power_of_2(oldlimit,log2_heap_segment_size);
    unsigned i, npages = (newlimit-(unsigned)pageptr) >> 12;

    for (i = 0; i < npages; i++, pageptr += page_size) {
      zero_page(pageptr);
    }
  }
  return true;
}
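/* Worked example (illustrative; assumes align_to_power_of_2 rounds up and
   64KB heap segments, i.e. log2_heap_segment_size == 16): with
   oldlimit = 0x00345678 and need = 0x100,
     newlimit = 0x00350000 + 0x00010000 = 0x00360000,
   so allocbase..allocptr (oldlimit..newlimit) spans the freshly claimed
   room and the area's active pointer lands on a segment boundary. */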

Boolean
allocate_object(ExceptionInformation *xp,
                unsigned bytes_needed, 
                int disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    unprotect_area(oldspace_protected_area);
    gc_from_xp(xp);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

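/* allocptr moves downward as objects are allocated, so the bytes consumed
   since the last note was taken amount to (last - current); VOID_ALLOCPTR
   means "no active allocation region", in which case nothing is counted. */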
void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  unsigned cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  int disp = 0, allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((int) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (int) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0, (unsigned) xpPC(xp));
    return -1;
  }
  return -1;
}

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((int) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, arg_z) = box_fixnum(lisp_heap_gc_threshold);
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, arg_z) = box_fixnum(lisp_heap_gc_threshold);
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp);
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp);
        gc_from_xp(xp);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, (selector == GC_TRAP_FLAG_COMPRESS));
        if (err == noErr) {
          exit(0);
        }
        fatal_oserr(": save_application", err);
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}


protected_area_ptr
AllProtectedAreas = NULL;

/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just overflowed its soft limit.
     Signal the condition; the thread will keep trapping until all
     stacks are well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0, (unsigned) xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

void
zero_last_page(BytePtr newfree)
{
  BytePtr base = (BytePtr)truncate_to_power_of_2(newfree,12);
  if (base != newfree) {
    memset(newfree, 0, base+4096-newfree);
  }
}

/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
void
resize_dynamic_heap(BytePtr newfree, 
                    unsigned free_space_size)
{
  unsigned protbytes;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      grow_dynamic_area(newlimit-a->high);
    } else if ((newlimit + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
    }
  }

  protbytes = (((a->high)-newfree)&~(heap_segment_size-1));
  protptr = a->high-protbytes;

  free_segments_zero_filled_by_OS = false;
  if (!( nrs_GC_EVENT_STATUS_BITS.vcell & gc_retain_pages_bit)) {
    commit_pages(protptr, protbytes);
    free_segments_zero_filled_by_OS = true;
  }
  protbytes = (protptr-newfree);

  while (protbytes >= page_size) {
    protptr -= page_size;
    protbytes -= page_size;
    zero_page(protptr);
  }
  /*  memset(newfree,0,protbytes); */
}


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

int
gc_like_from_xp(ExceptionInformation *xp, int(*fun)(TCR *))
{
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext)), *other_tcr;
  ExceptionInformation* other_xp;
  int result;

  suspend_other_threads();
  if (lisp_global(GC_INHIBIT_COUNT) != 0) {
    resume_other_threads();
    return 0;
  }

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr);

  resume_other_threads();

  return result;
}


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr);
  newfree = a->active;
  newend = a->high;
  return ((oldfree-newfree)+(newend-oldend));
}

int
gc_from_xp(ExceptionInformation *xp)
{
  int status = gc_like_from_xp(xp, gc_from_tcr);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp)
{
  return gc_like_from_xp(xp, purify);
}

int
impurify_from_xp(ExceptionInformation *xp)
{
  return gc_like_from_xp(xp, impurify);
}

extern BytePtr
current_stack_pointer(void);

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  unsigned n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}

void
protect_oldspace(BytePtr end)
{
  protected_area_ptr p = oldspace_protected_area;
  BytePtr
    start = (BytePtr) ptr_from_lispobj(lisp_global(HEAP_START)),
    endpage = (BytePtr)ptr_from_lispobj(align_to_power_of_2(ptr_to_lispobj(end),12));
  int i, npages = (endpage-start)>>12;
  pageentry *pe = pagemap;

  p->start = start;
  p->end = endpage;
  p->protsize = endpage-start;
  p->nprot = 0;
  protect_area(p);

  for (i = 0; i < npages; i++, pe++) {
    pe->bits.modified = 0;
  }
}




protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_tenured_space_write
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.)  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  TCR *tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));

  if (! is_write_fault(xp, info)) {
    return -1;
  }

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((int) (xpDAR(xp)));
  }
  area = find_protected_area(addr);

  if (area == NULL) {           /* Don't know why this fault happened. */
    return -1;
  }

  handler = protection_handlers[area->why];

  return handler(xp, area, addr);
}


protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, unsigned protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first "delta" bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding "delta" bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}



OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(unsigned useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(unsigned useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);

}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((unsigned) spPtr) + sizeof(lisp_frame)) != ((unsigned) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif




/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that the case of two or more threads writing to the same
  page (before either of them can run this handler) is benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
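/* For example (illustrative only): if the faulting instruction is
   "stw temp0,4(arg_y)", temp0 holds a tagged cons or uvector lying
   between tenured_area->high and active_dynamic_area->high, and arg_y
   points into the protected (tenured) page, the store is performed right
   here and the PC advanced past it, sparing the EGC a pointless rescan. */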
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}



OSStatus
do_tenured_space_write(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
  /* Note that the page has been "newly" written to, then unprotect it
     and continue */
  unsigned 
    page_start = ((unsigned)addr) & ~4095, 
    pgno = (page_start - (unsigned)(area->start)) >> 12;

  if (lisp_global(IN_GC) != 0) {
    Bug(xp, "Tenured space protected in GC");
  }

  pagemap[pgno].bits.modified = 1;
  UnProtectMemory((LogicalAddress)page_start, 4096);
  if (is_ephemeral_node_store(xp, addr)) {
    adjust_exception_pc(xp,4);
  }
  return 0;
}


OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr();
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (unsigned)(xpPC(xp))+4);
}


int
altivec_present = 1;


OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info)
{
  unsigned oldMQ;
  OSStatus status = -1;
  pc program_counter;
  opcode instruction;


#ifdef LINUX
  oldMQ = xpGPR(xp,PT_MQ);
  xpGPR(xp,PT_MQ) = 0;
#endif
  program_counter = xpPC(xp);
  instruction = *program_counter;

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    if (tcr->suspend_total != 
        tcr->suspend_total_on_exception_entry) {
      /*
        This thread's been suspended since the exception occurred
        (possibly by the GC); that makes it hard to do anything
        meaningful about protection violations - there's no guarantee
        that the address in question is still protected, the object
        being written to may have moved, etc.  Retry the instruction.
        If it faults again, hopefully it'll eventually do so in an
        environment where this handler runs without having been
        suspended.  Note that a pending write to tenured space hasn't
        happened yet.
      */
      status = 0;
    } else {
      status = handle_protection_violation(xp, info);
    }
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_level = (-1 << fixnumshift);
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TWI_instruction(TO_GT,nargs,0),0, 0);
    tcr->interrupt_level = 0;
    tcr->interrupt_pending = 0;
    status = 0;
  }

#ifdef LINUX
  xpGPR(xp,PT_MQ) = oldMQ;
#endif
  return status;
}

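/* "delta" is a byte count, but xpPC() is an opcode (32-bit word) pointer,
   so the pointer is bumped by delta>>2 words; callers pass multiples of 4. */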
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, unsigned where)
{
  OSStatus err;
  unsigned *there = (unsigned *) where, instr, errnum;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0, (unsigned) there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;

}
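/* Note: the loop above relies on the faulting FPU instruction being found
   within TRAP_LOOKUP_TRIES words; if it never were, errnum would be used
   uninitialized.  In practice the caller hands us a PC adjacent to the
   FPU operation, so the scan terminates quickly. */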

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    status = handle_error(xp, errnum, rb, 0, (unsigned) where);
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1, (unsigned) where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, (unsigned)where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

unsigned
register_codevector_contains_pc (unsigned lisp_function, unsigned where)
{
  unsigned code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < where) && (where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap. Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, unsigned where,
                   unsigned arg1, unsigned arg2, unsigned arg3)
{
  unsigned code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  unsigned index = where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = (where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  unsigned arg1, unsigned arg2, unsigned arg3, unsigned arg4, unsigned arg5)
{
  sigset_t mask;
  unsigned  callback_ptr, i;
  area *a;

  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (unsigned size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}




/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where)
{
  unsigned  instr, err_arg1 = 0, err_arg2 = 0, err_arg3 = 0;
  int       ra, rs, fn_reg = 0;
  char *    error_msg = NULL;
  char      name[kNameBufLen];
  LispObj   cmain = nrs_CMAIN.vcell;
  Boolean   event_poll_p = false;
  int old_interrupt_level = 0;
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TWI or a TW instruction.
     It's a TWI instruction iff its major opcode is major_opcode_TWI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the lisp_global(CS_OVERFLOW_LIMIT)
     is CS_OVERFLOW_FORCE_LIMIT.  If we're not already in yellow zone mode, attempt to create
     a new thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard overflow limit,
     signal a "hard" stack overflow error (e.g., throw to toplevel as quickly as
     possible).  If we aren't in "yellow zone" mode, attempt to continue on another
     thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set lisp_global(CS_OVERFLOW_LIMIT)
     to its "real" value.  We're out of "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time something gets pushed
     on the control stack, so we should try to detect and handle all of these cases
     fairly quickly.  Of course, the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TW) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    unsigned 
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (unsigned) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (unsigned) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (unsigned) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);       
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, debug_entry_dbg,message);
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TW)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      /* cmain is a macptr, we can call back to lisp */
      if (the_trap == TWI_instruction(TO_GT,nargs,0)) {
        /* Disable interrupts if we're about to process one */
        event_poll_p = true;    /* remember to turn interrupts back on */
        tcr->interrupt_level = (-1 << fixnumshift);
        tcr->interrupt_pending = 0;
      }
      callback_for_trap(cmain, xp, (unsigned) where, (unsigned) the_trap,  0, 0);
      if (event_poll_p) {
        tcr->interrupt_level = 0;
        tcr->interrupt_pending = 0;
      }
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Decide, from the opcode and TO mask, whether a trap instruction is
   conditional (might not trap, depending on its operands) or
   unconditional (always traps).  */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tw = X_opcode_p(instr,major_opcode_X31,minor_opcode_TW);

#ifndef MACOS
  if (instr == LISP_BREAK_INSTRUCTION) {
    return 1;
  }
#endif
  if (is_tw || major_opcode_p(instr,major_opcode_TWI)) {
    /* A "tw" or "twi" instruction.  To be unconditional, the EQ bit must be set
       in the TO mask and either the register operands (if "tw") are the same or
       either both of the signed or both of the unsigned inequality bits
       must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tw && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw" or "twi".  Let debugger have it */
}
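/* Examples (illustrative): "twi TO_GT,nargs,0" traps only when nargs > 0,
   so it's conditional; "tw TO_LT|TO_GT|TO_EQ,rX,rX" traps on less-than,
   greater-than and equal, i.e. always, so it's unconditional. */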

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, unsigned where)
{
  LispObj   pname;
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, 
                                   ExceptionInformationPowerPC *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->suspend_total_on_exception_entry =
    tcr->suspend_total;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformationPowerPC *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (tcr->interrupt_level > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}

1852
1853void
1854signal_handler(int signum, siginfo_t *info, ExceptionInformationPowerPC  *context, TCR *tcr)
1855{
1856  xframe_list xframe_link;
1857  int old_valence;
1858#ifdef LINUX
1859
1860  tcr = (TCR *) xpGPR(context,rcontext);
1861 
1862  if (tcr == NULL) {
1863    tcr = get_tcr(false);
1864  } else {
1865    current_r2 = tcr->native_thread_info;
1866  }
1867
1868  /* The signal handler is entered with all signals (notably the
1869     thread_suspend signal) blocked.  Don't allow any other signal
1870     to preempt us until the TCR's xframe slot has been set to
1871     include the current exception
1872     context.
1873  */
1874
1875  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1876#endif
1877 
1878  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1879  if (noErr != PMCL_exception_handler(signum, context, tcr, info)) {
1880    char msg[512];
1881    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%08x, context->regs at #x%08x", signum, (int)xpPC(context), (int)xpGPRvector(context));
1882    lisp_Debugger(context, signum, msg);
1883  }
1884  unlock_exception_lock_in_handler(tcr);
1885
1886  /* This thread now looks like a thread that was suspended while
1887     executing lisp code.  If some other thread gets the exception
1888     lock and GCs, the context (this thread's suspend_context) will
1889     be updated.  (That's only of concern if it happens before we
1890     can return to the kernel/to the Mach exception handler).
1891  */
1892#ifdef LINUX
1893  exit_signal_handler(tcr, old_valence);
1894  raise_pending_interrupt(tcr);
1895#endif
1896}
1897
1898/*
1899  If it looks like we're in the middle of an atomic operation, make
1900  it seem as if that operation is either complete or hasn't started
1901  yet.
1902
1903  The cases handled include:
1904
1905  a) storing into a newly-allocated lisp frame on the stack.
1906  b) marking a newly-allocated TSP frame as containing "raw" data.
1907  c) consing: the GC has its own ideas about how this should be
1908     handled, but other callers would be best advised to back
1909     up or move forward, according to whether we're in the middle
1910     of allocating a cons cell or allocating a uvector.
1911  d) a STMW to the vsp
1912*/
1913void
1914pc_luser_xp(ExceptionInformationPowerPC *xp, TCR *tcr)
1915{
1916  pc program_counter = xpPC(xp);
1917  opcode instr = *program_counter;
1918  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1919  LispObj cur_allocptr = xpGPR(xp, allocptr);
1920  int allocptr_tag = fulltag_of(cur_allocptr);
1921 
1922
1923  if (instr == 0x918c0004) {    /* stw tsp,tsp_frame.type(tsp) */
1924    LispObj tsp_val = xpGPR(xp,tsp);
1925   
1926    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1927    adjust_exception_pc(xp, 4);
1928    return;
1929  }
1930 
1931  if (frame->backlink == (frame+1)) {
1932    if ((major_opcode_p(instr, major_opcode_STW)) && 
1933        (RA_field(instr) == sp) &&
1934        /* There are a few places in the runtime that store into
1935           a previously-allocated frame atop the stack when
1936           throwing values around.  We only care about the case
1937           where the frame was newly allocated, in which case
1938           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1939           a few instructions before the current program counter.
1940           (The whole point here is that a newly allocated frame
1941           might contain random values that we don't want the
1942           GC to see; a previously allocated frame should already
1943           be completely initialized.)
1944        */
1945        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1946         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1947         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1948      int disp = D_field(instr);
1949     
1950      if (disp < 16) {
1951        frame->savevsp = 0;
1952        if (disp < 12) {
1953          frame->savelr = 0;
1954          if (disp == 4) {
1955            frame->savefn = 0;
1956          }
1957        }
1958      }
1959      return;
1960    }
1961  }
1962
1963  if (allocptr_tag != tag_fixnum) {
1964    int disp = allocptr_displacement(xp);
1965
1966    if (disp) {
1967      /* We're "at" the alloc_trap.  If this is happening for the
1968         GC's benefit (the GC has just suspended the thread that
1969         owns xp), set allocbase to VOID_ALLOCPTR and pretend that
1970         allocptr was just decremented (by disp) from there; leave
1971         the PC pointing to the alloc_trap.  If this is happening
1972         in response to a PROCESS-INTERRUPT request, we can't have
1973         entered the trap handler yet: back out of the previous
1974         instruction (which subtracted "disp" from allocptr) and
1975         arrange to execute it again after the interrupt.
1976      */
1977      if (tcr) {
1978        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1979        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
1980        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1981      } else {
1982        xpGPR(xp, allocptr) = cur_allocptr + disp;
1983        xpPC(xp) -= 1;
1984      }
1985    } else {
1986      /* If we're already past the alloc_trap, finish allocating
1987         the object. */
1988      if (allocptr_tag == fulltag_cons) {
1989        finish_allocating_cons(xp);
1990      } else {
1991        if (allocptr_tag == fulltag_misc) {
1992          finish_allocating_uvector(xp);
1993        } else {
1994          Bug(xp, "what's being allocated here?");
1995        }
1996      }
1997    }
1998  }
1999
2000  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2001      (RA_field(instr) == vsp)) {
2002    int r;
2003    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2004   
2005    for (r = RS_field(instr); r <= 31; r++) {
2006      *vspptr++ = xpGPR(xp,r);
2007    }
2008    adjust_exception_pc(xp, 4);
2009  }
2010}
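
/* For reference, the D-form instruction layout assumed by the field
   accessors used above (major_opcode_p, RS_field, RA_field, D_field).
   The EX_-prefixed names are hypothetical, to avoid clashing with the
   kernel's own definitions; bit 0 is the most significant bit, per IBM
   numbering. */
#define EX_MAJOR_OPCODE(i) (((i) >> 26) & 0x3f)   /* bits 0..5   */
#define EX_RS(i)           (((i) >> 21) & 0x1f)   /* bits 6..10  */
#define EX_RA(i)           (((i) >> 16) & 0x1f)   /* bits 11..15 */
#define EX_D(i)            ((i) & 0xffff)         /* bits 16..31: a signed
                                                     displacement (not
                                                     sign-extended here) */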
2011
2012void
2013interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2014{
2015  TCR *tcr = get_interrupt_tcr(false);
2016  if (tcr) {
2017    if (tcr->interrupt_level < 0) {
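      /* Interrupts are disabled; just record a boxed fixnum 1 so the
         interrupt is taken when they're re-enabled. */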
2018      tcr->interrupt_pending = 1 << fixnumshift;
2019    } else {
2020      LispObj cmain = nrs_CMAIN.vcell;
2021
2022      if ((fulltag_of(cmain) == fulltag_misc) &&
2023          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2024        /*
2025           This thread can (allegedly) take an interrupt now.
2026           It's tricky to do that if we're executing
2027           foreign code (especially Linuxthreads code, much
2028           of which isn't reentrant.)
2029        */
2030        if (tcr->valence != TCR_STATE_LISP) {
2031          tcr->interrupt_level = (1 << fixnumshift);
2032        } else {
2033          xframe_list xframe_link;
2034          int old_valence;
2035         
2036          pc_luser_xp(context, NULL);
2037          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2038          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2039          PMCL_exception_handler(signum, context, tcr, info);
2040          unlock_exception_lock_in_handler(tcr);
2041          exit_signal_handler(tcr, old_valence);
2042        }
2043      }
2044    }
2045  }
2046#ifdef DARWIN
2047  DarwinSigReturn(context);
2048#endif
2049}
2050
2051
2052
2053void
2054install_signal_handler(int signo, __sighandler_t handler)
2055{
2056  struct sigaction sa;
2057 
2058  sa.sa_sigaction = (void *)handler;
2059  sigfillset(&sa.sa_mask);
2060  sa.sa_flags = SA_RESTART | SA_SIGINFO;
2061
2062  sigaction(signo, &sa, NULL);
2063}
2064
2065void
2066install_pmcl_exception_handlers()
2067{
2068#ifndef DARWIN
2069  extern int no_sigtrap;
2070  install_signal_handler(SIGILL, (__sighandler_t)signal_handler);
2071  if (no_sigtrap != 1) {
2072    install_signal_handler(SIGTRAP, (__sighandler_t)signal_handler);
2073  }
2074  install_signal_handler(SIGBUS,  (__sighandler_t)signal_handler);
2075  install_signal_handler(SIGSEGV, (__sighandler_t)signal_handler);
2076  install_signal_handler(SIGFPE, (__sighandler_t)signal_handler);
2077#endif
2078  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2079                         (__sighandler_t)interrupt_handler);
2080  signal(SIGPIPE, SIG_IGN);
2081}
2082
2083
2084void
2085unprotect_all_areas()
2086{
2087  protected_area_ptr p;
2088
2089  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2090    unprotect_area(p);
2091  }
2092}
2093
2094
2095
2096
2097void
2098exception_init()
2099{
2100  install_pmcl_exception_handlers();
2101}
2102
2103
2104void
2105Bug(ExceptionInformation *xp, const char *format, ...)
2106{
2107  va_list args;
2108  char s[512];
2109 
2110  va_start(args, format);
2111  vsnprintf(s, sizeof(s),format, args);
2112  va_end(args);
2113  lisp_Debugger(NULL, debug_entry_bug, s);
2114
2115}
2116
2117void
2118lisp_bug(char *string)
2119{
2120  Bug(NULL, "Bug in MCL-PPC system code:\n%s", string);
2121}
2122
2123
2124
2125#ifdef DARWIN
2126
2127lock_set_t mach_exception_lock_set;
2128
2129
2130#define LISP_EXCEPTIONS_HANDLED_MASK \
2131 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2132
2133/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2134#define NUM_LISP_EXCEPTIONS_HANDLED 4
2135
2136typedef struct {
2137  int foreign_exception_port_count;
2138  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2139  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2140  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2141  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2142} MACH_foreign_exception_state;
2143
2144
2145
2146
2147/*
2148  Mach's exception mechanism works a little better than its signal
2149  mechanism (and, not incidentally, it gets along with GDB a lot
2150  better.)
2151
2152  Initially, we install an exception handler to handle each native
2153  thread's exceptions.  This process involves creating a distinguished
2154  thread which listens for kernel exception messages on a set of
2155  0 or more thread exception ports.  As threads are created, they're
2156  added to that port set; a thread's exception port is destroyed
2157  (and therefore removed from the port set) when the thread exits.
2158
2159  A few exceptions can be handled directly in the handler thread;
2160  others require that we resume the user thread (and that the
2161  exception thread resumes listening for exceptions.)  The user
2162  thread might eventually want to return to the original context
2163  (possibly modified somewhat.)
2164
2165  As it turns out, the simplest way to force the faulting user
2166  thread to handle its own exceptions is to do pretty much what
2167  signal() does: the exception handling thread sets up a sigcontext
2168  on the user thread's stack and forces the user thread to resume
2169  execution as if a signal handler had been called with that
2170  context as an argument.  We can use a distinguished UUO at a
2171  distinguished address to do something like sigreturn(); that'll
2172  have the effect of resuming the user thread's execution in
2173  the (pseudo-) signal context.
2174
2175  Since:
2176    a) we have miles of code in C and in Lisp that knows how to
2177    deal with Linux sigcontexts
2178    b) Linux sigcontexts contain a little more useful information
2179    (the DAR, DSISR, etc.) than their Darwin counterparts
2180    c) we have to create a sigcontext ourselves when calling out
2181    to the user thread: we aren't really generating a signal, just
2182    leveraging existing signal-handling code.
2183
2184  we create a Linux sigcontext struct.
2185
2186  Simple?  Hopefully, from the outside, it is ...
2187
2188  We want the process of passing a thread's own context to it to
2189  appear to be atomic: in particular, we don't want the GC to suspend
2190  a thread that's had an exception but has not yet had its user-level
2191  exception handler called, and we don't want the thread's exception
2192  context to be modified by a GC while the Mach handler thread is
2193  copying it around.  On Linux (and on Jaguar), we avoid this issue
2194  because (a) the kernel sets up the user-level signal handler and
2195  (b) the signal handler blocks signals (including the signal used
2196  by the GC to suspend threads) until tcr->xframe is set up.
2197
2198  The GC and the Mach server thread therefore contend for the lock
2199  "mach_exception_lock_set[0]".  The Mach server thread holds the lock
2200  when copying exception information between the kernel and the
2201  user thread; the GC holds this lock during most of its execution
2202  (delaying exception processing until it can be done without
2203  GC interference.)
2204
2205*/
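
/* The flow described above, in one place (a summary, not new mechanism):

   1. A user thread faults; the kernel sends a message to the thread's
      exception port.
   2. The handler thread (exc_server() -> catch_exception_raise())
      maps the exception to a signal and builds a Linux-style sigcontext
      on the user thread's stack (setup_signal_frame()).
   3. The user thread resumes in pseudo_signal_handler().
   4. That handler returns to pseudo_sigreturn(), whose UUO re-enters
      the handler thread, which restores the saved context
      (do_pseudo_sigreturn()/restore_mach_thread_state()).
*/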
2206
2207#define C_REDZONE_LEN           224
2208#define C_STK_ALIGN                     16
2209#define C_PARAMSAVE_LEN         64
2210#define C_LINKAGE_LEN           48
2211#define TRUNC_DOWN(a,b,c)  (((((unsigned)(a))-(b))/(c)) * (c))
2212#include <mach/mach.h>
2213#include <mach/mach_error.h>
2214#include <mach/machine/thread_state.h>
2215#include <mach/machine/thread_status.h>
2216
2217void
2218fatal_mach_error(char *format, ...);
2219
2220#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", (context), (x));} } while (0)
2221
2222
2223void
2224restore_mach_thread_state(mach_port_t thread, ExceptionInformation *lss)
2225{
2226  int i, j;
2227#ifdef PPC64
2228  struct mcontext64 *mc = UC_MCONTEXT(lss);
2229#else
2230  struct mcontext * mc = UC_MCONTEXT(lss);
2231#endif
2232
2233  /* Set the thread's FP state from the lss */
2234  thread_set_state(thread,
2235                   PPC_FLOAT_STATE,
2236                   (thread_state_t)&(mc->fs),
2237                   PPC_FLOAT_STATE_COUNT);
2238
2239  /* The thread'll be as good as new ... */
2240#ifdef PPC64
2241  thread_set_state(thread,
2242                   PPC_THREAD_STATE64,
2243                   (thread_state_t)&(mc->ss),
2244                   PPC_THREAD_STATE64_COUNT);
2245#else
2246  thread_set_state(thread, 
2247                   MACHINE_THREAD_STATE,
2248                   (thread_state_t)&(mc->ss),
2249                   MACHINE_THREAD_STATE_COUNT);
2250#endif
2251} 
2252
2253/* This code runs in the exception handling thread, in response
2254   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2255   in response to a call to pseudo_sigreturn() from the specified
2256   user thread.)
2257   Find that context (the user thread's R3 points to it), then
2258   use that context to set the user thread's state.  When this
2259   function's caller returns, the Mach kernel will resume the
2260   user thread.
2261*/
2262
2263kern_return_t
2264do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2265{
2266  ExceptionInformation *xp;
2267
2268  lock_acquire(mach_exception_lock_set, 0);
2269  xp = tcr->pending_exception_context;
2270  tcr->pending_exception_context = NULL;
2271  tcr->valence = TCR_STATE_LISP;
2272  restore_mach_thread_state(thread, xp);
2273  raise_pending_interrupt(tcr);
2274  lock_release(mach_exception_lock_set, 0);
2275  return KERN_SUCCESS;
2276
2277} 
2278
2279ExceptionInformation *
2280create_thread_context_frame(mach_port_t thread, 
2281                            unsigned *new_stack_top)
2282{
2283#ifdef PPC64
2284  ppc_thread_state64_t ts;
2285#else
2286  ppc_thread_state_t ts;
2287#endif
2288  mach_msg_type_number_t thread_state_count;
2289  kern_return_t result;
2290  int i,j;
2291  ExceptionInformation *lss;
2292#ifdef PPC64
2293  struct mcontext64 *mc;
2294#else
2295  struct mcontext *mc;
2296#endif
2297  natural stackp, backlink;
2298
2299#ifdef PPC64
2300  thread_state_count = PPC_THREAD_STATE64_COUNT;
2301  thread_get_state(thread,
2302                   PPC_THREAD_STATE64,
2303                   (thread_state_t)&ts,
2304                   &thread_state_count);
2305#else
2306  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2307  thread_get_state(thread, 
2308                   MACHINE_THREAD_STATE,        /* GPRs, some SPRs  */
2309                   (thread_state_t)&ts,
2310                   &thread_state_count);
2311#endif
2312
2313  stackp = ts.r1;
2314  backlink = stackp;
2315  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2316  stackp -= sizeof(*lss);
2317  lss = (ExceptionInformation *) ptr_from_lispobj(stackp);
2318
2319  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2320#ifdef PPC64
2321  mc = (struct mcontext64 *) ptr_from_lispobj(stackp);
2322#else
2323  mc = (struct mcontext *) ptr_from_lispobj(stackp);
2324#endif
2325  bcopy(&ts,&(mc->ss),sizeof(ts));
2326
2327  thread_state_count = PPC_FLOAT_STATE_COUNT;
2328  thread_get_state(thread,
2329                   PPC_FLOAT_STATE,
2330                   (thread_state_t)&(mc->fs),
2331                   &thread_state_count);
2332
2333
2334  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2335  thread_get_state(thread,
2336                   PPC_EXCEPTION_STATE,
2337                   (thread_state_t)&(mc->es),
2338                   &thread_state_count);
2339
2340
2341  UC_MCONTEXT(lss) = mc;
2342  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2343  stackp -= C_LINKAGE_LEN;
2344  *(natural *)ptr_from_lispobj(stackp) = backlink;
2345  if (new_stack_top) {
2346    *new_stack_top = stackp;
2347  }
2348  return lss;
2349}
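
/* The frame built above, from higher to lower addresses (a sketch
   derived from the code; exact offsets depend on the struct sizes and
   the C_xxx constants):

     original r1 (saved as "backlink")
     C_REDZONE_LEN bytes, skipped
     ExceptionInformation (lss)
     struct mcontext (mc): ss/fs/es copied from the suspended thread
     C_PARAMSAVE_LEN + C_LINKAGE_LEN bytes
     new r1 -> word holding the backlink
*/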
2350
2351/*
2352  This code sets up the user thread so that it executes a "pseudo-signal
2353  handler" function when it resumes.  Create a linux sigcontext struct
2354  on the thread's stack and pass it as an argument to the pseudo-signal
2355  handler.
2356
2357  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2358  which will restore the thread's context.
2359
2360  If the handler invokes code that throws (or otherwise never sigreturn()'s
2361  to the context), that's fine.
2362
2363  Actually, check that: throw (and variants) may need to be careful and
2364  pop the tcr's xframe list until it's younger than any frame being
2365  entered.
2366*/
2367
2368int
2369setup_signal_frame(mach_port_t thread,
2370                   void *handler_address,
2371                   int signum,
2372                   int code,
2373                   TCR *tcr)
2374{
2375  ppc_thread_state_t ts;
2376  ppc_exception_state_t xs;
2377  mach_msg_type_number_t thread_state_count;
2378  ExceptionInformation *lss;
2379  int i, j;
2380  kern_return_t result;
2381  unsigned stackp;
2382
2383  lock_acquire(mach_exception_lock_set, 0);
2384
2385  lss = create_thread_context_frame(thread, &stackp);
2386  lss->uc_onstack = 0;
2387  lss->uc_sigmask = (sigset_t) 0;
2388  tcr->pending_exception_context = lss;
2389  tcr->suspend_total_on_exception_entry =
2390    tcr->suspend_total;
2391  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2392 
2393
2394  /*
2395     At this point, we've created a sigcontext on the thread's
2396     stack.  Set things up so that we call the handler (with appropriate
2397     args) when the thread's resumed.
2398  */
2399
2400  ts.srr0 = (int) handler_address;
2401  ts.srr1 = (int) xpMSR(lss) & ~MSR_FE0_FE1_MASK;
2402  ts.r1 = stackp;
2403  ts.r3 = signum;
2404  ts.r4 = (int)lss;
2405  ts.r5 = (int)tcr;
2406  ts.lr = (int)pseudo_sigreturn;
2407
2408
2409  thread_set_state(thread, 
2410                   MACHINE_THREAD_STATE,
2411                   (thread_state_t)&ts,
2412                   MACHINE_THREAD_STATE_COUNT);
2413  lock_release(mach_exception_lock_set, 0);
2414  return 0;
2415}
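
/* From the resumed thread's point of view, the register state built
   above behaves like the hypothetical call

     pseudo_signal_handler(signum, lss, tcr);   (srr0, r3..r5)

   with the link register aimed at pseudo_sigreturn(), so a normal
   return from the handler executes the distinguished UUO and traps
   back into the exception handling thread. */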
2416
2417
2418void
2419pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr)
2420{
2421  signal_handler(signum, NULL, context, tcr);
2422} 
2423
2424
2425int
2426thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2427{
2428  ppc_thread_state_t ts;
2429  mach_msg_type_number_t thread_state_count;
2430
2431  lock_acquire(mach_exception_lock_set, 0);
2432  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2433  thread_get_state(thread, 
2434                   MACHINE_THREAD_STATE,        /* GPRs, some SPRs  */
2435                   (thread_state_t)&ts,
2436                   &thread_state_count);
2437  if (enabled) {
2438    ts.srr1 |= MSR_FE0_FE1_MASK;
2439  } else {
2440    ts.srr1 &= ~MSR_FE0_FE1_MASK;
2441  }
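  /* Skip the 4-byte UUO (enable_fp_exceptions/disable_fp_exceptions)
     that raised this exception. */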
2442  ts.srr0 += 4;
2443  thread_set_state(thread, 
2444                   MACHINE_THREAD_STATE,
2445                   (thread_state_t)&ts,
2446                   MACHINE_THREAD_STATE_COUNT);
2447
2448  lock_release(mach_exception_lock_set, 0);
2449  return 0;
2450}
2451
2452/*
2453  This function runs in the exception handling thread.  It's
2454  called (by this precise name) from the library function "exc_server()"
2455  when an exception message arrives on a thread's exception port.
2456  (exc_server() is called via mach_msg_server(), a function that
2457  waits for and dispatches on exception messages from the Mach kernel.)
2458
2459  This checks to see if the exception was caused by a pseudo_sigreturn()
2460  UUO; if so, it arranges for the thread to have its state restored
2461  from the specified context.
2462
2463  Otherwise, it tries to map the exception to a signal number and
2464  arranges that the thread run a "pseudo signal handler" to handle
2465  the exception.
2466
2467  Some exceptions could and should be handled here directly.
2468*/
2469
2470kern_return_t
2471catch_exception_raise(mach_port_t exception_port,
2472                      mach_port_t thread,
2473                      mach_port_t task, 
2474                      exception_type_t exception,
2475                      exception_data_t code_vector,
2476                      mach_msg_type_number_t code_count)
2477{
2478  int signum = 0, code = *code_vector, code1;
2479
2480  if ((exception == EXC_BAD_INSTRUCTION) &&
2481      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2482      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2483       (code1 == (int)enable_fp_exceptions) ||
2484       (code1 == (int)disable_fp_exceptions))) {
2485    if (code1 == (int)pseudo_sigreturn) {
2486      return do_pseudo_sigreturn(thread, (TCR *) exception_port);
2487    }
2488    if (code1 == (int)enable_fp_exceptions) {
2489      return thread_set_fp_exceptions_enabled(thread, true);
2490    }
2491    return thread_set_fp_exceptions_enabled(thread, false);
2492  }
2493  switch (exception) {
2494  case EXC_BAD_ACCESS:
2495    signum = SIGSEGV;
2496    break;
2497
2498  case EXC_BAD_INSTRUCTION:
2499    signum = SIGILL;
2500    break;
2501
2502  case EXC_SOFTWARE:
2503    if (code == EXC_PPC_TRAP) {
2504      signum = SIGTRAP;
2505    }
2506    break;
2507
2508  case EXC_ARITHMETIC:
2509    signum = SIGFPE;
2510    break;
2511
2512  default:
2513    break;
2514  }
2515  if (signum) {
2516    return setup_signal_frame(thread,
2517                              (void *)pseudo_signal_handler,
2518                              signum,
2519                              code,
2520                              (TCR *) exception_port);
2521  }
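  /* Not an exception we handle; returning any value other than
     KERN_SUCCESS tells the kernel (via exc_server()'s reply) that this
     handler declined the message. */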
2522  return 17;
2523}
2524
2525
2526/*
2527  The initial function for an exception-handling thread.
2528*/
2529
2530void *
2531exception_handler_proc(void *arg)
2532{
2533  extern boolean_t exc_server();
2534  mach_port_t p = (mach_port_t) arg;
2535
2536  mach_msg_server(exc_server, 256, p, 0);
2537  /* Should never return. */
2538  abort();
2539}
2540
2541
2542mach_port_t
2543mach_exception_port_set()
2544{
2545  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2546  kern_return_t kret; 
2547  if (__exception_port_set == MACH_PORT_NULL) {
2548    lock_set_create(mach_task_self(), 
2549                    &mach_exception_lock_set,
2550                    1,
2551                    SYNC_POLICY_FIFO);
2552
2553    kret = mach_port_allocate(mach_task_self(),
2554                              MACH_PORT_RIGHT_PORT_SET,
2555                              &__exception_port_set);
2556    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2557    create_system_thread(0,
2558                         NULL,
2559                         exception_handler_proc, 
2560                         (void *)__exception_port_set);
2561  }
2562  return __exception_port_set;
2563}
2564
2565/*
2566  Set up a new thread to handle those exceptions specified by
2567  the mask "which".  This involves creating a special Mach
2568  message port, telling the Mach kernel to send exception
2569  messages for the calling thread to that port, and setting
2570  up a handler thread which listens for and responds to
2571  those messages.
2572
2573*/
2574
2575/*
2576  Establish the lisp thread's TCR as its exception port, and determine
2577  whether any other ports have been established by foreign code for
2578  exceptions that lisp cares about.
2579
2580  If this happens at all, it should happen on return from foreign
2581  code and on entry to lisp code via a callback.
2582
2583  This is a lot of trouble (and overhead) to support Java, or other
2584  embeddable systems that clobber their caller's thread exception ports.
2585 
2586*/
2587kern_return_t
2588tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2589{
2590  kern_return_t kret;
2591  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2592  int i, n = NUM_LISP_EXCEPTIONS_HANDLED;
2593  mach_port_t lisp_port = (mach_port_t)tcr, foreign_port;
2594  exception_mask_t mask = 0;
2595
2596  kret = thread_swap_exception_ports(thread,
2597                                     LISP_EXCEPTIONS_HANDLED_MASK,
2598                                     lisp_port,
2599                                     EXCEPTION_DEFAULT,
2600                                     THREAD_STATE_NONE,
2601                                     fxs->masks,
2602                                     &n,
2603                                     fxs->ports,
2604                                     fxs->behaviors,
2605                                     fxs->flavors);
2606  if (kret == KERN_SUCCESS) {
2607    fxs->foreign_exception_port_count = n;
2608    for (i = 0; i < n; i ++) {
2609      foreign_port = fxs->ports[i];
2610
2611      if ((foreign_port != lisp_port) &&
2612          (foreign_port != MACH_PORT_NULL)) {
2613        mask |= fxs->masks[i];
2614      }
2615    }
2616    tcr->foreign_exception_status = (int) mask;
2617  }
2618  return kret;
2619}
2620
2621kern_return_t
2622tcr_establish_lisp_exception_port(TCR *tcr)
2623{
2624  return tcr_establish_exception_port(tcr, (mach_port_t)tcr->native_thread_id);
2625}
2626
2627/*
2628  Do this when calling out to or returning from foreign code, if
2629  any conflicting foreign exception ports were established when we
2630  last entered lisp code.
2631*/
2632kern_return_t
2633restore_foreign_exception_ports(TCR *tcr)
2634{
2635  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2636 
2637  if (m) {
2638    MACH_foreign_exception_state *fxs  = 
2639      (MACH_foreign_exception_state *) tcr->native_thread_info;
2640    int i, n = fxs->foreign_exception_port_count;
2641    exception_mask_t tm;
2642
2643    for (i = 0; i < n; i++) {
2644      if ((tm = fxs->masks[i]) & m) {
2645        thread_set_exception_ports((mach_port_t)tcr->native_thread_id,
2646                                   tm,
2647                                   fxs->ports[i],
2648                                   fxs->behaviors[i],
2649                                   fxs->flavors[i]);
2650      }
2651    }
2652  }
2653}
2654  return KERN_SUCCESS;
2655}
2656kern_return_t
2657setup_mach_exception_handling(TCR *tcr)
2658{
2659  mach_port_t
2660    thread_exception_port = (mach_port_t)tcr,
2661    target_thread = pthread_mach_thread_np((pthread_t)ptr_from_lispobj(tcr->osid)),
2662    task_self = mach_task_self();
2663  kern_return_t kret;
2664
2665  kret = mach_port_allocate_name(task_self,
2666                                 MACH_PORT_RIGHT_RECEIVE,
2667                                 thread_exception_port);
2668
2669  MACH_CHECK_ERROR("naming exception_port",kret);
2670
2671  kret = mach_port_insert_right(task_self,
2672                                thread_exception_port,
2673                                thread_exception_port,
2674                                MACH_MSG_TYPE_MAKE_SEND);
2675  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2676
2677  kret = tcr_establish_exception_port(tcr, (mach_port_t) tcr->native_thread_id);
2678  if (kret == KERN_SUCCESS) {
2679    mach_port_t exception_port_set = mach_exception_port_set();
2680
2681    kret = mach_port_move_member(task_self,
2682                                 thread_exception_port,
2683                                 exception_port_set);
2684  }
2685  return kret;
2686}
2687
2688void
2689darwin_exception_init(TCR *tcr)
2690{
2691  void tcr_monitor_exception_handling(TCR*, Boolean);
2692  kern_return_t kret;
2693  MACH_foreign_exception_state *fxs = 
2694    calloc(1, sizeof(MACH_foreign_exception_state));
2695 
2696  tcr->native_thread_info = (void *) fxs;
2697
2698  if ((kret = setup_mach_exception_handling(tcr))
2699      != KERN_SUCCESS) {
2700    fprintf(stderr, "Couldn't set up exception handler - error = %d\n", kret);
2701    terminate_lisp();
2702  }
2703  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
2704  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
2705}
2706
2707/*
2708  The tcr is the "name" of the corresponding thread's exception port.
2709  Destroying the port should remove it from all port sets of which it's
2710  a member (notably, the exception port set.)
2711*/
2712void
2713darwin_exception_cleanup(TCR *tcr)
2714{
2715  void *fxs = tcr->native_thread_info;
2716
2717  if (fxs) {
2718    tcr->native_thread_info = NULL;
2719    free(fxs);
2720  }
2721  mach_port_destroy(mach_task_self(),(mach_port_t)tcr);
2722}
2723
2724
2725/*
2726  Only do this if pthread_kill indicated that the pthread isn't
2727  listening to signals anymore, as can happen as soon as pthread_exit()
2728  is called on Darwin.  The thread could still call out to lisp as it
2729  is exiting, so we need another way to suspend it in this case.
2730*/
2731Boolean
2732mach_suspend_tcr(TCR *tcr)
2733{
2734  mach_port_t mach_thread = (mach_port_t) tcr->native_thread_id;
2735  kern_return_t status = thread_suspend(mach_thread);
2736  ExceptionInformation *lss;
2737 
2738  if (status != KERN_SUCCESS) {
2739    return false;
2740  }
2741  lock_acquire(mach_exception_lock_set, 0);
2742  lss = create_thread_context_frame(mach_thread, NULL);
2743  lss->uc_onstack = 0;
2744  lss->uc_sigmask = (sigset_t) 0;
2745  tcr->suspend_context = lss;
2746  tcr->suspend_total++;
2747  lock_release(mach_exception_lock_set, 0);
2748  return true;
2749}
2750
2751void
2752mach_resume_tcr(TCR *tcr)
2753{
2754  ExceptionInformation *xp;
2755  mach_port_t mach_thread = (mach_port_t)(tcr->native_thread_id);
2756 
2757  lock_acquire(mach_exception_lock_set, 0);
2758  xp = tcr->suspend_context;
2759  tcr->suspend_context = NULL;
2760  restore_mach_thread_state(mach_thread, xp);
2761  thread_resume(mach_thread);
2762  lock_release(mach_exception_lock_set,0);
2763}
2764
2765void
2766fatal_mach_error(char *format, ...)
2767{
2768  va_list args;
2769  char s[512];
2770 
2771
2772  va_start(args, format);
2773  vsnprintf(s, sizeof(s),format, args);
2774  va_end(args);
2775
2776  Fatal("Mach error", s);
2777}
2778
2779#endif