source: trunk/ccl/lisp-kernel/lisp-exceptions.c @ 647

Last change on this file since 647 was 647, checked in by gb, 16 years ago

Fix MACH_CHECK_ERROR.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 73.5 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformationPowerPC *);
#endif


#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
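
/* For reference: these shifts give MSR_FE0_MASK == 0x00000800 and
   MSR_FE1_MASK == 0x00000100.  FE0 and FE1 are bits 20 and 23 of the
   32-bit MSR in IBM's big-endian bit numbering, counted from the MSB,
   which is why they're expressed as right shifts of 0x80000000. */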
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern unsigned lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(unsigned);


/* top 6 bits will be zero, subtag will be subtag_code_vector */
#define CV_HDR_MASK     (OP_MASK | subtagmask)
#define CV_HDR_VALUE    subtag_code_vector
#define codevec_hdr_p(value)    (((value) & CV_HDR_MASK) == CV_HDR_VALUE)


void
allocation_failure(Boolean pointerp, unsigned size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %d bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(unsigned size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(unsigned size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

int
ProtectMemory(LogicalAddress addr, int nbytes)
{
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %d bytes at %x, errno = %d", nbytes, addr, status);
  }
  return status;
}

int
UnProtectMemory(LogicalAddress addr, int nbytes)
{
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  unsigned nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}



int
page_size = 4096;

extern void
zero_cache_lines(BytePtr, size_t, size_t);

void
zero_page(BytePtr start)
{
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
}



/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
int
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((int) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (int) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
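
/* For concreteness, a sketch of the inline allocation sequence this
   decodes (the mnemonics are an assumption inferred from the two
   patterns matched above; only the masks/fields are authoritative):

       subf allocptr,rN,allocptr    ; rN holds a (large) byte count
   or  addi allocptr,allocptr,-N    ; small, constant byte count
       <ALLOC_TRAP_INSTRUCTION>     ; traps when allocptr < allocbase

   so the decrement can be recovered either from the RA operand of the
   subf or from the sign-extended immediate field of the addi. */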


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
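
/* A sketch of the consing sequence being emulated above (register
   names and exact mnemonics are assumptions; the instruction-matching
   constants used in the loop are what's actually authoritative):

       <allocation trap survived; allocptr still tagged fulltag_cons>
       stw rX,cons.cdr(allocptr)    ; STORE_CDR_ALLOCPTR_INSTRUCTION
       stw rY,cons.car(allocptr)    ; STORE_CAR_ALLOCPTR_INSTRUCTION
       mr  arg_z,allocptr           ; result register <- tagged cons
       <UNTAG_ALLOCPTR_INSTRUCTION> ; clears allocptr's tag bits

   The loop consumes one such instruction per iteration until the
   untagging instruction is reached. */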

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}
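
/* The corresponding uvector sequence, again a hedged sketch; only the
   header store and the untagging instruction are matched explicitly,
   and anything else is treated as "result <- allocptr":

       <allocation trap survived; allocptr tagged fulltag_misc>
       stw rH,0(allocptr)            ; store the uvector header
       mr  arg_z,allocptr            ; result <- tagged uvector
       <UNTAG_ALLOCPTR_INSTRUCTION>
*/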


Boolean free_segments_zero_filled_by_OS = true;

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, unsigned need, Boolean extend, TCR *tcr)
{
  area *a;
  unsigned newlimit, oldlimit;


  a  = active_dynamic_area;
  oldlimit = (unsigned) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_heap_segment_size) +
              align_to_power_of_2(need, log2_heap_segment_size));
  if (newlimit > (unsigned) (a->high)) {
    if (extend) {
      resize_dynamic_heap(a->active, (newlimit-oldlimit)+lisp_heap_gc_threshold);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  if (!free_segments_zero_filled_by_OS) {
    BytePtr
      pageptr = (BytePtr)align_to_power_of_2(oldlimit,log2_heap_segment_size);
    unsigned i, npages = (newlimit-(unsigned)pageptr) >> 12;

    for (i = 0; i < npages; i++, pageptr += page_size) {
      zero_page(pageptr);
    }
  }
  return true;
}
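
/* Worked example of the limit arithmetic above, assuming (consistently
   with its use here) that align_to_power_of_2(x,n) rounds x up to a
   multiple of 2^n, and assuming 64KB segments for the illustration:
   oldlimit = 0x12345 and need = 0x100 give
       newlimit = 0x20000 + 0x10000 = 0x30000;
   allocptr is set to newlimit and allocbase to oldlimit, so subsequent
   inline allocations decrement allocptr back down toward allocbase. */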

Boolean
allocate_object(ExceptionInformation *xp,
                unsigned bytes_needed, 
                int disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    unprotect_area(oldspace_protected_area);
    gc_from_xp(xp);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
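
/* Allocation decrements allocptr, so the amount consed since the last
   segment refill is (last - current).  Illustrative numbers: if
   last_allocptr was 0x30000 and allocptr is now 0x2ff80, then 0x80
   bytes are added to tcr->bytes_allocated. */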

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  unsigned cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  int disp = 0, allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((int) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (int) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0, (unsigned) xpPC(xp));
    return -1;
  }
  return -1;
}
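
/* Sketch of the displacement arithmetic in the fulltag_misc case
   above: to allocate a uvector of total size S with a misc-tagged
   result, the inline sequence decrements allocptr by (S - fulltag_misc),
   so disp == -(S - fulltag_misc) and
       bytes_needed = (-disp) + fulltag_misc
   recovers S.  The cons case is the same computation with sizeof(cons)
   and fulltag_cons, precomputed. */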

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((int) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, arg_z) = box_fixnum(lisp_heap_gc_threshold);
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, arg_z) = box_fixnum(lisp_heap_gc_threshold);
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp);
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp);
        gc_from_xp(xp);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, (selector == GC_TRAP_FLAG_COMPRESS));
        if (err == noErr) {
          exit(0);
        }
        fatal_oserr(": save_application", err);
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}


protected_area_ptr
AllProtectedAreas = NULL;

/*
  This does a linear search.  Areas aren't created all that often;
  if they become very numerous, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* A stack just overflowed its soft limit.  Keep forcing
     stack-overflow traps for the current thread until all of its
     stacks are well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0, (unsigned) xpPC(xp));
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    protect_area(a->softprot);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

void
zero_last_page(BytePtr newfree)
{
  BytePtr base = (BytePtr)truncate_to_power_of_2(newfree,12);
  if (base != newfree) {
    memset(newfree, 0, base+4096-newfree);
  }
}
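
/* Example with illustrative addresses: newfree = 0x12340 lies on the
   page based at 0x12000 (truncate_to_power_of_2(newfree,12)), so the
   memset clears the 0xcc0 bytes from 0x12340 up to the page end at
   0x13000.  If newfree is already page-aligned, nothing is cleared. */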

/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
void
resize_dynamic_heap(BytePtr newfree, 
                    unsigned free_space_size)
{
  unsigned protbytes;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      grow_dynamic_area(newlimit-a->high);
    } else if ((newlimit + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
    }
  }

  protbytes = (((a->high)-newfree)&~(heap_segment_size-1));
  protptr = a->high-protbytes;

  free_segments_zero_filled_by_OS = false;
  if (!( nrs_GC_EVENT_STATUS_BITS.vcell & gc_retain_pages_bit)) {
    commit_pages(protptr, protbytes);
    free_segments_zero_filled_by_OS = true;
  }
  protbytes = (protptr-newfree);

  while (protbytes >= page_size) {
    protptr -= page_size;
    protbytes -= page_size;
    zero_page(protptr);
  }
  /*  memset(newfree,0,protbytes); */
}



void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

int
gc_like_from_xp(ExceptionInformation *xp, int(*fun)(TCR *))
{
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext)), *other_tcr;
  ExceptionInformation* other_xp;
  int result;

  suspend_other_threads();
  if (lisp_global(GC_INHIBIT_COUNT) != 0) {
    resume_other_threads();
    return 0;
  }

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }


  result = fun(tcr);

  resume_other_threads();

  return result;
}


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr);
  newfree = a->active;
  newend = a->high;
  return ((oldfree-newfree)+(newend-oldend));
}

int
gc_from_xp(ExceptionInformation *xp)
{
  return gc_like_from_xp(xp, gc_from_tcr);
}

int
purify_from_xp(ExceptionInformation *xp)
{
  return gc_like_from_xp(xp, purify);
}

int
impurify_from_xp(ExceptionInformation *xp)
{
  return gc_like_from_xp(xp, impurify);
}

extern BytePtr
current_stack_pointer(void);

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  unsigned n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}

void
protect_oldspace(BytePtr end)
{
  protected_area_ptr p = oldspace_protected_area;
  BytePtr
    start = (BytePtr) ptr_from_lispobj(lisp_global(HEAP_START)),
    endpage = (BytePtr)ptr_from_lispobj(align_to_power_of_2(ptr_to_lispobj(end),12));
  int i, npages = (endpage-start)>>12;
  pageentry *pe = pagemap;

  p->start = start;
  p->end = endpage;
  p->protsize = endpage-start;
  p->nprot = 0;
  protect_area(p);

  for (i = 0; i < npages; i++, pe++) {
    pe->bits.modified = 0;
  }
}




protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_tenured_space_write
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  TCR *tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));

  if (! is_write_fault(xp, info)) {
    return -1;
  }

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((int) (xpDAR(xp)));
  }
  area = find_protected_area(addr);

  if (area == NULL) {           /* Don't know why this fault happened. */
    return -1;
  }

  handler = protection_handlers[area->why];

  return handler(xp, area, addr);
}


protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, unsigned protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first delta bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding delta bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}
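
/* Worked example with illustrative numbers: for a guard area with
   start = 0x1000, end = 0x3000, protsize = 0x1000,
   unprotect_area_prefix(area, 0x800) slides start up to 0x1800 and,
   since 0x1800 + 0x1000 <= 0x3000, re-protects the remaining window;
   protect_area_prefix(area, 0x800) slides start back down to 0x1000
   and re-protects, growing the guarded prefix again. */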



OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(unsigned useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(unsigned useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((unsigned) spPtr) + sizeof(lisp_frame)) != ((unsigned) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif




/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = (TCR *) ptr_from_lispobj(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
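
/* Sketch of an emulated case: for a faulting "stw rS,d(rA)" where rS
   is a node register (rS >= fn) holding a cons or uvector whose
   address is above tenured_area->high (i.e., in an ephemeral
   generation), the store is performed right here, and the caller
   (do_tenured_space_write) then advances the PC past it so the
   instruction isn't re-executed after the page is unprotected. */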



OSStatus
do_tenured_space_write(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
  /* Note that the page has been "newly" written to, then unprotect it
     and continue */
  unsigned 
    page_start = ((unsigned)addr) & ~4095, 
    pgno = (page_start - (unsigned)(area->start)) >> 12;

  if (lisp_global(IN_GC) != 0) {
    Bug(xp, "Tenured space protected in GC");
  }

  pagemap[pgno].bits.modified = 1;
  UnProtectMemory((LogicalAddress)page_start, 4096);
  if (is_ephemeral_node_store(xp, addr)) {
    adjust_exception_pc(xp,4);
  }
  return 0;
}



OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr();
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (unsigned)(xpPC(xp))+4);
}


int
altivec_present = 1;



OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info)
{
  unsigned oldMQ;
  OSStatus status = -1;
  pc program_counter;
  opcode instruction;


#ifdef LINUX
  oldMQ = xpGPR(xp,PT_MQ);
  xpGPR(xp,PT_MQ) = 0;
#endif
  program_counter = xpPC(xp);
  instruction = *program_counter;

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    if (tcr->suspend_total != 
        tcr->suspend_total_on_exception_entry) {
      /*
        This thread's been suspended since the exception occurred
        (possibly by the GC); that makes it hard to do anything
        meaningful about protection violations - there's no guarantee
        that the address in question is still protected, the object
        being written to may have moved, etc.  Retry the instruction.
        If it faults again, hopefully it'll eventually do so in an
        environment where this handler runs without having been
        suspended.  Note that a pending write to tenured space hasn't
        happened yet.
      */
      status = 0;
    } else {
      status = handle_protection_violation(xp, info);
    }
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_level = (-1 << fixnumshift);
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TWI_instruction(TO_GT,nargs,0),0, 0);
    tcr->interrupt_level = 0;
    tcr->interrupt_pending = 0;
    status = 0;
  }

#ifdef LINUX
  xpGPR(xp,PT_MQ) = oldMQ;
#endif
  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
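
/* xpPC() denotes an (opcode *), so the byte delta is scaled to words:
   adjust_exception_pc(xp, 4) advances the saved PC by one 32-bit
   instruction, which is how the handlers above skip a trap they've
   fully handled. */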


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, unsigned where)
{
  OSStatus err;
  unsigned *there = (unsigned *) where, instr, errnum;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
  if (i < 0) {
    /* Scanned back TRAP_LOOKUP_TRIES instructions without finding an
       FPU operation; don't report an uninitialized error number. */
    return -1;
  }

  err = handle_error(xp, errnum, rcontext, 0, (unsigned) there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    status = handle_error(xp, errnum, rb, 0, (unsigned) where);
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1, (unsigned) where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, (unsigned)where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

unsigned
register_codevector_contains_pc (unsigned lisp_function, unsigned where)
{
  unsigned code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < where) && (where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, unsigned where,
                   unsigned arg1, unsigned arg2, unsigned arg3)
{
  unsigned code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  unsigned index = where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = (where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
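
/* Example of translation 1 with illustrative numbers: if fn's code
   vector is at 0x51000 and where is 0x51020, lisp gets
   register_number = fn and
       index = (0x51020 - (0x51000 + misc_data_offset)) >> 2,
   a word index into that code vector.  If the PC is in neither fn's
   nor nfn's code vector, register_number is 0 and index is the raw
   PC (translation 2). */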

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  unsigned arg1, unsigned arg2, unsigned arg3, unsigned arg4, unsigned arg5)
{
  sigset_t mask;
  unsigned  callback_ptr, i;
  area *a;

  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (unsigned size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}




/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where)
{
  unsigned  instr, err_arg1 = 0, err_arg2 = 0, err_arg3 = 0;
  int       ra, rs, fn_reg = 0;
  char *    error_msg = NULL;
  char      name[kNameBufLen];
  LispObj   cmain = nrs_CMAIN.vcell;
  Boolean   event_poll_p = false;
  int old_interrupt_level = 0;
  TCR *tcr = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TWI or a TW instruction.
     It's a TWI instruction iff its major opcode is major_opcode_TWI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the lisp_global(CS_OVERFLOW_LIMIT)
     is CS_OVERFLOW_FORCE_LIMIT.  If we're not already in yellow zone mode, attempt to create
     a new thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard overflow limit,
     signal a "hard" stack overflow error (e.g., throw to toplevel as quickly as
     possible).  If we aren't in "yellow zone" mode, attempt to continue on another
     thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set lisp_global(CS_OVERFLOW_LIMIT)
     to its "real" value.  We're out of "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time something gets pushed
     on the control stack, so we should try to detect and handle all of these cases
     fairly quickly.  Of course, the trap overhead is going to slow things down quite a bit.
  */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TW) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    unsigned 
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (unsigned) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (unsigned) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (unsigned) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, debug_entry_dbg, message);
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TW)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      /* cmain is a macptr, we can call back to lisp */
      if (the_trap == TWI_instruction(TO_GT,nargs,0)) {
        /* Disable interrupts if we're about to process one */
        event_poll_p = true;    /* remember to turn interrupts back on */
        tcr->interrupt_level = (-1 << fixnumshift);
        tcr->interrupt_pending = 0;
      }
      callback_for_trap(cmain, xp, (unsigned) where, (unsigned) the_trap,  0, 0);
      if (event_poll_p) {
        tcr->interrupt_level = 0;
        tcr->interrupt_pending = 0;
      }
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Is this trap instruction conditional, i.e., might it not trap?  */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tw = X_opcode_p(instr,major_opcode_X31,minor_opcode_TW);

#ifndef MACOS
  if (instr == LISP_BREAK_INSTRUCTION) {
    return 1;
  }
#endif
  if (is_tw || major_opcode_p(instr,major_opcode_TWI)) {
    /* A "tw" or "twi" instruction.  To be unconditional, the EQ bit must be set
       in the TO mask and either the register operands (if "tw") are the same or
       both of the signed or both of the unsigned inequality bits
       must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tw && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw" or "twi".  Let debugger have it */
}
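
/* Examples: "twi TO_GT,nargs,0" (the event-poll trap recognized in
   handle_trap) lacks the EQ bit, so it's conditional; a "tw" with
   TO_LT|TO_GT|TO_EQ (or TO_LO|TO_HI|TO_EQ) set, or with TO_EQ set and
   identical register operands, can never fail to trap and is reported
   as unconditional. */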

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, unsigned where)
{
  LispObj   pname;
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, 
                                   ExceptionInformationPowerPC *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->suspend_total_on_exception_entry =
    tcr->suspend_total;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformationPowerPC *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (tcr->interrupt_level > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformationPowerPC  *context, TCR *tcr)
{
  xframe_list xframe_link;
  int old_valence;
#ifdef LINUX

  tcr = (TCR *) xpGPR(context,rcontext);

  if (tcr == NULL) {
    tcr = get_tcr(false);
  } else {
    current_r2 = tcr->native_thread_info;
  }

  /* The signal handler's entered with all signals (notably the
     thread_suspend signal) blocked.  Don't allow any other signal
     to preempt us until we've set the TCR's xframe slot to include
     the current exception context.
  */

  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
#endif

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%08x, context->regs at #x%08x", signum, xpPC(context), (int)xpGPRvector(context));
    lisp_Debugger(context, signum, msg);
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler.)
  */
#ifdef LINUX
  exit_signal_handler(tcr, old_valence);
  raise_pending_interrupt(tcr);
#endif
}

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp.
*/
void
pc_luser_xp(ExceptionInformationPowerPC *xp, TCR *tcr)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if (instr == 0x918c0004) {    /* stw tsp,tsp_frame.type(tsp) */
    LispObj tsp_val = xpGPR(xp,tsp);

    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
    adjust_exception_pc(xp, 4);
    return;
  }

  if (frame->backlink == (frame+1)) {
    if ((major_opcode_p(instr, major_opcode_STW)) &&
        (RA_field(instr) == sp) &&
        /* There are a few places in the runtime that store into
           a previously-allocated frame atop the stack when
           throwing values around.  We only care about the case
           where the frame was newly allocated, in which case
           there must have been a CREATE_LISP_FRAME_INSTRUCTION
           a few instructions before the current program counter.
           (The whole point here is that a newly allocated frame
           might contain random values that we don't want the
           GC to see; a previously allocated frame should already
           be completely initialized.)
        */
        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
      int disp = D_field(instr);

      if (disp < 16) {
        frame->savevsp = 0;
        if (disp < 12) {
          frame->savelr = 0;
          if (disp == 4) {
            frame->savefn = 0;
          }
        }
      }
      return;
    }
  }

  if (allocptr_tag != tag_fixnum) {
    int disp = allocptr_displacement(xp);

    if (disp) {
      /* We're "at" the alloc_trap.  If this is happening for the
         GC's benefit (the GC has just suspended the thread that
         owns xp), set allocbase to VOID_ALLOCPTR and pretend that
         allocptr was just decremented (by disp) from there; leave
         the PC pointing to the alloc_trap.  If this is happening
         in response to a PROCESS-INTERRUPT request, we can't have
         entered the trap handler yet: back out of the previous
         instruction (which subtracted "disp" from allocptr) and
         arrange to execute it again after the interrupt.
      */
      if (tcr) {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      } else {
        xpGPR(xp, allocptr) = cur_allocptr + disp;
        xpPC(xp) -= 1;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
    }
  }

  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
      (RA_field(instr) == vsp)) {
    int r;
    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));

    for (r = RS_field(instr); r <= 31; r++) {
      *vspptr++ = xpGPR(xp,r);
    }
    adjust_exception_pc(xp, 4);
  }
}
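
/*
  For reference (a sketch; nothing above depends on it): the D-form
  stores decoded in pc_luser_xp() have this layout in the 32-bit
  PowerPC instruction word:

     bits  0-5   major opcode ("stw" is 36, "stmw" is 47)
     bits  6-10  RS, the source register
     bits 11-15  RA, the base address register
     bits 16-31  D, a signed 16-bit displacement

  The major_opcode_p(), RS_field(), RA_field(), and D_field() macros
  used above extract these fields.
*/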

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
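    /* A negative interrupt_level means interrupts are currently
       disabled in this thread; just note that an interrupt (the lisp
       fixnum 1) is pending, to be dealt with when interrupts are
       enabled again. */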
    if (tcr->interrupt_level < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
        */
        if (tcr->valence != TCR_STATE_LISP) {
          tcr->interrupt_level = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;

          pc_luser_xp(context, NULL);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          PMCL_exception_handler(signum, context, tcr, info);
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}


void
install_signal_handler(int signo, __sighandler_t handler)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;

  sigaction(signo, &sa, NULL);
}

void
install_pmcl_exception_handlers()
{
#ifndef DARWIN
  extern int no_sigtrap;
  install_signal_handler(SIGILL, (__sighandler_t)signal_handler);
  if (no_sigtrap != 1) {
    install_signal_handler(SIGTRAP, (__sighandler_t)signal_handler);
  }
  install_signal_handler(SIGBUS,  (__sighandler_t)signal_handler);
  install_signal_handler(SIGSEGV, (__sighandler_t)signal_handler);
  install_signal_handler(SIGFPE, (__sighandler_t)signal_handler);
#endif
  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (__sighandler_t)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for (p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}


void
exception_init()
{
  install_pmcl_exception_handlers();
}


void
Bug(ExceptionInformation *xp, const char *format, ...)
{
  va_list args;
  char s[512];

  if (threads_initialized) {
    suspend_other_threads();
  }

  va_start(args, format);
  vsnprintf(s, sizeof(s), format, args);
  va_end(args);
  lisp_Debugger(NULL, debug_entry_bug, s);

  if (threads_initialized) {
    resume_other_threads();
  }
}

void
lisp_bug(char *string)
{
  Bug(NULL, "Bug in MCL-PPC system code:\n%s", string);
}


#ifdef DARWIN

lock_set_t mach_exception_lock_set;


#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

typedef struct {
  int foreign_exception_port_count;
  exception_mask_t       masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t            ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t   behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;


/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resume listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code

  we create a Linux sigcontext struct.

  Simple?  Hopefully from the outside it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock_set[0]".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)
*/
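
/*
  A sketch of the round trip for a typical exception (the names are
  the functions defined below):

    user thread faults
      -> Mach kernel sends an exception message to the thread's
         exception port (named by its TCR)
      -> exception thread: exc_server() -> catch_exception_raise()
      -> setup_signal_frame() builds a Linux-style sigcontext on the
         user thread's stack, aimed at pseudo_signal_handler()
      -> user thread resumes, runs pseudo_signal_handler()/signal_handler()
      -> the handler "returns" to pseudo_sigreturn(), a UUO that faults
      -> exception thread: catch_exception_raise() -> do_pseudo_sigreturn()
      -> restore_mach_thread_state() reinstates the saved context and
         the user thread continues where it left off.
*/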

#define C_REDZONE_LEN           224
#define C_STK_ALIGN             16
#define C_PARAMSAVE_LEN         64
#define C_LINKAGE_LEN           48
#define TRUNC_DOWN(a,b,c)  (((((unsigned)a)-(b))/(c)) * (c))
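
/*
  TRUNC_DOWN(a,b,c) subtracts b from a and rounds the result down to
  a multiple of c.  A worked example: with a = 0x10004, b =
  C_REDZONE_LEN (224 = 0xe0), and c = C_STK_ALIGN (16), we get
  0x10004 - 0xe0 = 0xff24, and 0xff24 / 16 * 16 = 0xff20: the red
  zone is skipped and the result is 16-byte aligned.
*/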
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/machine/thread_state.h>
#include <mach/machine/thread_status.h>

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) { fatal_mach_error("Mach error while %s : %d", context, x); } } while (0)


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *lss)
{
#ifdef PPC64
  struct mcontext64 *mc = UC_MCONTEXT(lss);
#else
  struct mcontext *mc = UC_MCONTEXT(lss);
#endif

  /* Set the thread's FP state from the lss */
  thread_set_state(thread,
                   PPC_FLOAT_STATE,
                   (thread_state_t)&(mc->fs),
                   PPC_FLOAT_STATE_COUNT);

  /* The thread'll be as good as new ... */
#ifdef PPC64
  thread_set_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&(mc->ss),
                   PPC_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&(mc->ss),
                   MACHINE_THREAD_STATE_COUNT);
#endif
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (the user thread's R3 points to it), then
   use that context to set the user thread's state.  When this
   function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

  lock_acquire(mach_exception_lock_set, 0);
  xp = tcr->pending_exception_context;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_LISP;
  restore_mach_thread_state(thread, xp);
  raise_pending_interrupt(tcr);
  lock_release(mach_exception_lock_set, 0);
  return KERN_SUCCESS;
}

ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            unsigned *new_stack_top)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;
  ExceptionInformation *lss;
#ifdef PPC64
  struct mcontext64 *mc;
#else
  struct mcontext *mc;
#endif
  natural stackp, backlink;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
  thread_get_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&ts,
                   &thread_state_count);
#else
  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  thread_get_state(thread,
                   MACHINE_THREAD_STATE,        /* GPRs, some SPRs  */
                   (thread_state_t)&ts,
                   &thread_state_count);
#endif

  stackp = ts.r1;
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*lss);
  lss = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
#ifdef PPC64
  mc = (struct mcontext64 *) ptr_from_lispobj(stackp);
#else
  mc = (struct mcontext *) ptr_from_lispobj(stackp);
#endif
  bcopy(&ts, &(mc->ss), sizeof(ts));

  thread_state_count = PPC_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   PPC_FLOAT_STATE,
                   (thread_state_t)&(mc->fs),
                   &thread_state_count);

  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   PPC_EXCEPTION_STATE,
                   (thread_state_t)&(mc->es),
                   &thread_state_count);

  UC_MCONTEXT(lss) = mc;
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return lss;
}
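
/*
  A sketch of the frame that create_thread_context_frame() carves out
  of the user thread's stack (higher addresses at the top):

      ts.r1 (original SP) -> +---------------------------+
                             | C red zone (224 bytes)    |
      lss ----------------ic-> +---------------------------+
                             | ExceptionInformation      |
      mc  ----------------->  +---------------------------+
                             | mcontext: GPRs, FPRs,     |
                             | exception state           |
                             +---------------------------+
                             | param save (64 bytes)     |
                             +---------------------------+
                             | linkage area (48 bytes);  |
      new_stack_top ------->  | backlink -> original SP   |
                             +---------------------------+
*/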

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  ppc_thread_state_t ts;
  ExceptionInformation *lss;
  unsigned stackp;

  lock_acquire(mach_exception_lock_set, 0);

  lss = create_thread_context_frame(thread, &stackp);
  lss->uc_onstack = 0;
  lss->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = lss;
  tcr->suspend_total_on_exception_entry =
    tcr->suspend_total;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     At this point we've created a sigcontext on the thread's stack.
     Set things up so that we call the handler (with appropriate args)
     when the thread's resumed.
  */

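  /* Per the PowerPC C calling convention, r3-r5 carry the handler's
     three arguments (signum, context, tcr), r1 is the stack pointer,
     and the link register holds the address the handler will "return"
     to -- here, the pseudo_sigreturn() UUO.  srr0/srr1 are the saved
     PC and MSR; clearing the FE0/FE1 bits runs the handler with FP
     exceptions disabled. */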
  ts.srr0 = (int) handler_address;
  ts.srr1 = (int) xpMSR(lss) & ~MSR_FE0_FE1_MASK;
  ts.r1 = stackp;
  ts.r3 = signum;
  ts.r4 = (int)lss;
  ts.r5 = (int)tcr;
  ts.lr = (int)pseudo_sigreturn;

  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
  lock_release(mach_exception_lock_set, 0);
  return 0;
}


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr)
{
  signal_handler(signum, NULL, context, tcr);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  ppc_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;

  lock_acquire(mach_exception_lock_set, 0);
  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  thread_get_state(thread,
                   MACHINE_THREAD_STATE,        /* GPRs, some SPRs  */
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.srr1 &= ~MSR_FE0_FE1_MASK;
  }
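  /* Resume at the instruction following the (4-byte) UUO that raised
     the exception. */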
  ts.srr0 += 4;
  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);

  lock_release(mach_exception_lock_set, 0);
  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;

  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      return do_pseudo_sigreturn(thread, (TCR *) exception_port);
    }
    if (code1 == (int)enable_fp_exceptions) {
      return thread_set_fp_exceptions_enabled(thread, true);
    }
    return thread_set_fp_exceptions_enabled(thread, false);
  }
  switch (exception) {
  case EXC_BAD_ACCESS:
    signum = SIGSEGV;
    break;

  case EXC_BAD_INSTRUCTION:
    signum = SIGILL;
    break;

  case EXC_SOFTWARE:
    if (code == EXC_PPC_TRAP) {
      signum = SIGTRAP;
    }
    break;

  case EXC_ARITHMETIC:
    signum = SIGFPE;
    break;

  default:
    break;
  }
  if (signum) {
    return setup_signal_frame(thread,
                              (void *)pseudo_signal_handler,
                              signum,
                              code,
                              (TCR *) exception_port);
  }
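  /* Not an exception we handle.  Returning anything other than
     KERN_SUCCESS tells exc_server() (and thus the kernel) that the
     exception wasn't handled here. */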
  return 17;
}


/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = (mach_port_t) arg;

  mach_msg_server(exc_server, 256, p, 0);
  /* Should never return. */
  abort();
}


mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    lock_set_create(mach_task_self(),
                    &mach_exception_lock_set,
                    1,
                    SYNC_POLICY_FIFO);

    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports", kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)__exception_port_set);
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.
*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i, n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = (mach_port_t)tcr, foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)tcr->native_thread_id);
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)tcr->native_thread_id,
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = (mach_port_t)tcr,
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_allocate_name(task_self,
                                 MACH_PORT_RIGHT_RECEIVE,
                                 thread_exception_port);

  MACH_CHECK_ERROR("naming exception_port", kret);

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port", kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t) tcr->native_thread_id);
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(stderr, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  mach_port_destroy(mach_task_self(), (mach_port_t)tcr);
}


/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t) tcr->native_thread_id;
  kern_return_t status = thread_suspend(mach_thread);
  ExceptionInformation *lss;

  if (status != KERN_SUCCESS) {
    return false;
  }
  lock_acquire(mach_exception_lock_set, 0);
  lss = create_thread_context_frame(mach_thread, NULL);
  lss->uc_onstack = 0;
  lss->uc_sigmask = (sigset_t) 0;
  tcr->suspend_context = lss;
  tcr->suspend_total++;
  lock_release(mach_exception_lock_set, 0);
  return true;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)(tcr->native_thread_id);

  lock_acquire(mach_exception_lock_set, 0);
  xp = tcr->suspend_context;
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
  lock_release(mach_exception_lock_set, 0);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];

  va_start(args, format);
  vsnprintf(s, sizeof(s), format, args);
  va_end(args);

  Fatal("Mach error", s);
}

#endif