source: branches/arm/lisp-kernel/arm-exceptions.c @ 13843

Last change on this file since 13843 was 13843, checked in by gb, 9 years ago

Enter kernel debugger on uuo-debug-trap.

File size: 67.4 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
50/* Some relatively recent kernels support this interface.
51   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
53*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
/* Ask the kernel for precise floating-point exception reporting.
   On kernels that don't support this prctl, the call quietly fails
   and we assume exceptions are already enabled and precise. */
void
enable_fp_exceptions()
{
  (void) prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}
69
/* Turn floating-point exception trapping off via prctl; a failure
   (unsupported kernel) is deliberately ignored. */
void
disable_fp_exceptions()
{
  (void) prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
/* VM page size in bytes, and its base-2 logarithm.  NOTE(review):
   hard-coded defaults (4K pages) rather than queried at runtime --
   confirm this matches all supported ARM targets. */
int
page_size = 4096;

int
log2_page_size = 12;
98
99
100
101
102
103/*
104  If the PC is pointing to an allocation trap, the previous instruction
105  must have decremented allocptr.  Return the non-zero amount by which
106  allocptr was decremented.
107*/
/* If xp's PC is at an allocation trap, decode the preceding
   instruction(s) that decremented allocptr and return the (negative)
   displacement; return 0 if the PC isn't at an alloc trap.
   Calls Bug() if the preceding instructions can't be decoded. */
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    /* Case 1: allocptr was decremented by a register's value. */
    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    /* Case 2: decremented by a small (8-bit) immediate. */
    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    /* Case 3: two-instruction immediate -- a rotated high part here,
       with the low 8 bits subtracted by the instruction before it. */
    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) == xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
213Boolean
214allocate_object(ExceptionInformation *xp,
215                natural bytes_needed, 
216                signed_natural disp_from_allocptr,
217                TCR *tcr)
218{
219  area *a = active_dynamic_area;
220
221  /* Maybe do an EGC */
222  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
223    if (((a->active)-(a->low)) >= a->threshold) {
224      gc_from_xp(xp, 0L);
225    }
226  }
227
228  /* Life is pretty simple if we can simply grab a segment
229     without extending the heap.
230  */
231  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
232    xpGPR(xp, allocptr) += disp_from_allocptr;
233    return true;
234  }
235 
236  /* It doesn't make sense to try a full GC if the object
237     we're trying to allocate is larger than everything
238     allocated so far.
239  */
240  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
241    untenure_from_area(tenured_area); /* force a full GC */
242    gc_from_xp(xp, 0L);
243  }
244 
245  /* Try again, growing the heap if necessary */
246  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
247    xpGPR(xp, allocptr) += disp_from_allocptr;
248    return true;
249  }
250 
251  return false;
252}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
269
270void
271lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
272{
273  /* Couldn't allocate the object.  If it's smaller than some arbitrary
274     size (say 128K bytes), signal a "chronically out-of-memory" condition;
275     else signal a "allocation request failed" condition.
276  */
277  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
278  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
279}
280
281/*
282  Allocate a large list, where "large" means "large enough to
283  possibly trigger the EGC several times if this was done
284  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
286  the list will be in arg_z
287*/
288
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347    /* else fall thru */
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
/* Counts of GC requests that were deferred because GC was inhibited;
   consulted by handle_gc_trap and gc_like_from_xp. */
natural gc_deferred = 0, full_gc_deferred = 0;
362
363signed_natural
364flash_freeze(TCR *tcr, signed_natural param)
365{
366  return 0;
367}
368
/* Dispatch a GC trap.  The operation selector is in imm0, an optional
   argument in imm1.  Simple operations (EGC control/configuration,
   threshold get/set, flash-freeze) are handled by the switch; anything
   else is treated as a request for a full collection, possibly
   combined with purify/impurify/freeze/save-application (selector
   bits).  Always steps past the trap and returns true. */
Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable/disable the EGC; return the previous state (T or NIL). */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set the three generations' thresholds from arg_x/arg_y/arg_z. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      /* Round the requested threshold up to a whole heap segment. */
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* Make everything currently allocated "static": run a gc-like
       pass (flash_freeze does no work, but the world is stopped and
       tcrs normalized), then mark the whole area as static dnodes. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* Some kind of full collection request. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    /* Disable the EGC around the full GC; re-enabled below. */
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    /* Remember whether this GC was itself deferred. */
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        /* Record the toplevel function, then write the image;
           on success the process exits. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}
503
504
505
506void
507signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
508{
509  /* The cstack just overflowed.  Force the current thread's
510     control stack to do so until all stacks are well under their overflow
511     limits.
512  */
513
514#if 0
515  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
516#endif
517  handle_error(xp, error_stack_overflow, reg);
518}
519
520/*
521  Lower (move toward 0) the "end" of the soft protected area associated
522  with a by a page, if we can.
523*/
524
525void
526adjust_soft_protection_limit(area *a)
527{
528  char *proposed_new_soft_limit = a->softlimit - 4096;
529  protected_area_ptr p = a->softprot;
530 
531  if (proposed_new_soft_limit >= (p->start+16384)) {
532    p->end = proposed_new_soft_limit;
533    p->protsize = p->end-p->start;
534    a->softlimit = proposed_new_soft_limit;
535  }
536  protect_area(p);
537}
538
539void
540restore_soft_stack_limit(unsigned stkreg)
541{
542  area *a;
543  TCR *tcr = get_tcr(true);
544
545  switch (stkreg) {
546  case Rsp:
547    a = tcr->cs_area;
548    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
549      a->softlimit -= 4096;
550    }
551    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
552    break;
553  case vsp:
554    a = tcr->vs_area;
555    adjust_soft_protection_limit(a);
556    break;
557  }
558}
559
560/* Maybe this'll work someday.  We may have to do something to
561   make the thread look like it's not handling an exception */
/* Abandon the current computation and restart lisp in this thread,
   unwinding to the topmost catch frame (see the comment above --
   this path is speculative). */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  /* Restore the value stack pointer saved in the frame that the
     topmost catch frame's csp points at. */
  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}
574
575/*
576  This doesn't GC; it returns true if it made enough room, false
577  otherwise.
578  If "extend" is true, it can try to extend the dynamic area to
579  satisfy the request.
580*/
581
/* Try to carve `need` bytes out of the active dynamic area without
   GCing (see comment above).  On success, updates the area's active
   pointer, the tcr's allocation bookkeeping, and allocptr in xp.
   If `extend` is true, the heap may be grown (by progressively
   smaller amounts, giving up below 4MB). */
Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Round both the current limit and the request up to the tcr's
     allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      /* Don't pre-extend by the GC threshold if GC is inhibited. */
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        /* Halve the extra headroom and retry; fail below 4MB. */
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
617
618 
619void
620update_area_active (area **aptr, BytePtr value)
621{
622  area *a = *aptr;
623  for (; a; a = a->older) {
624    if ((a->low <= value) && (a->high >= value)) break;
625  };
626  if (a == NULL) Bug(NULL, "Can't find active area");
627  a->active = value;
628  *aptr = a;
629
630  for (a = a->younger; a; a = a->younger) {
631    a->active = a->high;
632  }
633}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
/* Bring a tcr into a consistent state before a gc-like operation:
   update its stack areas' active pointers from the context (or from
   saved values if it's in an ff-call), account for bytes allocated,
   and void its allocation pointers.  `is_other_tcr` is true when
   normalizing a thread other than the one driving the GC. */
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Let the other thread finish any partially-emulated
         allocation first. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      /* Only an untagged allocptr represents a real free pointer. */
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}
683
/* The TCR driving the current gc-like operation, or NULL when none
   is in progress. */
TCR *gc_tcr = NULL;
685
686/* Suspend and "normalize" other tcrs, then call a gc-like function
687   in that context.  Resume the other tcrs, then return what the
688   function returned */
689
/* Stop the world, normalize every tcr, run `fun` in the calling
   thread's context, then restart the world (see comment above).
   If GC is inhibited, just record the deferral and return 0. */
signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC is inhibited: negate the count (marking that a GC was
       wanted), bump the deferral counter, and bail. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Pick the most appropriate context for each other thread and
     normalize it too. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  /* Clear every tcr's gc_context before resuming. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
749
750
751
752/* Returns #bytes freed by invoking GC */
753
754signed_natural
755gc_from_tcr(TCR *tcr, signed_natural param)
756{
757  area *a;
758  BytePtr oldfree, newfree;
759  BytePtr oldend, newend;
760
761  a = active_dynamic_area;
762  oldend = a->high;
763  oldfree = a->active;
764  gc(tcr, param);
765  newfree = a->active;
766  newend = a->high;
767#if 0
768  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
769#endif
770  return ((oldfree-newfree)+(newend-oldend));
771}
772
773signed_natural
774gc_from_xp(ExceptionInformation *xp, signed_natural param)
775{
776  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
777
778  freeGCptrs();
779  return status;
780}
781
782signed_natural
783purify_from_xp(ExceptionInformation *xp, signed_natural param)
784{
785  return gc_like_from_xp(xp, purify, param);
786}
787
788signed_natural
789impurify_from_xp(ExceptionInformation *xp, signed_natural param)
790{
791  return gc_like_from_xp(xp, impurify, param);
792}
793
794
795
796
797
798
/* Write-fault dispatch table, indexed by a protected_area's "why"
   field.  NOTE(review): the order must match the corresponding
   enum declared elsewhere (spurious, three soft-overflow kinds,
   three hard-overflow kinds) -- verify before reordering. */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
809
810
811Boolean
812is_write_fault(ExceptionInformation *xp, siginfo_t *info)
813{
814#ifdef LINUX
815  /* Based on experiments with a small sample size; need to R TFM. */
816  return ((xp->uc_mcontext.trap_no == 0xe) &&
817          (xp->uc_mcontext.error_code == 0x817));
818#endif
819}
820
/* Handle SIGSEGV/SIGBUS: recognize the special "safe reference" and
   touch_page probes, dispatch write faults on known protected areas
   to their handlers, unprotect faults in the read-only area, and as
   a last resort hand lisp-state faults to the CMAIN callback.
   Returns true if the fault was handled. */
Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  /* Prefer the siginfo fault address; fall back to the context's. */
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    /* A %safe-reference probe faulted: skip it and report 0. */
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    /* A touch_page probe faulted: report failure and resume at its
       end label. */
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      /* Dispatch on the reason the area is protected. */
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        /* Writes into the read-only section: open up the page. */
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
      (header_subtag(header_of(cmain)) == subtag_macptr)) {

      /* Let lisp's handler see the fault as SIGBUS/SIGSEGV. */
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
    }
  }
  return false;
}
875
876
877
878
879
880OSStatus
881do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
882{
883#ifdef SUPPORT_PRAGMA_UNUSED
884#pragma unused(area,addr)
885#endif
886  reset_lisp_process(xp);
887  return -1;
888}
889
890extern area*
891allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
892
893extern area*
894allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
895
896
897
898
899
900
901Boolean
902lisp_frame_p(lisp_frame *spPtr)
903{
904  return (spPtr->marker == lisp_frame_marker);
905}
906
907
/* NOTE(review): counter appears unused in this file; presumably
   bumped elsewhere when an ff-call overflows a stack -- verify. */
int ffcall_overflow_count = 0;
909
910
911
912
913
914
915/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
916  the current value of VSP (TSP) or an older area.  */
917
918OSStatus
919do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
920{
921  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
922  area *a = tcr->vs_area;
923  protected_area_ptr vsp_soft = a->softprot;
924  unprotect_area(vsp_soft);
925  signal_stack_soft_overflow(xp,vsp);
926  return 0;
927}
928
929
930
931OSStatus
932do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
933{
934  /* Trying to write into a guard page on the vstack or tstack.
935     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
936     signal an error_stack_overflow condition.
937      */
938  do_vsp_overflow(xp,addr);
939}
940
941OSStatus
942do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
943{
944#ifdef SUPPORT_PRAGMA_UNUSED
945#pragma unused(xp,area,addr)
946#endif
947  return -1;
948}
949
950
951
952
953     
954
955
956
957
958
959Boolean
960handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
961{
962  return false;
963}
964
965   
966
967
968/* This only tries to implement the "optional" fsqrt and fsqrts
969   instructions, which were generally implemented on IBM hardware
970   but generally not available on Motorola/Freescale systems.
971*/               
972OSStatus
973handle_unimplemented_instruction(ExceptionInformation *xp,
974                                 opcode instruction,
975                                 TCR *tcr)
976{
977
978  return -1;
979}
980
981Boolean
982handle_exception(int xnum, 
983                 ExceptionInformation *xp, 
984                 TCR *tcr, 
985                 siginfo_t *info,
986                 int old_valence)
987{
988  pc program_counter;
989  opcode instruction = 0;
990
991
992  program_counter = xpPC(xp);
993 
994  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
995    instruction = *program_counter;
996  }
997
998  if (IS_ALLOC_TRAP(instruction)) {
999    return handle_alloc_trap(xp, tcr);
1000  } else if ((xnum == SIGSEGV) ||
1001             (xnum == SIGBUS)) {
1002    return handle_protection_violation(xp, info, tcr, old_valence);
1003  } else if (xnum == SIGFPE) {
1004    return handle_sigfpe(xp, tcr);
1005  } else if ((xnum == SIGILL)) {
1006    if (IS_GC_TRAP(instruction)) {
1007      return handle_gc_trap(xp, tcr);
1008    } else if (IS_UUO(instruction)) {
1009      return handle_uuo(xp, instruction);
1010    } else {
1011      return handle_unimplemented_instruction(xp,instruction,tcr);
1012    }
1013  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1014    tcr->interrupt_pending = 0;
1015    callback_for_trap(nrs_CMAIN.vcell, xp, xnum, 0);
1016    return true;
1017  }
1018
1019  return false;
1020}
1021
/* Advance the exception PC by `delta` bytes.  The PC is a pointer to
   32-bit opcodes (see allocptr_displacement's use of program_counter
   indexing), so the byte count is scaled to opcodes by the >>2
   before the pointer addition. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1027
1028
1029/*
1030  This wants to scan backwards until "where" points to an instruction
1031   whose major opcode is either 63 (double-float) or 59 (single-float)
1032*/
1033
1034OSStatus
1035handle_fpux_binop(ExceptionInformation *xp, pc where)
1036{
1037  OSStatus err = -1;
1038  opcode *there = (opcode *) where, instr, errnum = 0;
1039  return err;
1040}
1041
1042Boolean
1043handle_uuo(ExceptionInformation *xp, opcode the_uuo) 
1044{
1045  unsigned 
1046    format = UUO_FORMAT(the_uuo);
1047  Boolean handled = false;
1048  int bump = 4;
1049  TCR *tcr = get_tcr(true);
1050
1051  switch (format) {
1052  case uuo_format_kernel_service:
1053    {
1054      TCR * target = (TCR *)xpGPR(xp,arg_z);
1055      int service = UUO_UNARY_field(the_uuo);
1056
1057      switch (service) {
1058      case error_propagate_suspend:
1059        handled = true;
1060        break;
1061      case error_interrupt:
1062        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
1063        handled = true;
1064        break;
1065      case error_suspend:
1066        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
1067        handled = true;
1068        break;
1069      case error_suspend_all:
1070        lisp_suspend_other_threads();
1071        handled = true;
1072        break;
1073      case error_resume:
1074        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
1075        handled = true;
1076        break;
1077      case error_resume_all:
1078        lisp_resume_other_threads();
1079        handled = true;
1080        break;
1081      case error_kill:
1082        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
1083        handled = true;
1084        break;
1085      case error_allocate_list:
1086        allocate_list(xp,tcr);
1087        handled = true;
1088        break;
1089      default:
1090        handled = false;
1091        break;
1092      }
1093      break;
1094    }
1095
1096  case uuo_format_unary:
1097    switch(UUO_UNARY_field(the_uuo)) {
1098    case 3:
1099      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
1100        handled = true;
1101        bump = 4;
1102        break;
1103      }
1104      /* fall in */
1105    default:
1106      handled = false;
1107      break;
1108
1109    }
1110    break;
1111
1112  case uuo_format_nullary:
1113    switch (UUOA_field(the_uuo)) {
1114    case 3:
1115      Bug(xp, "Debug trap");
1116      handled = true;
1117      break;
1118
1119    case 4:
1120      tcr->interrupt_pending = 0;
1121      callback_for_trap(nrs_CMAIN.vcell, xp, SIGNAL_FOR_PROCESS_INTERRUPT, 0);
1122      handled = true;
1123      break;
1124    default:
1125      handled = false;
1126      break;
1127    }
1128
1129  case uuo_format_error_lisptag:
1130  case uuo_format_error_fulltag:
1131  case uuo_format_error_xtype:
1132  case uuo_format_nullary_error:
1133  case uuo_format_unary_error:
1134  case uuo_format_binary_error:
1135    handled = handle_error(xp,0,the_uuo);
1136    break;
1137
1138  default:
1139    handled = false;
1140    bump = 0;
1141  }
1142 
1143  if (handled && bump) {
1144    adjust_exception_pc(xp, bump);
1145  }
1146  return handled;
1147}
1148
/* If lisp_function is a function object whose code vector contains
   `where`, return that (tagged) code vector; otherwise return 0.
   NOTE(review): the range test compares `where` against the untagged
   base but the tagged upper bound -- presumably intentional slop of
   one fulltag's worth at each end; verify before tightening. */
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    /* Code vector is the function's second slot; size is its element
       count in 32-bit opcodes, converted to bytes. */
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
1165
1166int
1167callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
1168{
1169  return callback_to_lisp(callback_macptr, xp, info,arg);
1170}
1171
1172int
1173callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
1174                  natural arg1, natural arg2)
1175{
1176  natural  callback_ptr;
1177  area *a;
1178  natural fnreg = fn,  codevector, offset;
1179  pc where = xpPC(xp);
1180
1181  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1182  if (codevector == 0) {
1183    fnreg = nfn;
1184    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1185  }
1186  offset = (natural)where - codevector;
1187                                                 
1188                                               
1189
1190  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1191
1192  /* Put the active stack pointer where .SPcallback expects it */
1193  a = tcr->cs_area;
1194  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));
1195
1196  /* Copy globals from the exception frame to tcr */
1197  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
1198  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
1199
1200
1201
1202  /* Call back.
1203     Lisp will handle trampolining through some code that
1204     will push lr/fn & pc/nfn stack frames for backtrace.
1205  */
1206  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1207  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
1208  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
1209  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1210
1211
1212
1213  /* Copy GC registers back into exception frame */
1214  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
1215}
1216
1217area *
1218allocate_no_stack (natural size)
1219{
1220#ifdef SUPPORT_PRAGMA_UNUSED
1221#pragma unused(size)
1222#endif
1223
1224  return (area *) NULL;
1225}
1226
1227
1228
1229
1230
1231
1232/* callback to (symbol-value cmain) if it is a macptr,
1233   otherwise report cause and function name to console.
1234   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  /* NOTE(review): this body appears to be a stub.  It fetches CMAIN's
     value cell and the current TCR but never uses them, and control
     falls off the end of a function declared to return OSStatus —
     undefined behavior if the caller inspects the result.  Presumably
     the trap-dispatch logic (callback through CMAIN, per the comment
     above) was meant to go here; compare with the PPC implementation
     before relying on this. */
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

}
1242
1243
1244
1245
1246void non_fatal_error( char *msg )
1247{
1248  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1249  fflush( dbgout );
1250}
1251
1252
1253
1254Boolean
1255handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
1256{
1257  LispObj   errdisp = nrs_ERRDISP.vcell;
1258
1259  if ((fulltag_of(errdisp) == fulltag_misc) &&
1260      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1261    /* errdisp is a macptr, we can call back to lisp */
1262    return callback_for_trap(errdisp, xp, arg1, arg2);
1263    }
1264
1265  return false;
1266}
1267               
1268
1269/*
1270   Current thread has all signals masked.  Before unmasking them,
1271   make it appear that the current thread has been suspended.
1272   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1274*/
1275int
1276prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1277{
1278  int old_valence = tcr->valence;
1279
1280  tcr->pending_exception_context = context;
1281  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1282
1283  ALLOW_EXCEPTIONS(context);
1284  return old_valence;
1285} 
1286
/* Acquire the global exception lock, then push "xf" — recording the
   current exception context — onto the TCR's xframe chain so the GC
   can find (and update) the context while the exception is being
   handled.  Once the context is linked, the pending-exception slot is
   cleared and the thread is marked TCR_STATE_FOREIGN (it's running
   handler code, not lisp). */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  /* Link this context onto the TCR's xframe list (most recent first). */
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1300
/* Undo wait_for_exception_lock_in_handler(): pop the top xframe back
   into pending_exception_context, mark the thread as returning from
   the exception, and release the global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1309
1310/*
1311   If an interrupt is pending on exception exit, try to ensure
1312   that the thread sees it as soon as it's able to run.
1313*/
1314void
1315raise_pending_interrupt(TCR *tcr)
1316{
1317  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1318    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1319  }
1320}
1321
1322void
1323exit_signal_handler(TCR *tcr, int old_valence)
1324{
1325  sigset_t mask;
1326  sigfillset(&mask);
1327 
1328  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1329  tcr->valence = old_valence;
1330  tcr->pending_exception_context = NULL;
1331}
1332
1333
/* Common handler body for lisp exception signals (SIGILL, SIGTRAP,
   SIGSEGV, ...).  When Mach exception handling is in use, "tcr" and
   "old_valence" arrive from the pseudo-signal machinery; otherwise
   they are recomputed here.  Tries handle_exception(); if that
   declines, drops into the kernel debugger with a description. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
   
    tcr = (TCR *) get_interrupt_tcr(false);
 
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */
   
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* A suspend request arrived while this thread had signals blocked:
     deliver it to ourselves now that it can be acted on. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    /* Unhandled: let a human look at it.  If the debugger asks for
       propagation, mark the TCR so the exception can be re-raised. */
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
1381
1382/*
1383  If it looks like we're in the middle of an atomic operation, make
1384  it seem as if that operation is either complete or hasn't started
1385  yet.
1386
1387  The cases handled include:
1388
1389  a) storing into a newly-allocated lisp frame on the stack.
1390  b) marking a newly-allocated TSP frame as containing "raw" data.
1391  c) consing: the GC has its own ideas about how this should be
1392     handled, but other callers would be best advised to back
1393     up or move forward, according to whether we're in the middle
1394     of allocating a cons cell or allocating a uvector.
1395  d) a STMW to the vsp
1396  e) EGC write-barrier subprims.
1397*/
1398
1399extern opcode
1400  egc_write_barrier_start,
1401  egc_write_barrier_end, 
1402  egc_store_node_conditional, 
1403  egc_store_node_conditional_test,
1404  egc_set_hash_key,
1405  egc_gvset,
1406  egc_rplaca,
1407  egc_rplacd,
1408  egc_set_hash_key_conditional,
1409  egc_set_hash_key_conditional_test;
1410
1411
1412extern opcode ffcall_return_window, ffcall_return_window_end;
1413
/* Called with a thread stopped at an arbitrary point in lisp code
   (by the GC, or by the interrupt handler on the current thread).
   If the saved PC is inside one of the known non-restartable
   sequences — the EGC write-barrier subprims, or a consing sequence
   (indicated by a tagged allocptr) — adjust the saved state so the
   operation appears either fully complete or not yet started.  See
   the block comment above for the full list of cases.

   alloc_disp: if non-NULL (the interrupt case), any outstanding
   allocation displacement is returned through it and allocptr is
   adjusted rather than voided, so the interrupted allocation can be
   resumed later.

   NOTE(review): "instr" and "frame" are computed but unused in this
   version. */
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);
 


  /* Case 1: PC inside the EGC write-barrier subprims.  Complete the
     store (and any memoization) by hand, then resume at the subprim's
     return address. */
  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      /* Conditional store: if it hasn't been attempted yet, or was
         attempted and failed (Z clear at the test), there's nothing
         to complete. */
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;        /* the store succeeded */
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      /* Memoize a possible old->young pointer: set the refbit for the
         stored-into dnode (and for the hash-table "root" object, if
         applicable) when the target is in oldspace and the stored
         value is at a higher address. */
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    /* The store is done; resume at the subprim's return address. */
    xpPC(xp) = xpLR(xp);
    return;
  }


 
  /* Case 2: a non-fixnum allocptr tag means the thread is somewhere
     in a consing sequence. */
  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}
1544
/* Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If interrupts are
   disabled (TCR_INTERRUPT_LEVEL < 0), just note that one is pending.
   Otherwise, if lisp's CMAIN callback is installed and the thread is
   running lisp code (and not unwinding), run the full exception-lock
   protocol and dispatch through handle_exception(); threads in
   foreign code or unwinding defer the interrupt by forcing a positive
   interrupt level. */
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: remember that one arrived. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
         
          /* Back out of (or complete) any partially-executed atomic
             sequence before handing the context to lisp; "disp"
             records an allocptr adjustment to undo afterwards. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
1590
1591
1592
/* Install "handler" as the SA_SIGINFO-style handler for signal
   "signo", with all signals blocked while it runs.

   Fixed: the sigaction struct is now zeroed before use.  Previously
   only sa_sigaction, sa_mask and sa_flags were assigned and the
   struct's remaining (platform-dependent) fields were passed to
   sigaction() uninitialized. */
void
install_signal_handler(int signo, void *handler)
{
  struct sigaction sa;

  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);      /* block everything during handling */
  sa.sa_flags = 
    0 /* SA_RESTART */
    | SA_SIGINFO
    ;

  sigaction(signo, &sa, NULL);
}
1607
1608void
1609install_pmcl_exception_handlers()
1610{
1611#ifdef DARWIN
1612  extern Boolean use_mach_exception_handling;
1613#endif
1614
1615  Boolean install_signal_handlers_for_exceptions =
1616#ifdef DARWIN
1617    !use_mach_exception_handling
1618#else
1619    true
1620#endif
1621    ;
1622  if (install_signal_handlers_for_exceptions) {
1623    extern int no_sigtrap;
1624    install_signal_handler(SIGILL, (void *)signal_handler);
1625    if (no_sigtrap != 1) {
1626      install_signal_handler(SIGTRAP, (void *)signal_handler);
1627    }
1628    install_signal_handler(SIGBUS,  (void *)signal_handler);
1629    install_signal_handler(SIGSEGV, (void *)signal_handler);
1630    install_signal_handler(SIGFPE, (void *)signal_handler);
1631  }
1632 
1633  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1634                         (void *)interrupt_handler);
1635  signal(SIGPIPE, SIG_IGN);
1636}
1637
1638void
1639thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
1640{
1641  TCR *tcr = get_tcr(false);
1642  area *a;
1643  sigset_t mask;
1644 
1645  sigemptyset(&mask);
1646
1647  if (tcr) {
1648    tcr->valence = TCR_STATE_FOREIGN;
1649    a = tcr->vs_area;
1650    if (a) {
1651      a->active = a->high;
1652    }
1653    a = tcr->cs_area;
1654    if (a) {
1655      a->active = a->high;
1656    }
1657  }
1658 
1659  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1660  pthread_exit(NULL);
1661}
1662
1663void
1664thread_signal_setup()
1665{
1666  thread_suspend_signal = SIG_SUSPEND_THREAD;
1667  thread_kill_signal = SIG_KILL_THREAD;
1668
1669  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
1670  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
1671}
1672
1673
1674
1675void
1676unprotect_all_areas()
1677{
1678  protected_area_ptr p;
1679
1680  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1681    unprotect_area(p);
1682  }
1683}
1684
1685/*
1686  A binding subprim has just done "twlle limit_regno,idx_regno" and
1687  the trap's been taken.  Extend the tcr's tlb so that the index will
1688  be in bounds and the new limit will be on a page boundary, filling
1689  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1690  the tcr fields and the registers in the xp and return true if this
1691  all works, false otherwise.
1692
1693  Note that the tlb was allocated via malloc, so realloc can do some
1694  of the hard work.
1695*/
1696Boolean
1697extend_tcr_tlb(TCR *tcr, 
1698               ExceptionInformation *xp, 
1699               unsigned idx_regno)
1700{
1701  unsigned
1702    index = (unsigned) (xpGPR(xp,idx_regno)),
1703    old_limit = tcr->tlb_limit,
1704    new_limit = align_to_power_of_2(index+1,12),
1705    new_bytes = new_limit-old_limit;
1706  LispObj
1707    *old_tlb = tcr->tlb_pointer,
1708    *new_tlb = realloc(old_tlb, new_limit),
1709    *work;
1710
1711  if (new_tlb == NULL) {
1712    return false;
1713  }
1714 
1715  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1716
1717  while (new_bytes) {
1718    *work++ = no_thread_local_binding_marker;
1719    new_bytes -= sizeof(LispObj);
1720  }
1721  tcr->tlb_pointer = new_tlb;
1722  tcr->tlb_limit = new_limit;
1723  return true;
1724}
1725
1726
1727
/* One-time exception-subsystem initialization: just installs the
   process-wide signal handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
1733
1734
1735
1736
1737
1738#ifdef DARWIN
1739
1740
1741#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1742#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1743
1744
1745
1746#define LISP_EXCEPTIONS_HANDLED_MASK \
1747 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1748
1749/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1750#define NUM_LISP_EXCEPTIONS_HANDLED 4
1751
/* A saved copy of a thread's pre-existing (foreign) Mach exception
   ports — parallel arrays, one entry per handled exception mask.
   Presumably kept so the foreign handlers can be restored/forwarded
   to; the code that uses this struct is elsewhere in the file. */
typedef struct {
  int foreign_exception_port_count;  /* number of valid entries below */
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;
1759
1760
1761
1762
1763/*
1764  Mach's exception mechanism works a little better than its signal
1765  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
1767
1768  Initially, we install an exception handler to handle each native
1769  thread's exceptions.  This process involves creating a distinguished
1770  thread which listens for kernel exception messages on a set of
1771  0 or more thread exception ports.  As threads are created, they're
1772  added to that port set; a thread's exception port is destroyed
1773  (and therefore removed from the port set) when the thread exits.
1774
1775  A few exceptions can be handled directly in the handler thread;
1776  others require that we resume the user thread (and that the
1777  exception thread resumes listening for exceptions.)  The user
1778  thread might eventually want to return to the original context
1779  (possibly modified somewhat.)
1780
1781  As it turns out, the simplest way to force the faulting user
1782  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
1784  on the user thread's stack and forces the user thread to resume
1785  execution as if a signal handler had been called with that
1786  context as an argument.  We can use a distinguished UUO at a
1787  distinguished address to do something like sigreturn(); that'll
1788  have the effect of resuming the user thread's execution in
1789  the (pseudo-) signal context.
1790
1791  Since:
1792    a) we have miles of code in C and in Lisp that knows how to
1793    deal with Linux sigcontexts
1794    b) Linux sigcontexts contain a little more useful information
1795    (the DAR, DSISR, etc.) than their Darwin counterparts
1796    c) we have to create a sigcontext ourselves when calling out
1797    to the user thread: we aren't really generating a signal, just
1798    leveraging existing signal-handling code.
1799
1800  we create a Linux sigcontext struct.
1801
1802  Simple ?  Hopefully from the outside it is ...
1803
1804  We want the process of passing a thread's own context to it to
1805  appear to be atomic: in particular, we don't want the GC to suspend
1806  a thread that's had an exception but has not yet had its user-level
1807  exception handler called, and we don't want the thread's exception
1808  context to be modified by a GC while the Mach handler thread is
1809  copying it around.  On Linux (and on Jaguar), we avoid this issue
1810  because (a) the kernel sets up the user-level signal handler and
1811  (b) the signal handler blocks signals (including the signal used
1812  by the GC to suspend threads) until tcr->xframe is set up.
1813
1814  The GC and the Mach server thread therefore contend for the lock
1815  "mach_exception_lock".  The Mach server thread holds the lock
1816  when copying exception information between the kernel and the
1817  user thread; the GC holds this lock during most of its execution
1818  (delaying exception processing until it can be done without
1819  GC interference.)
1820
1821*/
1822
1823
/* Round "a" down so that (a - b) is a multiple of c.  ("a" is now
   parenthesized before the cast so expression arguments work.) */
#define TRUNC_DOWN(a,b,c)  (((((natural)(a))-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

/* Die with a descriptive message if a Mach call failed.  Rewritten as
   do { ... } while (0) so it behaves as a single statement (safe in
   unbraced if/else), with the status expression evaluated exactly
   once and all arguments parenthesized. */
#define MACH_CHECK_ERROR(context,x)                                     \
  do {                                                                  \
    kern_return_t _mach_check_kr = (x);                                 \
    if (_mach_check_kr != KERN_SUCCESS) {                               \
      fatal_mach_error("Mach error while %s : %d", (context),           \
                       _mach_check_kr);                                 \
    }                                                                   \
  } while (0)
1830
1831
/* Push the register state captured in "pseudosigcontext" back into
   the Mach thread "thread": first the float state, then the
   general-purpose state.  Used when a thread "returns" from a
   pseudo-signal (see do_pseudo_sigreturn). */
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_FLOAT_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread, 
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
} 
1853
1854/* This code runs in the exception handling thread, in response
1855   to an attempt to execute the UU0 at "pseudo_sigreturn" (e.g.,
1856   in response to a call to pseudo_sigreturn() from the specified
1857   user thread.
1858   Find that context (the user thread's R3 points to it), then
1859   use that context to set the user thread's state.  When this
1860   function's caller returns, the Mach kernel will resume the
1861   user thread.
1862*/
1863
1864kern_return_t
1865do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
1866{
1867  ExceptionInformation *xp;
1868
1869#ifdef DEBUG_MACH_EXCEPTIONS
1870  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
1871#endif
1872  xp = tcr->pending_exception_context;
1873  if (xp) {
1874    tcr->pending_exception_context = NULL;
1875    tcr->valence = TCR_STATE_LISP;
1876    restore_mach_thread_state(thread, xp);
1877    raise_pending_interrupt(tcr);
1878  } else {
1879    Bug(NULL, "no xp here!\n");
1880  }
1881#ifdef DEBUG_MACH_EXCEPTIONS
1882  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
1883#endif
1884  return KERN_SUCCESS;
1885} 
1886
/* Build a pseudo-sigcontext (ExceptionInformation plus machine
   context) on the suspended thread's own stack — below its current
   stack pointer and red zone — and return it.  If new_stack_top is
   non-NULL, the adjusted stack pointer (after reserving a backlinked
   linkage area) is returned through it.

   NOTE(review): this routine mixes ARM and PPC Mach flavors: it reads
   the GPRs with ARM_THREAD_STATE but uses ts.__r1 as the stack
   pointer and fetches PPC_FLOAT_STATE / PPC_EXCEPTION_STATE for the
   rest.  It looks like an incompletely-ported copy of the PPC
   version — confirm before relying on it on ARM/Darwin. */
ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread, 
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
 
  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  /* Carve the pseudosigcontext and the machine context out of the
     thread's stack, staying below the red zone and keeping the
     required alignment; remember the original SP as a backlink. */
  stackp = ts.__r1;
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = PPC_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   PPC_FLOAT_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


#ifdef PPC64
  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
#else
  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
                   PPC_EXCEPTION_STATE64,
#else
                   PPC_EXCEPTION_STATE,
#endif
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  /* Reserve parameter-save and linkage areas and store the backlink,
     so the new frame chains to the interrupted one. */
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
1949
1950/*
1951  This code sets up the user thread so that it executes a "pseudo-signal
1952  handler" function when it resumes.  Create a linux sigcontext struct
1953  on the thread's stack and pass it as an argument to the pseudo-signal
1954  handler.
1955
1956  Things are set up so that the handler "returns to" pseudo_sigreturn(),
1957  which will restore the thread's context.
1958
1959  If the handler invokes code that throws (or otherwise never sigreturn()'s
1960  to the context), that's fine.
1961
1962  Actually, check that: throw (and variants) may need to be careful and
1963  pop the tcr's xframe list until it's younger than any frame being
1964  entered.
1965*/
1966
/* Arrange that the suspended user thread, when resumed, runs
   handler_address(signum, pseudosigcontext, tcr, old_valence) on a
   pseudo-signal frame built on its own stack, "returning" to
   pseudo_sigreturn so its original state can then be restored.
   Always returns 0.  (The "code" parameter is currently unused.)

   NOTE(review): the thread-state type and register names used here
   (ppc_thread_state_t, __srr0/__srr1, MSR FE0/FE1 bits) are PPC, not
   ARM — like create_thread_context_frame, this appears to be
   unported PPC code; confirm before relying on it. */
int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  /* The thread is now "waiting" for its exception to be handled. */
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
 

  /*
     It seems like we've created a  sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

  ts.__srr0 = (natural) handler_address;
  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
  ts.__r1 = stackp;
  ts.__r3 = signum;
  ts.__r4 = (natural)pseudosigcontext;
  ts.__r5 = (natural)tcr;
  ts.__r6 = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;  /* handler "returns" here */


#ifdef PPC64
  ts.__r13 = xpGPR(pseudosigcontext,13);
  thread_set_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&ts,
                   PPC_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#endif
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
2026
2027
/* Entry point that the Mach machinery arranges for a user thread to
   run (see setup_signal_frame): adapt the pseudo-signal arguments to
   the common signal_handler().  There is no siginfo_t for a Mach
   exception, hence the NULL. */
void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence);
} 
2033
2034
/* Enable or disable FP exceptions for the given Mach thread by
   flipping the FE0/FE1 bits in its saved MSR, adjust the resume PC
   (see the hack-o-rama comment below), and store the state back.
   Always returns 0.

   NOTE(review): this is entirely PPC thread-state code (srr0/srr1,
   MSR FE0/FE1) — apparently not yet ported to ARM; confirm before
   relying on it in this file. */
int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.__srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and force the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between returning from our
     interrupt handler and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.__r1,
      *next_frame = tos->backlink;
   
    /* A minimal lisp frame is exactly one lisp_frame below its
       backlink; in that case pop it and resume at its saved LR. */
    if (tos == (next_frame -1)) {
      ts.__srr0 = tos->savelr;
      ts.__r1 = (LispObj) next_frame;
    } else {
      ts.__srr0 += 4;   /* skip the 4-byte UUO */
    }
  }
  thread_set_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
                   PPC_THREAD_STATE_COUNT
#endif
                   );

  return 0;
}
2105
2106/*
2107  This function runs in the exception handling thread.  It's
2108  called (by this precise name) from the library function "exc_server()"
2109  when the thread's exception ports are set up.  (exc_server() is called
2110  via mach_msg_server(), which is a function that waits for and dispatches
2111  on exception messages from the Mach kernel.)
2112
2113  This checks to see if the exception was caused by a pseudo_sigreturn()
2114  UUO; if so, it arranges for the thread to have its state restored
2115  from the specified context.
2116
2117  Otherwise, it tries to map the exception to a signal number and
2118  arranges that the thread run a "pseudo signal handler" to handle
2119  the exception.
2120
2121  Some exceptions could and should be handled here directly.
2122*/
2123
2124kern_return_t
2125catch_exception_raise(mach_port_t exception_port,
2126                      mach_port_t thread,
2127                      mach_port_t task, 
2128                      exception_type_t exception,
2129                      exception_data_t code_vector,
2130                      mach_msg_type_number_t code_count)
2131{
2132  int signum = 0, code = *code_vector, code1;
2133  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2134  kern_return_t kret;
2135
2136#ifdef DEBUG_MACH_EXCEPTIONS
2137  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2138#endif
2139
2140  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2141    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2142  } 
2143  if ((exception == EXC_BAD_INSTRUCTION) &&
2144      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2145      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2146       (code1 == (int)enable_fp_exceptions) ||
2147       (code1 == (int)disable_fp_exceptions))) {
2148    if (code1 == (int)pseudo_sigreturn) {
2149      kret = do_pseudo_sigreturn(thread, tcr);
2150#if 0
2151      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2152#endif
2153       
2154    } else if (code1 == (int)enable_fp_exceptions) {
2155      kret = thread_set_fp_exceptions_enabled(thread, true);
2156    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2157  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2158    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2159    kret = 17;
2160  } else {
2161    switch (exception) {
2162    case EXC_BAD_ACCESS:
2163      signum = SIGSEGV;
2164      break;
2165       
2166    case EXC_BAD_INSTRUCTION:
2167      signum = SIGILL;
2168      break;
2169     
2170    case EXC_SOFTWARE:
2171      if (code == EXC_PPC_TRAP) {
2172        signum = SIGTRAP;
2173      }
2174      break;
2175     
2176    case EXC_ARITHMETIC:
2177      signum = SIGFPE;
2178      break;
2179
2180    default:
2181      break;
2182    }
2183    if (signum) {
2184      kret = setup_signal_frame(thread,
2185                                (void *)pseudo_signal_handler,
2186                                signum,
2187                                code,
2188                                tcr);
2189#if 0
2190      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2191#endif
2192
2193    } else {
2194      kret = 17;
2195    }
2196  }
2197
2198  return kret;
2199}
2200
2201
2202
/*
  In-memory layout of the Mach exception request message (msgh_id 2401,
  "exception_raise") as delivered by the kernel; must mirror the
  MIG-generated request structure exactly, since openmcl_exc_server()
  casts the raw message to this type.
*/
typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;  /* thread that took the exception */
  mach_msg_port_descriptor_t task;    /* its task */
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;     /* number of valid words in code[] */
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
2216
2217
/*
  Minimal replacement for the library's exc_server(): decode an incoming
  Mach exception request, dispatch it to catch_exception_raise(), and
  construct the MIG-style reply.  Returns TRUE if a reply should be
  sent back by mach_msg_server(), FALSE otherwise.
*/
boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  /* Standard MIG reply header: echo the remote port, reply id is
     request id + 100, body is the NDR record plus RetCode. */
  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  /* 2401 = exception_raise (EXCEPTION_DEFAULT behavior); we don't
     handle the state-carrying flavors 2402/2403. */
  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  /* A non-KERN_SUCCESS RetCode makes the kernel propagate the
     exception to the next handler (or kill the task). */
  reply->RetCode = handled;
  return TRUE;
}
2250
2251/*
2252  The initial function for an exception-handling thread.
2253*/
2254
2255void *
2256exception_handler_proc(void *arg)
2257{
2258  extern boolean_t exc_server();
2259  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2260
2261  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2262  /* Should never return. */
2263  abort();
2264}
2265
2266
2267
2268mach_port_t
2269mach_exception_port_set()
2270{
2271  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2272  kern_return_t kret; 
2273  if (__exception_port_set == MACH_PORT_NULL) {
2274    kret = mach_port_allocate(mach_task_self(),
2275                              MACH_PORT_RIGHT_PORT_SET,
2276                              &__exception_port_set);
2277    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2278    create_system_thread(0,
2279                         NULL,
2280                         exception_handler_proc, 
2281                         (void *)((natural)__exception_port_set));
2282  }
2283  return __exception_port_set;
2284}
2285
2286/*
2287  Setup a new thread to handle those exceptions specified by
2288  the mask "which".  This involves creating a special Mach
2289  message port, telling the Mach kernel to send exception
2290  messages for the calling thread to that port, and setting
2291  up a handler thread which listens for and responds to
2292  those messages.
2293
2294*/
2295
2296/*
2297  Establish the lisp thread's TCR as its exception port, and determine
2298  whether any other ports have been established by foreign code for
2299  exceptions that lisp cares about.
2300
2301  If this happens at all, it should happen on return from foreign
2302  code and on entry to lisp code via a callback.
2303
2304  This is a lot of trouble (and overhead) to support Java, or other
2305  embeddable systems that clobber their caller's thread exception ports.
2306 
2307*/
/*
  Atomically install the lisp exception port for "thread" and capture
  any handlers foreign code had installed for exceptions lisp cares
  about, so they can be reinstated later.  Returns the kernel status
  of the swap.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  /* Make lisp_port the handler for all exceptions in
     LISP_EXCEPTIONS_HANDLED_MASK; the previously-installed handlers
     come back in fxs->masks/ports/behaviors/flavors.  "n" is in/out:
     array capacity on entry, number of entries filled on exit. */
  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    /* Record which exception types had a conflicting foreign handler
       (neither our own port nor null); restore_foreign_exception_ports()
       consults this mask when returning to foreign code. */
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}
2342
2343kern_return_t
2344tcr_establish_lisp_exception_port(TCR *tcr)
2345{
2346  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2347}
2348
2349/*
2350  Do this when calling out to or returning from foreign code, if
2351  any conflicting foreign exception ports were established when we
2352  last entered lisp code.
2353*/
2354kern_return_t
2355restore_foreign_exception_ports(TCR *tcr)
2356{
2357  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2358 
2359  if (m) {
2360    MACH_foreign_exception_state *fxs  = 
2361      (MACH_foreign_exception_state *) tcr->native_thread_info;
2362    int i, n = fxs->foreign_exception_port_count;
2363    exception_mask_t tm;
2364
2365    for (i = 0; i < n; i++) {
2366      if ((tm = fxs->masks[i]) & m) {
2367        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2368                                   tm,
2369                                   fxs->ports[i],
2370                                   fxs->behaviors[i],
2371                                   fxs->flavors[i]);
2372      }
2373    }
2374  }
2375}
2376                                   
2377
2378/*
2379  This assumes that a Mach port (to be used as the thread's exception port) whose
2380  "name" matches the TCR's 32-bit address has already been allocated.
2381*/
2382
2383kern_return_t
2384setup_mach_exception_handling(TCR *tcr)
2385{
2386  mach_port_t
2387    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2388    task_self = mach_task_self();
2389  kern_return_t kret;
2390
2391  kret = mach_port_insert_right(task_self,
2392                                thread_exception_port,
2393                                thread_exception_port,
2394                                MACH_MSG_TYPE_MAKE_SEND);
2395  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2396
2397  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2398  if (kret == KERN_SUCCESS) {
2399    mach_port_t exception_port_set = mach_exception_port_set();
2400
2401    kret = mach_port_move_member(task_self,
2402                                 thread_exception_port,
2403                                 exception_port_set);
2404  }
2405  return kret;
2406}
2407
2408void
2409darwin_exception_init(TCR *tcr)
2410{
2411  void tcr_monitor_exception_handling(TCR*, Boolean);
2412  kern_return_t kret;
2413  MACH_foreign_exception_state *fxs = 
2414    calloc(1, sizeof(MACH_foreign_exception_state));
2415 
2416  tcr->native_thread_info = (void *) fxs;
2417
2418  if ((kret = setup_mach_exception_handling(tcr))
2419      != KERN_SUCCESS) {
2420    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2421    terminate_lisp();
2422  }
2423}
2424
2425/*
2426  The tcr is the "name" of the corresponding thread's exception port.
2427  Destroying the port should remove it from all port sets of which it's
2428  a member (notably, the exception port set.)
2429*/
2430void
2431darwin_exception_cleanup(TCR *tcr)
2432{
2433  void *fxs = tcr->native_thread_info;
2434  extern Boolean use_mach_exception_handling;
2435
2436  if (fxs) {
2437    tcr->native_thread_info = NULL;
2438    free(fxs);
2439  }
2440  if (use_mach_exception_handling) {
2441    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2442    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2443  }
2444}
2445
2446
2447Boolean
2448suspend_mach_thread(mach_port_t mach_thread)
2449{
2450  kern_return_t status;
2451  Boolean aborted = false;
2452 
2453  do {
2454    aborted = false;
2455    status = thread_suspend(mach_thread);
2456    if (status == KERN_SUCCESS) {
2457      status = thread_abort_safely(mach_thread);
2458      if (status == KERN_SUCCESS) {
2459        aborted = true;
2460      } else {
2461        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2462        thread_resume(mach_thread);
2463      }
2464    } else {
2465      return false;
2466    }
2467  } while (! aborted);
2468  return true;
2469}
2470
2471/*
2472  Only do this if pthread_kill indicated that the pthread isn't
2473  listening to signals anymore, as can happen as soon as pthread_exit()
2474  is called on Darwin.  The thread could still call out to lisp as it
2475  is exiting, so we need another way to suspend it in this case.
2476*/
2477Boolean
2478mach_suspend_tcr(TCR *tcr)
2479{
2480  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2481  ExceptionInformation *pseudosigcontext;
2482  Boolean result = false;
2483 
2484  result = suspend_mach_thread(mach_thread);
2485  if (result) {
2486    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2487    pseudosigcontext->uc_onstack = 0;
2488    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2489    tcr->suspend_context = pseudosigcontext;
2490  }
2491  return result;
2492}
2493
2494void
2495mach_resume_tcr(TCR *tcr)
2496{
2497  ExceptionInformation *xp;
2498  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2499 
2500  xp = tcr->suspend_context;
2501#ifdef DEBUG_MACH_EXCEPTIONS
2502  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2503          tcr, tcr->pending_exception_context);
2504#endif
2505  tcr->suspend_context = NULL;
2506  restore_mach_thread_state(mach_thread, xp);
2507#ifdef DEBUG_MACH_EXCEPTIONS
2508  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2509          tcr, tcr->pending_exception_context);
2510#endif
2511  thread_resume(mach_thread);
2512}
2513
/*
  printf-style formatter for unrecoverable Mach errors; formats the
  message into a bounded buffer and hands it to Fatal(), which does
  not return.
*/
void
fatal_mach_error(char *format, ...)
{
  char buf[512];
  va_list ap;

  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);

  Fatal("Mach error", buf);
}
2527
/*
  Adapter installed by mach_raise_thread_interrupt() via
  setup_signal_frame(): converts the two-argument (signum, context)
  pseudo-signal convention into interrupt_handler's three-argument
  form, passing NULL for the (unavailable) siginfo.
*/
void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}
2533
/*
  Interrupt the lisp thread "target" from another thread using Mach
  thread_suspend/setup_signal_frame instead of pthread_kill.  If the
  interrupt can't be delivered directly, it is left pending in the
  target's TCR.  Always returns 0.
*/
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info; 
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  /* Serialize with other code that suspends/resumes lisp threads. */
  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      /* Only redirect the thread if ours is the sole suspension in
         effect; a higher count means someone else has it stopped. */
      if (info.suspend_count == 1) {
        /* Deliver directly only when the target is running lisp code,
           isn't unwinding, and has interrupts enabled. */
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      /* Couldn't deliver now: record a pending interrupt (as a lisp
         fixnum) for the target to notice later. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
2575
2576#endif
/* Note: See TracBrowser for help on using the repository browser. */