source: branches/arm/lisp-kernel/arm-exceptions.c @ 14066

Last change on this file since 14066 was 14066, checked in by gb, 9 years ago

Lots of changes to support stack-overflow detection on ARM Linux.
(Write protect the control stack, handle SIGSEGV on an alternate
signal stack ...) The sigaltstack mechanism doesn't work if the
specified signal stack is within the allocated control stack region
(we generally use the top few pages of the control stack on x86;
here, we map a few pages and need to remember to free them when the
thread dies.)
Also: need some recovery mechanism, so that after the thread unwinds
out of the "yellow zone" the yellow zone is re-protected.

File size: 67.3 KB
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
50/* Some relatively recent kernels support this interface.
51   If this prctl isn't supported, assume that we're always
52   running with exceptions enabled and "precise".
53*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
64void
65enable_fp_exceptions()
66{
67  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
68}
69
70void
71disable_fp_exceptions()
72{
73  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
74}
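/* Sketch (an assumption, not from this file): callers are expected to
   bracket foreign code that shouldn't run with lisp's FP exception
   settings, e.g.:

     disable_fp_exceptions();
     r = some_foreign_fp_routine(x);   -- hypothetical callee
     enable_fp_exceptions();
*/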
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
93int
94page_size = 4096;
95
96int
97log2_page_size = 12;
98
99
100
101
102
103/*
104  If the PC is pointing to an allocation trap, the previous instruction
105  must have decremented allocptr.  Return the non-zero amount by which
106  allocptr was decremented.
107*/
108signed_natural
109allocptr_displacement(ExceptionInformation *xp)
110{
111  pc program_counter = xpPC(xp);
112  opcode instr = *program_counter, prev_instr;
113
114  if (IS_ALLOC_TRAP(instr)) {
115    /* The alloc trap must have been preceded by a cmp and a
116       load from tcr.allocbase. */
117    prev_instr = program_counter[-3];
118
119    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
120      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
121    }
122   
123    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
124      return -((signed_natural)(prev_instr & 0xff));
125    }
126
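    /* ARM data-processing immediates encode an 8-bit value rotated
       right by twice the 4-bit rotate field; (prev_instr&0xf00)>>7
       is that doubled rotate count, so ror() below reconstructs the
       subtracted operand. */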
127    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
128      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);
129
130      instr = program_counter[-4];
131      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
132        return -((signed_natural)(disp | (instr & 0xff)));
133      }
134    }
135    Bug(xp, "Can't determine allocation displacement");
136  }
137  return 0;
138}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
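/* A sketch (assumed code shape, mirroring the IS_* predicates used
   below) of the instruction window being emulated:

     str rX, [allocptr, #car-or-cdr-offset]   -- zero or more stores
     mov result, allocptr                     -- optional result copy
     bic allocptr, allocptr, #fulltagmask     -- clears the tag; emulation ends here
*/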
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
213Boolean
214allocate_object(ExceptionInformation *xp,
215                natural bytes_needed, 
216                signed_natural disp_from_allocptr,
217                TCR *tcr)
218{
219  area *a = active_dynamic_area;
220
221  /* Maybe do an EGC */
222  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
223    if (((a->active)-(a->low)) >= a->threshold) {
224      gc_from_xp(xp, 0L);
225    }
226  }
227
228  /* Life is pretty simple if we can simply grab a segment
229     without extending the heap.
230  */
231  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
232    xpGPR(xp, allocptr) += disp_from_allocptr;
233    return true;
234  }
235 
236  /* It doesn't make sense to try a full GC if the object
237     we're trying to allocate is larger than everything
238     allocated so far.
239  */
240  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
241    untenure_from_area(tenured_area); /* force a full GC */
242    gc_from_xp(xp, 0L);
243  }
244 
245  /* Try again, growing the heap if necessary */
246  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
247    xpGPR(xp, allocptr) += disp_from_allocptr;
248    return true;
249  }
250 
251  return false;
252}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
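/* Allocation decrements allocptr (see allocptr_displacement above),
   so the storage consumed since the last note was taken is
   (last - current). */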
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
269
270void
271lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
272{
273  /* Couldn't allocate the object.  If it's smaller than some arbitrary
274     size (say 128K bytes), signal a "chronically out-of-memory" condition;
276     else signal an "allocation request failed" condition.
276  */
277  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
278  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
279}
280
281/*
282  Allocate a large list, where "large" means "large enough to
283  possibly trigger the EGC several times if this was done
284  by individually allocating each CONS."  The number of
285  conses in question is in arg_z; on successful return,
286  the list will be in arg_z.
287*/
288
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
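    /* Initialize the conses from the low end of the block upward;
       each new cell is chained to the previously initialized one and
       gets the initial element in its other slot, so the cell
       initialized last becomes the head of the list. */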
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
361natural gc_deferred = 0, full_gc_deferred = 0;
362
363signed_natural
364flash_freeze(TCR *tcr, signed_natural param)
365{
366  return 0;
367}
368
369Boolean
370handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
371{
372  LispObj
373    selector = xpGPR(xp,imm0), 
374    arg = xpGPR(xp,imm1);
375  area *a = active_dynamic_area;
376  Boolean egc_was_enabled = (a->older != NULL);
377  natural gc_previously_deferred = gc_deferred;
378
379
380  switch (selector) {
381  case GC_TRAP_FUNCTION_EGC_CONTROL:
382    egc_control(arg != 0, a->active);
383    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
384    break;
385
386  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
387    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
388    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
389    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
390    xpGPR(xp,arg_z) = lisp_nil+t_offset;
391    break;
392
393  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
394    if (((signed_natural) arg) > 0) {
395      lisp_heap_gc_threshold = 
396        align_to_power_of_2((arg-1) +
397                            (heap_segment_size - 1),
398                            log2_heap_segment_size);
399    }
400    /* fall through */
401  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
402    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
403    break;
404
405  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
406    /*  Try to put the current threshold in effect.  This may
407        need to disable/reenable the EGC. */
408    untenure_from_area(tenured_area);
409    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
410    if (egc_was_enabled) {
411      if ((a->high - a->active) >= a->threshold) {
412        tenure_to_area(tenured_area);
413      }
414    }
415    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
416    break;
417
418  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
419    ensure_static_conses(xp,tcr,32768);
420    break;
421
422  case GC_TRAP_FUNCTION_FLASH_FREEZE:
423    untenure_from_area(tenured_area);
424    gc_like_from_xp(xp,flash_freeze,0);
425    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
426    tenured_area->static_dnodes = area_dnode(a->active, a->low);
427    if (egc_was_enabled) {
428      tenure_to_area(tenured_area);
429    }
430    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
431    break;
432
433  default:
434    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
435
436    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
437      if (!full_gc_deferred) {
438        gc_from_xp(xp, 0L);
439        break;
440      }
441      /* Tried to do a full GC when gc was disabled.  That failed,
442         so try full GC now */
443      selector = GC_TRAP_FUNCTION_GC;
444    }
445   
446    if (egc_was_enabled) {
447      egc_control(false, (BytePtr) a->active);
448    }
449    gc_from_xp(xp, 0L);
450    if (gc_deferred > gc_previously_deferred) {
451      full_gc_deferred = 1;
452    } else {
453      full_gc_deferred = 0;
454    }
455    if (selector > GC_TRAP_FUNCTION_GC) {
456      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
457        impurify_from_xp(xp, 0L);
458        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
459        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
460        gc_from_xp(xp, 0L);
461      }
462      if (selector & GC_TRAP_FUNCTION_PURIFY) {
463        purify_from_xp(xp, 0L);
464        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
465        gc_from_xp(xp, 0L);
466      }
467      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
468        OSErr err;
469        extern OSErr save_application(unsigned, Boolean);
470        TCR *tcr = get_tcr(true);
471        area *vsarea = tcr->vs_area;
472       
473        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
474        err = save_application(arg, egc_was_enabled);
475        if (err == noErr) {
476          _exit(0);
477        }
478        fatal_oserr(": save_application", err);
479      }
480      switch (selector) {
481
482
483      case GC_TRAP_FUNCTION_FREEZE:
484        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
485        tenured_area->static_dnodes = area_dnode(a->active, a->low);
486        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
487        break;
488      default:
489        break;
490      }
491    }
492   
493    if (egc_was_enabled) {
494      egc_control(true, NULL);
495    }
496    break;
497   
498  }
499
500  adjust_exception_pc(xp,4);
501  return true;
502}
503
504
505
506void
507signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
508{
509  /* The stack addressed by "reg" just overflowed its soft limit.
510     The caller has already unprotected the guard area; signal an
511     error_stack_overflow condition, after which the guard is
512     re-established once the thread unwinds below the limit.  */
513
514#if 0
515  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
516#endif
517  handle_error(xp, error_stack_overflow, reg, NULL);
518}
519
520/*
521  Lower (move toward 0) the "end" of the soft protected area associated
522  with the area "a" by one page, if we can.
523*/
524
525void
526adjust_soft_protection_limit(area *a)
527{
528  char *proposed_new_soft_limit = a->softlimit - 4096;
529  protected_area_ptr p = a->softprot;
530 
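  /* Only lower the limit if the protected area would remain at least
     16KB (four 4KB pages). */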
531  if (proposed_new_soft_limit >= (p->start+16384)) {
532    p->end = proposed_new_soft_limit;
533    p->protsize = p->end-p->start;
534    a->softlimit = proposed_new_soft_limit;
535  }
536  protect_area(p);
537}
538
539void
540restore_soft_stack_limit(unsigned stkreg)
541{
542  area *a;
543  TCR *tcr = get_tcr(true);
544
545  switch (stkreg) {
546  case Rsp:
547    a = tcr->cs_area;
548    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
549      a->softlimit -= 4096;
550    }
551    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
552    break;
553  case vsp:
554    a = tcr->vs_area;
555    adjust_soft_protection_limit(a);
556    break;
557  }
558}
559
560/* Maybe this'll work someday.  We may have to do something to
561   make the thread look like it's not handling an exception */
562void
563reset_lisp_process(ExceptionInformation *xp)
564{
565  TCR *tcr = get_tcr(true);
566  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));
567
568  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
569
570  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
571
572  start_lisp(tcr, 1);
573}
574
575/*
576  This doesn't GC; it returns true if it made enough room, false
577  otherwise.
578  If "extend" is true, it can try to extend the dynamic area to
579  satisfy the request.
580*/
581
582Boolean
583new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
584{
585  area *a;
586  natural newlimit, oldlimit;
587  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
588
589  a  = active_dynamic_area;
590  oldlimit = (natural) a->active;
591  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
592              align_to_power_of_2(need, log2_allocation_quantum));
593  if (newlimit > (natural) (a->high)) {
594    if (extend) {
595      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
596      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
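      /* If resizing by the full amount fails, retry with successively
         halved extensions; give up once the extension drops below 4MB. */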
597      do {
598        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
599          break;
600        }
601        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
602        if (extend_by < 4<<20) {
603          return false;
604        }
605      } while (1);
606    } else {
607      return false;
608    }
609  }
610  a->active = (BytePtr) newlimit;
611  tcr->last_allocptr = (void *)newlimit;
612  xpGPR(xp,allocptr) = (LispObj) newlimit;
613  tcr->save_allocbase = (void*) oldlimit;
614
615  return true;
616}
617
618 
619void
620update_area_active (area **aptr, BytePtr value)
621{
622  area *a = *aptr;
623  for (; a; a = a->older) {
624    if ((a->low <= value) && (a->high >= value)) break;
625  }
626  if (a == NULL) Bug(NULL, "Can't find active area");
627  a->active = value;
628  *aptr = a;
629
630  for (a = a->younger; a; a = a->younger) {
631    a->active = a->high;
632  }
633}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
652void
653normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
654{
655  void *cur_allocptr = NULL;
656  LispObj freeptr = 0;
657
658  if (xp) {
659    if (is_other_tcr) {
660      pc_luser_xp(xp, tcr, NULL);
661      freeptr = xpGPR(xp, allocptr);
662      if (fulltag_of(freeptr) == 0){
663        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
664      }
665    }
666    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
667    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
668  } else {
669    /* In ff-call. */
670    cur_allocptr = (void *) (tcr->save_allocptr);
671    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
672    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
673  }
674
675
676  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
677  if (cur_allocptr) {
678    update_bytes_allocated(tcr, cur_allocptr);
679    if (freeptr) {
680      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
681    }
682  }
683}
684
685TCR *gc_tcr = NULL;
686
687/* Suspend and "normalize" other tcrs, then call a gc-like function
688   in that context.  Resume the other tcrs, then return what the
689   function returned */
690
691signed_natural
692gc_like_from_xp(ExceptionInformation *xp, 
693                signed_natural(*fun)(TCR *, signed_natural), 
694                signed_natural param)
695{
696  TCR *tcr = get_tcr(true), *other_tcr;
697  int result;
698  signed_natural inhibit;
699
700  suspend_other_threads(true);
701  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
702  if (inhibit != 0) {
703    if (inhibit > 0) {
704      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
705    }
706    resume_other_threads(true);
707    gc_deferred++;
708    return 0;
709  }
710  gc_deferred = 0;
711
712  gc_tcr = tcr;
713
714  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
715
716  normalize_tcr(xp, tcr, false);
717
718
719  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
720    if (other_tcr->pending_exception_context) {
721      other_tcr->gc_context = other_tcr->pending_exception_context;
722    } else if (other_tcr->valence == TCR_STATE_LISP) {
723      other_tcr->gc_context = other_tcr->suspend_context;
724    } else {
725      /* no pending exception, didn't suspend in lisp state:
726         must have executed a synchronous ff-call.
727      */
728      other_tcr->gc_context = NULL;
729    }
730    normalize_tcr(other_tcr->gc_context, other_tcr, true);
731  }
732   
733
734
735  result = fun(tcr, param);
736
737  other_tcr = tcr;
738  do {
739    other_tcr->gc_context = NULL;
740    other_tcr = other_tcr->next;
741  } while (other_tcr != tcr);
742
743  gc_tcr = NULL;
744
745  resume_other_threads(true);
746
747  return result;
748
749}
750
751
752
753/* Returns #bytes freed by invoking GC */
754
755signed_natural
756gc_from_tcr(TCR *tcr, signed_natural param)
757{
758  area *a;
759  BytePtr oldfree, newfree;
760  BytePtr oldend, newend;
761
762  a = active_dynamic_area;
763  oldend = a->high;
764  oldfree = a->active;
765  gc(tcr, param);
766  newfree = a->active;
767  newend = a->high;
768#if 0
769  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
770#endif
771  return ((oldfree-newfree)+(newend-oldend));
772}
773
774signed_natural
775gc_from_xp(ExceptionInformation *xp, signed_natural param)
776{
777  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
778
779  freeGCptrs();
780  return status;
781}
782
783signed_natural
784purify_from_xp(ExceptionInformation *xp, signed_natural param)
785{
786  return gc_like_from_xp(xp, purify, param);
787}
788
789signed_natural
790impurify_from_xp(ExceptionInformation *xp, signed_natural param)
791{
792  return gc_like_from_xp(xp, impurify, param);
793}
794
795
796
797
798
799
800protection_handler
801 * protection_handlers[] = {
802   do_spurious_wp_fault,
803   do_soft_stack_overflow,
804   do_soft_stack_overflow,
805   do_soft_stack_overflow,
806   do_hard_stack_overflow,   
807   do_hard_stack_overflow,
808   do_hard_stack_overflow
809   };
810
811
812Boolean
813is_write_fault(ExceptionInformation *xp, siginfo_t *info)
814{
815#ifdef LINUX
816  /* Based on experiments with a small sample size; need to R TFM. */
817  return ((xp->uc_mcontext.trap_no == 0xe) &&
818          ((xp->uc_mcontext.error_code & 0xfffffff7) == 0x817));
819#else
  /* Assumption: only the Linux port reaches this function so far;
     default to treating the fault as a write fault so that the
     protection handlers get a chance to run. */
  return true;
#endif
820}
821
822Boolean
823handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
824{
825  BytePtr addr;
826  protected_area_ptr area;
827  protection_handler *handler;
828  extern Boolean touch_page(void *);
829  extern void touch_page_end(void);
830
831#ifdef LINUX
832  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
833#else
834  if (info) {
835    addr = (BytePtr)(info->si_addr);
836  } else {
837    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
838  }
839#endif
840
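  /* A fault at tcr->safe_ref_address is a "safe" memory reference
     probe: skip the faulting instruction and return 0 in imm0 instead
     of signaling an error. */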
841  if (addr && (addr == tcr->safe_ref_address)) {
842    adjust_exception_pc(xp,4);
843
844    xpGPR(xp,imm0) = 0;
845    return true;
846  }
847
848  if (xpPC(xp) == (pc)touch_page) {
849    xpGPR(xp,imm0) = 0;
850    xpPC(xp) = (pc)touch_page_end;
851    return true;
852  }
853
854
855  if (is_write_fault(xp,info)) {
856    area = find_protected_area(addr);
857    if (area != NULL) {
858      handler = protection_handlers[area->why];
859      return handler(xp, area, addr);
860    } else {
861      if ((addr >= readonly_area->low) &&
862          (addr < readonly_area->active)) {
863        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
864                        page_size);
865        return true;
866      }
867    }
868  }
869  if (old_valence == TCR_STATE_LISP) {
870    LispObj cmain = nrs_CMAIN.vcell;
871   
872    if ((fulltag_of(cmain) == fulltag_misc) &&
873      (header_subtag(header_of(cmain)) == subtag_macptr)) {
874     
875      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
876    }
877  }
878  return false;
879}
880
881
882
883
884
885OSStatus
886do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
887{
888#ifdef SUPPORT_PRAGMA_UNUSED
889#pragma unused(area,addr)
890#endif
891  reset_lisp_process(xp);
892  return -1;
893}
894
895extern area*
896allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
897
898extern area*
899allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
900
901
902
903
904
905
906Boolean
907lisp_frame_p(lisp_frame *spPtr)
908{
909  return (spPtr->marker == lisp_frame_marker);
910}
911
912
913int ffcall_overflow_count = 0;
914
915
916
917
918
919
920/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
921  the current value of VSP (TSP) or an older area.  */
922
923OSStatus
924do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
925{
926  TCR* tcr = get_tcr(true);
927  area *a = tcr->vs_area;
928  protected_area_ptr vsp_soft = a->softprot;
929  unprotect_area(vsp_soft);
930  signal_stack_soft_overflow(xp,vsp);
931  return 0;
932}
933
934
935
936OSStatus
937do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
938{
939  /* Trying to write into a guard page on the value stack or
940     control stack.  Unprotect the guard area and signal an
941     error_stack_overflow condition.  (The PPC-specific stwu/stwux
942     emulation doesn't apply on ARM.) */
943  if (prot_area->why == kVSPsoftguard) {
944    return do_vsp_overflow(xp,addr);
945  }
946  unprotect_area(prot_area);
947  signal_stack_soft_overflow(xp,Rsp);
948  return 0;
949}
950
951OSStatus
952do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
953{
954#ifdef SUPPORT_PRAGMA_UNUSED
955#pragma unused(xp,area,addr)
956#endif
957  return -1;
958}
959
960
961
962
963     
964
965
966
967
968
969Boolean
970handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
971{
972  return false;
973}
974
975
976Boolean
977handle_unimplemented_instruction(ExceptionInformation *xp,
978                                 opcode instruction,
979                                 TCR *tcr)
980{
981
982  return false;
983}
984
985Boolean
986handle_exception(int xnum, 
987                 ExceptionInformation *xp, 
988                 TCR *tcr, 
989                 siginfo_t *info,
990                 int old_valence)
991{
992  pc program_counter;
993  opcode instruction = 0;
994
995  if (old_valence != TCR_STATE_LISP) {
996    return false;
997  }
998
999  program_counter = xpPC(xp);
1000 
1001  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
1002    instruction = *program_counter;
1003  }
1004
1005  if (IS_ALLOC_TRAP(instruction)) {
1006    return handle_alloc_trap(xp, tcr);
1007  } else if ((xnum == SIGSEGV) ||
1008             (xnum == SIGBUS)) {
1009    return handle_protection_violation(xp, info, tcr, old_valence);
1010  } else if (xnum == SIGFPE) {
1011    return handle_sigfpe(xp, tcr);
1012  } else if (xnum == SIGILL) {
1013    if (IS_GC_TRAP(instruction)) {
1014      return handle_gc_trap(xp, tcr);
1015    } else if (IS_UUO(instruction)) {
1016      return handle_uuo(xp, info, instruction);
1017    } else {
1018      return handle_unimplemented_instruction(xp,instruction,tcr);
1019    }
1020  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1021    tcr->interrupt_pending = 0;
1022    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
1023    return true;
1024  }
1025
1026  return false;
1027}
1028
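/* xpPC() is typed as a pointer to 32-bit opcodes, so the byte count
   "delta" is scaled down to a word count before being added. */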
1029void
1030adjust_exception_pc(ExceptionInformation *xp, int delta)
1031{
1032  xpPC(xp) += (delta >> 2);
1033}
1034
1035
1036/*
1037  This wants to scan backwards until "where" points to the faulting
1038  floating-point instruction.  (Opcodes 63/59 were PPC; this is still a stub on ARM.)
1039*/
1040
1041OSStatus
1042handle_fpux_binop(ExceptionInformation *xp, pc where)
1043{
1044  OSStatus err = -1;
1045  /* Not yet implemented for ARM. */
1046  return err;
1047}
1048
1049Boolean
1050handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
1051{
1052  unsigned 
1053    format = UUO_FORMAT(the_uuo);
1054  Boolean handled = false;
1055  int bump = 4;
1056  TCR *tcr = get_tcr(true);
1057
1058  switch (format) {
1059  case uuo_format_kernel_service:
1060    {
1061      TCR * target = (TCR *)xpGPR(xp,arg_z);
1062      int service = UUO_UNARY_field(the_uuo);
1063
1064      switch (service) {
1065      case error_propagate_suspend:
1066        handled = true;
1067        break;
1068      case error_interrupt:
1069        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
1070        handled = true;
1071        break;
1072      case error_suspend:
1073        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
1074        handled = true;
1075        break;
1076      case error_suspend_all:
1077        lisp_suspend_other_threads();
1078        handled = true;
1079        break;
1080      case error_resume:
1081        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
1082        handled = true;
1083        break;
1084      case error_resume_all:
1085        lisp_resume_other_threads();
1086        handled = true;
1087        break;
1088      case error_kill:
1089        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
1090        handled = true;
1091        break;
1092      case error_allocate_list:
1093        allocate_list(xp,tcr);
1094        handled = true;
1095        break;
1096      default:
1097        handled = false;
1098        break;
1099      }
1100      break;
1101    }
1102
1103  case uuo_format_unary:
1104    switch(UUO_UNARY_field(the_uuo)) {
1105    case 3:
1106      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
1107        handled = true;
1108        bump = 4;
1109        break;
1110      }
1111      /* fall in */
1112    default:
1113      handled = false;
1114      break;
1115
1116    }
1117    break;
1118
1119  case uuo_format_nullary:
1120    switch (UUOA_field(the_uuo)) {
1121    case 3:
1122      adjust_exception_pc(xp, bump);
1123      bump = 0;
1124      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
1125      handled = true;
1126      break;
1127
1128    case 4:
1129      tcr->interrupt_pending = 0;
1130      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, &bump);
1131      handled = true;
1132      break;
1133    default:
1134      handled = false;
1135      break;
1136    }
1137    break;
1138
1139
1140  case uuo_format_error_lisptag:
1141  case uuo_format_error_fulltag:
1142  case uuo_format_error_xtype:
1143  case uuo_format_nullary_error:
1144  case uuo_format_unary_error:
1145  case uuo_format_binary_error:
1146    handled = handle_error(xp,0,the_uuo, &bump);
1147    break;
1148
1149  default:
1150    handled = false;
1151    bump = 0;
1152  }
1153 
1154  if (handled && bump) {
1155    adjust_exception_pc(xp, bump);
1156  }
1157  return handled;
1158}
1159
1160natural
1161register_codevector_contains_pc (natural lisp_function, pc where)
1162{
1163  natural code_vector, size;
1164
1165  if ((fulltag_of(lisp_function) == fulltag_misc) &&
1166      (header_subtag(header_of(lisp_function)) == subtag_function)) {
1167    code_vector = deref(lisp_function, 2);
1168    size = header_element_count(header_of(code_vector)) << 2;
1169    if ((untag(code_vector) < (natural)where) && 
1170        ((natural)where < (code_vector + size)))
1171      return(code_vector);
1172  }
1173
1174  return(0);
1175}
1176
1177Boolean
1178callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
1179{
1180  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
1181}
1182
1183Boolean
1184callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
1185                  natural arg1, natural arg2, int *bumpP)
1186{
1187  natural  callback_ptr;
1188  area *a;
1189  natural fnreg = fn,  codevector, offset;
1190  pc where = xpPC(xp);
1191  int delta;
1192
1193  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1194  if (codevector == 0) {
1195    fnreg = nfn;
1196    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1197    if (codevector == 0) {
1198      fnreg = 0;
1199    }
1200  }
1201  if (codevector) {
1202    offset = (natural)where - (codevector - (fulltag_misc-node_size));
1203  } else {
1204    offset = (natural)where;
1205  }
1206                                                 
1207                                               
1208
1209  TCR *tcr = get_tcr(true);
1210
1211  /* Put the active stack pointer where .SPcallback expects it */
1212  a = tcr->cs_area;
1213  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));
1214
1215  /* Copy globals from the exception frame to tcr */
1216  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
1217  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
1218
1219
1220
1221  /* Call back.
1222     Lisp will handle trampolining through some code that
1223     will push lr/fn & pc/nfn stack frames for backtrace.
1224  */
1225  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1226  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
1227  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
1228  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1229
1230  if (bumpP) {
1231    *bumpP = delta;
1232  }
1233
1234  /* Copy GC registers back into exception frame */
1235  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
1236  return true;
1237}
1238
1239area *
1240allocate_no_stack (natural size)
1241{
1242#ifdef SUPPORT_PRAGMA_UNUSED
1243#pragma unused(size)
1244#endif
1245
1246  return (area *) NULL;
1247}
1248
1249
1250
1251
1252
1253
1254/* callback to (symbol-value cmain) if it is a macptr,
1255   otherwise report cause and function name to console.
1256   Returns noErr if exception handled OK */
1257OSStatus
1258handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
1259{
1260  LispObj   cmain = nrs_CMAIN.vcell;
1261  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1262
  /* Assumption: this is an unfinished stub in this revision; return -1
     ("not handled") rather than falling off the end of a function
     declared to return OSStatus. */
  return -1;
1263}
1264
1265
1266
1267
1268void non_fatal_error( char *msg )
1269{
1270  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1271  fflush( dbgout );
1272}
1273
1274
1275
1276Boolean
1277handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
1278{
1279  LispObj   errdisp = nrs_ERRDISP.vcell;
1280
1281  if ((fulltag_of(errdisp) == fulltag_misc) &&
1282      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1283    /* errdisp is a macptr, we can call back to lisp */
1284    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
1285    }
1286
1287  return false;
1288}
1289               
1290
1291/*
1292   Current thread has all signals masked.  Before unmasking them,
1293   make it appear that the current thread has been suspended.
1294   (This is to handle the case where another thread is trying
1295   to GC before this thread is able to seize the exception lock.)
1296*/
1297int
1298prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1299{
1300  int old_valence = tcr->valence;
1301
1302  tcr->pending_exception_context = context;
1303  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1304
1305  ALLOW_EXCEPTIONS(context);
1306  return old_valence;
1307} 
1308
1309void
1310wait_for_exception_lock_in_handler(TCR *tcr, 
1311                                   ExceptionInformation *context,
1312                                   xframe_list *xf)
1313{
1314
1315  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1316  xf->curr = context;
1317  xf->prev = tcr->xframe;
1318  tcr->xframe =  xf;
1319  tcr->pending_exception_context = NULL;
1320  tcr->valence = TCR_STATE_FOREIGN; 
1321}
1322
1323void
1324unlock_exception_lock_in_handler(TCR *tcr)
1325{
1326  tcr->pending_exception_context = tcr->xframe->curr;
1327  tcr->xframe = tcr->xframe->prev;
1328  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1329  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1330}
1331
1332/*
1333   If an interrupt is pending on exception exit, try to ensure
1334   that the thread sees it as soon as it's able to run.
1335*/
1336void
1337raise_pending_interrupt(TCR *tcr)
1338{
1339  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1340    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1341  }
1342}
1343
1344void
1345exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
1346{
1347  sigset_t mask;
1348  sigfillset(&mask);
1349 
1350  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1351  tcr->valence = old_valence;
1352  tcr->pending_exception_context = NULL;
1353  tcr->last_lisp_frame = old_last_lisp_frame;
1354}
1355
1356
1357void
1358signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
1359{
1360  xframe_list xframe_link;
1361
1362  if (!use_mach_exception_handling) {
1363   
1364    tcr = (TCR *) get_interrupt_tcr(false);
1365 
1366    /* The signal handler's entered with all signals (notably the
1367       thread_suspend signal) blocked.  Don't allow any other
1368       signal to preempt us until we've
1369       set the TCR's xframe slot to include the current exception
1370       context.
1371    */
1372   
1373    old_last_lisp_frame = tcr->last_lisp_frame;
1374    tcr->last_lisp_frame = xpGPR(context,Rsp);
1375    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1376  }
1377
1378  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1379    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1380    pthread_kill(pthread_self(), thread_suspend_signal);
1381  }
1382
1383 
1384  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1385  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
1386    char msg[512];
1387    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1388    if (lisp_Debugger(context, info, signum, false, msg)) {
1389      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1390    }
1391  }
1392  unlock_exception_lock_in_handler(tcr);
1393
1394  /* This thread now looks like a thread that was suspended while
1395     executing lisp code.  If some other thread gets the exception
1396     lock and GCs, the context (this thread's suspend_context) will
1397     be updated.  (That's only of concern if it happens before we
1398     can return to the kernel/to the Mach exception handler).
1399  */
1400  if (!use_mach_exception_handling) {
1401    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
1402    raise_pending_interrupt(tcr);
1403  }
1404}
1405
1406#ifdef USE_SIGALTSTACK
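/* Handlers that run on the alternate signal stack copy the context
   and siginfo onto the thread's own control stack and continue there,
   so the bulk of exception handling runs with normal stack discipline. */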
1407void
1408invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
1409{
1410  ExceptionInformation *xp_copy;
1411  siginfo_t *info_copy;
1412  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);
1413 
1414  BytePtr target_sp= (BytePtr)xpGPR(xp,Rsp);
1415  target_sp -= sizeof(ucontext_t);
1416  xp_copy = (ExceptionInformation *)target_sp;
1417  memmove(target_sp,xp,sizeof(*xp));
1418  xp_copy->uc_stack.ss_sp = 0;
1419  xp_copy->uc_stack.ss_size = 0;
1420  xp_copy->uc_stack.ss_flags = 0;
1421  xp_copy->uc_link = NULL;
1422  target_sp -= sizeof(siginfo_t);
1423  info_copy = (siginfo_t *)target_sp;
1424  memmove(target_sp,info,sizeof(*info));
1425  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
1426}
1427 
1428void
1429altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
1430{
1431  TCR *tcr=get_tcr(true);
1432  if (signo == SIGSEGV) {
1433    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
1434    area *a = tcr->cs_area;
1435   
1436    if ((addr >= a->low) &&
1437        (addr < a->softlimit)) {
1438      if (addr < a->hardlimit) {
1439        Bug(xp, "hard stack overflow");
1440      } else {
1441        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
1442      }
1443    }
1444  }
1445  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
1446}
1447#endif
1448
1449/*
1450  If it looks like we're in the middle of an atomic operation, make
1451  it seem as if that operation is either complete or hasn't started
1452  yet.
1453
1454  The cases handled here (on ARM) are:
1455
1456  a) consing: the GC has its own ideas about how this should be
1457     handled, but other callers would be best advised to back
1458     up or move forward, according to whether we're in the middle
1459     of allocating a cons cell or allocating a uvector.
1460  b) EGC write-barrier subprims.
1461
1462  (The lisp-frame, TSP-frame, and STMW cases described in the PPC
1463  version aren't handled by the ARM code below.)
1464*/
1465
1466extern opcode
1467  egc_write_barrier_start,
1468  egc_write_barrier_end, 
1469  egc_store_node_conditional, 
1470  egc_store_node_conditional_test,
1471  egc_set_hash_key,
1472  egc_gvset,
1473  egc_rplaca,
1474  egc_rplacd,
1475  egc_set_hash_key_conditional,
1476  egc_set_hash_key_conditional_test;
1477
1478
1479extern opcode ffcall_return_window, ffcall_return_window_end;
1480
1481void
1482pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1483{
1484  pc program_counter = xpPC(xp);
1485  opcode instr = *program_counter;
1486  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
1487  LispObj cur_allocptr = xpGPR(xp, allocptr);
1488  int allocptr_tag = fulltag_of(cur_allocptr);
1489 
1490
1491
1492  if ((program_counter < &egc_write_barrier_end) && 
1493      (program_counter >= &egc_write_barrier_start)) {
1494    LispObj *ea = 0, val = 0, root = 0;
1495    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1496    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1497
1498    if (program_counter >= &egc_set_hash_key_conditional) {
1499      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1500          ((program_counter == &egc_set_hash_key_conditional_test) &&
1501           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1502        return;
1503      }
1504      need_store = false;
1505      root = xpGPR(xp,arg_x);
1506      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1507      need_memoize_root = true;
1508    } else if (program_counter >= &egc_store_node_conditional) {
1509      if ((program_counter < &egc_store_node_conditional_test) ||
1510          ((program_counter == &egc_store_node_conditional_test) &&
1511           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1512        /* The conditional store either hasn't been attempted yet, or
1513           has failed.  No need to adjust the PC, or do memoization. */
1514        return;
1515      }
1516      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
1517      xpGPR(xp,arg_z) = t_value;
1518      need_store = false;
1519    } else if (program_counter >= &egc_set_hash_key) {
1520      root = xpGPR(xp,arg_x);
1521      val = xpGPR(xp,arg_z);
1522      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1523      need_memoize_root = true;
1524    } else if (program_counter >= &egc_gvset) {
1525      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1526      val = xpGPR(xp,arg_z);
1527    } else if (program_counter >= &egc_rplacd) {
1528      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1529      val = xpGPR(xp,arg_z);
1530    } else {                      /* egc_rplaca */
1531      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1532      val = xpGPR(xp,arg_z);
1533    }
1534    if (need_store) {
1535      *ea = val;
1536    }
1537    if (need_check_memo) {
1538      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1539      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1540          ((LispObj)ea < val)) {
1541        atomic_set_bit(refbits, bitnumber);
1542        if (need_memoize_root) {
1543          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1544          atomic_set_bit(refbits, bitnumber);
1545        }
1546      }
1547    }
1548    xpPC(xp) = xpLR(xp);
1549    return;
1550  }
1551
1552
1553 
1554  if (allocptr_tag != tag_fixnum) {
1555    signed_natural disp = allocptr_displacement(xp);
1556
1557    if (disp) {
1558      /* Being architecturally "at" the alloc trap doesn't tell
1559         us much (in particular, it doesn't tell us whether
1560         or not the thread has committed to taking the trap
1561         and is waiting for the exception lock (or waiting
1562         for the Mach exception thread to tell it how bad
1563         things are) or is about to execute a conditional
1564         trap.
1565         Regardless of which case applies, we want the
1566         other thread to take (or finish taking) the
1567         trap, and we don't want it to consider its
1568         current allocptr to be valid.
1569         The difference between this case (suspend other
1570         thread for GC) and the previous case (suspend
1571         current thread for interrupt) is solely a
1572         matter of what happens after we leave this
1573         function: some non-current thread will stay
1574         suspended until the GC finishes, then take
1575         (or start processing) the alloc trap.   The
1576         current thread will go off and do PROCESS-INTERRUPT
1577         or something, and may return from the interrupt
1578         and need to finish the allocation that got interrupted.
1579      */
1580
1581      if (alloc_disp) {
1582        *alloc_disp = disp;
1583        xpGPR(xp,allocptr) += disp;
1584        /* Leave the PC at the alloc trap.  When the interrupt
1585           handler returns, it'll decrement allocptr by disp
1586           and the trap may or may not be taken.
1587        */
1588      } else {
1589        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1590        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1591      }
1592    } else {
1593      /* If we're already past the alloc_trap, finish allocating
1594         the object. */
1595      if (allocptr_tag == fulltag_cons) {
1596        finish_allocating_cons(xp);
1597      } else {
1598        if (allocptr_tag == fulltag_misc) {
1599          finish_allocating_uvector(xp);
1600        } else {
1601          Bug(xp, "what's being allocated here ?");
1602        }
1603      }
1604      /* Whatever we finished allocating, reset allocptr/allocbase to
1605         VOID_ALLOCPTR */
1606      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
1607    }
1608    return;
1609  }
1610}
1611
1612void
1613interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1614{
1615  TCR *tcr = get_interrupt_tcr(false);
1616  if (tcr) {
1617    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
1618      tcr->interrupt_pending = 1 << fixnumshift;
1619    } else {
1620      LispObj cmain = nrs_CMAIN.vcell;
1621
1622      if ((fulltag_of(cmain) == fulltag_misc) &&
1623          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1624        /*
1625           This thread can (allegedly) take an interrupt now.
1626           It's tricky to do that if we're executing
1627           foreign code (especially Linuxthreads code, much
1628           of which isn't reentrant.)
1629           If we're unwinding the stack, we also want to defer
1630           the interrupt.
1631        */
1632        if ((tcr->valence != TCR_STATE_LISP) ||
1633            (tcr->unwinding != 0)) {
1634          tcr->interrupt_pending = 1 << fixnumshift;
1635        } else {
1636          xframe_list xframe_link;
1637          int old_valence;
1638          signed_natural disp=0;
1639          natural old_last_lisp_frame = tcr->last_lisp_frame;
1640         
1641          tcr->last_lisp_frame = xpGPR(context,Rsp);
1642          pc_luser_xp(context, tcr, &disp);
1643          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1644          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1645          handle_exception(signum, context, tcr, info, old_valence);
1646          if (disp) {
1647            xpGPR(context,allocptr) -= disp;
1648          }
1649          unlock_exception_lock_in_handler(tcr);
1650          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
1651        }
1652      }
1653    }
1654  }
1655#ifdef DARWIN
1656    DarwinSigReturn(context);
1657#endif
1658}
1659
1660#ifdef USE_SIGALTSTACK
1661void
1662altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1663{
1664  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
1665}
1666#endif
1667
1668
1669void
1670install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
1671{
1672  struct sigaction sa;
1673 
1674  sa.sa_sigaction = (void *)handler;
1675  sigfillset(&sa.sa_mask);
1676  sa.sa_flags = 
1677    0 /* SA_RESTART */
1678    | SA_SIGINFO
1679#ifdef USE_SIGALTSTACK
1680    | (on_altstack ? SA_ONSTACK : 0)
1681#endif
1682    ;
1683
1684  sigaction(signo, &sa, NULL);
1685}
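/* Usage sketch (hypothetical handler name):

     install_signal_handler(SIGUSR2, (void *)my_handler, true, false);

   installs my_handler with SA_SIGINFO semantics, with all signals
   masked during its execution, and (when the last argument is true
   and USE_SIGALTSTACK is defined) delivered on the alternate stack. */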
1686
1687
1688void
1689install_pmcl_exception_handlers()
1690{
1691#ifdef DARWIN
1692  extern Boolean use_mach_exception_handling;
1693#endif
1694
1695  Boolean install_signal_handlers_for_exceptions =
1696#ifdef DARWIN
1697    !use_mach_exception_handling
1698#else
1699    true
1700#endif
1701    ;
1702  if (install_signal_handlers_for_exceptions) {
1703    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
1704    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler),true, true);
1705
1706  }
1707 
1708  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1709                         (void *)interrupt_handler, true, false);
1710  signal(SIGPIPE, SIG_IGN);
1711}
1712
1713#ifdef USE_SIGALTSTACK
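/* As the change notes above explain, sigaltstack doesn't work if the
   signal stack lies within the allocated control-stack region, so map
   a few fresh pages for it here (they must be freed when the thread
   dies). */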
1714void
1715setup_sigaltstack(area *a)
1716{
1717  stack_t stack;
1718#if 0
1719  stack.ss_sp = a->low;
1720  a->low += SIGSTKSZ*8;
1721#endif
1722  stack.ss_size = SIGSTKSZ*8;
1723  stack.ss_flags = 0;
1724  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
1725  if (sigaltstack(&stack, NULL) != 0) {
1726    perror("sigaltstack");
1727    exit(-1);
1728  }
1729}
1730#endif
1731
1732void
1733thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
1734{
1735  TCR *tcr = get_tcr(false);
1736  area *a;
1737  sigset_t mask;
1738 
1739  sigemptyset(&mask);
1740
1741  if (tcr) {
1742    tcr->valence = TCR_STATE_FOREIGN;
1743    a = tcr->vs_area;
1744    if (a) {
1745      a->active = a->high;
1746    }
1747    a = tcr->cs_area;
1748    if (a) {
1749      a->active = a->high;
1750    }
1751  }
1752 
1753  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1754  pthread_exit(NULL);
1755}
1756
1757#ifdef USE_SIGALTSTACK
1758void
1759altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
1760{
1761  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
1762}
1763#endif
1764
1765void
1766thread_signal_setup()
1767{
1768  thread_suspend_signal = SIG_SUSPEND_THREAD;
1769  thread_kill_signal = SIG_KILL_THREAD;
1770
1771  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
1772  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
1773}
1774
1775
1776
1777void
1778unprotect_all_areas()
1779{
1780  protected_area_ptr p;
1781
1782  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1783    unprotect_area(p);
1784  }
1785}
1786
1787/*
1788  A binding subprim has just done "twlle limit_regno,idx_regno" and
1789  the trap's been taken.  Extend the tcr's tlb so that the index will
1790  be in bounds and the new limit will be on a page boundary, filling
1791  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1792  the tcr fields and the registers in the xp and return true if this
1793  all works, false otherwise.
1794
1795  Note that the tlb was allocated via malloc, so realloc can do some
1796  of the hard work.
1797*/
1798Boolean
1799extend_tcr_tlb(TCR *tcr, 
1800               ExceptionInformation *xp, 
1801               unsigned idx_regno)
1802{
1803  unsigned
1804    index = (unsigned) (xpGPR(xp,idx_regno)),
1805    old_limit = tcr->tlb_limit,
1806    new_limit = align_to_power_of_2(index+1,12),
1807    new_bytes = new_limit-old_limit;
1808  LispObj
1809    *old_tlb = tcr->tlb_pointer,
1810    *new_tlb = realloc(old_tlb, new_limit),
1811    *work;
1812
1813  if (new_tlb == NULL) {
1814    return false;
1815  }
1816 
1817  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1818
1819  while (new_bytes) {
1820    *work++ = no_thread_local_binding_marker;
1821    new_bytes -= sizeof(LispObj);
1822  }
1823  tcr->tlb_pointer = new_tlb;
1824  tcr->tlb_limit = new_limit;
1825  return true;
1826}
1827
1828
1829
1830void
1831exception_init()
1832{
1833  install_pmcl_exception_handlers();
1834}
1835
1836
1837
1838
1839
1840#ifdef DARWIN
1841
1842
1843#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1844#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1845
1846
1847
1848#define LISP_EXCEPTIONS_HANDLED_MASK \
1849 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1850
1851/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1852#define NUM_LISP_EXCEPTIONS_HANDLED 4
1853
1854typedef struct {
1855  int foreign_exception_port_count;
1856  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1857  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1858  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1859  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1860} MACH_foreign_exception_state;
1861
1862
1863
1864
1865/*
1866  Mach's exception mechanism works a little better than its signal
1867  mechanism (and, not incidentally, it gets along with GDB a lot
1868  better).
1869
1870  Initially, we install an exception handler to handle each native
1871  thread's exceptions.  This process involves creating a distinguished
1872  thread which listens for kernel exception messages on a set of
1873  0 or more thread exception ports.  As threads are created, they're
1874  added to that port set; a thread's exception port is destroyed
1875  (and therefore removed from the port set) when the thread exits.
1876
1877  A few exceptions can be handled directly in the handler thread;
1878  others require that we resume the user thread (and that the
1879  exception thread resumes listening for exceptions.)  The user
1880  thread might eventually want to return to the original context
1881  (possibly modified somewhat.)
1882
1883  As it turns out, the simplest way to force the faulting user
1884  thread to handle its own exceptions is to do pretty much what
1885  signal() does: the exception handling thread sets up a sigcontext
1886  on the user thread's stack and forces the user thread to resume
1887  execution as if a signal handler had been called with that
1888  context as an argument.  We can use a distinguished UUO at a
1889  distinguished address to do something like sigreturn(); that'll
1890  have the effect of resuming the user thread's execution in
1891  the (pseudo-) signal context.
1892
1893  Since:
1894    a) we have miles of code in C and in Lisp that knows how to
1895    deal with Linux sigcontexts
1896    b) Linux sigcontexts contain a little more useful information
1897    (on PPC, the DAR, DSISR, etc.) than their Darwin counterparts
1898    c) we have to create a sigcontext ourselves when calling out
1899    to the user thread: we aren't really generating a signal, just
1900    leveraging existing signal-handling code,
1901
1902  we create a Linux sigcontext struct.
1903
1904  Simple?  Hopefully, from the outside, it is ...
1905
1906  We want the process of passing a thread's own context to it to
1907  appear to be atomic: in particular, we don't want the GC to suspend
1908  a thread that's had an exception but has not yet had its user-level
1909  exception handler called, and we don't want the thread's exception
1910  context to be modified by a GC while the Mach handler thread is
1911  copying it around.  On Linux (and on Jaguar), we avoid this issue
1912  because (a) the kernel sets up the user-level signal handler and
1913  (b) the signal handler blocks signals (including the signal used
1914  by the GC to suspend threads) until tcr->xframe is set up.
1915
1916  The GC and the Mach server thread therefore contend for the lock
1917  "mach_exception_lock".  The Mach server thread holds the lock
1918  when copying exception information between the kernel and the
1919  user thread; the GC holds this lock during most of its execution
1920  (delaying exception processing until it can be done without
1921  GC interference.)
1922
1923*/
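/* A condensed sketch of the round trip described above (all names
   refer to functions defined later in this file):

   user thread faults
     -> kernel sends an exception message to the thread's exception port
     -> exception_handler_proc() / openmcl_exc_server()
     -> catch_exception_raise()
     -> setup_signal_frame():  build a pseudo-sigcontext on the user
        thread's stack, point its resume PC at pseudo_signal_handler()
     -> user thread resumes and runs the (pseudo-) signal handler
     -> handler "returns" through the pseudo_sigreturn UUO
     -> catch_exception_raise() -> do_pseudo_sigreturn()
     -> restore_mach_thread_state():  the original context resumes. */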
1924
1925
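/* Subtract b from a, then round the result down to a multiple of c;
   e.g., TRUNC_DOWN(0x1007, 0, 8) == 0x1000. */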
1926#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
1927
1928void
1929fatal_mach_error(char *format, ...);
1930
1931#define MACH_CHECK_ERROR(context,x) do {if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, (x));}} while (0)
1932
1933
1934void
1935restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
1936{
1937  kern_return_t kret;
1938  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);
1939
1940  /* Set the thread's FP state from the pseudosigcontext */
1941  kret = thread_set_state(thread,
1942                          ARM_VFP_STATE,
1943                          (thread_state_t)&(mc->__fs),
1944                          ARM_VFP_STATE_COUNT);
1945
1946  MACH_CHECK_ERROR("setting thread FP state", kret);
1947
1948  /* The thread'll be as good as new ... */
1949  kret = thread_set_state(thread, 
1950                          MACHINE_THREAD_STATE,
1951                          (thread_state_t)&(mc->__ss),
1952                          MACHINE_THREAD_STATE_COUNT);
1953  MACH_CHECK_ERROR("setting thread state", kret);
1954} 
1955
1956/* This code runs in the exception handling thread, in response
1957   to an attempt to execute the UUO at "pseudo_sigreturn" (i.e.,
1958   in response to a call to pseudo_sigreturn() from the specified
1959   user thread.)
1960   Find that context (the user thread's R3 points to it), then
1961   use that context to set the user thread's state.  When this
1962   function's caller returns, the Mach kernel will resume the
1963   user thread.
1964*/
1965
1966kern_return_t
1967do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
1968{
1969  ExceptionInformation *xp;
1970
1971#ifdef DEBUG_MACH_EXCEPTIONS
1972  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
1973#endif
1974  xp = tcr->pending_exception_context;
1975  if (xp) {
1976    tcr->pending_exception_context = NULL;
1977    tcr->valence = TCR_STATE_LISP;
1978    restore_mach_thread_state(thread, xp);
1979    raise_pending_interrupt(tcr);
1980  } else {
1981    Bug(NULL, "no xp here!\n");
1982  }
1983#ifdef DEBUG_MACH_EXCEPTIONS
1984  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
1985#endif
1986  return KERN_SUCCESS;
1987} 
1988
1989ExceptionInformation *
1990create_thread_context_frame(mach_port_t thread, 
1991                            natural *new_stack_top)
1992{
1993  arm_thread_state_t ts;
1994  mach_msg_type_number_t thread_state_count;
1995  kern_return_t result;
1996  ExceptionInformation *pseudosigcontext;
1997  _STRUCT_MCONTEXT *mc;
1998  natural stackp, backlink;
1999
2000  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2001  result = thread_get_state(thread, 
2002                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
2003                            (thread_state_t)&ts,
2004                            &thread_state_count);
2005 
2006  if (result != KERN_SUCCESS) {
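    /* Ensure that this (foreign) thread has a tcr before calling Bug(). */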
2007    get_tcr(true);
2008    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2009  }
2010  stackp = ts.__sp;
2011  backlink = stackp;
2012
2013  stackp -= sizeof(*pseudosigcontext);
2014  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2015
      /* Allocate the mcontext separately, below the ucontext: the two
         must not overlap, since the ucontext's own fields are written
         after the thread state has been copied into the mcontext. */
      stackp -= sizeof(*mc);
2016  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
2017  memmove(&(mc->__ss),&ts,sizeof(ts));
2018
2019  thread_state_count = ARM_VFP_STATE_COUNT;
2020  thread_get_state(thread,
2021                   ARM_VFP_STATE,
2022                   (thread_state_t)&(mc->__fs),
2023                   &thread_state_count);
2024
2025
2026  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
2027  thread_get_state(thread,
2028                   ARM_EXCEPTION_STATE,
2029                   (thread_state_t)&(mc->__es),
2030                   &thread_state_count);
2031
2032
2033  UC_MCONTEXT(pseudosigcontext) = mc;
2034  if (new_stack_top) {
2035    *new_stack_top = stackp;
2036  }
2037  return pseudosigcontext;
2038}
2039
2040/*
2041  This code sets up the user thread so that it executes a "pseudo-signal
2042  handler" function when it resumes.  Create a linux sigcontext struct
2043  on the thread's stack and pass it as an argument to the pseudo-signal
2044  handler.
2045
2046  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2047  which will restore the thread's context.
2048
2049  If the handler invokes code that throws (or otherwise never sigreturn()'s
2050  to the context), that's fine.
2051
2052  Actually, check that: throw (and variants) may need to be careful and
2053  pop the tcr's xframe list until it's younger than any frame being
2054  entered.
2055*/
2056
2057int
2058setup_signal_frame(mach_port_t thread,
2059                   void *handler_address,
2060                   int signum,
2061                   int code,
2062                   TCR *tcr)
2063{
2064  arm_thread_state_t ts;
2065  ExceptionInformation *pseudosigcontext;
2066  int old_valence = tcr->valence;
2067  natural stackp;
2068
2069#ifdef DEBUG_MACH_EXCEPTIONS
2070  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2071#endif
2072  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2073  pseudosigcontext->uc_onstack = 0;
2074  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2075  tcr->pending_exception_context = pseudosigcontext;
2076  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2077 
2078
2079  /*
2080     At this point we've created a sigcontext on the thread's
2081     stack.  Set things up so that we call the handler (with appropriate
2082     args) when the thread's resumed.
2083  */
2084
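  /* Per the ARM calling convention, the handler's four arguments go
     in r0-r3; lr is aimed at pseudo_sigreturn, so that a normal return
     from the handler raises the distinguished UUO recognized by
     catch_exception_raise(). */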
2085  ts.__pc = (natural) handler_address;
2086  ts.__sp = stackp;
2087  ts.__r[0] = signum;
2088  ts.__r[1] = (natural)pseudosigcontext;
2089  ts.__r[2] = (natural)tcr;
2090  ts.__r[3] = (natural)old_valence;
2091  ts.__lr = (natural)pseudo_sigreturn;
2092
2093
2094  thread_set_state(thread, 
2095                   MACHINE_THREAD_STATE,
2096                   (thread_state_t)&ts,
2097                   MACHINE_THREAD_STATE_COUNT);
2098#ifdef DEBUG_MACH_EXCEPTIONS
2099  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2100#endif
2101  return 0;
2102}
2103
2104
2105void
2106pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2107{
2108  signal_handler(signum, NULL, context, tcr, old_valence, 0);
2109} 
2110
2111
2112int
2113thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2114{
2115  /* Likely hopeless. */
2116  return 0;
2117}
2118
2119/*
2120  This function runs in the exception handling thread.  It's
2121  called (by this precise name) from the library function "exc_server()",
2122  or, in this file, from the openmcl_exc_server() replacement below,
2123  when an exception message arrives.  (The server function is invoked
2124  via mach_msg_server(), which waits for and dispatches on exception
      messages from the Mach kernel.)
2125
2126  This checks to see if the exception was caused by a pseudo_sigreturn()
2127  UUO; if so, it arranges for the thread to have its state restored
2128  from the specified context.
2129
2130  Otherwise, it tries to map the exception to a signal number and
2131  arranges that the thread run a "pseudo signal handler" to handle
2132  the exception.
2133
2134  Some exceptions could and should be handled here directly.
2135*/
2136
2137kern_return_t
2138catch_exception_raise(mach_port_t exception_port,
2139                      mach_port_t thread,
2140                      mach_port_t task, 
2141                      exception_type_t exception,
2142                      exception_data_t code_vector,
2143                      mach_msg_type_number_t code_count)
2144{
2145  int signum = 0, code = *code_vector, code1;
2146  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2147  kern_return_t kret;
2148
2149#ifdef DEBUG_MACH_EXCEPTIONS
2150  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2151#endif
2152
2153  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2154    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2155  } 
2156  if ((exception == EXC_BAD_INSTRUCTION) &&
2157      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2158      (((code1 = code_vector[1]) == (int)pseudo_sigreturn))) {
2159    kret = do_pseudo_sigreturn(thread, tcr);
2160  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2161    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
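    /* Any value other than KERN_SUCCESS tells the kernel that the
       exception was not handled here. */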
2162    kret = 17;
2163  } else {
2164    switch (exception) {
2165    case EXC_BAD_ACCESS:
2166      signum = SIGSEGV;
2167      break;
2168       
2169    case EXC_BAD_INSTRUCTION:
2170      signum = SIGILL;
2171      break;
2172     
2175    case EXC_ARITHMETIC:
2176      signum = SIGFPE;
2177      break;
2178
2179    default:
2180      break;
2181    }
2182    if (signum) {
2183      kret = setup_signal_frame(thread,
2184                                (void *)pseudo_signal_handler,
2185                                signum,
2186                                code,
2187                                tcr);
2188#if 0
2189      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2190#endif
2191
2192    } else {
2193      kret = 17;
2194    }
2195  }
2196
2197  return kret;
2198}
2199
2200
2201
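/* This mirrors the MIG-generated request structure for the
   "exception_raise" message (msgh_id 2401) that openmcl_exc_server()
   decodes below. */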
2202typedef struct {
2203  mach_msg_header_t Head;
2204  /* start of the kernel processed data */
2205  mach_msg_body_t msgh_body;
2206  mach_msg_port_descriptor_t thread;
2207  mach_msg_port_descriptor_t task;
2208  /* end of the kernel processed data */
2209  NDR_record_t NDR;
2210  exception_type_t exception;
2211  mach_msg_type_number_t codeCnt;
2212  integer_t code[2];
2213  mach_msg_trailer_t trailer;
2214} exceptionRequest;
2215
2216
2217boolean_t
2218openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2219{
2220  static NDR_record_t _NDR = {0};
2221  kern_return_t handled;
2222  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2223  exceptionRequest *req = (exceptionRequest *) in;
2224
2225  reply->NDR = _NDR;
2226
2227  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2228  out->msgh_remote_port = in->msgh_remote_port;
2229  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2230  out->msgh_local_port = MACH_PORT_NULL;
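  /* By Mach convention, a reply's msgh_id is the request's msgh_id plus 100. */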
2231  out->msgh_id = in->msgh_id+100;
2232
2233  /* Could handle the other exception message ids in the range 2401-2403 */
2234
2235
2236  if (in->msgh_id != 2401) {
2237    reply->RetCode = MIG_BAD_ID;
2238    return FALSE;
2239  }
2240  handled = catch_exception_raise(req->Head.msgh_local_port,
2241                                  req->thread.name,
2242                                  req->task.name,
2243                                  req->exception,
2244                                  req->code,
2245                                  req->codeCnt);
2246  reply->RetCode = handled;
2247  return TRUE;
2248}
2249
2250/*
2251  The initial function for an exception-handling thread.
2252*/
2253
2254void *
2255exception_handler_proc(void *arg)
2256{
2257  extern boolean_t exc_server();
2258  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2259
2260  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2261  /* Should never return. */
2262  abort();
2263}
2264
2265
2266
2267mach_port_t
2268mach_exception_port_set()
2269{
2270  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2271  kern_return_t kret; 
2272  if (__exception_port_set == MACH_PORT_NULL) {
2273    kret = mach_port_allocate(mach_task_self(),
2274                              MACH_PORT_RIGHT_PORT_SET,
2275                              &__exception_port_set);
2276    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2277    create_system_thread(0,
2278                         NULL,
2279                         exception_handler_proc, 
2280                         (void *)((natural)__exception_port_set));
2281  }
2282  return __exception_port_set;
2283}
2284
2285/*
2286  Set up a new thread to handle those exceptions specified by
2287  the mask "which".  This involves creating a special Mach
2288  message port, telling the Mach kernel to send exception
2289  messages for the calling thread to that port, and setting
2290  up a handler thread which listens for and responds to
2291  those messages.
2292
2293*/
2294
2295/*
2296  Establish the lisp thread's TCR as its exception port, and determine
2297  whether any other ports have been established by foreign code for
2298  exceptions that lisp cares about.
2299
2300  If this happens at all, it should happen on return from foreign
2301  code and on entry to lisp code via a callback.
2302
2303  This is a lot of trouble (and overhead) to support Java, or other
2304  embeddable systems that clobber their caller's thread exception ports.
2305 
2306*/
2307kern_return_t
2308tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2309{
2310  kern_return_t kret;
2311  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2312  int i;
2313  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2314  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2315  exception_mask_t mask = 0;
2316
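  /* n is an in/out parameter: on entry, the capacity of the fxs arrays;
     on exit, the number of previously-installed handlers returned. */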
2317  kret = thread_swap_exception_ports(thread,
2318                                     LISP_EXCEPTIONS_HANDLED_MASK,
2319                                     lisp_port,
2320                                     EXCEPTION_DEFAULT,
2321                                     THREAD_STATE_NONE,
2322                                     fxs->masks,
2323                                     &n,
2324                                     fxs->ports,
2325                                     fxs->behaviors,
2326                                     fxs->flavors);
2327  if (kret == KERN_SUCCESS) {
2328    fxs->foreign_exception_port_count = n;
2329    for (i = 0; i < n; i ++) {
2330      foreign_port = fxs->ports[i];
2331
2332      if ((foreign_port != lisp_port) &&
2333          (foreign_port != MACH_PORT_NULL)) {
2334        mask |= fxs->masks[i];
2335      }
2336    }
2337    tcr->foreign_exception_status = (int) mask;
2338  }
2339  return kret;
2340}
2341
2342kern_return_t
2343tcr_establish_lisp_exception_port(TCR *tcr)
2344{
2345  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2346}
2347
2348/*
2349  Do this when calling out to or returning from foreign code, if
2350  any conflicting foreign exception ports were established when we
2351  last entered lisp code.
2352*/
2353kern_return_t
2354restore_foreign_exception_ports(TCR *tcr)
2355{
2356  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2357 
2358  if (m) {
2359    MACH_foreign_exception_state *fxs  = 
2360      (MACH_foreign_exception_state *) tcr->native_thread_info;
2361    int i, n = fxs->foreign_exception_port_count;
2362    exception_mask_t tm;
2363
2364    for (i = 0; i < n; i++) {
2365      if ((tm = fxs->masks[i]) & m) {
2366        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2367                                   tm,
2368                                   fxs->ports[i],
2369                                   fxs->behaviors[i],
2370                                   fxs->flavors[i]);
2371      }
2372    }
2373  }
      return KERN_SUCCESS;
2374}
2375                                   
2376
2377/*
2378  This assumes that a Mach port (to be used as the thread's exception port) whose
2379  "name" matches the TCR's 32-bit address has already been allocated.
2380*/
2381
2382kern_return_t
2383setup_mach_exception_handling(TCR *tcr)
2384{
2385  mach_port_t
2386    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2387    task_self = mach_task_self();
2388  kern_return_t kret;
2389
2390  kret = mach_port_insert_right(task_self,
2391                                thread_exception_port,
2392                                thread_exception_port,
2393                                MACH_MSG_TYPE_MAKE_SEND);
2394  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2395
2396  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2397  if (kret == KERN_SUCCESS) {
2398    mach_port_t exception_port_set = mach_exception_port_set();
2399
2400    kret = mach_port_move_member(task_self,
2401                                 thread_exception_port,
2402                                 exception_port_set);
2403  }
2404  return kret;
2405}
2406
2407void
2408darwin_exception_init(TCR *tcr)
2409{
2410  void tcr_monitor_exception_handling(TCR*, Boolean);
2411  kern_return_t kret;
2412  MACH_foreign_exception_state *fxs = 
2413    calloc(1, sizeof(MACH_foreign_exception_state));
2414 
2415  tcr->native_thread_info = (void *) fxs;
2416
2417  if ((kret = setup_mach_exception_handling(tcr))
2418      != KERN_SUCCESS) {
2419    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
2420    terminate_lisp();
2421  }
2422}
2423
2424/*
2425  The tcr is the "name" of the corresponding thread's exception port.
2426  Destroying the port should remove it from all port sets of which it's
2427  a member (notably, the exception port set.)
2428*/
2429void
2430darwin_exception_cleanup(TCR *tcr)
2431{
2432  void *fxs = tcr->native_thread_info;
2433  extern Boolean use_mach_exception_handling;
2434
2435  if (fxs) {
2436    tcr->native_thread_info = NULL;
2437    free(fxs);
2438  }
2439  if (use_mach_exception_handling) {
2440    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2441    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2442  }
2443}
2444
2445
2446Boolean
2447suspend_mach_thread(mach_port_t mach_thread)
2448{
2449  kern_return_t status;
2450  Boolean aborted = false;
2451 
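  /* thread_abort_safely() forces the target out of any interruptible
     kernel call (e.g., a mach_msg() wait) so that its saved user state
     is consistent; if that can't be done safely yet, resume and retry. */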
2452  do {
2453    aborted = false;
2454    status = thread_suspend(mach_thread);
2455    if (status == KERN_SUCCESS) {
2456      status = thread_abort_safely(mach_thread);
2457      if (status == KERN_SUCCESS) {
2458        aborted = true;
2459      } else {
2460        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2461        thread_resume(mach_thread);
2462      }
2463    } else {
2464      return false;
2465    }
2466  } while (! aborted);
2467  return true;
2468}
2469
2470/*
2471  Only do this if pthread_kill indicated that the pthread isn't
2472  listening to signals anymore, as can happen as soon as pthread_exit()
2473  is called on Darwin.  The thread could still call out to lisp as it
2474  is exiting, so we need another way to suspend it in this case.
2475*/
2476Boolean
2477mach_suspend_tcr(TCR *tcr)
2478{
2479  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2480  ExceptionInformation *pseudosigcontext;
2481  Boolean result = false;
2482 
2483  result = suspend_mach_thread(mach_thread);
2484  if (result) {
2485    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2486    pseudosigcontext->uc_onstack = 0;
2487    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2488    tcr->suspend_context = pseudosigcontext;
2489  }
2490  return result;
2491}
2492
2493void
2494mach_resume_tcr(TCR *tcr)
2495{
2496  ExceptionInformation *xp;
2497  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2498 
2499  xp = tcr->suspend_context;
2500#ifdef DEBUG_MACH_EXCEPTIONS
2501  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2502          tcr, tcr->pending_exception_context);
2503#endif
2504  tcr->suspend_context = NULL;
2505  restore_mach_thread_state(mach_thread, xp);
2506#ifdef DEBUG_MACH_EXCEPTIONS
2507  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2508          tcr, tcr->pending_exception_context);
2509#endif
2510  thread_resume(mach_thread);
2511}
2512
2513void
2514fatal_mach_error(char *format, ...)
2515{
2516  va_list args;
2517  char s[512];
2518 
2519
2520  va_start(args, format);
2521  vsnprintf(s, sizeof(s),format, args);
2522  va_end(args);
2523
2524  Fatal("Mach error", s);
2525}
2526
2527void
2528pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2529{
2530  interrupt_handler(signum, NULL, context);
2531}
2532
2533int
2534mach_raise_thread_interrupt(TCR *target)
2535{
2536  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2537  kern_return_t kret;
2538  Boolean result = false;
2539  TCR *current = get_tcr(false);
2540  thread_basic_info_data_t info; 
2541  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2542
2543  LOCK(lisp_global(TCR_AREA_LOCK), current);
2544
2545  if (suspend_mach_thread(mach_thread)) {
2546    if (thread_info(mach_thread,
2547                    THREAD_BASIC_INFO,
2548                    (thread_info_t)&info,
2549                    &info_count) == KERN_SUCCESS) {
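      /* Proceed only if ours is the sole suspension; a deeper suspend
         count suggests that the GC or another thread got here first. */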
2550      if (info.suspend_count == 1) {
2551        if ((target->valence == TCR_STATE_LISP) &&
2552            (!target->unwinding) &&
2553            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2554          kret = setup_signal_frame(mach_thread,
2555                                    (void *)pseudo_interrupt_handler,
2556                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2557                                    0,
2558                                    target);
2559          if (kret == KERN_SUCCESS) {
2560            result = true;
2561          }
2562        }
2563      }
2564    }
2565    if (! result) {
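      /* Couldn't deliver the interrupt now; record it as a lisp fixnum 1
         so that the target takes it at the next safe point. */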
2566      target->interrupt_pending = 1 << fixnumshift;
2567    }
2568    thread_resume(mach_thread);
2569   
2570  }
2571  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2572  return 0;
2573}
2574
2575#endif