source: trunk/source/lisp-kernel/arm-exceptions.c @ 14261

Last change on this file since 14261 was 14197, checked in by rme, 9 years ago

Rename Threads.h to threads.h (with no capital letter).

/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "threads.h"

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
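/* A sketch of the assumed inline allocation sequence (the exact
   encodings are defined by the IS_* predicates used below):
     sub   allocptr, allocptr, ...   ; decrement allocptr by object size
     ldr   rX, [rcontext, #tcr.allocbase]
     cmp   allocptr, rX
     uuo_alloc_trap                  ; taken if allocptr is below allocbase
   which is why the sub is expected at program_counter[-3]. */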
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
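/* The post-trap sequence being emulated looks roughly like this
   (a sketch; the IS_* predicates below define the actual encodings):
     str   rX, [allocptr, #cons.car]
     str   rY, [allocptr, #cons.cdr]
     mov   result, allocptr
     bic   allocptr, allocptr, #fulltagmask   ; untag; emulation stops here
*/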
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

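/* Allocation moves allocptr downward through the current segment, so
   the bytes consumed since the last note-taking are (last - current). */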
void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
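    /* Fill in the cells in place, from low addresses to high: each cell
       is linked to the previously initialized cell (nil for the first)
       and gets the initial element; the last cell initialized becomes
       the head of the list returned in arg_z. */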
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

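/* Handle a GC trap: the selector in imm0 chooses the operation and
   imm1 supplies its argument; results are returned in imm0 or arg_z. */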
Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when GC was disabled.  That failed,
         so try a full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just hit its soft limit.  Signal an
     error_stack_overflow condition; the soft guard area remains
     unprotected until the limit is restored, once all stacks are back
     under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}

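/* Find the area in the list starting at *aptr that contains "value",
   record "value" as its active pointer, make it the active area, and
   mark all younger areas as empty. */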
void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

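/* Return the thread's current Lisp frame pointer (its Rsp) from the
   pending exception context if there is one, else from the suspend
   context; NULL if neither is available. */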
LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

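/* Make a thread's TCR consistent before the GC runs: update the active
   pointers of its stack areas from the context (or from the saved
   values if it's in a foreign-function call), account for any partial
   allocation, and void its allocation pointer. */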
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };

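/* Bit 11 (the "write, not read" bit) of the ARM fault status word is
   set when the faulting access was a write. */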
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */


Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}

Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

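/* "delta" is a byte count, but xpPC is a pointer to 32-bit opcodes,
   so advance the PC by delta/4 words. */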
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}

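/* Decode a UUO by its format field and dispatch.  "bump" is the number
   of bytes by which to advance the PC if the UUO was handled (0 when
   the handler has already adjusted it). */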
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, &bump);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

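/* If "where" points into the code vector of the (purported) function
   lisp_function, return that code vector; otherwise return 0. */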
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info, natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info, arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = fn,  codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}


Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}

#ifdef USE_SIGALTSTACK
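/* Copy the signal context and siginfo from the alternate signal stack
   onto the interrupted thread's own control stack, then transfer
   control there and call "handler", so that the real work runs on the
   main stack rather than the (small) signal stack. */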
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *, void *, void *);

  BytePtr target_sp = (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

#ifdef USE_SIGALTSTACK
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0), interrupt_handler);
}
#endif


void
install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_SIGINFO
#ifdef USE_SIGALTSTACK
    | (on_altstack ? SA_ONSTACK : 0)
#endif
    ;

  sigaction(signo, &sa, NULL);
}


void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler), true, true);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
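/* Give the thread a dedicated alternate signal stack, allocated with
   mmap rather than carved out of area "a" (the #if 0 block preserves
   the old approach), so stack-overflow signals can still be handled
   when the main stack is exhausted. */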
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
  sigset_t mask;

  sigemptyset(&mask);

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}


void
exception_init()
{
  install_pmcl_exception_handlers();
}


#ifdef DARWIN

#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))

#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4
1854typedef struct {
1855  int foreign_exception_port_count;
1856  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1857  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1858  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1859  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1860} MACH_foreign_exception_state;
1861
1862
1863
1864
/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resumes listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts,
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts, and
    c) we have to create a sigcontext ourselves when calling out
    to the user thread anyway (we aren't really generating a signal,
    just leveraging existing signal-handling code),

  we create a Linux sigcontext struct.

  Simple?  Hopefully it looks that way from the outside ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)

*/
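
/* In terms of the code below, the round trip is:

   1. catch_exception_raise() receives the kernel's exception message
      (dispatched by openmcl_exc_server() via mach_msg_server()).
   2. setup_signal_frame() calls create_thread_context_frame() to build
      the pseudo-sigcontext on the user thread's stack, then points the
      thread at pseudo_signal_handler() with pseudo_sigreturn() as the
      return address.
   3. When the handler returns, the pseudo_sigreturn() UUO raises a
      fresh exception; do_pseudo_sigreturn() then restores the saved
      context via restore_mach_thread_state().
*/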


#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);} } while (0)
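
/* Typical use of the macro above (a sketch): wrap any call that
   returns a kern_return_t, e.g.

     kret = thread_suspend(thread);
     MACH_CHECK_ERROR("suspending thread", kret);

   Anything but KERN_SUCCESS calls fatal_mach_error(), which does not
   return. */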


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (the TCR's pending_exception_context points
   to it), then use that context to set the user thread's state.
   When this function's caller returns, the Mach kernel will
   resume the user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}

ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;

  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp -= sizeof(*mc);
  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
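
/* Layout of the frame built above, relative to the interrupted
   thread's stack pointer (the stack grows toward lower addresses):

     original __sp  ->  +-----------------------------------+
                        | ExceptionInformation              |
                        |   (the pseudosigcontext)          |
                        +-----------------------------------+
                        | _STRUCT_MCONTEXT                  |
                        |   __ss/__fs/__es copied from the  |
                        |   suspended thread                |
     *new_stack_top ->  +-----------------------------------+
*/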

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a Linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     We've now created a sigcontext on the thread's stack.  Set
     things up so that we call the handler (with appropriate args)
     when the thread's resumed.
  */

  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;
  ts.__cpsr = xpPSR(pseudosigcontext);


  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
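
/* The register assignments in setup_signal_frame() above follow the
   ARM procedure call standard: __r[0]..__r[3] become the four
   arguments of pseudo_signal_handler() below (signum, context, tcr,
   old_valence), and __lr makes the handler "return to"
   pseudo_sigreturn(). */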


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)  In this file, the
  hand-rolled openmcl_exc_server() below plays the role of exc_server()
  and calls this function directly.

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  /* On the ARM, code_vector[1] contains the undefined instruction
     in this case, not its address.  */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
    kret = do_pseudo_sigreturn(thread, tcr);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      kret = 17;
    }
  }

  return kret;
}



typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
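
/* This hand-rolled layout appears to mirror the request structure MIG
   would generate for exception_raise (message id 2401); ids 2402 and
   2403, mentioned below, are the _state and _state_identity
   variants. */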


boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;
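  /* By MIG convention, a reply's msgh_id is the request's msgh_id
     plus 100. */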

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}



mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.

*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

*/
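
/* A sketch of the resulting per-thread setup sequence, assuming (as
   noted further below) that a receive right named by the TCR's
   address already exists:

     darwin_exception_init(tcr)
       -> setup_mach_exception_handling(tcr)
            -> mach_port_insert_right()        add a send right
            -> tcr_establish_exception_port()  swap it in, saving any
                                               foreign ports
            -> mach_port_move_member()         join the port set that
                                               the handler thread
                                               services
*/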
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs  =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}


Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}
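
/* The loop above retries because thread_abort_safely() can fail when
   the target can't be cleanly interrupted at that instant (presumably
   while it's in a non-restartable kernel state); resuming and
   suspending again gives the thread another chance to reach an
   abortable point. */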

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];

  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

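/* Interrupt a lisp thread via the Mach thread API: suspend it, and if
   it's running lisp code, not unwinding, has interrupts enabled, and
   we're its only suspender (suspend_count == 1), push a pseudo-signal
   frame that runs pseudo_interrupt_handler(); otherwise just record a
   pending interrupt in the TCR for the thread to notice later. */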
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif