source: branches/arm/lisp-kernel/arm-exceptions.c @ 13992

Last change on this file since 13992 was 13992, checked in by gb, 10 years ago

Keep trying to guess how to identify a write fault on ARM Linux.

File size: 65.2 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
/*
 * Ask the kernel (via prctl) for "precise" floating-point exceptions.
 * If PR_SET_FPEXC isn't supported, the call fails harmlessly and we
 * assume exceptions are already enabled and precise (see the comment
 * above the PR_* fallback definitions).
 */
void
enable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}
69
/*
 * Ask the kernel (via prctl) to disable floating-point exceptions.
 * A failure (old kernel without PR_SET_FPEXC) is silently ignored.
 */
void
disable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
/* Target page size in bytes and its base-2 logarithm.  Initialized
   for the common 4KB ARM page size; used below for page-granular
   alignment and (un)protection. */
int
page_size = 4096;

int
log2_page_size = 12;
98
99
100
101
102
/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    /* Case 1: allocptr was decremented by a register operand. */
    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }
   
    /* Case 2: decremented by a small immediate (low 8 bits). */
    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    /* Case 3: a rotated-immediate subtract (ARM immediates are an
       8-bit value rotated right by twice the 4-bit rotate field,
       hence the ror() below); the low byte of the displacement comes
       from a preceding SUB_LO instruction. */
    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  /* Not at an allocation trap. */
  return 0;
}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) == xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
/*
  Try to make bytes_needed bytes available at allocptr, running the
  (E)GC and/or growing the heap as required.  On success, allocptr is
  adjusted by disp_from_allocptr and true is returned; returns false
  if the space can't be obtained even after a full GC and attempted
  heap growth.
*/
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  return false;
}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
269
/*
  Report a failed attempt to allocate bytes_needed bytes to Lisp,
  after resetting allocptr/allocbase to the "void" state.
*/
void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal a "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}
280
/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347    /* else fall thru */
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
361natural gc_deferred = 0, full_gc_deferred = 0;
362
/* No-op worker passed to gc_like_from_xp() by the FLASH_FREEZE GC
   trap: the useful work (thread suspension/normalization) happens
   in gc_like_from_xp itself. */
signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}
368
/*
  Dispatch a GC trap.  The operation selector is in imm0 and an
  operation-specific argument is in imm1; results, where a case
  produces one, are returned in imm0 or arg_z.  Always returns true
  after advancing the PC past the trapping instruction.
*/
Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  /* EGC is "enabled" iff the dynamic area has an older generation. */
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Turn the EGC on/off; return its previous state (T or NIL)
       in arg_z. */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set per-generation thresholds from arg_x/arg_y/arg_z; return T. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      /* Round the requested threshold up to a whole number of
         heap segments. */
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* "Freeze" the heap: run the stop-the-world machinery with a
       no-op worker, then mark everything currently allocated as
       static dnodes. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* The remaining selectors all actually run the GC. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    /* Remember whether this GC got deferred (GC inhibited), so a
       later IMMEDIATE_GC request knows to retry a full GC. */
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        /* Stash the toplevel function (the deepest value on the
           value stack) before saving the image. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      /* Re-enable the EGC if it was on when we entered. */
      egc_control(true, NULL);
    }
    break;
   
  }

  adjust_exception_pc(xp,4);
  return true;
}
503
504
505
/*
  Report a soft overflow of the stack addressed by register "reg" to
  Lisp as an error_stack_overflow condition.
*/
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The cstack just overflowed.  Force the current thread's
     control stack to do so until all stacks are well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}
519
/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with a by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;
 
  /* Only lower the limit if at least 16KB of protected area remains
     above its start. */
  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  /* (Re)apply the protection in any case. */
  protect_area(p);
}
538
/*
  Re-establish the soft protection limit for one of the current
  thread's stacks (the control stack for Rsp, the value stack for
  vsp) after a soft overflow has been reported.
*/
void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    /* Only lower the limit while it stays comfortably (16KB)
       above the hard limit. */
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}
559
/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  /* Unwind the value stack to the vsp saved in the innermost catch
     frame before restarting the thread's Lisp toplevel. */
  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}
574
/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Round both the current free pointer and the request up to the
     thread's allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      /* Try progressively smaller extensions (halving each time)
         until one succeeds or the extra headroom drops below 4MB. */
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  /* Success: hand the new segment to the thread.  allocbase keeps
     the old limit; allocation proceeds between the two. */
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
617
618 
619void
620update_area_active (area **aptr, BytePtr value)
621{
622  area *a = *aptr;
623  for (; a; a = a->older) {
624    if ((a->low <= value) && (a->high >= value)) break;
625  };
626  if (a == NULL) Bug(NULL, "Can't find active area");
627  a->active = value;
628  *aptr = a;
629
630  for (a = a->younger; a; a = a->younger) {
631    a->active = a->high;
632  }
633}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
/*
  Bring a TCR into a consistent state for a GC-like operation: make
  the stack areas' "active" pointers reflect the thread's current
  stack pointers and account for any partially-consumed allocation
  segment.  "is_other_tcr" is true when normalizing a suspended
  thread other than the one driving the operation.
*/
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Let the suspended thread finish any allocation it was in
         the middle of before we read its allocptr. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      /* Only an untagged allocptr represents real allocation state. */
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}
684
685TCR *gc_tcr = NULL;
686
/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC is inhibited.  Negating a positive count records that a GC
       was requested while inhibited; note the deferral and bail. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Pick the most relevant saved context for every other thread and
     normalize it for the operation's benefit. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }
   


  result = fun(tcr, param);

  /* Clear the per-thread gc_context links before resuming. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
750
751
752
753/* Returns #bytes freed by invoking GC */
754
755signed_natural
756gc_from_tcr(TCR *tcr, signed_natural param)
757{
758  area *a;
759  BytePtr oldfree, newfree;
760  BytePtr oldend, newend;
761
762  a = active_dynamic_area;
763  oldend = a->high;
764  oldfree = a->active;
765  gc(tcr, param);
766  newfree = a->active;
767  newend = a->high;
768#if 0
769  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
770#endif
771  return ((oldfree-newfree)+(newend-oldend));
772}
773
774signed_natural
775gc_from_xp(ExceptionInformation *xp, signed_natural param)
776{
777  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
778
779  freeGCptrs();
780  return status;
781}
782
/* Run purify in a stop-the-world context (see gc_like_from_xp). */
signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}
788
/* Run impurify in a stop-the-world context (see gc_like_from_xp). */
signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
794
795
796
797
798
799
/* Dispatch table for write faults on protected areas, indexed by the
   protected area's "why" field (see handle_protection_violation).
   NOTE(review): presumably the order matches the protected-area kind
   enumeration (one spurious slot, three soft-overflow slots, three
   hard-overflow slots) -- confirm against its declaration. */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
810
811
812Boolean
813is_write_fault(ExceptionInformation *xp, siginfo_t *info)
814{
815#ifdef LINUX
816  /* Based on experiments with a small sample size; need to R TFM. */
817  return ((xp->uc_mcontext.trap_no == 0xe) &&
818          ((xp->uc_mcontext.error_code & 0xfffffff7) == 0x817));
819#endif
820}
821
/*
  SIGSEGV/SIGBUS handling.  Returns true if the fault was recognized
  and handled (a "safe reference" probe, a touch_page() probe, or a
  write into a known protected area); otherwise, if the thread was
  running Lisp code, gives the CMAIN callback a chance and returns
  false.
*/
Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  /* A fault at tcr->safe_ref_address comes from a deliberate "safe"
     memory probe: report failure (imm0 = 0) and skip the faulting
     instruction. */
  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  /* Likewise for a fault inside the touch_page() probe: report
     failure and resume at touch_page_end. */
  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      /* Dispatch on why the area was protected. */
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      /* Writes into the readonly area are permitted: unprotect the
         containing page and let the write retry. */
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;
   
    if ((fulltag_of(cmain) == fulltag_misc) &&
      (header_subtag(header_of(cmain)) == subtag_macptr)) {
     
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}
880
881
882
883
884
/*
  Hard stack overflow: no guard pages left to consume, so reset the
  Lisp process.  Returns -1 only if reset_lisp_process() ever comes
  back (see its comment).
*/
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
894
895extern area*
896allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
897
898extern area*
899allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
900
901
902
903
904
905
906Boolean
907lisp_frame_p(lisp_frame *spPtr)
908{
909  return (spPtr->marker == lisp_frame_marker);
910}
911
912
913int ffcall_overflow_count = 0;
914
915
916
917
918
919
/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

/*
  Soft overflow of the value stack: unprotect the soft-guard area so
  the faulting write can proceed, then signal a stack-overflow
  condition for the vsp.
*/
OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}
933
934
935
936OSStatus
937do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
938{
939  /* Trying to write into a guard page on the vstack or tstack.
940     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
941     signal an error_stack_overflow condition.
942      */
943  do_vsp_overflow(xp,addr);
944}
945
/*
  A write fault on a protected area that we have no special handling
  for; report failure (-1) to the caller.
*/
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
954
955
956
957
958     
959
960
961
962
963
/* SIGFPE: floating-point exceptions are not handled here (yet);
   returning false lets the generic machinery report the signal. */
Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}
969
970
/* Hook for emulating instructions the hardware doesn't implement;
   currently nothing is emulated, so the exception goes unhandled. */
Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{

  return false;
}
979
980Boolean
981handle_exception(int xnum, 
982                 ExceptionInformation *xp, 
983                 TCR *tcr, 
984                 siginfo_t *info,
985                 int old_valence)
986{
987  pc program_counter;
988  opcode instruction = 0;
989
990  if (old_valence != TCR_STATE_LISP) {
991    return false;
992  }
993
994  program_counter = xpPC(xp);
995 
996  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
997    instruction = *program_counter;
998  }
999
1000  if (IS_ALLOC_TRAP(instruction)) {
1001    return handle_alloc_trap(xp, tcr);
1002  } else if ((xnum == SIGSEGV) ||
1003             (xnum == SIGBUS)) {
1004    return handle_protection_violation(xp, info, tcr, old_valence);
1005  } else if (xnum == SIGFPE) {
1006    return handle_sigfpe(xp, tcr);
1007  } else if ((xnum == SIGILL)) {
1008    if (IS_GC_TRAP(instruction)) {
1009      return handle_gc_trap(xp, tcr);
1010    } else if (IS_UUO(instruction)) {
1011      return handle_uuo(xp, info, instruction);
1012    } else {
1013      return handle_unimplemented_instruction(xp,instruction,tcr);
1014    }
1015  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1016    tcr->interrupt_pending = 0;
1017    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
1018    return true;
1019  }
1020
1021  return false;
1022}
1023
/* Advance (or back up) the context's PC by delta *bytes*.  xpPC is
   a pointer to 32-bit opcodes (see its uses above), so the byte
   delta is converted to an instruction-word count with >> 2. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1029
1030
/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

/* Stub: the backward scan described above is not implemented; the
   locals are placeholders and the function always reports failure
   (-1). */
OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}
1043
/*
  Decode and dispatch a UUO (an undefined instruction Lisp uses as a
  trap).  "bump" is the number of bytes to advance the PC by once the
  UUO has been handled; a callback may modify it.  Returns true if the
  UUO was recognized and handled.
*/
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
{
  unsigned 
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      /* Thread-control services: the target TCR is in arg_z, the
         service code is encoded in the UUO's unary field.  Services
         that produce a value return it in imm0. */
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      /* Extend the TCR's TLB. */
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;

    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      /* Breakpoint: advance past the UUO *before* entering the
         debugger so execution resumes at the next instruction. */
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      /* Deliver a pending interrupt via the CMAIN callback; the
         callback may adjust "bump". */
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, &bump);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;


  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
    /* Error-signalling UUOs go to the generic error machinery. */
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }
 
  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}
1154
/*
  If lisp_function is a function object whose code vector contains
  "where", return the (tagged) code vector; otherwise return 0.
*/
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    /* Code vector size in bytes (32-bit elements). */
    size = header_element_count(header_of(code_vector)) << 2;
    /* NOTE(review): the lower bound uses the untagged address but
       the upper bound uses the tagged one -- looks deliberate, but
       confirm. */
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
1171
/* Thin wrapper around callback_to_lisp(): invoke the lisp callback
   designated by CALLBACK_MACPTR, passing the two argument words INFO
   and ARG, and store the requested PC adjustment through BUMPP. */
Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
}
1177
1178Boolean
1179callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
1180                  natural arg1, natural arg2, int *bumpP)
1181{
1182  natural  callback_ptr;
1183  area *a;
1184  natural fnreg = fn,  codevector, offset;
1185  pc where = xpPC(xp);
1186  int delta;
1187
1188  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1189  if (codevector == 0) {
1190    fnreg = nfn;
1191    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1192    if (codevector == 0) {
1193      fnreg = 0;
1194    }
1195  }
1196  if (codevector) {
1197    offset = (natural)where - (codevector - (fulltag_misc-node_size));
1198  } else {
1199    offset = (natural)where;
1200  }
1201                                                 
1202                                               
1203
1204  TCR *tcr = get_tcr(true);
1205
1206  /* Put the active stack pointer where .SPcallback expects it */
1207  a = tcr->cs_area;
1208  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));
1209
1210  /* Copy globals from the exception frame to tcr */
1211  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
1212  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
1213
1214
1215
1216  /* Call back.
1217     Lisp will handle trampolining through some code that
1218     will push lr/fn & pc/nfn stack frames for backtrace.
1219  */
1220  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1221  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
1222  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
1223  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1224
1225  if (bumpP) {
1226    *bumpP = delta;
1227  }
1228
1229  /* Copy GC registers back into exception frame */
1230  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
1231  return true;
1232}
1233
1234area *
1235allocate_no_stack (natural size)
1236{
1237#ifdef SUPPORT_PRAGMA_UNUSED
1238#pragma unused(size)
1239#endif
1240
1241  return (area *) NULL;
1242}
1243
1244
1245
1246
1247
1248
1249/* callback to (symbol-value cmain) if it is a macptr,
1250   otherwise report cause and function name to console.
1251   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  /* NOTE(review): this body looks truncated relative to its header
     comment -- cmain and tcr are computed but never used, and the
     function falls off the end of a non-void return type (undefined
     behavior if the caller uses the result).  Confirm against the
     complete upstream source. */
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

}
1259
1260
1261
1262
/* Report a recoverable error on the kernel debug stream and return
   to the caller; the message is flushed immediately so it isn't lost
   if the process dies soon after. */
void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}
1268
1269
1270
1271Boolean
1272handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
1273{
1274  LispObj   errdisp = nrs_ERRDISP.vcell;
1275
1276  if ((fulltag_of(errdisp) == fulltag_misc) &&
1277      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1278    /* errdisp is a macptr, we can call back to lisp */
1279    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
1280    }
1281
1282  return false;
1283}
1284               
1285
1286/*
1287   Current thread has all signals masked.  Before unmasking them,
1288   make it appear that the current thread has been suspended.
1289   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1291*/
1292int
1293prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1294{
1295  int old_valence = tcr->valence;
1296
1297  tcr->pending_exception_context = context;
1298  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1299
1300  ALLOW_EXCEPTIONS(context);
1301  return old_valence;
1302} 
1303
/* Acquire the global exception lock, then link CONTEXT onto the
   tcr's chain of in-progress exception contexts (so the GC can find
   and update it) and mark the thread as running foreign code. */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  /* Push this context on the xframe list only after the lock is
     held; pending_exception_context is cleared because the context
     is now reachable via tcr->xframe instead. */
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1317
/* Inverse of wait_for_exception_lock_in_handler(): pop the current
   context off the tcr's xframe list back into
   pending_exception_context, mark the thread as returning from an
   exception, and release the global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1326
1327/*
1328   If an interrupt is pending on exception exit, try to ensure
1329   that the thread sees it as soon as it's able to run.
1330*/
1331void
1332raise_pending_interrupt(TCR *tcr)
1333{
1334  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1335    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1336  }
1337}
1338
/* Leave a signal handler: block all signals, then restore the
   valence, pending context, and last-lisp-frame values saved on
   entry.  The blanket mask stays in effect only until the kernel
   restores the interrupted context's signal mask on return. */
void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  sigset_t mask;
  sigfillset(&mask);
 
  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}
1350
1351
/* Common core for the synchronous program-error signals
   (SIGILL/SIGTRAP/SIGBUS/SIGSEGV/SIGFPE).  When entered directly
   from the OS (not via the Mach exception machinery) the trailing
   tcr/old_valence/old_last_lisp_frame arguments are ignored and
   recomputed here.  If handle_exception() can't handle the
   exception, the kernel debugger is entered. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
   
    tcr = (TCR *) get_interrupt_tcr(false);
 
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */
   
    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* A suspend request arrived while we were masked; honor it now by
     re-sending the suspend signal to ourselves. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    /* Unhandled: give the kernel debugger a chance; it may ask us to
       propagate the exception to a foreign handler instead. */
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}
1400
1401/*
1402  If it looks like we're in the middle of an atomic operation, make
1403  it seem as if that operation is either complete or hasn't started
1404  yet.
1405
1406  The cases handled include:
1407
1408  a) storing into a newly-allocated lisp frame on the stack.
1409  b) marking a newly-allocated TSP frame as containing "raw" data.
1410  c) consing: the GC has its own ideas about how this should be
1411     handled, but other callers would be best advised to back
1412     up or move forward, according to whether we're in the middle
1413     of allocating a cons cell or allocating a uvector.
1414  d) a STMW to the vsp
1415  e) EGC write-barrier subprims.
1416*/
1417
1418extern opcode
1419  egc_write_barrier_start,
1420  egc_write_barrier_end, 
1421  egc_store_node_conditional, 
1422  egc_store_node_conditional_test,
1423  egc_set_hash_key,
1424  egc_gvset,
1425  egc_rplaca,
1426  egc_rplacd,
1427  egc_set_hash_key_conditional,
1428  egc_set_hash_key_conditional_test;
1429
1430
1431extern opcode ffcall_return_window, ffcall_return_window_end;
1432
/* Make an interrupted thread's state consistent with respect to
   atomic sequences: if the PC is inside an EGC write-barrier
   subprim, complete (or abandon) the store and memoization by hand
   and return to the subprim's caller; if an allocation is in
   progress (allocptr has a non-fixnum tag), either report the
   outstanding displacement through ALLOC_DISP or finish/void the
   allocation.  Called with the thread suspended (GC case) or on
   interrupt entry (ALLOC_DISP non-NULL). */
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;  /* NOTE(review): unused in this version */
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp)); /* NOTE(review): unused */
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);
 


  /* Case 1: PC inside one of the EGC write-barrier subprims. */
  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      /* Conditional hash-key store: before the store-conditional has
         succeeded (Z flag clear), nothing has happened yet. */
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    /* Perform the store the subprim was about to do ... */
    if (need_store) {
      *ea = val;
    }
    /* ... and the intergenerational-reference memoization, for an
       old->young store (ea < val). */
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    /* The subprim's work is done; resume at its return address. */
    xpPC(xp) = xpLR(xp);
    return;
  }


 
  /* Case 2: an allocation is in progress. */
  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}
1563
/* Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If the thread can't
   take an interrupt now (interrupts disabled, running foreign code,
   or unwinding), just record that one is pending; otherwise nudge
   the thread out of any atomic sequence (pc_luser_xp) and run the
   CMAIN callback via handle_exception(). */
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: defer. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;
         
          tcr->last_lisp_frame = xpGPR(context,Rsp);
          /* Back out of / complete any atomic sequence; disp records
             an in-progress allocation's displacement so it can be
             reinstated below. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
1611
1612
1613
1614void
1615install_signal_handler(int signo, void *handler)
1616{
1617  struct sigaction sa;
1618 
1619  sa.sa_sigaction = (void *)handler;
1620  sigfillset(&sa.sa_mask);
1621  sa.sa_flags = 
1622    0 /* SA_RESTART */
1623    | SA_SIGINFO
1624    ;
1625
1626  sigaction(signo, &sa, NULL);
1627}
1628
1629void
1630install_pmcl_exception_handlers()
1631{
1632#ifdef DARWIN
1633  extern Boolean use_mach_exception_handling;
1634#endif
1635
1636  Boolean install_signal_handlers_for_exceptions =
1637#ifdef DARWIN
1638    !use_mach_exception_handling
1639#else
1640    true
1641#endif
1642    ;
1643  if (install_signal_handlers_for_exceptions) {
1644    extern int no_sigtrap;
1645    install_signal_handler(SIGILL, (void *)signal_handler);
1646    if (no_sigtrap != 1) {
1647      install_signal_handler(SIGTRAP, (void *)signal_handler);
1648    }
1649    install_signal_handler(SIGBUS,  (void *)signal_handler);
1650    install_signal_handler(SIGSEGV, (void *)signal_handler);
1651    install_signal_handler(SIGFPE, (void *)signal_handler);
1652  }
1653 
1654  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1655                         (void *)interrupt_handler);
1656  signal(SIGPIPE, SIG_IGN);
1657}
1658
1659void
1660thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
1661{
1662  TCR *tcr = get_tcr(false);
1663  area *a;
1664  sigset_t mask;
1665 
1666  sigemptyset(&mask);
1667
1668  if (tcr) {
1669    tcr->valence = TCR_STATE_FOREIGN;
1670    a = tcr->vs_area;
1671    if (a) {
1672      a->active = a->high;
1673    }
1674    a = tcr->cs_area;
1675    if (a) {
1676      a->active = a->high;
1677    }
1678  }
1679 
1680  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1681  pthread_exit(NULL);
1682}
1683
1684void
1685thread_signal_setup()
1686{
1687  thread_suspend_signal = SIG_SUSPEND_THREAD;
1688  thread_kill_signal = SIG_KILL_THREAD;
1689
1690  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
1691  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
1692}
1693
1694
1695
1696void
1697unprotect_all_areas()
1698{
1699  protected_area_ptr p;
1700
1701  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1702    unprotect_area(p);
1703  }
1704}
1705
1706/*
1707  A binding subprim has just done "twlle limit_regno,idx_regno" and
1708  the trap's been taken.  Extend the tcr's tlb so that the index will
1709  be in bounds and the new limit will be on a page boundary, filling
1710  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1711  the tcr fields and the registers in the xp and return true if this
1712  all works, false otherwise.
1713
1714  Note that the tlb was allocated via malloc, so realloc can do some
1715  of the hard work.
1716*/
1717Boolean
1718extend_tcr_tlb(TCR *tcr, 
1719               ExceptionInformation *xp, 
1720               unsigned idx_regno)
1721{
1722  unsigned
1723    index = (unsigned) (xpGPR(xp,idx_regno)),
1724    old_limit = tcr->tlb_limit,
1725    new_limit = align_to_power_of_2(index+1,12),
1726    new_bytes = new_limit-old_limit;
1727  LispObj
1728    *old_tlb = tcr->tlb_pointer,
1729    *new_tlb = realloc(old_tlb, new_limit),
1730    *work;
1731
1732  if (new_tlb == NULL) {
1733    return false;
1734  }
1735 
1736  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1737
1738  while (new_bytes) {
1739    *work++ = no_thread_local_binding_marker;
1740    new_bytes -= sizeof(LispObj);
1741  }
1742  tcr->tlb_pointer = new_tlb;
1743  tcr->tlb_limit = new_limit;
1744  return true;
1745}
1746
1747
1748
/* One-time exception-subsystem initialization: install the kernel's
   signal handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
1754
1755
1756
1757
1758
1759#ifdef DARWIN
1760
1761
1762#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1763#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1764
1765
1766
1767#define LISP_EXCEPTIONS_HANDLED_MASK \
1768 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1769
1770/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1771#define NUM_LISP_EXCEPTIONS_HANDLED 4
1772
/* Saved foreign (pre-lisp) Mach exception-port configuration for a
   thread: parallel arrays with one entry per exception type that
   lisp takes over, presumably so the original ports can be restored
   later -- confirm against the port-swapping code. */
typedef struct {
  int foreign_exception_port_count;
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;
1780
1781
1782
1783
1784/*
1785  Mach's exception mechanism works a little better than its signal
1786  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
1788
1789  Initially, we install an exception handler to handle each native
1790  thread's exceptions.  This process involves creating a distinguished
1791  thread which listens for kernel exception messages on a set of
1792  0 or more thread exception ports.  As threads are created, they're
1793  added to that port set; a thread's exception port is destroyed
1794  (and therefore removed from the port set) when the thread exits.
1795
1796  A few exceptions can be handled directly in the handler thread;
1797  others require that we resume the user thread (and that the
1798  exception thread resumes listening for exceptions.)  The user
1799  thread might eventually want to return to the original context
1800  (possibly modified somewhat.)
1801
1802  As it turns out, the simplest way to force the faulting user
1803  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
1805  on the user thread's stack and forces the user thread to resume
1806  execution as if a signal handler had been called with that
1807  context as an argument.  We can use a distinguished UUO at a
1808  distinguished address to do something like sigreturn(); that'll
1809  have the effect of resuming the user thread's execution in
1810  the (pseudo-) signal context.
1811
1812  Since:
1813    a) we have miles of code in C and in Lisp that knows how to
1814    deal with Linux sigcontexts
1815    b) Linux sigcontexts contain a little more useful information
1816    (the DAR, DSISR, etc.) than their Darwin counterparts
1817    c) we have to create a sigcontext ourselves when calling out
1818    to the user thread: we aren't really generating a signal, just
1819    leveraging existing signal-handling code.
1820
1821  we create a Linux sigcontext struct.
1822
1823  Simple ?  Hopefully from the outside it is ...
1824
1825  We want the process of passing a thread's own context to it to
1826  appear to be atomic: in particular, we don't want the GC to suspend
1827  a thread that's had an exception but has not yet had its user-level
1828  exception handler called, and we don't want the thread's exception
1829  context to be modified by a GC while the Mach handler thread is
1830  copying it around.  On Linux (and on Jaguar), we avoid this issue
1831  because (a) the kernel sets up the user-level signal handler and
1832  (b) the signal handler blocks signals (including the signal used
1833  by the GC to suspend threads) until tcr->xframe is set up.
1834
1835  The GC and the Mach server thread therefore contend for the lock
1836  "mach_exception_lock".  The Mach server thread holds the lock
1837  when copying exception information between the kernel and the
1838  user thread; the GC holds this lock during most of its execution
1839  (delaying exception processing until it can be done without
1840  GC interference.)
1841
1842*/
1843
1844
1845#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
1846
1847void
1848fatal_mach_error(char *format, ...);
1849
1850#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
1851
1852
/* Copy the register state saved in PSEUDOSIGCONTEXT back into the
   Mach thread THREAD: first the VFP (floating-point) state, then the
   general-purpose MACHINE_THREAD_STATE.  Any Mach error is fatal. */
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread, 
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
} 
1874
1875/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
1877   in response to a call to pseudo_sigreturn() from the specified
1878   user thread.
1879   Find that context (the user thread's R3 points to it), then
1880   use that context to set the user thread's state.  When this
1881   function's caller returns, the Mach kernel will resume the
1882   user thread.
1883*/
1884
1885kern_return_t
1886do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
1887{
1888  ExceptionInformation *xp;
1889
1890#ifdef DEBUG_MACH_EXCEPTIONS
1891  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
1892#endif
1893  xp = tcr->pending_exception_context;
1894  if (xp) {
1895    tcr->pending_exception_context = NULL;
1896    tcr->valence = TCR_STATE_LISP;
1897    restore_mach_thread_state(thread, xp);
1898    raise_pending_interrupt(tcr);
1899  } else {
1900    Bug(NULL, "no xp here!\n");
1901  }
1902#ifdef DEBUG_MACH_EXCEPTIONS
1903  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
1904#endif
1905  return KERN_SUCCESS;
1906} 
1907
/* Build a pseudo-sigcontext (Linux-style ExceptionInformation) on
   the suspended user thread's own stack, populated from its current
   Mach thread state (GPRs, VFP state, exception state).  Returns
   the context; the lowered stack pointer is stored through
   NEW_STACK_TOP if non-NULL. */
ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread, 
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
 
  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;

  /* Carve the context out of the user thread's stack, just below its
     current stack pointer.  Note: the context and the mcontext share
     the same address here (the mcontext is the first thing in it). */
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  /* NOTE(review): the two thread_get_state() calls below ignore
     their return values, unlike the GPR fetch above -- confirm
     that's intentional. */
  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
1958
1959/*
1960  This code sets up the user thread so that it executes a "pseudo-signal
1961  handler" function when it resumes.  Create a linux sigcontext struct
1962  on the thread's stack and pass it as an argument to the pseudo-signal
1963  handler.
1964
1965  Things are set up so that the handler "returns to" pseudo_sigreturn(),
1966  which will restore the thread's context.
1967
1968  If the handler invokes code that throws (or otherwise never sigreturn()'s
1969  to the context), that's fine.
1970
1971  Actually, check that: throw (and variants) may need to be careful and
1972  pop the tcr's xframe list until it's younger than any frame being
1973  entered.
1974*/
1975
/* Arrange (from the exception thread) for the suspended user thread
   THREAD to run HANDLER_ADDRESS when it resumes, as if a signal
   handler had been invoked: build a pseudo-sigcontext on its stack,
   record it as the tcr's pending exception context, and point the
   thread's registers at the handler with pseudo_sigreturn as the
   return address.  NOTE(review): the CODE argument is currently
   unused.  Always returns 0. */
int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
 

  /*
     It seems like we've created a  sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

  /* Handler receives (signum, context, tcr, old_valence); lr is set
     so that returning from the handler executes the distinguished
     pseudo_sigreturn UUO. */
  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;


  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
2022
2023
/* Entry point the user thread resumes at after the Mach exception
   thread has built a pseudo-sigcontext for it (see
   setup_signal_frame()); forwards to the common signal_handler()
   with no siginfo and a zero saved-frame value. */
void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
} 
2029
2030
/* Stub: per-thread FP-exception enabling isn't implemented for this
   Mach/ARM configuration; always reports success-as-no-op. */
int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}
2037
2038/*
2039  This function runs in the exception handling thread.  It's
2040  called (by this precise name) from the library function "exc_server()"
2041  when the thread's exception ports are set up.  (exc_server() is called
2042  via mach_msg_server(), which is a function that waits for and dispatches
2043  on exception messages from the Mach kernel.)
2044
2045  This checks to see if the exception was caused by a pseudo_sigreturn()
2046  UUO; if so, it arranges for the thread to have its state restored
2047  from the specified context.
2048
2049  Otherwise, it tries to map the exception to a signal number and
2050  arranges that the thread run a "pseudo signal handler" to handle
2051  the exception.
2052
2053  Some exceptions could and should be handled here directly.
2054*/
2055
2056kern_return_t
2057catch_exception_raise(mach_port_t exception_port,
2058                      mach_port_t thread,
2059                      mach_port_t task, 
2060                      exception_type_t exception,
2061                      exception_data_t code_vector,
2062                      mach_msg_type_number_t code_count)
2063{
2064  int signum = 0, code = *code_vector, code1;
2065  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2066  kern_return_t kret;
2067
2068#ifdef DEBUG_MACH_EXCEPTIONS
2069  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2070#endif
2071
2072  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2073    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2074  } 
2075  if ((exception == EXC_BAD_INSTRUCTION) &&
2076      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2077      (((code1 = code_vector[1]) == (int)pseudo_sigreturn))) {
2078    kret = do_pseudo_sigreturn(thread, tcr);
2079  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2080    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2081    kret = 17;
2082  } else {
2083    switch (exception) {
2084    case EXC_BAD_ACCESS:
2085      signum = SIGSEGV;
2086      break;
2087       
2088    case EXC_BAD_INSTRUCTION:
2089      signum = SIGILL;
2090      break;
2091     
2092      break;
2093     
2094    case EXC_ARITHMETIC:
2095      signum = SIGFPE;
2096      break;
2097
2098    default:
2099      break;
2100    }
2101    if (signum) {
2102      kret = setup_signal_frame(thread,
2103                                (void *)pseudo_signal_handler,
2104                                signum,
2105                                code,
2106                                tcr);
2107#if 0
2108      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2109#endif
2110
2111    } else {
2112      kret = 17;
2113    }
2114  }
2115
2116  return kret;
2117}
2118
2119
2120
/* Layout of an incoming Mach exception-raise request message
   (msgh_id 2401), as delivered by the kernel: header, two port
   descriptors (faulting thread and its task), then NDR info, the
   exception type, and the exception code vector.  This must match the
   MIG-generated layout that exc_server() would decode. */
typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];            /* NOTE(review): assumes codeCnt <= 2 — confirm */
  mach_msg_trailer_t trailer;
} exceptionRequest;
2134
2135
2136boolean_t
2137openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2138{
2139  static NDR_record_t _NDR = {0};
2140  kern_return_t handled;
2141  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2142  exceptionRequest *req = (exceptionRequest *) in;
2143
2144  reply->NDR = _NDR;
2145
2146  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2147  out->msgh_remote_port = in->msgh_remote_port;
2148  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2149  out->msgh_local_port = MACH_PORT_NULL;
2150  out->msgh_id = in->msgh_id+100;
2151
2152  /* Could handle other exception flavors in the range 2401-2403 */
2153
2154
2155  if (in->msgh_id != 2401) {
2156    reply->RetCode = MIG_BAD_ID;
2157    return FALSE;
2158  }
2159  handled = catch_exception_raise(req->Head.msgh_local_port,
2160                                  req->thread.name,
2161                                  req->task.name,
2162                                  req->exception,
2163                                  req->code,
2164                                  req->codeCnt);
2165  reply->RetCode = handled;
2166  return TRUE;
2167}
2168
2169/*
2170  The initial function for an exception-handling thread.
2171*/
2172
2173void *
2174exception_handler_proc(void *arg)
2175{
2176  extern boolean_t exc_server();
2177  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2178
2179  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2180  /* Should never return. */
2181  abort();
2182}
2183
2184
2185
2186mach_port_t
2187mach_exception_port_set()
2188{
2189  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2190  kern_return_t kret; 
2191  if (__exception_port_set == MACH_PORT_NULL) {
2192    kret = mach_port_allocate(mach_task_self(),
2193                              MACH_PORT_RIGHT_PORT_SET,
2194                              &__exception_port_set);
2195    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2196    create_system_thread(0,
2197                         NULL,
2198                         exception_handler_proc, 
2199                         (void *)((natural)__exception_port_set));
2200  }
2201  return __exception_port_set;
2202}
2203
/*
  Set up exception handling for a thread.  This involves creating a
  special Mach message port, telling the Mach kernel to send exception
  messages (for the exceptions lisp cares about) for the calling
  thread to that port, and setting up a handler thread which listens
  for and responds to those messages.
*/
2213
/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

  The displaced ("foreign") masks/ports/behaviors/flavors are saved in
  the TCR's native_thread_info so they can be restored later by
  restore_foreign_exception_ports(); tcr->foreign_exception_status is
  set to the mask of lisp-handled exceptions that had a conflicting
  foreign handler.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  /* Swap the lisp port in for every exception in the handled mask;
     the previously installed ports are written into the fxs arrays,
     and n is updated to the number of entries returned. */
  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    /* Accumulate the mask of exceptions that had a real handler
       (neither our own lisp port nor MACH_PORT_NULL) before the swap. */
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}
2260
2261kern_return_t
2262tcr_establish_lisp_exception_port(TCR *tcr)
2263{
2264  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2265}
2266
2267/*
2268  Do this when calling out to or returning from foreign code, if
2269  any conflicting foreign exception ports were established when we
2270  last entered lisp code.
2271*/
2272kern_return_t
2273restore_foreign_exception_ports(TCR *tcr)
2274{
2275  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2276 
2277  if (m) {
2278    MACH_foreign_exception_state *fxs  = 
2279      (MACH_foreign_exception_state *) tcr->native_thread_info;
2280    int i, n = fxs->foreign_exception_port_count;
2281    exception_mask_t tm;
2282
2283    for (i = 0; i < n; i++) {
2284      if ((tm = fxs->masks[i]) & m) {
2285        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2286                                   tm,
2287                                   fxs->ports[i],
2288                                   fxs->behaviors[i],
2289                                   fxs->flavors[i]);
2290      }
2291    }
2292  }
2293}
2294                                   
2295
2296/*
2297  This assumes that a Mach port (to be used as the thread's exception port) whose
2298  "name" matches the TCR's 32-bit address has already been allocated.
2299*/
2300
2301kern_return_t
2302setup_mach_exception_handling(TCR *tcr)
2303{
2304  mach_port_t
2305    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2306    task_self = mach_task_self();
2307  kern_return_t kret;
2308
2309  kret = mach_port_insert_right(task_self,
2310                                thread_exception_port,
2311                                thread_exception_port,
2312                                MACH_MSG_TYPE_MAKE_SEND);
2313  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2314
2315  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2316  if (kret == KERN_SUCCESS) {
2317    mach_port_t exception_port_set = mach_exception_port_set();
2318
2319    kret = mach_port_move_member(task_self,
2320                                 thread_exception_port,
2321                                 exception_port_set);
2322  }
2323  return kret;
2324}
2325
2326void
2327darwin_exception_init(TCR *tcr)
2328{
2329  void tcr_monitor_exception_handling(TCR*, Boolean);
2330  kern_return_t kret;
2331  MACH_foreign_exception_state *fxs = 
2332    calloc(1, sizeof(MACH_foreign_exception_state));
2333 
2334  tcr->native_thread_info = (void *) fxs;
2335
2336  if ((kret = setup_mach_exception_handling(tcr))
2337      != KERN_SUCCESS) {
2338    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2339    terminate_lisp();
2340  }
2341}
2342
2343/*
2344  The tcr is the "name" of the corresponding thread's exception port.
2345  Destroying the port should remove it from all port sets of which it's
2346  a member (notably, the exception port set.)
2347*/
2348void
2349darwin_exception_cleanup(TCR *tcr)
2350{
2351  void *fxs = tcr->native_thread_info;
2352  extern Boolean use_mach_exception_handling;
2353
2354  if (fxs) {
2355    tcr->native_thread_info = NULL;
2356    free(fxs);
2357  }
2358  if (use_mach_exception_handling) {
2359    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2360    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2361  }
2362}
2363
2364
2365Boolean
2366suspend_mach_thread(mach_port_t mach_thread)
2367{
2368  kern_return_t status;
2369  Boolean aborted = false;
2370 
2371  do {
2372    aborted = false;
2373    status = thread_suspend(mach_thread);
2374    if (status == KERN_SUCCESS) {
2375      status = thread_abort_safely(mach_thread);
2376      if (status == KERN_SUCCESS) {
2377        aborted = true;
2378      } else {
2379        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2380        thread_resume(mach_thread);
2381      }
2382    } else {
2383      return false;
2384    }
2385  } while (! aborted);
2386  return true;
2387}
2388
/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.

  On success, tcr->suspend_context is set to a pseudo signal context
  reflecting the suspended thread's register state.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    /* Build a fake signal context from the thread's current state so
       the suspended thread can be treated uniformly with
       signal-suspended ones.  NOTE(review): assumes
       create_thread_context_frame() can't return NULL — confirm. */
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}
2411
/*
  Undo mach_suspend_tcr(): restore the thread's register state from
  the pseudo signal context captured at suspend time, clear the TCR's
  suspend_context, and let the thread run again.
*/
void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}
2431
/*
  Format a printf-style message into a fixed-size buffer and die via
  Fatal(), with "Mach error" as the message prefix.
*/
void
fatal_mach_error(char *format, ...)
{
  char message[512];
  va_list args;

  va_start(args, format);
  vsnprintf(message, sizeof(message), format, args);
  va_end(args);

  Fatal("Mach error", message);
}
2445
2446void
2447pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2448{
2449  interrupt_handler(signum, NULL, context);
2450}
2451
/*
  Interrupt TARGET's thread via Mach: suspend it, and if it is safely
  interruptible (running lisp code, not unwinding, interrupts enabled),
  arrange for it to run pseudo_interrupt_handler() when resumed.
  Otherwise just record a pending interrupt in the TCR.  Always
  resumes the thread and returns 0.  Serialized via TCR_AREA_LOCK.
*/
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      /* suspend_count == 1 means we're the only suspender; don't
         tamper with a thread someone else has also suspended. */
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          /* Redirect the thread so it runs the pseudo interrupt
             handler when it resumes. */
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      /* Couldn't interrupt now: leave a pending-interrupt fixnum for
         the thread to notice later. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
2493
2494#endif
Note: See TracBrowser for help on using the repository browser.