source: branches/arm/lisp-kernel/arm-exceptions.c @ 13848

Pass siginfo to handle_uuo; change debug_trap handling.

/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif
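
/* A minimal usage sketch (illustrative only, not part of the kernel):
   bracketing a floating-point operation so that it traps synchronously
   under the "precise" mode requested above. */
#if 0
double
checked_divide(double x, double y)
{
  double result;

  enable_fp_exceptions();    /* PR_FP_EXC_PRECISE: trap at the faulting insn */
  result = x / y;            /* a zero y would now raise SIGFPE right here */
  disable_fp_exceptions();   /* back to PR_FP_EXC_DISABLED */
  return result;
}
#endif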

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
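
/* A sketch of the immediate decoding above (illustrative only): an ARM
   data-processing immediate is an 8-bit value rotated right by twice
   the 4-bit rotate field, and (prev_instr&0xf00)>>7 yields that doubled
   rotate count directly. */
#if 0
static natural
decode_dp_immediate(opcode instr)
{
  natural imm8    = instr & 0xff;          /* bits 0..7 */
  natural rotate2 = (instr & 0xf00) >> 7;  /* bits 8..11, already doubled */

  /* e.g. imm8 = 0x05 with rotate field 0xf gives rotate2 = 30, and
     ror(0x05, 30) = 0x14: a displacement of 20 bytes. */
  return ror(imm8, rotate2);
}
#endif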

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
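
/* The window being emulated above, as an assembly sketch (register
   names and offsets are illustrative; the IS_* predicates define the
   real encodings):

     <alloc trap>                        ; may suspend here, allocptr tagged
     str  rX, [allocptr, #car-offset]    ; IS_SET_ALLOCPTR_CAR_RD
     str  rY, [allocptr, #cdr-offset]    ; IS_SET_ALLOCPTR_CDR_RD
     mov  rZ, allocptr                   ; IS_SET_ALLOCPTR_RESULT_RD
     bic  allocptr, allocptr, #tagmask   ; IS_CLR_ALLOCPTR_TAG, ends the window
*/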

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
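
/* allocptr moves downward as objects are allocated, so the bytes
   consumed since the last sample are (last - current): e.g. last =
   0x10000 and current = 0xff00 credit 0x100 bytes to
   tcr->bytes_allocated. */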

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
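
/* Each pass of the loop above fills one dnode-sized cons: slot 0 (the
   cdr) gets the previously-built cell and slot 1 (the car) gets
   "initial".  E.g. with nconses = 3 and initial = X, the value left in
   arg_z is the 3-element list (X X X), built tail-cell-first. */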

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  natural cur_allocptr, bytes_needed = 0;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}
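
/* Selector encoding, as used above: values above GC_TRAP_FUNCTION_GC
   carry option bits, so (for example) a save-application request tests
   true for (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) and runs a
   full GC first, while a purify request additionally resets
   OLDSPACE_DNODE_COUNT before re-running the GC. */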


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just hit its soft overflow limit.
     Signal an error_stack_overflow condition so that lisp can unwind
     to a point where all stacks are again well under their limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
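
/* The sizing arithmetic above, worked through (assuming a 64KB, i.e.
   2^16-byte, allocation quantum): oldlimit = 0x12345678 rounds up to
   0x12350000 and need = 0x100 rounds up to 0x10000, so newlimit =
   0x12360000; the segment handed to the thread is the span between the
   old and new limits. */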

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0) {
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler *protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
#ifdef LINUX
  /* Based on experiments with a small sample size; need to R TFM. */
  return ((xp->uc_mcontext.trap_no == 0xe) &&
          (xp->uc_mcontext.error_code == 0x817));
#else
  /* Unknown on this platform; conservatively say no. */
  return false;
#endif
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
    }
  }
  return false;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */


Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack.  Unprotect the
     guard page and signal an error_stack_overflow condition.
  */
  return do_vsp_overflow(xp,addr);
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}

/* On PPC, this tried to emulate the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware but
   not on Motorola/Freescale systems.  Nothing is emulated on ARM;
   just report the instruction as unhandled.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return -1;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, xnum, 0);
    return true;
  }

  return false;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
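
/* Note: xpPC() denotes an (opcode *), so the pointer arithmetic above
   scales by sizeof(opcode) == 4; "delta" is in bytes, hence the >> 2.
   adjust_exception_pc(xp, 4) therefore skips exactly one 32-bit ARM
   instruction. */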

/* On PPC, this wanted to scan backwards until "where" pointed to an
   instruction whose major opcode was either 63 (double-float) or 59
   (single-float).  Nothing is implemented here on ARM.
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;

  return err;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR *target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, SIGNAL_FOR_PROCESS_INTERRUPT, 0);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
    handled = handle_error(xp,0,the_uuo);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
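
/* For example: a function whose code vector holds 0x40 32-bit opcodes
   has size = 0x40 << 2 = 0x100 bytes, so any PC strictly between the
   (untagged) start of the vector and code_vector + 0x100 is treated as
   being "inside" that function. */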

int
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
{
  return callback_to_lisp(callback_macptr, xp, info,arg);
}

int
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2)
{
  natural callback_ptr;
  area *a;
  natural fnreg = fn, codevector, offset;
  pc where = xpPC(xp);
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  }
  offset = (natural)where - codevector;

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return 0;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Not yet fleshed out on ARM; report the trap as unhandled. */
  return -1;
}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}


Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, arg1, arg2);
    return true;
  }

  return false;
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.

  (Cases (b) and (d) describe the PPC port; the ARM code below handles
  consing and the write-barrier subprims.)
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;

          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}


void
install_signal_handler(int signo, void *handler)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_SIGINFO
    ;

  sigaction(signo, &sa, NULL);
}

void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    extern int no_sigtrap;
    install_signal_handler(SIGILL, (void *)signal_handler);
    if (no_sigtrap != 1) {
      install_signal_handler(SIGTRAP, (void *)signal_handler);
    }
    install_signal_handler(SIGBUS,  (void *)signal_handler);
    install_signal_handler(SIGSEGV, (void *)signal_handler);
    install_signal_handler(SIGFPE, (void *)signal_handler);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
  sigset_t mask;

  sigemptyset(&mask);

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just trapped with a thread-local-binding index
  that's out of bounds (on PPC this was a "twlle limit_regno,idx_regno";
  on ARM it arrives as a UUO).  Extend the tcr's tlb so that the index
  will be in bounds and the new limit will be on a page boundary,
  filling in the new page(s) with 'no_thread_local_binding_marker'.
  Update the tcr fields and the registers in the xp and return true if
  this all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}
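
/* Worked example of the growth arithmetic above: with old_limit =
   0x1000 and a trapped index of 0x1008, new_limit =
   align_to_power_of_2(0x1009, 12) = 0x2000, so the tlb is realloc'ed
   to 0x2000 bytes and the 0x1000 new bytes are seeded with
   no_thread_local_binding_marker. */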


void
exception_init()
{
  install_pmcl_exception_handlers();
}


#ifdef DARWIN


#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))


#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

typedef struct {
  int foreign_exception_port_count;
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;

/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better).

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resumes listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code.

  we create a Linux sigcontext struct.

  Simple?  Hopefully from the outside it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)
*/


#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_FLOAT_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread).
   Find the pending exception context (saved in the user thread's
   TCR), then use that context to set the user thread's state.  When
   this function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}
1888
1889ExceptionInformation *
1890create_thread_context_frame(mach_port_t thread, 
1891                            natural *new_stack_top)
1892{
1893  arm_thread_state_t ts;
1894  mach_msg_type_number_t thread_state_count;
1895  kern_return_t result;
1896  ExceptionInformation *pseudosigcontext;
1897  MCONTEXT_T mc;
1898  natural stackp, backlink;
1899
1900  thread_state_count = MACHINE_THREAD_STATE_COUNT;
1901  result = thread_get_state(thread, 
1902                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
1903                            (thread_state_t)&ts,
1904                            &thread_state_count);
1905 
1906  if (result != KERN_SUCCESS) {
1907    get_tcr(true);
1908    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
1909  }
  stackp = ts.__sp;             /* the interrupted thread's stack pointer */
1911  backlink = stackp;
1912  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
1913  stackp -= sizeof(*pseudosigcontext);
1914  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
1915
1916  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
1917  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
1918  memmove(&(mc->__ss),&ts,sizeof(ts));
1919
  thread_state_count = ARM_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   ARM_FLOAT_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);
1925
1926
  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);
1940
1941
1942  UC_MCONTEXT(pseudosigcontext) = mc;
1943  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
1944  stackp -= C_LINKAGE_LEN;
1945  *(natural *)ptr_from_lispobj(stackp) = backlink;
1946  if (new_stack_top) {
1947    *new_stack_top = stackp;
1948  }
1949  return pseudosigcontext;
1950}
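
/*
  The frame built above, sketched with higher addresses first (exact
  offsets depend on C_REDZONE_LEN, C_STK_ALIGN, C_PARAMSAVE_LEN and
  C_LINKAGE_LEN):

    original SP  ->  caller's frame ("backlink")
                     red zone (skipped)
                     pseudosigcontext (ExceptionInformation)
                     mcontext: __ss/__fs/__es copied from the thread
                     parameter save area
    new SP       ->  linkage word, pointing back at the backlink
*/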
1951
1952/*
1953  This code sets up the user thread so that it executes a "pseudo-signal
1954  handler" function when it resumes.  Create a linux sigcontext struct
1955  on the thread's stack and pass it as an argument to the pseudo-signal
1956  handler.
1957
1958  Things are set up so that the handler "returns to" pseudo_sigreturn(),
1959  which will restore the thread's context.
1960
1961  If the handler invokes code that throws (or otherwise never sigreturn()'s
1962  to the context), that's fine.
1963
1964  Actually, check that: throw (and variants) may need to be careful and
1965  pop the tcr's xframe list until it's younger than any frame being
1966  entered.
1967*/
1968
1969int
1970setup_signal_frame(mach_port_t thread,
1971                   void *handler_address,
1972                   int signum,
1973                   int code,
1974                   TCR *tcr)
1975{
  arm_thread_state_t ts;
1981  ExceptionInformation *pseudosigcontext;
1982  int old_valence = tcr->valence;
1983  natural stackp;
1984
1985#ifdef DEBUG_MACH_EXCEPTIONS
1986  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
1987#endif
1988  pseudosigcontext = create_thread_context_frame(thread, &stackp);
1989  pseudosigcontext->uc_onstack = 0;
1990  pseudosigcontext->uc_sigmask = (sigset_t) 0;
1991  tcr->pending_exception_context = pseudosigcontext;
1992  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1993 
1994
  /*
     We've now created a sigcontext on the thread's stack.  Set
     things up so that we call the handler (with appropriate args)
     when the thread's resumed.  Per the ARM AAPCS, the handler's
     first four arguments travel in r0-r3; we start from the saved
     thread state so that the CPSR (and anything else we don't
     explicitly set) stays sane.  (The PPC version also cleared the
     MSR's FE0/FE1 bits here; on ARM, FP-exception enables live in
     the FPSCR, which travels with the float state instead.)
  */

  ts = UC_MCONTEXT(pseudosigcontext)->__ss;
  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;
2009
2010
  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
2023#ifdef DEBUG_MACH_EXCEPTIONS
2024  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2025#endif
2026  return 0;
2027}
2028
2029
2030void
2031pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2032{
2033  signal_handler(signum, NULL, context, tcr, old_valence);
2034} 
2035
2036
2037int
2038thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2039{
2040#ifdef PPC64
2041  ppc_thread_state64_t ts;
2042#else
2043  ppc_thread_state_t ts;
2044#endif
2045  mach_msg_type_number_t thread_state_count;
2046
2047#ifdef PPC64
2048  thread_state_count = PPC_THREAD_STATE64_COUNT;
2049#else
2050  thread_state_count = PPC_THREAD_STATE_COUNT;
2051#endif
2052  thread_get_state(thread, 
2053#ifdef PPC64
2054                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2055#else
2056                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2057#endif
2058                   (thread_state_t)&ts,
2059                   &thread_state_count);
2060  if (enabled) {
2061    ts.__srr1 |= MSR_FE0_FE1_MASK;
2062  } else {
2063    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2064  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and forces the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between our interrupt handler
     returning and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
2080  {
2081    lisp_frame *tos = (lisp_frame *)ts.__r1,
2082      *next_frame = tos->backlink;
2083   
2084    if (tos == (next_frame -1)) {
2085      ts.__srr0 = tos->savelr;
2086      ts.__r1 = (LispObj) next_frame;
2087    } else {
2088      ts.__srr0 += 4;
2089    }
2090  }
2091  thread_set_state(thread, 
2092#ifdef PPC64
2093                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2094#else
2095                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2096#endif
2097                   (thread_state_t)&ts,
2098#ifdef PPC64
2099                   PPC_THREAD_STATE64_COUNT
2100#else
2101                   PPC_THREAD_STATE_COUNT
2102#endif
2103                   );
2104
2105  return 0;
2106}
2107
/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  -- or, here, from our hand-rolled equivalent, openmcl_exc_server()
  below -- when the thread's exception ports are set up.  (exc_server()
  is called via mach_msg_server(), which is a function that waits for
  and dispatches on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/
2125
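/*
  Note on the "decline" path: returning anything other than
  KERN_SUCCESS from this function (the arbitrary 17 below) makes
  openmcl_exc_server() report failure to the kernel, which then
  tries the next exception handler (task level, then host level),
  typically producing the normal crash/debugger behavior.  That's
  the intent when TCR_FLAG_BIT_PROPAGATE_EXCEPTION is set.
*/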
2126kern_return_t
2127catch_exception_raise(mach_port_t exception_port,
2128                      mach_port_t thread,
2129                      mach_port_t task, 
2130                      exception_type_t exception,
2131                      exception_data_t code_vector,
2132                      mach_msg_type_number_t code_count)
2133{
2134  int signum = 0, code = *code_vector, code1;
2135  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2136  kern_return_t kret;
2137
2138#ifdef DEBUG_MACH_EXCEPTIONS
2139  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2140#endif
2141
2142  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2143    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2144  } 
2145  if ((exception == EXC_BAD_INSTRUCTION) &&
2146      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2147      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2148       (code1 == (int)enable_fp_exceptions) ||
2149       (code1 == (int)disable_fp_exceptions))) {
2150    if (code1 == (int)pseudo_sigreturn) {
2151      kret = do_pseudo_sigreturn(thread, tcr);
2152#if 0
2153      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2154#endif
2155       
2156    } else if (code1 == (int)enable_fp_exceptions) {
2157      kret = thread_set_fp_exceptions_enabled(thread, true);
2158    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2159  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2160    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;                  /* any non-KERN_SUCCESS value: decline to handle */
2162  } else {
2163    switch (exception) {
2164    case EXC_BAD_ACCESS:
2165      signum = SIGSEGV;
2166      break;
2167       
2168    case EXC_BAD_INSTRUCTION:
2169      signum = SIGILL;
2170      break;
2171     
2172    case EXC_SOFTWARE:
2173      if (code == EXC_PPC_TRAP) {
2174        signum = SIGTRAP;
2175      }
2176      break;
2177     
2178    case EXC_ARITHMETIC:
2179      signum = SIGFPE;
2180      break;
2181
2182    default:
2183      break;
2184    }
2185    if (signum) {
2186      kret = setup_signal_frame(thread,
2187                                (void *)pseudo_signal_handler,
2188                                signum,
2189                                code,
2190                                tcr);
2191#if 0
2192      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2193#endif
2194
2195    } else {
      kret = 17;                /* decline: no signal mapping for this exception */
2197    }
2198  }
2199
2200  return kret;
2201}
2202
2203
2204
2205typedef struct {
2206  mach_msg_header_t Head;
2207  /* start of the kernel processed data */
2208  mach_msg_body_t msgh_body;
2209  mach_msg_port_descriptor_t thread;
2210  mach_msg_port_descriptor_t task;
2211  /* end of the kernel processed data */
2212  NDR_record_t NDR;
2213  exception_type_t exception;
2214  mach_msg_type_number_t codeCnt;
2215  integer_t code[2];
2216  mach_msg_trailer_t trailer;
2217} exceptionRequest;
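/*
  This matches the layout MIG generates for an exception_raise
  request (__Request__exception_raise_t, plus the receive-side
  trailer); keeping a private copy here presumably avoids a
  dependency on the MIG-generated headers.
*/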
2218
2219
2220boolean_t
2221openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2222{
2223  static NDR_record_t _NDR = {0};
2224  kern_return_t handled;
2225  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2226  exceptionRequest *req = (exceptionRequest *) in;
2227
2228  reply->NDR = _NDR;
2229
2230  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2231  out->msgh_remote_port = in->msgh_remote_port;
2232  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2233  out->msgh_local_port = MACH_PORT_NULL;
2234  out->msgh_id = in->msgh_id+100;
2235
  /* Could handle the other exception flavors in the range 2401-2403:
     2401 = exception_raise, 2402 = exception_raise_state,
     2403 = exception_raise_state_identity.  We only expect 2401. */
2237
2238
2239  if (in->msgh_id != 2401) {
2240    reply->RetCode = MIG_BAD_ID;
2241    return FALSE;
2242  }
2243  handled = catch_exception_raise(req->Head.msgh_local_port,
2244                                  req->thread.name,
2245                                  req->task.name,
2246                                  req->exception,
2247                                  req->code,
2248                                  req->codeCnt);
2249  reply->RetCode = handled;
2250  return TRUE;
2251}
2252
2253/*
2254  The initial function for an exception-handling thread.
2255*/
2256
2257void *
2258exception_handler_proc(void *arg)
2259{
2260  extern boolean_t exc_server();
2261  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2262
2263  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2264  /* Should never return. */
2265  abort();
2266}
2267
2268
2269
2270mach_port_t
2271mach_exception_port_set()
2272{
2273  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2274  kern_return_t kret; 
2275  if (__exception_port_set == MACH_PORT_NULL) {
2276    kret = mach_port_allocate(mach_task_self(),
2277                              MACH_PORT_RIGHT_PORT_SET,
2278                              &__exception_port_set);
2279    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2280    create_system_thread(0,
2281                         NULL,
2282                         exception_handler_proc, 
2283                         (void *)((natural)__exception_port_set));
2284  }
2285  return __exception_port_set;
2286}
2287
2288/*
2289  Setup a new thread to handle those exceptions specified by
2290  the mask "which".  This involves creating a special Mach
2291  message port, telling the Mach kernel to send exception
2292  messages for the calling thread to that port, and setting
2293  up a handler thread which listens for and responds to
2294  those messages.
2295
2296*/
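
/*
  As a sketch, the per-thread setup sequence implemented below is:

    darwin_exception_init(tcr)
      -> setup_mach_exception_handling(tcr)
           -> mach_port_insert_right()        make the port sendable
           -> tcr_establish_exception_port()  point the thread at it
           -> mach_port_move_member()         add it to the port set
                                              that exception_handler_proc()
                                              is already servicing
*/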
2297
2298/*
2299  Establish the lisp thread's TCR as its exception port, and determine
2300  whether any other ports have been established by foreign code for
2301  exceptions that lisp cares about.
2302
2303  If this happens at all, it should happen on return from foreign
2304  code and on entry to lisp code via a callback.
2305
2306  This is a lot of trouble (and overhead) to support Java, or other
2307  embeddable systems that clobber their caller's thread exception ports.
2308 
2309*/
2310kern_return_t
2311tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2312{
2313  kern_return_t kret;
2314  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2315  int i;
2316  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2317  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2318  exception_mask_t mask = 0;
2319
2320  kret = thread_swap_exception_ports(thread,
2321                                     LISP_EXCEPTIONS_HANDLED_MASK,
2322                                     lisp_port,
2323                                     EXCEPTION_DEFAULT,
2324                                     THREAD_STATE_NONE,
2325                                     fxs->masks,
2326                                     &n,
2327                                     fxs->ports,
2328                                     fxs->behaviors,
2329                                     fxs->flavors);
2330  if (kret == KERN_SUCCESS) {
2331    fxs->foreign_exception_port_count = n;
2332    for (i = 0; i < n; i ++) {
2333      foreign_port = fxs->ports[i];
2334
2335      if ((foreign_port != lisp_port) &&
2336          (foreign_port != MACH_PORT_NULL)) {
2337        mask |= fxs->masks[i];
2338      }
2339    }
2340    tcr->foreign_exception_status = (int) mask;
2341  }
2342  return kret;
2343}
2344
2345kern_return_t
2346tcr_establish_lisp_exception_port(TCR *tcr)
2347{
2348  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2349}
2350
2351/*
2352  Do this when calling out to or returning from foreign code, if
2353  any conflicting foreign exception ports were established when we
2354  last entered lisp code.
2355*/
2356kern_return_t
2357restore_foreign_exception_ports(TCR *tcr)
2358{
2359  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2360 
2361  if (m) {
2362    MACH_foreign_exception_state *fxs  = 
2363      (MACH_foreign_exception_state *) tcr->native_thread_info;
2364    int i, n = fxs->foreign_exception_port_count;
2365    exception_mask_t tm;
2366
2367    for (i = 0; i < n; i++) {
2368      if ((tm = fxs->masks[i]) & m) {
2369        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2370                                   tm,
2371                                   fxs->ports[i],
2372                                   fxs->behaviors[i],
2373                                   fxs->flavors[i]);
2374      }
2375    }
2376  }
  return KERN_SUCCESS;
}
2378                                   
2379
2380/*
2381  This assumes that a Mach port (to be used as the thread's exception port) whose
2382  "name" matches the TCR's 32-bit address has already been allocated.
2383*/
2384
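/*
  A sketch of how such a port might be allocated (illustrative; the
  actual allocation happens elsewhere in the kernel sources).
  mach_port_allocate_name() lets the caller pick the port's name, so
  the TCR_TO_EXCEPTION_PORT()/TCR_FROM_EXCEPTION_PORT() mappings can
  be simple casts:
*/
#if 0
static kern_return_t
allocate_tcr_exception_port_sketch(TCR *tcr)
{
  return mach_port_allocate_name(mach_task_self(),
                                 MACH_PORT_RIGHT_RECEIVE,
                                 (mach_port_name_t)((natural)tcr));
}
#endif
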
2385kern_return_t
2386setup_mach_exception_handling(TCR *tcr)
2387{
2388  mach_port_t
2389    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2390    task_self = mach_task_self();
2391  kern_return_t kret;
2392
2393  kret = mach_port_insert_right(task_self,
2394                                thread_exception_port,
2395                                thread_exception_port,
2396                                MACH_MSG_TYPE_MAKE_SEND);
2397  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2398
2399  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2400  if (kret == KERN_SUCCESS) {
2401    mach_port_t exception_port_set = mach_exception_port_set();
2402
2403    kret = mach_port_move_member(task_self,
2404                                 thread_exception_port,
2405                                 exception_port_set);
2406  }
2407  return kret;
2408}
2409
2410void
2411darwin_exception_init(TCR *tcr)
2412{
2413  void tcr_monitor_exception_handling(TCR*, Boolean);
2414  kern_return_t kret;
2415  MACH_foreign_exception_state *fxs = 
2416    calloc(1, sizeof(MACH_foreign_exception_state));
2417 
2418  tcr->native_thread_info = (void *) fxs;
2419
2420  if ((kret = setup_mach_exception_handling(tcr))
2421      != KERN_SUCCESS) {
2422    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2423    terminate_lisp();
2424  }
2425}
2426
2427/*
2428  The tcr is the "name" of the corresponding thread's exception port.
2429  Destroying the port should remove it from all port sets of which it's
2430  a member (notably, the exception port set.)
2431*/
2432void
2433darwin_exception_cleanup(TCR *tcr)
2434{
2435  void *fxs = tcr->native_thread_info;
2436  extern Boolean use_mach_exception_handling;
2437
2438  if (fxs) {
2439    tcr->native_thread_info = NULL;
2440    free(fxs);
2441  }
2442  if (use_mach_exception_handling) {
2443    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2444    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2445  }
2446}
2447
2448
2449Boolean
2450suspend_mach_thread(mach_port_t mach_thread)
2451{
2452  kern_return_t status;
2453  Boolean aborted = false;
2454 
2455  do {
2456    aborted = false;
2457    status = thread_suspend(mach_thread);
2458    if (status == KERN_SUCCESS) {
2459      status = thread_abort_safely(mach_thread);
2460      if (status == KERN_SUCCESS) {
2461        aborted = true;
2462      } else {
2463        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2464        thread_resume(mach_thread);
2465      }
2466    } else {
2467      return false;
2468    }
2469  } while (! aborted);
2470  return true;
2471}
2472
2473/*
2474  Only do this if pthread_kill indicated that the pthread isn't
2475  listening to signals anymore, as can happen as soon as pthread_exit()
2476  is called on Darwin.  The thread could still call out to lisp as it
2477  is exiting, so we need another way to suspend it in this case.
2478*/
2479Boolean
2480mach_suspend_tcr(TCR *tcr)
2481{
2482  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2483  ExceptionInformation *pseudosigcontext;
2484  Boolean result = false;
2485 
2486  result = suspend_mach_thread(mach_thread);
2487  if (result) {
2488    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2489    pseudosigcontext->uc_onstack = 0;
2490    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2491    tcr->suspend_context = pseudosigcontext;
2492  }
2493  return result;
2494}
2495
2496void
2497mach_resume_tcr(TCR *tcr)
2498{
2499  ExceptionInformation *xp;
2500  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2501 
2502  xp = tcr->suspend_context;
2503#ifdef DEBUG_MACH_EXCEPTIONS
2504  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2505          tcr, tcr->pending_exception_context);
2506#endif
2507  tcr->suspend_context = NULL;
2508  restore_mach_thread_state(mach_thread, xp);
2509#ifdef DEBUG_MACH_EXCEPTIONS
2510  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2511          tcr, tcr->pending_exception_context);
2512#endif
2513  thread_resume(mach_thread);
2514}
2515
2516void
2517fatal_mach_error(char *format, ...)
2518{
2519  va_list args;
2520  char s[512];
2521 
2522
2523  va_start(args, format);
2524  vsnprintf(s, sizeof(s),format, args);
2525  va_end(args);
2526
2527  Fatal("Mach error", s);
2528}
2529
2530void
2531pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2532{
2533  interrupt_handler(signum, NULL, context);
2534}
2535
2536int
2537mach_raise_thread_interrupt(TCR *target)
2538{
2539  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2540  kern_return_t kret;
2541  Boolean result = false;
2542  TCR *current = get_tcr(false);
2543  thread_basic_info_data_t info; 
2544  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2545
2546  LOCK(lisp_global(TCR_AREA_LOCK), current);
2547
2548  if (suspend_mach_thread(mach_thread)) {
2549    if (thread_info(mach_thread,
2550                    THREAD_BASIC_INFO,
2551                    (thread_info_t)&info,
2552                    &info_count) == KERN_SUCCESS) {
2553      if (info.suspend_count == 1) {
2554        if ((target->valence == TCR_STATE_LISP) &&
2555            (!target->unwinding) &&
2556            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2557          kret = setup_signal_frame(mach_thread,
2558                                    (void *)pseudo_interrupt_handler,
2559                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2560                                    0,
2561                                    target);
2562          if (kret == KERN_SUCCESS) {
2563            result = true;
2564          }
2565        }
2566      }
2567    }
2568    if (! result) {
2569      target->interrupt_pending = 1 << fixnumshift;
2570    }
2571    thread_resume(mach_thread);
2572   
2573  }
2574  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2575  return 0;
2576}
2577
2578#endif