source: branches/arm/lisp-kernel/arm-exceptions.c @ 13892

Last change on this file since 13892 was 13892, checked in by gb, 10 years ago

arm-asm.lisp: change handling of non-conditional instructions (like "clrex").

arm-vinsns.lisp: EXTRACT-TAG-FIXNUM: shift first, then mask.

arm-utils.lisp: %WALK-DYNAMIC-AREA: more sanity checks, more sane.

l0-misc.lisp: %THREAD-STACK-SPACE: no temp stack on ARM.

arm-exceptions.c : callback_to_lisp(): don't assume that TCR is in rcontext.
handle_exception(): punt if in foreign code. handle_uuo(): break.

arm-spentry.s : _SPtfuncalllgen: push the right register.
_SPatomic_incf_node: preserve unboxed offset in case we loop.

arm-subprims.s: toplevel_loop, call _SPfuncall since the macro just
jumps.

Can read toplevel forms, but TOPLEVEL-EVAL uses PROGV which isn't all
there yet.

File size: 67.4 KB
/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"


#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
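
/* A minimal sketch (not compiled) of the immediate decoding assumed
   above.  An ARM data-processing immediate is an 8-bit value rotated
   right by twice its 4-bit rotate field, which is why the code above
   shifts the rotate field right by 7 rather than 8: the extra bit
   doubles it.  The helper name is hypothetical, for illustration only. */
#if 0
static natural
decode_arm_dp_immediate(opcode instr)
{
  natural imm8 = instr & 0xff;          /* bits 0-7: 8-bit payload */
  natural rot4 = (instr & 0xf00) >> 8;  /* bits 8-11: rotate field */

  /* e.g., imm8 = 1, rot4 = 14: ror(1,28) == 16 */
  return ror(imm8, 2 * rot4);
}
#endif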

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
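
/* For reference, the instruction sequence being emulated above looks
   roughly like this (register names and instruction ordering are
   illustrative; the emulation only assumes the four instruction
   shapes it tests for):

       sub   allocptr, allocptr, #(cons.size - fulltag_cons)
       ...allocation trap...
       str   rY, [allocptr, #cons.cdr]         ; IS_SET_ALLOCPTR_CDR_RD
       str   rX, [allocptr, #cons.car]         ; IS_SET_ALLOCPTR_CAR_RD
       mov   rZ, allocptr                      ; IS_SET_ALLOCPTR_RESULT_RD
       bic   allocptr, allocptr, #fulltagmask  ; IS_CLR_ALLOCPTR_TAG
*/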

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
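
/* Note that allocation decrements allocptr (see allocptr_displacement
   above), so the amount consumed since the last update is last-current.
   E.g., if last == 0x10000 and the thread has since allocated a 16-byte
   object, current == 0xfff0 and bytes_allocated grows by 16.
   (Addresses here are illustrative.) */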

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    /* Link each new cell's cdr to the previously-built cell; every
       car gets the initial element. */
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  natural cur_allocptr, bytes_needed = 0;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
  }

  adjust_exception_pc(xp,4);
  return true;
}


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition; the lisp error-handling
     code is expected to call restore_soft_stack_limit() (below) to
     re-establish the soft limit once the condition's been handled.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}
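
/* E.g., with 4096-byte pages: if p->start is at 0x100000 and the
   current soft limit is 0x105000, the proposed limit 0x104000 is still
   at least 16384 bytes above p->start, so the protected region shrinks
   by one page; once the limit reaches p->start+16384 it stops moving.
   (Addresses here are illustrative.) */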

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        /* Try a smaller extension: halve (and re-align) the slack
           until it drops below 4MB, at which point we give up. */
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
#ifdef LINUX
  /* Based on experiments with a small sample size; need to R TFM. */
  return ((xp->uc_mcontext.trap_no == 0xe) &&
          (xp->uc_mcontext.error_code == 0x817));
#else
  /* Assumption: no way to tell on other platforms yet, so answer
     conservatively. */
  return false;
#endif
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
    }
  }
  return false;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */


Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack.  Unprotect the
     guard page and signal an error_stack_overflow condition.
  */
  return do_vsp_overflow(xp,addr);
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}


Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum, 
                 ExceptionInformation *xp, 
                 TCR *tcr, 
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, xnum, 0);
    return true;
  }

  return false;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
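
/* xpPC() denotes an opcode pointer, so the pointer arithmetic above is
   in 4-byte ARM instruction units: a byte delta of 4 advances the saved
   PC by exactly one instruction.  E.g., adjust_exception_pc(xp, 4)
   skips the trapping instruction on resume. */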


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
  (Those are PPC opcodes; this is still an unported stub on ARM.)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  return -1;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
{
  unsigned 
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, SIGNAL_FOR_PROCESS_INTERRUPT, 0);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
    handled = handle_error(xp,0,the_uuo);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

int
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
{
  return callback_to_lisp(callback_macptr, xp, info,arg);
}

int
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = fn,  codevector, offset;
  pc where = xpPC(xp);
  TCR *tcr = get_tcr(true);

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  }
  offset = (natural)where - codevector;

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return 0;                     /* the result is currently ignored */
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Not yet implemented in this snapshot of the ARM port. */
  return -1;
}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}


Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, arg1, arg2);
    return true;
  }

  return false;
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler.)
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end, 
  egc_store_node_conditional, 
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;

extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.)
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;

          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}


void
install_signal_handler(int signo, void *handler)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags = 
    0 /* SA_RESTART */
    | SA_SIGINFO
    ;

  sigaction(signo, &sa, NULL);
}

void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    extern int no_sigtrap;
    install_signal_handler(SIGILL, (void *)signal_handler);
    if (no_sigtrap != 1) {
      install_signal_handler(SIGTRAP, (void *)signal_handler);
    }
    install_signal_handler(SIGBUS,  (void *)signal_handler);
    install_signal_handler(SIGSEGV, (void *)signal_handler);
    install_signal_handler(SIGFPE, (void *)signal_handler);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
  sigset_t mask;

  sigemptyset(&mask);

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just trapped on an out-of-bounds tlb index.
  Extend the tcr's tlb so that the index will be in bounds and the new
  limit will be on a page boundary, filling in the new page(s) with
  'no_thread_local_binding_marker'.  Update the tcr fields and the
  registers in the xp and return true if this all works, false
  otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr, 
               ExceptionInformation *xp, 
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}
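
/* E.g., align_to_power_of_2(index+1, 12) rounds up to the next 4096-byte
   boundary: an index of 5000 yields a new_limit of 8192, and the entries
   between the old limit and the new one are filled with
   no_thread_local_binding_marker.  (Numbers are illustrative.) */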


void
exception_init()
{
  install_pmcl_exception_handlers();
}



#ifdef DARWIN


#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))


#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

typedef struct {
  int foreign_exception_port_count;
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;

/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better).

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resumes listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code.

  we create a Linux sigcontext struct.

  Simple?  Hopefully from the outside it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)
*/


/* Round (a-b) down to a multiple of c. */
#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_FLOAT_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread, 
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread).
   Find that context (the user thread's R3 points to it), then
   use that context to set the user thread's state.  When this
   function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}
1887
1888ExceptionInformation *
1889create_thread_context_frame(mach_port_t thread, 
1890                            natural *new_stack_top)
1891{
1892  arm_thread_state_t ts;
1893  mach_msg_type_number_t thread_state_count;
1894  kern_return_t result;
1895  ExceptionInformation *pseudosigcontext;
1896  MCONTEXT_T mc;
1897  natural stackp, backlink;
1898
1899  thread_state_count = MACHINE_THREAD_STATE_COUNT;
1900  result = thread_get_state(thread, 
1901                            ARM_THREAD_STATE,   /* GPRs, sp, lr, pc, cpsr */
1902                            (thread_state_t)&ts,
1903                            &thread_state_count);
1904 
1905  if (result != KERN_SUCCESS) {
1906    get_tcr(true);
1907    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
1908  }
1909  stackp = ts.__sp;             /* the user thread's stack pointer */
1910  backlink = stackp;
1911  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
1912  stackp -= sizeof(*pseudosigcontext);
1913  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
1914
1915  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
1916  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
1917  memmove(&(mc->__ss),&ts,sizeof(ts));
1918
1919  thread_state_count = ARM_FLOAT_STATE_COUNT;
1920  thread_get_state(thread,
1921                   ARM_FLOAT_STATE,
1922                   (thread_state_t)&(mc->__fs),
1923                   &thread_state_count);
1924
1925
1926  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
1927  thread_get_state(thread,
1928                   ARM_EXCEPTION_STATE,
1929                   (thread_state_t)&(mc->__es),
1930                   &thread_state_count);
1939
1940
1941  UC_MCONTEXT(pseudosigcontext) = mc;
1942  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
1943  stackp -= C_LINKAGE_LEN;
1944  *(natural *)ptr_from_lispobj(stackp) = backlink;
1945  if (new_stack_top) {
1946    *new_stack_top = stackp;
1947  }
1948  return pseudosigcontext;
1949}
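/* A sketch of what create_thread_context_frame() leaves on the user
   thread's stack (exact offsets depend on C_REDZONE_LEN,
   C_PARAMSAVE_LEN, C_LINKAGE_LEN and C_STK_ALIGN):

     higher addresses
       original frames                       <- backlink target
       (red zone, skipped)
       ucontext (the pseudosigcontext)
       mcontext (mc: __ss, __fs, __es)       <- UC_MCONTEXT(pseudosigcontext)
       C parameter-save area
       C linkage area; its first word holds
         the backlink to the original SP     <- *new_stack_top
     lower addresses
*/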
1950
1951/*
1952  This code sets up the user thread so that it executes a "pseudo-signal
1953  handler" function when it resumes.  Create a linux sigcontext struct
1954  on the thread's stack and pass it as an argument to the pseudo-signal
1955  handler.
1956
1957  Things are set up so that the handler "returns to" pseudo_sigreturn(),
1958  which will restore the thread's context.
1959
1960  If the handler invokes code that throws (or otherwise never sigreturn()'s
1961  to the context), that's fine.
1962
1963  Actually, check that: throw (and variants) may need to be careful and
1964  pop the tcr's xframe list until it's younger than any frame being
1965  entered.
1966*/
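/* Conceptually, the thread resumes as if it had executed the call
   below (a sketch; the transfer really happens via the register state
   that setup_signal_frame() installs, with pseudo_sigreturn() wired in
   as the return address): */
#if 0
pseudo_signal_handler(signum, pseudosigcontext, tcr, old_valence);
/* ... then "returns" into pseudo_sigreturn(), whose UUO re-enters the
   Mach server thread, which restores the saved context. */
#endif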
1967
1968int
1969setup_signal_frame(mach_port_t thread,
1970                   void *handler_address,
1971                   int signum,
1972                   int code,
1973                   TCR *tcr)
1974{
1975  arm_thread_state_t ts;
1980  ExceptionInformation *pseudosigcontext;
1981  int old_valence = tcr->valence;
1982  natural stackp;
1983
1984#ifdef DEBUG_MACH_EXCEPTIONS
1985  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
1986#endif
1987  pseudosigcontext = create_thread_context_frame(thread, &stackp);
1988  pseudosigcontext->uc_onstack = 0;
1989  pseudosigcontext->uc_sigmask = (sigset_t) 0;
1990  tcr->pending_exception_context = pseudosigcontext;
1991  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1992 
1993
1994  /*
1995     We've now created a sigcontext on the thread's stack.  Set
1996     things up so that we call the handler (with the appropriate
1997     arguments) when the thread's resumed.
1998  */
1999
2000  /* Start from the thread's saved state, so that cpsr (and any
2001     registers we don't set explicitly) keep their pre-exception
2002     values. */
2003  ts = UC_MCONTEXT(pseudosigcontext)->__ss;
2004  ts.__pc = (natural) handler_address;
2005  ts.__sp = stackp;
2006  ts.__r[0] = signum;                 /* handler args, per the AAPCS */
2007  ts.__r[1] = (natural)pseudosigcontext;
2008  ts.__r[2] = (natural)tcr;
2009  ts.__r[3] = (natural)old_valence;
2010  ts.__lr = (natural)pseudo_sigreturn;
2011
2012
2013  thread_set_state(thread, 
2014                   MACHINE_THREAD_STATE,
2015                   (thread_state_t)&ts,
2016                   MACHINE_THREAD_STATE_COUNT);
2022#ifdef DEBUG_MACH_EXCEPTIONS
2023  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2024#endif
2025  return 0;
2026}
2027
2028
2029void
2030pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2031{
2032  signal_handler(signum, NULL, context, tcr, old_valence);
2033} 
2034
2035
2036int
2037thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2038{
2039#ifdef PPC64
2040  ppc_thread_state64_t ts;
2041#else
2042  ppc_thread_state_t ts;
2043#endif
2044  mach_msg_type_number_t thread_state_count;
2045
2046#ifdef PPC64
2047  thread_state_count = PPC_THREAD_STATE64_COUNT;
2048#else
2049  thread_state_count = PPC_THREAD_STATE_COUNT;
2050#endif
2051  thread_get_state(thread, 
2052#ifdef PPC64
2053                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2054#else
2055                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2056#endif
2057                   (thread_state_t)&ts,
2058                   &thread_state_count);
2059  if (enabled) {
2060    ts.__srr1 |= MSR_FE0_FE1_MASK;
2061  } else {
2062    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2063  }
2064  /*
2065     Hack-o-rama warning (isn't it about time for such a warning?):
2066     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2067     Our handler for lisp's use of pthread_kill() pushes a phony
2068     lisp frame on the stack and forces the context to resume at
2069     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2070     lisp frame contains the -real- address that process_interrupt
2071     should have returned to, and the fact that it's in a lisp
2072     frame should convince the GC to notice that address if it
2073     runs in the tiny time window between returning from our
2074     interrupt handler and ... here.
2075     If the top frame on the stack is a lisp frame, discard it
2076     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2077     just adjust ts.srr0 to skip over the UUO.
2078  */
2079  {
2080    lisp_frame *tos = (lisp_frame *)ts.__r1,
2081      *next_frame = tos->backlink;
2082   
2083    if (tos == (next_frame - 1)) {
2084      ts.__srr0 = tos->savelr;
2085      ts.__r1 = (LispObj) next_frame;
2086    } else {
2087      ts.__srr0 += 4;
2088    }
2089  }
2090  thread_set_state(thread, 
2091#ifdef PPC64
2092                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2093#else
2094                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2095#endif
2096                   (thread_state_t)&ts,
2097#ifdef PPC64
2098                   PPC_THREAD_STATE64_COUNT
2099#else
2100                   PPC_THREAD_STATE_COUNT
2101#endif
2102                   );
2103
2104  return 0;
2105}
2106
2107/*
2108  This function runs in the exception handling thread.  It's
2109  called (by this precise name) from the library function "exc_server()"
2110  when the thread's exception ports are set up.  (exc_server() is called
2111  via mach_msg_server(), which is a function that waits for and dispatches
2112  on exception messages from the Mach kernel.)
2113
2114  This checks to see if the exception was caused by a pseudo_sigreturn()
2115  UUO; if so, it arranges for the thread to have its state restored
2116  from the specified context.
2117
2118  Otherwise, it tries to map the exception to a signal number and
2119  arranges that the thread run a "pseudo signal handler" to handle
2120  the exception.
2121
2122  Some exceptions could and should be handled here directly.
2123*/
2124
2125kern_return_t
2126catch_exception_raise(mach_port_t exception_port,
2127                      mach_port_t thread,
2128                      mach_port_t task, 
2129                      exception_type_t exception,
2130                      exception_data_t code_vector,
2131                      mach_msg_type_number_t code_count)
2132{
2133  int signum = 0, code = *code_vector, code1;
2134  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2135  kern_return_t kret;
2136
2137#ifdef DEBUG_MACH_EXCEPTIONS
2138  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2139#endif
2140
2141  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2142    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2143  } 
2144  if ((exception == EXC_BAD_INSTRUCTION) &&
2145      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2146      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2147       (code1 == (int)enable_fp_exceptions) ||
2148       (code1 == (int)disable_fp_exceptions))) {
2149    if (code1 == (int)pseudo_sigreturn) {
2150      kret = do_pseudo_sigreturn(thread, tcr);
2151#if 0
2152      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2153#endif
2154       
2155    } else if (code1 == (int)enable_fp_exceptions) {
2156      kret = thread_set_fp_exceptions_enabled(thread, true);
2157    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2158  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2159    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
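    /* 17 is just an arbitrary non-KERN_SUCCESS value here: the reply
       will say the exception wasn't handled, so (presumably) the kernel
       will offer it to any other registered handlers. */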
2160    kret = 17;
2161  } else {
2162    switch (exception) {
2163    case EXC_BAD_ACCESS:
2164      signum = SIGSEGV;
2165      break;
2166       
2167    case EXC_BAD_INSTRUCTION:
2168      signum = SIGILL;
2169      break;
2170     
2171    case EXC_SOFTWARE:
2172      if (code == EXC_PPC_TRAP) {
2173        signum = SIGTRAP;
2174      }
2175      break;
2176     
2177    case EXC_ARITHMETIC:
2178      signum = SIGFPE;
2179      break;
2180
2181    default:
2182      break;
2183    }
2184    if (signum) {
2185      kret = setup_signal_frame(thread,
2186                                (void *)pseudo_signal_handler,
2187                                signum,
2188                                code,
2189                                tcr);
2190#if 0
2191      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2192#endif
2193
2194    } else {
2195      kret = 17;
2196    }
2197  }
2198
2199  return kret;
2200}
2201
2202
2203
2204typedef struct {
2205  mach_msg_header_t Head;
2206  /* start of the kernel processed data */
2207  mach_msg_body_t msgh_body;
2208  mach_msg_port_descriptor_t thread;
2209  mach_msg_port_descriptor_t task;
2210  /* end of the kernel processed data */
2211  NDR_record_t NDR;
2212  exception_type_t exception;
2213  mach_msg_type_number_t codeCnt;
2214  integer_t code[2];
2215  mach_msg_trailer_t trailer;
2216} exceptionRequest;
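/* This struct mirrors the request message that MIG would generate for
   exception_raise (msgh_id 2401); openmcl_exc_server() below decodes
   such messages by hand rather than linking against the MIG-generated
   exc_server(). */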
2217
2218
2219boolean_t
2220openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2221{
2222  static NDR_record_t _NDR = {0};
2223  kern_return_t handled;
2224  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2225  exceptionRequest *req = (exceptionRequest *) in;
2226
2227  reply->NDR = _NDR;
2228
2229  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2230  out->msgh_remote_port = in->msgh_remote_port;
2231  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2232  out->msgh_local_port = MACH_PORT_NULL;
2233  out->msgh_id = in->msgh_id+100;
2234
2235  /* Could handle other exception flavors in the range 2401-2403 */
2236
2237
2238  if (in->msgh_id != 2401) {
2239    reply->RetCode = MIG_BAD_ID;
2240    return FALSE;
2241  }
2242  handled = catch_exception_raise(req->Head.msgh_local_port,
2243                                  req->thread.name,
2244                                  req->task.name,
2245                                  req->exception,
2246                                  req->code,
2247                                  req->codeCnt);
2248  reply->RetCode = handled;
2249  return TRUE;
2250}
2251
2252/*
2253  The initial function for an exception-handling thread.
2254*/
2255
2256void *
2257exception_handler_proc(void *arg)
2258{
2260  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2261
2262  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2263  /* Should never return. */
2264  abort();
2265}
2266
2267
2268
2269mach_port_t
2270mach_exception_port_set()
2271{
2272  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2273  kern_return_t kret; 
2274  if (__exception_port_set == MACH_PORT_NULL) {
2275    kret = mach_port_allocate(mach_task_self(),
2276                              MACH_PORT_RIGHT_PORT_SET,
2277                              &__exception_port_set);
2278    MACH_CHECK_ERROR("allocating exception port set",kret);
2279    create_system_thread(0,
2280                         NULL,
2281                         exception_handler_proc, 
2282                         (void *)((natural)__exception_port_set));
2283  }
2284  return __exception_port_set;
2285}
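/* Design note: every lisp thread's exception port is moved into this
   single port set (see setup_mach_exception_handling() below), so one
   handler thread serves all lisp threads in the process. */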
2286
2287/*
2288  Setup a new thread to handle those exceptions specified by
2289  the mask "which".  This involves creating a special Mach
2290  message port, telling the Mach kernel to send exception
2291  messages for the calling thread to that port, and setting
2292  up a handler thread which listens for and responds to
2293  those messages.
2294
2295*/
2296
2297/*
2298  Establish the lisp thread's TCR as its exception port, and determine
2299  whether any other ports have been established by foreign code for
2300  exceptions that lisp cares about.
2301
2302  If this happens at all, it should happen on return from foreign
2303  code and on entry to lisp code via a callback.
2304
2305  This is a lot of trouble (and overhead) to support Java, or other
2306  embeddable systems that clobber their caller's thread exception ports.
2307 
2308*/
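/* A sketch of when these entry points are (or would be) used, under
   the assumptions the comments above describe: */
#if 0
/* at thread startup: */
darwin_exception_init(tcr);           /* -> setup_mach_exception_handling()
                                         -> tcr_establish_exception_port() */
/* when calling out to foreign code: */
restore_foreign_exception_ports(tcr);
/* on return from foreign code, or on entry to lisp via a callback: */
tcr_establish_lisp_exception_port(tcr);
#endif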
2309kern_return_t
2310tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2311{
2312  kern_return_t kret;
2313  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2314  int i;
2315  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2316  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2317  exception_mask_t mask = 0;
2318
2319  kret = thread_swap_exception_ports(thread,
2320                                     LISP_EXCEPTIONS_HANDLED_MASK,
2321                                     lisp_port,
2322                                     EXCEPTION_DEFAULT,
2323                                     THREAD_STATE_NONE,
2324                                     fxs->masks,
2325                                     &n,
2326                                     fxs->ports,
2327                                     fxs->behaviors,
2328                                     fxs->flavors);
2329  if (kret == KERN_SUCCESS) {
2330    fxs->foreign_exception_port_count = n;
2331    for (i = 0; i < n; i ++) {
2332      foreign_port = fxs->ports[i];
2333
2334      if ((foreign_port != lisp_port) &&
2335          (foreign_port != MACH_PORT_NULL)) {
2336        mask |= fxs->masks[i];
2337      }
2338    }
2339    tcr->foreign_exception_status = (int) mask;
2340  }
2341  return kret;
2342}
2343
2344kern_return_t
2345tcr_establish_lisp_exception_port(TCR *tcr)
2346{
2347  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2348}
2349
2350/*
2351  Do this when calling out to or returning from foreign code, if
2352  any conflicting foreign exception ports were established when we
2353  last entered lisp code.
2354*/
2355kern_return_t
2356restore_foreign_exception_ports(TCR *tcr)
2357{
2358  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2359 
2360  if (m) {
2361    MACH_foreign_exception_state *fxs  = 
2362      (MACH_foreign_exception_state *) tcr->native_thread_info;
2363    int i, n = fxs->foreign_exception_port_count;
2364    exception_mask_t tm;
2365
2366    for (i = 0; i < n; i++) {
2367      if ((tm = fxs->masks[i]) & m) {
2368        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2369                                   tm,
2370                                   fxs->ports[i],
2371                                   fxs->behaviors[i],
2372                                   fxs->flavors[i]);
2373      }
2374    }
2375  }
2376  return KERN_SUCCESS;
2377}
2378
2379/*
2380  This assumes that a Mach port (to be used as the thread's exception port) whose
2381  "name" matches the TCR's 32-bit address has already been allocated.
2382*/
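/* A sketch of how such a port could be allocated (an assumption about
   the thread-creation path, which isn't shown in this file;
   mach_port_allocate_name() lets the caller choose the port's name): */
#if 0
kern_return_t k = mach_port_allocate_name(mach_task_self(),
                                          MACH_PORT_RIGHT_RECEIVE,
                                          (mach_port_name_t)((natural)tcr));
#endif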
2383
2384kern_return_t
2385setup_mach_exception_handling(TCR *tcr)
2386{
2387  mach_port_t
2388    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2389    task_self = mach_task_self();
2390  kern_return_t kret;
2391
2392  kret = mach_port_insert_right(task_self,
2393                                thread_exception_port,
2394                                thread_exception_port,
2395                                MACH_MSG_TYPE_MAKE_SEND);
2396  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2397
2398  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2399  if (kret == KERN_SUCCESS) {
2400    mach_port_t exception_port_set = mach_exception_port_set();
2401
2402    kret = mach_port_move_member(task_self,
2403                                 thread_exception_port,
2404                                 exception_port_set);
2405  }
2406  return kret;
2407}
2408
2409void
2410darwin_exception_init(TCR *tcr)
2411{
2412  void tcr_monitor_exception_handling(TCR*, Boolean);
2413  kern_return_t kret;
2414  MACH_foreign_exception_state *fxs = 
2415    calloc(1, sizeof(MACH_foreign_exception_state));
2416 
2417  tcr->native_thread_info = (void *) fxs;
2418
2419  if ((kret = setup_mach_exception_handling(tcr))
2420      != KERN_SUCCESS) {
2421    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
2422    terminate_lisp();
2423  }
2424}
2425
2426/*
2427  The tcr is the "name" of the corresponding thread's exception port.
2428  Destroying the port should remove it from all port sets of which it's
2429  a member (notably, the exception port set.)
2430*/
2431void
2432darwin_exception_cleanup(TCR *tcr)
2433{
2434  void *fxs = tcr->native_thread_info;
2435  extern Boolean use_mach_exception_handling;
2436
2437  if (fxs) {
2438    tcr->native_thread_info = NULL;
2439    free(fxs);
2440  }
2441  if (use_mach_exception_handling) {
2442    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2443    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2444  }
2445}
2446
2447
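/* thread_suspend() can stop a thread in the middle of a kernel trap,
   where its user-level register state can't be reliably read or
   written; thread_abort_safely() forces such a thread back to a clean
   point.  If the thread can't be aborted safely, resume it and retry
   the whole suspend. */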
2448Boolean
2449suspend_mach_thread(mach_port_t mach_thread)
2450{
2451  kern_return_t status;
2452  Boolean aborted = false;
2453 
2454  do {
2455    aborted = false;
2456    status = thread_suspend(mach_thread);
2457    if (status == KERN_SUCCESS) {
2458      status = thread_abort_safely(mach_thread);
2459      if (status == KERN_SUCCESS) {
2460        aborted = true;
2461      } else {
2462        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2463        thread_resume(mach_thread);
2464      }
2465    } else {
2466      return false;
2467    }
2468  } while (! aborted);
2469  return true;
2470}
2471
2472/*
2473  Only do this if pthread_kill indicated that the pthread isn't
2474  listening to signals anymore, as can happen as soon as pthread_exit()
2475  is called on Darwin.  The thread could still call out to lisp as it
2476  is exiting, so we need another way to suspend it in this case.
2477*/
2478Boolean
2479mach_suspend_tcr(TCR *tcr)
2480{
2481  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2482  ExceptionInformation *pseudosigcontext;
2483  Boolean result = false;
2484 
2485  result = suspend_mach_thread(mach_thread);
2486  if (result) {
2487    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2488    pseudosigcontext->uc_onstack = 0;
2489    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2490    tcr->suspend_context = pseudosigcontext;
2491  }
2492  return result;
2493}
2494
2495void
2496mach_resume_tcr(TCR *tcr)
2497{
2498  ExceptionInformation *xp;
2499  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2500 
2501  xp = tcr->suspend_context;
2502#ifdef DEBUG_MACH_EXCEPTIONS
2503  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2504          tcr, tcr->pending_exception_context);
2505#endif
2506  tcr->suspend_context = NULL;
2507  restore_mach_thread_state(mach_thread, xp);
2508#ifdef DEBUG_MACH_EXCEPTIONS
2509  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2510          tcr, tcr->pending_exception_context);
2511#endif
2512  thread_resume(mach_thread);
2513}
2514
2515void
2516fatal_mach_error(char *format, ...)
2517{
2518  va_list args;
2519  char s[512];
2520 
2521
2522  va_start(args, format);
2523  vsnprintf(s, sizeof(s),format, args);
2524  va_end(args);
2525
2526  Fatal("Mach error", s);
2527}
2528
2529void
2530pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2531{
2532  interrupt_handler(signum, NULL, context);
2533}
2534
2535int
2536mach_raise_thread_interrupt(TCR *target)
2537{
2538  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2539  kern_return_t kret;
2540  Boolean result = false;
2541  TCR *current = get_tcr(false);
2542  thread_basic_info_data_t info; 
2543  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2544
2545  LOCK(lisp_global(TCR_AREA_LOCK), current);
2546
2547  if (suspend_mach_thread(mach_thread)) {
2548    if (thread_info(mach_thread,
2549                    THREAD_BASIC_INFO,
2550                    (thread_info_t)&info,
2551                    &info_count) == KERN_SUCCESS) {
2552      if (info.suspend_count == 1) {
2553        if ((target->valence == TCR_STATE_LISP) &&
2554            (!target->unwinding) &&
2555            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2556          kret = setup_signal_frame(mach_thread,
2557                                    (void *)pseudo_interrupt_handler,
2558                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2559                                    0,
2560                                    target);
2561          if (kret == KERN_SUCCESS) {
2562            result = true;
2563          }
2564        }
2565      }
2566    }
2567    if (! result) {
2568      target->interrupt_pending = 1 << fixnumshift;
2569    }
2570    thread_resume(mach_thread);
2571   
2572  }
2573  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2574  return 0;
2575}
2576
2577#endif