source: branches/arm/lisp-kernel/arm-exceptions.c @ 13923

Last change on this file since 13923 was 13921, checked in by gb, 9 years ago

arm-constants.h, thread_manager.c: tcr.last_lisp_frame is just a natural.

arm-exceptions.c: maintain tcr.last_lisp_frame when entering/exiting
signal handlers. Signal thread interrupts by calling back to cmain
with signal 0.

arm-spentry.s: add an entrypoint that calls to undefined functions
wind up at. Dont' really need .SPtfuncallvsp. Check for pending
interrupts on ffcall return. Box the unboxed callback index in
.SPeabi_callback, don't unbox it even more.

arm-uuo.s: closer to lisp's idea of UUO encoding, but still not there.

xfasload.lisp: build the undefined function object differently.

arm-asm.lisp, arm-disassemble.lisp: uuo-slot-unbound encodes 3 registers

arm-lapmacros.lisp: define SET-GLOBAL; needs an extra temp reg.

arm-vinsns.lisp: scale-1bit-misc-index needs another shift. 3-operand
slot-unbound UUO. EEP-unresolved UUO operand order. No more .SPtfuncallvsp.
Make sure that nargs doesn't get clobbered in UNBIND-INTERRUPT-LEVEL-INLINE.

arm-array.lisp: in @string case of %init-misc, shift value, not tag.

arm-misc.lisp: add PENDING-USER-INTERRUPT, %%SAVE-APPLICATION.

arm-callback-support.lisp, arm-error-signal.lisp,
arm-trap-support.lisp,l1-boot-3.lisp: try to get basic stuff working
well enough to enable callbacks. Enable callbacks.

arm-backtrace.lisp: a little bit of platform-specific code and some
code from the PPC port, so that backtrace sort of works.

Status: can save an image (and it's more-or-less worth doing so.)
Crashes (somewhere in the type-system) compiling db-io.lisp, so I
don't yet know what undefined things would be warned about.

File size: 67.9 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

/* Ask the kernel to deliver FP exceptions precisely for this process. */
void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

/* Disable FP exception delivery for this process. */
void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif
77
78/*
79  Handle exceptions.
80
81*/
82
/* Defined elsewhere in the kernel. */
extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

/* Host page size and its base-2 log.  Initialized for the common
   4KB case; NOTE(review): presumably adjusted at startup if the
   platform differs -- not visible in this file. */
int
page_size = 4096;

int
log2_page_size = 12;
98
99
100
101
102
/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase, so the instruction that adjusted
       allocptr is three words before the trap. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      /* Register-operand subtract: displacement is the value in Rm. */
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }
   
    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      /* Small (8-bit) immediate subtract. */
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      /* Rotated-immediate subtract; the low byte may have been
         subtracted by a separate instruction one word earlier. */
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    /* Unrecognized instruction sequence before the trap. */
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) == xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
/* Try to make bytes_needed of heap available and point allocptr at the
   new object (biased by disp_from_allocptr).  May run the EGC or a
   full GC along the way.  Returns false only if the heap cannot be
   grown enough to satisfy the request. */
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  return false;
}
253
#ifndef XNOMEM
#define XNOMEM 10
#endif

/* Charge the thread's bytes_allocated statistic with the amount
   consumed since the last checkpoint (tcr->last_allocptr), given the
   current (downward-moving) allocation pointer.  Resets the
   checkpoint to 0 afterwards. */
void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  /* VOID_ALLOCPTR means "no allocation in progress": nothing to charge. */
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
269
/* Report an unsatisfiable allocation request to lisp.  Resets the
   allocation registers to the "void" sentinel first so the trap
   doesn't re-fire. */
void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal a "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
}
280
281/*
282  Allocate a large list, where "large" means "large enough to
283  possibly trigger the EGC several times if this was done
284  by individually allocating each CONS."  The number of
285  ocnses in question is in arg_z; on successful return,
286  the list will be in arg_z
287*/
288
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
322
/* Handle the allocation trap: figure out from allocptr's tag how big
   the pending allocation is, then satisfy it (possibly GCing) and skip
   the trap instruction.  Returns false if the trap wasn't ours
   (allocptr carries an unexpected tag). */
Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    /* A cons has a fixed size. */
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    /* A uvector: decode the size from the instruction stream. */
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

    /* else fall thru */
  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    /* Skip over the trap instruction. */
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}
360
/* gc_deferred counts GC requests refused because GC was inhibited;
   full_gc_deferred records that a full GC is still owed. */
natural gc_deferred = 0, full_gc_deferred = 0;

/* Per-thread worker passed to gc_like_from_xp for the flash-freeze
   operation; intentionally does nothing. */
signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}
368
/* Dispatch a GC trap: the selector is in imm0, an optional argument in
   imm1.  Results, where a case produces one, go back in imm0 or arg_z.
   Always skips the trap instruction and returns true. */
Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable/disable the EGC; return the previous state (T or NIL). */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set per-generation thresholds from arg_x/arg_y/arg_z. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      /* Round the requested threshold up to a whole heap segment. */
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* Mark everything currently allocated as static. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* All remaining selectors actually run a GC of some flavor. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    /* Disable the EGC around a full GC, re-enabling it at the end. */
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      /* Selector bits request purify/impurify/save-application. */
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        /* NOTE(review): this shadows the "tcr" parameter. */
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  /* Resume past the trap instruction. */
  adjust_exception_pc(xp,4);
  return true;
}
503
504
505
/* Raise an error_stack_overflow condition for the stack indexed by
   "reg" (the overflowing stack-pointer register). */
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The cstack just overflowed.  Force the current thread's
     control stack to do so until all stacks are well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg);
}
519
520/*
521  Lower (move toward 0) the "end" of the soft protected area associated
522  with a by a page, if we can.
523*/
524
525void
526adjust_soft_protection_limit(area *a)
527{
528  char *proposed_new_soft_limit = a->softlimit - 4096;
529  protected_area_ptr p = a->softprot;
530 
531  if (proposed_new_soft_limit >= (p->start+16384)) {
532    p->end = proposed_new_soft_limit;
533    p->protsize = p->end-p->start;
534    a->softlimit = proposed_new_soft_limit;
535  }
536  protect_area(p);
537}
538
/* After an overflow has been handled, move the soft limit of the
   indicated stack (control stack for Rsp, value stack for vsp) back
   toward its guard page. */
void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    /* Lower the limit a page at a time, keeping 16K above the hard limit. */
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}
559
/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  /* Record the current allocation pointer, rewind the value stack to
     the outermost catch frame's saved vsp, and restart lisp. */
  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}
574
/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Round both the current limit and the request up to the thread's
     allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      /* Try progressively smaller extensions, giving up below 4MB. */
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  /* Commit the new segment: advance the area and hand the thread its
     new allocation window. */
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
617
618 
/* Find the area in the chain starting at *aptr that contains "value",
   record it as that chain's current area with its active pointer set
   to value, and mark all younger areas as empty (active == high). */
void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
/* Bring a thread's TCR into a state the GC can walk: synchronize the
   areas' active pointers with the thread's stack pointers, account for
   bytes allocated, and void the allocation window.  "xp" is the
   context the thread stopped in (NULL if it was in a foreign call);
   is_other_tcr is true for threads other than the one running the GC. */
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Finish any partially-emulated allocation in the other thread. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}
683
/* The TCR of the thread currently running a GC-like operation
   (NULL when none is in progress). */
TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC is inhibited: note a deferred GC (negating the count flags
       that a GC was requested) and bail out. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Pick the context each suspended thread should be walked with. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }
   


  result = fun(tcr, param);

  /* Clear the per-thread gc contexts again. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
749
750
751
752/* Returns #bytes freed by invoking GC */
753
754signed_natural
755gc_from_tcr(TCR *tcr, signed_natural param)
756{
757  area *a;
758  BytePtr oldfree, newfree;
759  BytePtr oldend, newend;
760
761  a = active_dynamic_area;
762  oldend = a->high;
763  oldfree = a->active;
764  gc(tcr, param);
765  newfree = a->active;
766  newend = a->high;
767#if 0
768  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
769#endif
770  return ((oldfree-newfree)+(newend-oldend));
771}
772
773signed_natural
774gc_from_xp(ExceptionInformation *xp, signed_natural param)
775{
776  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
777
778  freeGCptrs();
779  return status;
780}
781
782signed_natural
783purify_from_xp(ExceptionInformation *xp, signed_natural param)
784{
785  return gc_like_from_xp(xp, purify, param);
786}
787
788signed_natural
789impurify_from_xp(ExceptionInformation *xp, signed_natural param)
790{
791  return gc_like_from_xp(xp, impurify, param);
792}
793
794
795
796
797
798
/* Dispatch table indexed by a protected area's "why" field (see
   handle_protection_violation).  NOTE(review): the ordering here must
   match the protection-kind enumeration declared elsewhere -- confirm
   against that declaration. */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
809
810
811Boolean
812is_write_fault(ExceptionInformation *xp, siginfo_t *info)
813{
814#ifdef LINUX
815  /* Based on experiments with a small sample size; need to R TFM. */
816  return ((xp->uc_mcontext.trap_no == 0xe) &&
817          (xp->uc_mcontext.error_code == 0x817));
818#endif
819}
820
/* Handle SIGSEGV/SIGBUS: special-case safe references and the
   touch_page probe, dispatch faults in known protected areas to their
   handlers, lazily unprotect the readonly area, and otherwise hand the
   fault to lisp via the CMAIN callback.  Returns true if handled. */
Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    /* A "safe reference" faulted: report 0 in imm0 and skip it. */
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    /* The touch_page probe faulted: report failure and resume at its end. */
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      /* Dispatch on the reason the area is protected. */
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        /* Lazily make the faulting readonly page writable. */
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    /* Unrecognized fault while in lisp: let lisp's handler see it. */
    LispObj cmain = nrs_CMAIN.vcell;
   
    if ((fulltag_of(cmain) == fulltag_misc) &&
      (header_subtag(header_of(cmain)) == subtag_macptr)) {
     
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
    }
  }
  return false;
}
875
876
877
878
879
/* A stack hit its hard limit: restart the lisp process.
   Returns -1 (failure status) if reset_lisp_process ever returns. */
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
889
890extern area*
891allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
892
893extern area*
894allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
895
896
897
898
899
900
901Boolean
902lisp_frame_p(lisp_frame *spPtr)
903{
904  return (spPtr->marker == lisp_frame_marker);
905}
906
907
/* Statistic: foreign-function-call stack overflows.
   NOTE(review): not updated anywhere in this file. */
int ffcall_overflow_count = 0;
909
910
911
912
913
914
915/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
916  the current value of VSP (TSP) or an older area.  */
917
918OSStatus
919do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
920{
921  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
922  area *a = tcr->vs_area;
923  protected_area_ptr vsp_soft = a->softprot;
924  unprotect_area(vsp_soft);
925  signal_stack_soft_overflow(xp,vsp);
926  return 0;
927}
928
929
930
931OSStatus
932do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
933{
934  /* Trying to write into a guard page on the vstack or tstack.
935     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
936     signal an error_stack_overflow condition.
937      */
938  do_vsp_overflow(xp,addr);
939}
940
/* A write fault in a protected area with no meaningful recovery:
   just return -1. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
949
950
951
952
953     
954
955
956
957
958
/* SIGFPE handling: not implemented here; decline so the generic
   machinery treats the signal as unhandled. */
Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}
964
965
/* Hook for emulating instructions the hardware doesn't implement;
   currently declines everything. */
Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{

  return false;
}
974
975Boolean
976handle_exception(int xnum, 
977                 ExceptionInformation *xp, 
978                 TCR *tcr, 
979                 siginfo_t *info,
980                 int old_valence)
981{
982  pc program_counter;
983  opcode instruction = 0;
984
985  if (old_valence != TCR_STATE_LISP) {
986    return false;
987  }
988
989  program_counter = xpPC(xp);
990 
991  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
992    instruction = *program_counter;
993  }
994
995  if (IS_ALLOC_TRAP(instruction)) {
996    return handle_alloc_trap(xp, tcr);
997  } else if ((xnum == SIGSEGV) ||
998             (xnum == SIGBUS)) {
999    return handle_protection_violation(xp, info, tcr, old_valence);
1000  } else if (xnum == SIGFPE) {
1001    return handle_sigfpe(xp, tcr);
1002  } else if ((xnum == SIGILL)) {
1003    if (IS_GC_TRAP(instruction)) {
1004      return handle_gc_trap(xp, tcr);
1005    } else if (IS_UUO(instruction)) {
1006      return handle_uuo(xp, info, instruction);
1007    } else {
1008      return handle_unimplemented_instruction(xp,instruction,tcr);
1009    }
1010  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1011    tcr->interrupt_pending = 0;
1012    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0);
1013    return true;
1014  }
1015
1016  return false;
1017}
1018
/* Advance the context's PC by "delta" bytes.  xpPC() is dereferenced
   as a word (opcode) pointer elsewhere in this file, so the byte
   delta is scaled down to words here. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1024
1025
1026/*
1027  This wants to scan backwards until "where" points to an instruction
1028   whose major opcode is either 63 (double-float) or 59 (single-float)
1029*/
1030
1031OSStatus
1032handle_fpux_binop(ExceptionInformation *xp, pc where)
1033{
1034  OSStatus err = -1;
1035  opcode *there = (opcode *) where, instr, errnum = 0;
1036  return err;
1037}
1038
/* Decode and handle a UUO (a reserved instruction lisp uses to request
   kernel services and signal errors).  "bump" is the number of bytes
   to advance the PC by when the UUO is handled; a handler sets it to 0
   if it has already adjusted the PC itself. */
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
{
  unsigned 
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      /* Thread-control services; the target thread's TCR is in arg_z. */
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      /* Grow the thread's special-binding TLB if needed. */
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;

    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      /* Breakpoint: advance past the UUO, then enter the debugger. */
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      /* Deferred interrupt: clear the flag and call back to lisp. */
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;


  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
    /* Error UUOs: the whole UUO encodes the error; let lisp decode it. */
    handled = handle_error(xp,0,the_uuo);
    break;

  default:
    handled = false;
    bump = 0;
  }
 
  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}
1149
1150natural
1151register_codevector_contains_pc (natural lisp_function, pc where)
1152{
1153  natural code_vector, size;
1154
1155  if ((fulltag_of(lisp_function) == fulltag_misc) &&
1156      (header_subtag(header_of(lisp_function)) == subtag_function)) {
1157    code_vector = deref(lisp_function, 2);
1158    size = header_element_count(header_of(code_vector)) << 2;
1159    if ((untag(code_vector) < (natural)where) && 
1160        ((natural)where < (code_vector + size)))
1161      return(code_vector);
1162  }
1163
1164  return(0);
1165}
1166
1167Boolean
1168callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
1169{
1170  return callback_to_lisp(callback_macptr, xp, info,arg);
1171}
1172
/* Call back into lisp through the foreign function pointer wrapped in
   the macptr 'callback_macptr', passing the exception context plus two
   arguments.  Before the call, the stack pointers and allocptr that
   lisp/the GC care about are copied from the exception frame into the
   tcr; the global exception lock is dropped around the excursion into
   lisp and reacquired afterwards.  Always returns true. */
Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = fn,  codevector, offset;
  pc where = xpPC(xp);

  /* Figure out which register (fn, then nfn) holds the function whose
     code vector contains the PC, so lisp can be told where the trap
     occurred as a (register, offset) pair; fnreg 0 means "unknown". */
  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    /* PC's displacement within the code vector, adjusted for the misc
       tag/header. */
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    /* Not inside a known code vector: pass the raw PC. */
    offset = (natural)where;
  }
                                                 
                                               

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}
1225
1226area *
1227allocate_no_stack (natural size)
1228{
1229#ifdef SUPPORT_PRAGMA_UNUSED
1230#pragma unused(size)
1231#endif
1232
1233  return (area *) NULL;
1234}
1235
1236
1237
1238
1239
1240
1241/* callback to (symbol-value cmain) if it is a macptr,
1242   otherwise report cause and function name to console.
1243   Returns noErr if exception handled OK */
1244OSStatus
1245handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
1246{
1247  LispObj   cmain = nrs_CMAIN.vcell;
1248  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1249
1250}
1251
1252
1253
1254
1255void non_fatal_error( char *msg )
1256{
1257  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1258  fflush( dbgout );
1259}
1260
1261
1262
/* Hand a lisp error off to the error-dispatch callback stored in
   nrs_ERRDISP, if one is installed.  Returns true if lisp was called,
   false if errdisp doesn't (yet) contain a macptr. */
Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2);
    }

  return false;
}
1276               
1277
1278/*
1279   Current thread has all signals masked.  Before unmasking them,
1280   make it appear that the current thread has been suspended.
1281   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1283*/
1284int
1285prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1286{
1287  int old_valence = tcr->valence;
1288
1289  tcr->pending_exception_context = context;
1290  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1291
1292  ALLOW_EXCEPTIONS(context);
1293  return old_valence;
1294} 
1295
/* Acquire the global exception lock, then link 'context' onto the
   tcr's xframe list (so the GC can find this exception context),
   clear the pending-context slot, and mark the thread as foreign.
   The statement order here matters: the xframe link must be in place
   before the valence changes. */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1309
/* Undo wait_for_exception_lock_in_handler: pop the tcr's xframe list
   (restoring the popped context as the pending exception context),
   mark the thread as returning from an exception, and release the
   global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1318
1319/*
1320   If an interrupt is pending on exception exit, try to ensure
1321   that the thread sees it as soon as it's able to run.
1322*/
1323void
1324raise_pending_interrupt(TCR *tcr)
1325{
1326  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1327    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1328  }
1329}
1330
1331void
1332exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
1333{
1334  sigset_t mask;
1335  sigfillset(&mask);
1336 
1337  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1338  tcr->valence = old_valence;
1339  tcr->pending_exception_context = NULL;
1340  tcr->last_lisp_frame = old_last_lisp_frame;
1341}
1342
1343
/* Main POSIX handler for synchronous exceptions (SIGILL/SIGTRAP/
   SIGBUS/SIGSEGV/SIGFPE).  Saves the thread's lisp state, acquires
   the exception lock, and tries handle_exception(); unhandled
   exceptions drop into the kernel debugger.  On Darwin with Mach
   exception handling, tcr/old_valence/old_last_lisp_frame are
   supplied by the Mach glue; otherwise they're computed here. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
   
    tcr = (TCR *) get_interrupt_tcr(false);
 
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */
   
    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* Honor a suspend request that arrived while we were getting set up. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    /* Nothing handled it: give the kernel debugger a chance; if the
       user chooses to continue, propagate the exception instead. */
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}
1392
1393/*
1394  If it looks like we're in the middle of an atomic operation, make
1395  it seem as if that operation is either complete or hasn't started
1396  yet.
1397
1398  The cases handled include:
1399
1400  a) storing into a newly-allocated lisp frame on the stack.
1401  b) marking a newly-allocated TSP frame as containing "raw" data.
1402  c) consing: the GC has its own ideas about how this should be
1403     handled, but other callers would be best advised to back
1404     up or move forward, according to whether we're in the middle
1405     of allocating a cons cell or allocating a uvector.
1406  d) a STMW to the vsp
1407  e) EGC write-barrier subprims.
1408*/
1409
1410extern opcode
1411  egc_write_barrier_start,
1412  egc_write_barrier_end, 
1413  egc_store_node_conditional, 
1414  egc_store_node_conditional_test,
1415  egc_set_hash_key,
1416  egc_gvset,
1417  egc_rplaca,
1418  egc_rplacd,
1419  egc_set_hash_key_conditional,
1420  egc_set_hash_key_conditional_test;
1421
1422
1423extern opcode ffcall_return_window, ffcall_return_window_end;
1424
/* "Luser" (un-confuse) an interrupted thread's context: if the PC in
   'xp' is inside an atomic sequence (an EGC write-barrier subprim, or
   the middle of consing), make the context look as if that sequence
   had either completed or not started, so the GC / interrupt handler
   sees a consistent state.  When 'alloc_disp' is non-NULL and we were
   mid-allocation, the allocptr displacement is returned through it so
   the caller can undo the adjustment afterwards. */
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  /* NOTE(review): 'instr' and 'frame' are currently unused below. */
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);
 


  /* Case 1: interrupted inside an EGC write-barrier subprim.  Either
     nothing needs doing (a conditional store that hasn't happened or
     failed), or we complete the store and its memoization by hand and
     resume at the subprim's caller (LR). */
  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* Conditional store not yet attempted, or failed (Z clear):
           nothing to complete. */
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;        /* report success to the caller */
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      /* Memoize an old->new store by setting the refbit for the
         stored-into dnode (and for the root object, for hash keys). */
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    /* The store (if any) is done: resume at the subprim's caller. */
    xpPC(xp) = xpLR(xp);
    return;
  }


 
  /* Case 2: allocptr holds a tagged (uncommitted) pointer, i.e. the
     thread was interrupted in the middle of consing. */
  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}
1555
/* Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If the thread can't take
   an interrupt right now (interrupts disabled, no cmain callback,
   running foreign code, or unwinding), just record it in
   tcr->interrupt_pending; otherwise nudge the thread out of any atomic
   sequence (pc_luser_xp) and run the normal exception machinery. */
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: defer. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;
         
          tcr->last_lisp_frame = xpGPR(context,Rsp);
          /* If we interrupted mid-allocation, pc_luser_xp displaces
             allocptr by 'disp'; undone after the exception runs. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
1603
1604
1605
1606void
1607install_signal_handler(int signo, void *handler)
1608{
1609  struct sigaction sa;
1610 
1611  sa.sa_sigaction = (void *)handler;
1612  sigfillset(&sa.sa_mask);
1613  sa.sa_flags = 
1614    0 /* SA_RESTART */
1615    | SA_SIGINFO
1616    ;
1617
1618  sigaction(signo, &sa, NULL);
1619}
1620
/* Install POSIX handlers for the synchronous exceptions lisp cares
   about (skipped on Darwin when Mach exception handling is in use),
   plus the process-interrupt handler; SIGPIPE is ignored so broken
   pipes surface as errno values instead of killing the process. */
void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    extern int no_sigtrap;
    install_signal_handler(SIGILL, (void *)signal_handler);
    /* SIGTRAP may be reserved (e.g. for a debugger); honor no_sigtrap. */
    if (no_sigtrap != 1) {
      install_signal_handler(SIGTRAP, (void *)signal_handler);
    }
    install_signal_handler(SIGBUS,  (void *)signal_handler);
    install_signal_handler(SIGSEGV, (void *)signal_handler);
    install_signal_handler(SIGFPE, (void *)signal_handler);
  }
 
  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}
1650
1651void
1652thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
1653{
1654  TCR *tcr = get_tcr(false);
1655  area *a;
1656  sigset_t mask;
1657 
1658  sigemptyset(&mask);
1659
1660  if (tcr) {
1661    tcr->valence = TCR_STATE_FOREIGN;
1662    a = tcr->vs_area;
1663    if (a) {
1664      a->active = a->high;
1665    }
1666    a = tcr->cs_area;
1667    if (a) {
1668      a->active = a->high;
1669    }
1670  }
1671 
1672  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1673  pthread_exit(NULL);
1674}
1675
1676void
1677thread_signal_setup()
1678{
1679  thread_suspend_signal = SIG_SUSPEND_THREAD;
1680  thread_kill_signal = SIG_KILL_THREAD;
1681
1682  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
1683  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
1684}
1685
1686
1687
1688void
1689unprotect_all_areas()
1690{
1691  protected_area_ptr p;
1692
1693  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1694    unprotect_area(p);
1695  }
1696}
1697
1698/*
1699  A binding subprim has just done "twlle limit_regno,idx_regno" and
1700  the trap's been taken.  Extend the tcr's tlb so that the index will
1701  be in bounds and the new limit will be on a page boundary, filling
1702  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1703  the tcr fields and the registers in the xp and return true if this
1704  all works, false otherwise.
1705
1706  Note that the tlb was allocated via malloc, so realloc can do some
1707  of the hard work.
1708*/
1709Boolean
1710extend_tcr_tlb(TCR *tcr, 
1711               ExceptionInformation *xp, 
1712               unsigned idx_regno)
1713{
1714  unsigned
1715    index = (unsigned) (xpGPR(xp,idx_regno)),
1716    old_limit = tcr->tlb_limit,
1717    new_limit = align_to_power_of_2(index+1,12),
1718    new_bytes = new_limit-old_limit;
1719  LispObj
1720    *old_tlb = tcr->tlb_pointer,
1721    *new_tlb = realloc(old_tlb, new_limit),
1722    *work;
1723
1724  if (new_tlb == NULL) {
1725    return false;
1726  }
1727 
1728  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1729
1730  while (new_bytes) {
1731    *work++ = no_thread_local_binding_marker;
1732    new_bytes -= sizeof(LispObj);
1733  }
1734  tcr->tlb_pointer = new_tlb;
1735  tcr->tlb_limit = new_limit;
1736  return true;
1737}
1738
1739
1740
/* One-time exception-system initialization: just install the POSIX
   (or non-Mach) exception handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
1746
1747
1748
1749
1750
1751#ifdef DARWIN
1752
1753
1754#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1755#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1756
1757
1758
1759#define LISP_EXCEPTIONS_HANDLED_MASK \
1760 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1761
1762/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1763#define NUM_LISP_EXCEPTIONS_HANDLED 4
1764
1765typedef struct {
1766  int foreign_exception_port_count;
1767  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1768  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1769  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1770  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1771} MACH_foreign_exception_state;
1772
1773
1774
1775
1776/*
1777  Mach's exception mechanism works a little better than its signal
1778  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
1780
1781  Initially, we install an exception handler to handle each native
1782  thread's exceptions.  This process involves creating a distinguished
1783  thread which listens for kernel exception messages on a set of
1784  0 or more thread exception ports.  As threads are created, they're
1785  added to that port set; a thread's exception port is destroyed
1786  (and therefore removed from the port set) when the thread exits.
1787
1788  A few exceptions can be handled directly in the handler thread;
1789  others require that we resume the user thread (and that the
1790  exception thread resumes listening for exceptions.)  The user
1791  thread might eventually want to return to the original context
1792  (possibly modified somewhat.)
1793
1794  As it turns out, the simplest way to force the faulting user
1795  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
1797  on the user thread's stack and forces the user thread to resume
1798  execution as if a signal handler had been called with that
1799  context as an argument.  We can use a distinguished UUO at a
1800  distinguished address to do something like sigreturn(); that'll
1801  have the effect of resuming the user thread's execution in
1802  the (pseudo-) signal context.
1803
1804  Since:
1805    a) we have miles of code in C and in Lisp that knows how to
1806    deal with Linux sigcontexts
1807    b) Linux sigcontexts contain a little more useful information
1808    (the DAR, DSISR, etc.) than their Darwin counterparts
1809    c) we have to create a sigcontext ourselves when calling out
1810    to the user thread: we aren't really generating a signal, just
1811    leveraging existing signal-handling code.
1812
1813  we create a Linux sigcontext struct.
1814
1815  Simple ?  Hopefully from the outside it is ...
1816
1817  We want the process of passing a thread's own context to it to
1818  appear to be atomic: in particular, we don't want the GC to suspend
1819  a thread that's had an exception but has not yet had its user-level
1820  exception handler called, and we don't want the thread's exception
1821  context to be modified by a GC while the Mach handler thread is
1822  copying it around.  On Linux (and on Jaguar), we avoid this issue
1823  because (a) the kernel sets up the user-level signal handler and
1824  (b) the signal handler blocks signals (including the signal used
1825  by the GC to suspend threads) until tcr->xframe is set up.
1826
1827  The GC and the Mach server thread therefore contend for the lock
1828  "mach_exception_lock".  The Mach server thread holds the lock
1829  when copying exception information between the kernel and the
1830  user thread; the GC holds this lock during most of its execution
1831  (delaying exception processing until it can be done without
1832  GC interference.)
1833
1834*/
1835
1836
1837#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
1838
1839void
1840fatal_mach_error(char *format, ...);
1841
1842#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
1843
1844
/* Restore 'thread' to the machine state saved in the given
   pseudo-sigcontext: first the FP state, then the general-purpose
   state.  Used when "returning" from a Mach-mediated pseudo-signal. */
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_FLOAT_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread, 
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
} 
1866
1867/* This code runs in the exception handling thread, in response
1868   to an attempt to execute the UU0 at "pseudo_sigreturn" (e.g.,
1869   in response to a call to pseudo_sigreturn() from the specified
1870   user thread.
1871   Find that context (the user thread's R3 points to it), then
1872   use that context to set the user thread's state.  When this
1873   function's caller returns, the Mach kernel will resume the
1874   user thread.
1875*/
1876
/* Runs in the Mach exception-handling thread: complete a
   pseudo_sigreturn() on behalf of 'tcr' by restoring its saved
   pending exception context, marking it back in lisp, and redelivering
   any deferred interrupt.  A missing context is a kernel bug. */
kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
} 
1899
/* Build a pseudo-sigcontext on 'thread's stack from its current Mach
   thread state, so the thread can later be resumed "in" a signal
   handler.  Returns the context; *new_stack_top (if supplied) gets the
   adjusted stack pointer below the frame.
   NOTE(review): this file is an in-progress ARM port, but this
   function still uses ts.__r1 as the stack pointer and fetches
   PPC_FLOAT_STATE / PPC_EXCEPTION_STATE — apparently PPC leftovers
   that need to be replaced with their ARM equivalents; confirm
   against the ARM Mach thread-state definitions. */
ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread, 
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
 
  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  /* Carve the pseudo-sigcontext and mcontext out of the thread's
     stack, below a red zone, keeping C stack alignment. */
  stackp = ts.__r1;
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = PPC_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   PPC_FLOAT_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


#ifdef PPC64
  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
#else
  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
                   PPC_EXCEPTION_STATE64,
#else
                   PPC_EXCEPTION_STATE,
#endif
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  /* Leave room for outgoing parameters/linkage and store a backlink
     to the original stack pointer. */
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
1962
1963/*
1964  This code sets up the user thread so that it executes a "pseudo-signal
1965  handler" function when it resumes.  Create a linux sigcontext struct
1966  on the thread's stack and pass it as an argument to the pseudo-signal
1967  handler.
1968
1969  Things are set up so that the handler "returns to" pseudo_sigreturn(),
1970  which will restore the thread's context.
1971
1972  If the handler invokes code that throws (or otherwise never sigreturn()'s
1973  to the context), that's fine.
1974
1975  Actually, check that: throw (and variants) may need to be careful and
1976  pop the tcr's xframe list until it's younger than any frame being
1977  entered.
1978*/
1979
/* Arrange for 'thread' to execute handler_address(signum, context,
   tcr, old_valence) when it resumes, with a pseudo-sigcontext built
   on its stack and LR pointing at pseudo_sigreturn so the handler's
   return restores the saved context.
   NOTE(review): like create_thread_context_frame, this still uses PPC
   thread-state types and register names (ppc_thread_state_t, __srr0,
   __r1, MSR bits) in an ARM file — presumably unported PPC code;
   verify against the ARM Mach thread-state layout. */
int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
 

  /*
     It seems like we've created a  sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

  ts.__srr0 = (natural) handler_address;
  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
  ts.__r1 = stackp;
  ts.__r3 = signum;
  ts.__r4 = (natural)pseudosigcontext;
  ts.__r5 = (natural)tcr;
  ts.__r6 = (natural)old_valence;
  /* "Returning" from the handler resumes at pseudo_sigreturn. */
  ts.__lr = (natural)pseudo_sigreturn;


#ifdef PPC64
  ts.__r13 = xpGPR(pseudosigcontext,13);
  thread_set_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&ts,
                   PPC_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#endif
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
2039
2040
2041void
2042pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2043{
2044  signal_handler(signum, NULL, context, tcr, old_valence);
2045} 
2046
2047
/*
  Set or clear the FE0/FE1 (floating-point exception enable) bits in
  the suspended thread's MSR, working around pthread_kill's tendency
  to lose those bits (see the long comment below).  Runs in the
  exception thread; "thread" must already be suspended at the
  enable_fp_exceptions()/disable_fp_exceptions() UUO.
  Always returns 0; the thread_get_state/thread_set_state results are
  not checked.  NOTE(review): an error from thread_get_state would
  leave "ts" uninitialized — presumably it can't fail for a suspended
  thread we own; confirm.
*/
int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.__srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and force the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between returning from our
     interrupt handler and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.__r1,
      *next_frame = tos->backlink;
   
    /* A minimal lisp frame is recognized by its backlink pointing
       exactly one frame above the top of stack. */
    if (tos == (next_frame -1)) {
      ts.__srr0 = tos->savelr;          /* resume at the real return address */
      ts.__r1 = (LispObj) next_frame;   /* pop the phony frame */
    } else {
      ts.__srr0 += 4;                   /* just skip the 4-byte UUO */
    }
  }
  thread_set_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
                   PPC_THREAD_STATE_COUNT
#endif
                   );

  return 0;
}
2118
2119/*
2120  This function runs in the exception handling thread.  It's
2121  called (by this precise name) from the library function "exc_server()"
2122  when the thread's exception ports are set up.  (exc_server() is called
2123  via mach_msg_server(), which is a function that waits for and dispatches
2124  on exception messages from the Mach kernel.)
2125
2126  This checks to see if the exception was caused by a pseudo_sigreturn()
2127  UUO; if so, it arranges for the thread to have its state restored
2128  from the specified context.
2129
2130  Otherwise, it tries to map the exception to a signal number and
2131  arranges that the thread run a "pseudo signal handler" to handle
2132  the exception.
2133
2134  Some exceptions could and should be handled here directly.
2135*/
2136
/*
  Runs in the exception-handling thread; called by this exact name
  from openmcl_exc_server().  Either handles one of our UUOs
  (pseudo_sigreturn / enable_fp_exceptions / disable_fp_exceptions)
  directly, or maps the Mach exception to a signal number and arranges
  (via setup_signal_frame) for the target thread to run
  pseudo_signal_handler() when it's resumed.  The value 17 is
  apparently a "not handled" status that openmcl_exc_server() passes
  back as the reply's RetCode.
*/
kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task, 
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  } 
  /* NOTE(review): these (int) casts truncate function pointers on a
     64-bit target — presumably the kernel text lives at low
     addresses; confirm for PPC64. */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      kret = do_pseudo_sigreturn(thread, tcr);
#if 0
      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
#endif
       
    } else if (code1 == (int)enable_fp_exceptions) {
      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    /* Lisp asked that this exception be passed on to any foreign
       handler: report it unhandled. */
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;
  } else {
    /* Map the Mach exception type to a pseudo-signal number. */
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;
       
    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;
     
    case EXC_SOFTWARE:
      if (code == EXC_PPC_TRAP) {
        signum = SIGTRAP;
      }
      break;
     
    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      /* Rewrite the thread's state so it runs the pseudo signal
         handler when resumed. */
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      kret = 17;
    }
  }

  return kret;
}
2213
2214
2215
/*
  Hand-rolled layout of a Mach exception_raise request message
  (msgh_id 2401) as MIG would generate it: header, the two
  kernel-processed port descriptors (thread and task), then the NDR
  record, exception type, code count, codes, and trailer.  Field order
  and types must match the kernel's wire format exactly — do not
  reorder or repack.
*/
typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
2229
2230
/*
  Minimal replacement for the MIG-generated exc_server(): the demux
  routine handed to mach_msg_server().  Builds the reply header by
  hand, rejects anything but message id 2401 (exception_raise,
  EXCEPTION_DEFAULT behavior — ids 2402/2403 are the state-carrying
  flavors, unhandled here), and forwards the request's fields to
  catch_exception_raise(), whose status becomes the reply's RetCode.
  The boolean result tells mach_msg_server() whether the message was
  recognized.
*/
boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  /* Reply header: echo the remote port bits, fixed-size body
     (header + NDR + RetCode), reply id = request id + 100 per Mach
     convention. */
  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
2263
2264/*
2265  The initial function for an exception-handling thread.
2266*/
2267
2268void *
2269exception_handler_proc(void *arg)
2270{
2271  extern boolean_t exc_server();
2272  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2273
2274  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2275  /* Should never return. */
2276  abort();
2277}
2278
2279
2280
2281mach_port_t
2282mach_exception_port_set()
2283{
2284  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2285  kern_return_t kret; 
2286  if (__exception_port_set == MACH_PORT_NULL) {
2287    kret = mach_port_allocate(mach_task_self(),
2288                              MACH_PORT_RIGHT_PORT_SET,
2289                              &__exception_port_set);
2290    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2291    create_system_thread(0,
2292                         NULL,
2293                         exception_handler_proc, 
2294                         (void *)((natural)__exception_port_set));
2295  }
2296  return __exception_port_set;
2297}
2298
2299/*
2300  Setup a new thread to handle those exceptions specified by
2301  the mask "which".  This involves creating a special Mach
2302  message port, telling the Mach kernel to send exception
2303  messages for the calling thread to that port, and setting
2304  up a handler thread which listens for and responds to
2305  those messages.
2306
2307*/
2308
2309/*
2310  Establish the lisp thread's TCR as its exception port, and determine
2311  whether any other ports have been established by foreign code for
2312  exceptions that lisp cares about.
2313
2314  If this happens at all, it should happen on return from foreign
2315  code and on entry to lisp code via a callback.
2316
2317  This is a lot of trouble (and overhead) to support Java, or other
2318  embeddable systems that clobber their caller's thread exception ports.
2319 
2320*/
/*
  Make the lisp exception port (whose Mach name is the TCR's address)
  the thread's exception port for the exceptions lisp handles, saving
  whatever ports foreign code had previously established into the
  TCR's MACH_foreign_exception_state.  On success,
  tcr->foreign_exception_status is set to a mask of those exceptions
  for which some other (non-null, non-lisp) port was registered, so
  restore_foreign_exception_ports() can reinstate them on exit to
  foreign code.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  /* Swap in our port, collecting the displaced masks/ports/behaviors/
     flavors; on return n is the number of entries actually saved. */
  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}
2355
/*
  Establish the lisp exception port for the TCR's own kernel thread
  (tcr->native_thread_id holds the Mach thread port, stored as an
  integer).
*/
kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}
2361
2362/*
2363  Do this when calling out to or returning from foreign code, if
2364  any conflicting foreign exception ports were established when we
2365  last entered lisp code.
2366*/
2367kern_return_t
2368restore_foreign_exception_ports(TCR *tcr)
2369{
2370  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2371 
2372  if (m) {
2373    MACH_foreign_exception_state *fxs  = 
2374      (MACH_foreign_exception_state *) tcr->native_thread_info;
2375    int i, n = fxs->foreign_exception_port_count;
2376    exception_mask_t tm;
2377
2378    for (i = 0; i < n; i++) {
2379      if ((tm = fxs->masks[i]) & m) {
2380        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2381                                   tm,
2382                                   fxs->ports[i],
2383                                   fxs->behaviors[i],
2384                                   fxs->flavors[i]);
2385      }
2386    }
2387  }
2388}
2389                                   
2390
2391/*
2392  This assumes that a Mach port (to be used as the thread's exception port) whose
2393  "name" matches the TCR's 32-bit address has already been allocated.
2394*/
2395
/*
  Give the pre-allocated per-thread exception port (whose Mach name is
  the TCR's address) a send right, make it the thread's exception port
  for the exceptions lisp handles, and add it to the global port set
  serviced by the exception-handling thread.  Returns the first
  failing kern_return_t, or KERN_SUCCESS.
*/
kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}
2420
2421void
2422darwin_exception_init(TCR *tcr)
2423{
2424  void tcr_monitor_exception_handling(TCR*, Boolean);
2425  kern_return_t kret;
2426  MACH_foreign_exception_state *fxs = 
2427    calloc(1, sizeof(MACH_foreign_exception_state));
2428 
2429  tcr->native_thread_info = (void *) fxs;
2430
2431  if ((kret = setup_mach_exception_handling(tcr))
2432      != KERN_SUCCESS) {
2433    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2434    terminate_lisp();
2435  }
2436}
2437
2438/*
2439  The tcr is the "name" of the corresponding thread's exception port.
2440  Destroying the port should remove it from all port sets of which it's
2441  a member (notably, the exception port set.)
2442*/
2443void
2444darwin_exception_cleanup(TCR *tcr)
2445{
2446  void *fxs = tcr->native_thread_info;
2447  extern Boolean use_mach_exception_handling;
2448
2449  if (fxs) {
2450    tcr->native_thread_info = NULL;
2451    free(fxs);
2452  }
2453  if (use_mach_exception_handling) {
2454    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2455    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2456  }
2457}
2458
2459
2460Boolean
2461suspend_mach_thread(mach_port_t mach_thread)
2462{
2463  kern_return_t status;
2464  Boolean aborted = false;
2465 
2466  do {
2467    aborted = false;
2468    status = thread_suspend(mach_thread);
2469    if (status == KERN_SUCCESS) {
2470      status = thread_abort_safely(mach_thread);
2471      if (status == KERN_SUCCESS) {
2472        aborted = true;
2473      } else {
2474        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2475        thread_resume(mach_thread);
2476      }
2477    } else {
2478      return false;
2479    }
2480  } while (! aborted);
2481  return true;
2482}
2483
2484/*
2485  Only do this if pthread_kill indicated that the pthread isn't
2486  listening to signals anymore, as can happen as soon as pthread_exit()
2487  is called on Darwin.  The thread could still call out to lisp as it
2488  is exiting, so we need another way to suspend it in this case.
2489*/
/*
  Suspend the TCR's kernel thread via Mach and record its state in a
  fake signal context (built by create_thread_context_frame; the NULL
  second argument means the new stack pointer isn't needed here),
  which is stored in tcr->suspend_context for the GC and for
  mach_resume_tcr().  Returns true on success, false if the thread
  couldn't be suspended.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;
 
  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}
2506
2507void
2508mach_resume_tcr(TCR *tcr)
2509{
2510  ExceptionInformation *xp;
2511  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2512 
2513  xp = tcr->suspend_context;
2514#ifdef DEBUG_MACH_EXCEPTIONS
2515  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2516          tcr, tcr->pending_exception_context);
2517#endif
2518  tcr->suspend_context = NULL;
2519  restore_mach_thread_state(mach_thread, xp);
2520#ifdef DEBUG_MACH_EXCEPTIONS
2521  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2522          tcr, tcr->pending_exception_context);
2523#endif
2524  thread_resume(mach_thread);
2525}
2526
/*
  Format a printf-style message and terminate via Fatal() with a
  "Mach error" banner.  Does not return.
*/
void
fatal_mach_error(char *format, ...)
{
  char message[512];
  va_list ap;

  va_start(ap, format);
  vsnprintf(message, sizeof(message), format, ap);
  va_end(ap);

  Fatal("Mach error", message);
}
2540
/*
  Trampoline run on the target thread after mach_raise_thread_interrupt()
  builds its frame: adapts the pseudo-signal calling convention to the
  ordinary interrupt_handler() entry point (no siginfo is available,
  so pass NULL).
*/
void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}
2546
/*
  Interrupt another lisp thread via Mach (used when pthread_kill isn't
  an option — see the comment above suspend_mach_thread).  With the
  TCR area lock held: suspend the target, and if it is suspended
  exactly once (i.e. only by us), running lisp code, not unwinding,
  and has interrupts enabled (TCR_INTERRUPT_LEVEL >= 0), push a frame
  so that it runs pseudo_interrupt_handler() on resume.  Otherwise
  just set tcr->interrupt_pending (as a fixnum) so the interrupt is
  taken later.  Always returns 0.
*/
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info; 
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      /* suspend_count == 1 means no one else had suspended it;
         interrupting a multiply-suspended thread would be unsafe. */
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      /* Couldn't interrupt directly: leave a fixnum-tagged flag for
         the thread to notice at its next interrupt check. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);
   
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
2588
2589#endif
Note: See TracBrowser for help on using the repository browser.