source: branches/arm/lisp-kernel/arm-exceptions.c @ 13924

Last change on this file since 13924 was 13924, checked in by gb, 9 years ago

arm-asmutils.s: don't use magic Linux kernel function for store_conditional,
just use ldrex/strex.
arm-constants.s: byte order in definitions of bytes_consed_[high,low] slots
in TCR.
arm-exceptions.s: in normalize_tcr(), if other_tcr is in ff-call, need to
update the cs_area based on tcr->last_lisp_frame.
arm-gc.c: check for running off end of cstack in mark_cstack_area().

File size: 67.9 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License, known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
50/* Some relatively recent kernels support this interface.
51   If this prctl isn't supported, assume that we're always
52   running with exceptions enabled and "precise".
53*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
64void
65enable_fp_exceptions()
66{
67  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
68}
69
70void
71disable_fp_exceptions()
72{
73  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
74}
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
93int
94page_size = 4096;
95
96int
97log2_page_size = 12;
98
99
100
101
102
103/*
104  If the PC is pointing to an allocation trap, the previous instruction
105  must have decremented allocptr.  Return the non-zero amount by which
106  allocptr was decremented.
107*/
108signed_natural
109allocptr_displacement(ExceptionInformation *xp)
110{
111  pc program_counter = xpPC(xp);
112  opcode instr = *program_counter, prev_instr;
113
114  if (IS_ALLOC_TRAP(instr)) {
115    /* The alloc trap must have been preceded by a cmp and a
116       load from tcr.allocbase. */
117    prev_instr = program_counter[-3];
118
119    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
120      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
121    }
122   
123    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
124      return -((signed_natural)(prev_instr & 0xff));
125    }
126
127    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
128      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);
129
130      instr = program_counter[-4];
131      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
132        return -((signed_natural)(disp | (instr & 0xff)));
133      }
134    }
135    Bug(xp, "Can't determine allocation displacement");
136  }
137  return 0;
138}
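/* Illustrative sketch (assumed instruction shapes, not verbatim emitter
   output): the decoder above expects the allocation sequence in front
   of the trap to look roughly like

     sub    allocptr, allocptr, rM    @ or: sub allocptr, allocptr, #imm
     ldr    rX, [rcontext, #tcr.allocbase]
     cmp    allocptr, rX
     uuo_alloc_trap                   @ the trap we're stopped at

   which is why the "sub" is fetched from program_counter[-3], with a
   second look at program_counter[-4] when the displacement was split
   across two sub instructions. */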
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
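/* Sketch of the (assumed) instruction window being emulated above,
   named after the predicates in the loop rather than real encodings:

     str   rD, [allocptr, #cons.car]         @ IS_SET_ALLOCPTR_CAR_RD
     str   rD, [allocptr, #cons.cdr]         @ IS_SET_ALLOCPTR_CDR_RD
     mov   rD, allocptr                      @ IS_SET_ALLOCPTR_RESULT_RD
     bic   allocptr, allocptr, #fulltagmask  @ IS_CLR_ALLOCPTR_TAG

   Emulation stops after the tag-clearing instruction, leaving the
   saved PC just past it. */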
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
213Boolean
214allocate_object(ExceptionInformation *xp,
215                natural bytes_needed, 
216                signed_natural disp_from_allocptr,
217                TCR *tcr)
218{
219  area *a = active_dynamic_area;
220
221  /* Maybe do an EGC */
222  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
223    if (((a->active)-(a->low)) >= a->threshold) {
224      gc_from_xp(xp, 0L);
225    }
226  }
227
228  /* Life is pretty simple if we can simply grab a segment
229     without extending the heap.
230  */
231  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
232    xpGPR(xp, allocptr) += disp_from_allocptr;
233    return true;
234  }
235 
236  /* It doesn't make sense to try a full GC if the object
237     we're trying to allocate is larger than everything
238     allocated so far.
239  */
240  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
241    untenure_from_area(tenured_area); /* force a full GC */
242    gc_from_xp(xp, 0L);
243  }
244 
245  /* Try again, growing the heap if necessary */
246  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
247    xpGPR(xp, allocptr) += disp_from_allocptr;
248    return true;
249  }
250 
251  return false;
252}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
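/* Worked example (assuming, as elsewhere in this file, that allocation
   moves allocptr downward): if tcr->last_allocptr was 0x10000 and the
   thread has since consed down to cur_allocptr == 0xFF00, this credits
   0x100 (256) bytes to tcr->bytes_allocated. */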
269
270void
271lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
272{
273  /* Couldn't allocate the object.  If it's smaller than some arbitrary
274     size (say 128K bytes), signal a "chronically out-of-memory" condition;
275     else signal an "allocation request failed" condition.
276  */
277  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
278  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
279}
280
281/*
282  Allocate a large list, where "large" means "large enough to
283  possibly trigger the EGC several times if this was done
284  by individually allocating each CONS."  The number of
285  conses in question is in arg_z; on successful return,
286  the list will be in arg_z.
287*/
288
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
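/* Example: with arg_z = 3 (as a fixnum) and arg_y = X, the loop above
   builds the 3-element list (X X X) out of a single 3-dnode
   allocation, linking each new cons to the previously built tail, and
   returns the list in arg_z. */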
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
361natural gc_deferred = 0, full_gc_deferred = 0;
362
363signed_natural
364flash_freeze(TCR *tcr, signed_natural param)
365{
366  return 0;
367}
368
369Boolean
370handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
371{
372  LispObj
373    selector = xpGPR(xp,imm0), 
374    arg = xpGPR(xp,imm1);
375  area *a = active_dynamic_area;
376  Boolean egc_was_enabled = (a->older != NULL);
377  natural gc_previously_deferred = gc_deferred;
378
379
380  switch (selector) {
381  case GC_TRAP_FUNCTION_EGC_CONTROL:
382    egc_control(arg != 0, a->active);
383    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
384    break;
385
386  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
387    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
388    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
389    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
390    xpGPR(xp,arg_z) = lisp_nil+t_offset;
391    break;
392
393  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
394    if (((signed_natural) arg) > 0) {
395      lisp_heap_gc_threshold = 
396        align_to_power_of_2((arg-1) +
397                            (heap_segment_size - 1),
398                            log2_heap_segment_size);
399    }
400    /* fall through */
401  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
402    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
403    break;
404
405  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
406    /*  Try to put the current threshold in effect.  This may
407        need to disable/reenable the EGC. */
408    untenure_from_area(tenured_area);
409    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
410    if (egc_was_enabled) {
411      if ((a->high - a->active) >= a->threshold) {
412        tenure_to_area(tenured_area);
413      }
414    }
415    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
416    break;
417
418  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
419    ensure_static_conses(xp,tcr,32768);
420    break;
421
422  case GC_TRAP_FUNCTION_FLASH_FREEZE:
423    untenure_from_area(tenured_area);
424    gc_like_from_xp(xp,flash_freeze,0);
425    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
426    tenured_area->static_dnodes = area_dnode(a->active, a->low);
427    if (egc_was_enabled) {
428      tenure_to_area(tenured_area);
429    }
430    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
431    break;
432
433  default:
434    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
435
436    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
437      if (!full_gc_deferred) {
438        gc_from_xp(xp, 0L);
439        break;
440      }
441      /* Tried to do a full GC when gc was disabled.  That failed,
442         so try full GC now */
443      selector = GC_TRAP_FUNCTION_GC;
444    }
445   
446    if (egc_was_enabled) {
447      egc_control(false, (BytePtr) a->active);
448    }
449    gc_from_xp(xp, 0L);
450    if (gc_deferred > gc_previously_deferred) {
451      full_gc_deferred = 1;
452    } else {
453      full_gc_deferred = 0;
454    }
455    if (selector > GC_TRAP_FUNCTION_GC) {
456      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
457        impurify_from_xp(xp, 0L);
458        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
459        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
460        gc_from_xp(xp, 0L);
461      }
462      if (selector & GC_TRAP_FUNCTION_PURIFY) {
463        purify_from_xp(xp, 0L);
464        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
465        gc_from_xp(xp, 0L);
466      }
467      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
468        OSErr err;
469        extern OSErr save_application(unsigned, Boolean);
470        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
471        area *vsarea = tcr->vs_area;
472       
473        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
474        err = save_application(arg, egc_was_enabled);
475        if (err == noErr) {
476          _exit(0);
477        }
478        fatal_oserr(": save_application", err);
479      }
480      switch (selector) {
481
482
483      case GC_TRAP_FUNCTION_FREEZE:
484        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
485        tenured_area->static_dnodes = area_dnode(a->active, a->low);
486        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
487        break;
488      default:
489        break;
490      }
491    }
492   
493    if (egc_was_enabled) {
494      egc_control(true, NULL);
495    }
496    break;
497   
498  }
499
500  adjust_exception_pc(xp,4);
501  return true;
502}
503
504
505
506void
507signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
508{
509  /* A stack (identified by "reg") just overflowed.  Signal an
510     error_stack_overflow condition so that lisp can unwind until
511     all stacks are back under their overflow limits.
512  */
513
514#if 0
515  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
516#endif
517  handle_error(xp, error_stack_overflow, reg);
518}
519
520/*
521  Lower (move toward 0) the "end" of the soft protected area associated
522  with the area "a" by a page, if we can.
523*/
524
525void
526adjust_soft_protection_limit(area *a)
527{
528  char *proposed_new_soft_limit = a->softlimit - 4096;
529  protected_area_ptr p = a->softprot;
530 
531  if (proposed_new_soft_limit >= (p->start+16384)) {
532    p->end = proposed_new_soft_limit;
533    p->protsize = p->end-p->start;
534    a->softlimit = proposed_new_soft_limit;
535  }
536  protect_area(p);
537}
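/* Example (assuming 4KB pages): each call proposes moving the soft
   limit down one page, but the move is only accepted while at least
   16KB (four pages) of the protected region would remain, so a cushion
   always survives to catch further overflows. */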
538
539void
540restore_soft_stack_limit(unsigned stkreg)
541{
542  area *a;
543  TCR *tcr = get_tcr(true);
544
545  switch (stkreg) {
546  case Rsp:
547    a = tcr->cs_area;
548    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
549      a->softlimit -= 4096;
550    }
551    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
552    break;
553  case vsp:
554    a = tcr->vs_area;
555    adjust_soft_protection_limit(a);
556    break;
557  }
558}
559
560/* Maybe this'll work someday.  We may have to do something to
561   make the thread look like it's not handling an exception */
562void
563reset_lisp_process(ExceptionInformation *xp)
564{
565  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
566  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));
567
568  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
569
570  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
571
572  start_lisp(tcr, 1);
573}
574
575/*
576  This doesn't GC; it returns true if it made enough room, false
577  otherwise.
578  If "extend" is true, it can try to extend the dynamic area to
579  satisfy the request.
580*/
581
582Boolean
583new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
584{
585  area *a;
586  natural newlimit, oldlimit;
587  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
588
589  a  = active_dynamic_area;
590  oldlimit = (natural) a->active;
591  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
592              align_to_power_of_2(need, log2_allocation_quantum));
593  if (newlimit > (natural) (a->high)) {
594    if (extend) {
595      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
596      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
597      do {
598        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
599          break;
600        }
601        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
602        if (extend_by < 4<<20) {
603          return false;
604        }
605      } while (1);
606    } else {
607      return false;
608    }
609  }
610  a->active = (BytePtr) newlimit;
611  tcr->last_allocptr = (void *)newlimit;
612  xpGPR(xp,allocptr) = (LispObj) newlimit;
613  tcr->save_allocbase = (void*) oldlimit;
614
615  return true;
616}
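/* Worked example (assuming tcr->log2_allocation_quantum == 16, i.e.
   64KB quanta): with a->active == 0x30001000 and need == 0x100,
   oldlimit == 0x30001000 rounds up to 0x30010000, need rounds up to
   0x10000, and newlimit becomes 0x30020000.  allocptr is set to
   newlimit and save_allocbase to oldlimit, so the thread conses
   downward through the newly claimed space. */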
617
618 
619void
620update_area_active (area **aptr, BytePtr value)
621{
622  area *a = *aptr;
623  for (; a; a = a->older) {
624    if ((a->low <= value) && (a->high >= value)) break;
625  };
626  if (a == NULL) Bug(NULL, "Can't find active area");
627  a->active = value;
628  *aptr = a;
629
630  for (a = a->younger; a; a = a->younger) {
631    a->active = a->high;
632  }
633}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
652void
653normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
654{
655  void *cur_allocptr = NULL;
656  LispObj freeptr = 0;
657
658  if (xp) {
659    if (is_other_tcr) {
660      pc_luser_xp(xp, tcr, NULL);
661      freeptr = xpGPR(xp, allocptr);
662      if (fulltag_of(freeptr) == 0){
663        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
664      }
665    }
666    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
667    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
668  } else {
669    /* In ff-call. */
670    cur_allocptr = (void *) (tcr->save_allocptr);
671    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
672    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
673  }
674
675
676  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
677  if (cur_allocptr) {
678    update_bytes_allocated(tcr, cur_allocptr);
679    if (freeptr) {
680      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
681    }
682  }
683}
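/* Note (cf. the change described in the log above): when the other
   tcr is in the middle of an ff-call there is no usable exception or
   suspend context, so the control-stack area's active pointer is
   recovered from tcr->last_lisp_frame, the last lisp frame recorded
   before calling out to foreign code. */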
684
685TCR *gc_tcr = NULL;
686
687/* Suspend and "normalize" other tcrs, then call a gc-like function
688   in that context.  Resume the other tcrs, then return what the
689   function returned */
690
691signed_natural
692gc_like_from_xp(ExceptionInformation *xp, 
693                signed_natural(*fun)(TCR *, signed_natural), 
694                signed_natural param)
695{
696  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
697  int result;
698  signed_natural inhibit;
699
700  suspend_other_threads(true);
701  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
702  if (inhibit != 0) {
703    if (inhibit > 0) {
704      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
705    }
706    resume_other_threads(true);
707    gc_deferred++;
708    return 0;
709  }
710  gc_deferred = 0;
711
712  gc_tcr = tcr;
713
714  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
715
716  normalize_tcr(xp, tcr, false);
717
718
719  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
720    if (other_tcr->pending_exception_context) {
721      other_tcr->gc_context = other_tcr->pending_exception_context;
722    } else if (other_tcr->valence == TCR_STATE_LISP) {
723      other_tcr->gc_context = other_tcr->suspend_context;
724    } else {
725      /* no pending exception, didn't suspend in lisp state:
726         must have executed a synchronous ff-call.
727      */
728      other_tcr->gc_context = NULL;
729    }
730    normalize_tcr(other_tcr->gc_context, other_tcr, true);
731  }
732   
733
734
735  result = fun(tcr, param);
736
737  other_tcr = tcr;
738  do {
739    other_tcr->gc_context = NULL;
740    other_tcr = other_tcr->next;
741  } while (other_tcr != tcr);
742
743  gc_tcr = NULL;
744
745  resume_other_threads(true);
746
747  return result;
748
749}
750
751
752
753/* Returns #bytes freed by invoking GC */
754
755signed_natural
756gc_from_tcr(TCR *tcr, signed_natural param)
757{
758  area *a;
759  BytePtr oldfree, newfree;
760  BytePtr oldend, newend;
761
762  a = active_dynamic_area;
763  oldend = a->high;
764  oldfree = a->active;
765  gc(tcr, param);
766  newfree = a->active;
767  newend = a->high;
768#if 0
769  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
770#endif
771  return ((oldfree-newfree)+(newend-oldend));
772}
773
774signed_natural
775gc_from_xp(ExceptionInformation *xp, signed_natural param)
776{
777  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
778
779  freeGCptrs();
780  return status;
781}
782
783signed_natural
784purify_from_xp(ExceptionInformation *xp, signed_natural param)
785{
786  return gc_like_from_xp(xp, purify, param);
787}
788
789signed_natural
790impurify_from_xp(ExceptionInformation *xp, signed_natural param)
791{
792  return gc_like_from_xp(xp, impurify, param);
793}
794
795
796
797
798
799
800protection_handler
801 * protection_handlers[] = {
802   do_spurious_wp_fault,
803   do_soft_stack_overflow,
804   do_soft_stack_overflow,
805   do_soft_stack_overflow,
806   do_hard_stack_overflow,   
807   do_hard_stack_overflow,
808   do_hard_stack_overflow
809   };
810
811
812Boolean
813is_write_fault(ExceptionInformation *xp, siginfo_t *info)
814{
815#ifdef LINUX
816  /* Based on experiments with a small sample size; need to R TFM. */
817  return ((xp->uc_mcontext.trap_no == 0xe) &&
818          (xp->uc_mcontext.error_code == 0x817));
819#endif
820}
821
822Boolean
823handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
824{
825  BytePtr addr;
826  protected_area_ptr area;
827  protection_handler *handler;
828  extern Boolean touch_page(void *);
829  extern void touch_page_end(void);
830
831  if (info) {
832    addr = (BytePtr)(info->si_addr);
833  } else {
834    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
835  }
836
837  if (addr && (addr == tcr->safe_ref_address)) {
838    adjust_exception_pc(xp,4);
839
840    xpGPR(xp,imm0) = 0;
841    return true;
842  }
843
844  if (xpPC(xp) == (pc)touch_page) {
845    xpGPR(xp,imm0) = 0;
846    xpPC(xp) = (pc)touch_page_end;
847    return true;
848  }
849
850
851  if (is_write_fault(xp,info)) {
852    area = find_protected_area(addr);
853    if (area != NULL) {
854      handler = protection_handlers[area->why];
855      return handler(xp, area, addr);
856    } else {
857      if ((addr >= readonly_area->low) &&
858          (addr < readonly_area->active)) {
859        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
860                        page_size);
861        return true;
862      }
863    }
864  }
865  if (old_valence == TCR_STATE_LISP) {
866    LispObj cmain = nrs_CMAIN.vcell;
867   
868    if ((fulltag_of(cmain) == fulltag_misc) &&
869      (header_subtag(header_of(cmain)) == subtag_macptr)) {
870     
871      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
872    }
873  }
874  return false;
875}
876
877
878
879
880
881OSStatus
882do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
883{
884#ifdef SUPPORT_PRAGMA_UNUSED
885#pragma unused(area,addr)
886#endif
887  reset_lisp_process(xp);
888  return -1;
889}
890
891extern area*
892allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
893
894extern area*
895allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
896
897
898
899
900
901
902Boolean
903lisp_frame_p(lisp_frame *spPtr)
904{
905  return (spPtr->marker == lisp_frame_marker);
906}
907
908
909int ffcall_overflow_count = 0;
910
911
912
913
914
915
916/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
917  the current value of VSP (TSP) or an older area.  */
918
919OSStatus
920do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
921{
922  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
923  area *a = tcr->vs_area;
924  protected_area_ptr vsp_soft = a->softprot;
925  unprotect_area(vsp_soft);
926  signal_stack_soft_overflow(xp,vsp);
927  return 0;
928}
929
930
931
932OSStatus
933do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
934{
935  /* Trying to write into a guard page on the vstack or tstack.
936     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
937     signal an error_stack_overflow condition.
938      */
939  return do_vsp_overflow(xp,addr);
940}
941
942OSStatus
943do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
944{
945#ifdef SUPPORT_PRAGMA_UNUSED
946#pragma unused(xp,area,addr)
947#endif
948  return -1;
949}
950
951
952
953
954     
955
956
957
958
959
960Boolean
961handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
962{
963  return false;
964}
965
966
967Boolean
968handle_unimplemented_instruction(ExceptionInformation *xp,
969                                 opcode instruction,
970                                 TCR *tcr)
971{
972
973  return false;
974}
975
976Boolean
977handle_exception(int xnum, 
978                 ExceptionInformation *xp, 
979                 TCR *tcr, 
980                 siginfo_t *info,
981                 int old_valence)
982{
983  pc program_counter;
984  opcode instruction = 0;
985
986  if (old_valence != TCR_STATE_LISP) {
987    return false;
988  }
989
990  program_counter = xpPC(xp);
991 
992  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
993    instruction = *program_counter;
994  }
995
996  if (IS_ALLOC_TRAP(instruction)) {
997    return handle_alloc_trap(xp, tcr);
998  } else if ((xnum == SIGSEGV) ||
999             (xnum == SIGBUS)) {
1000    return handle_protection_violation(xp, info, tcr, old_valence);
1001  } else if (xnum == SIGFPE) {
1002    return handle_sigfpe(xp, tcr);
1003  } else if (xnum == SIGILL) {
1004    if (IS_GC_TRAP(instruction)) {
1005      return handle_gc_trap(xp, tcr);
1006    } else if (IS_UUO(instruction)) {
1007      return handle_uuo(xp, info, instruction);
1008    } else {
1009      return handle_unimplemented_instruction(xp,instruction,tcr);
1010    }
1011  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1012    tcr->interrupt_pending = 0;
1013    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0);
1014    return true;
1015  }
1016
1017  return false;
1018}
1019
1020void
1021adjust_exception_pc(ExceptionInformation *xp, int delta)
1022{
1023  xpPC(xp) += (delta >> 2);
1024}
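/* Example: delta is in bytes, but xpPC() is typed as a pointer to
   32-bit opcodes, so the byte count is converted to a word count;
   adjust_exception_pc(xp, 4) steps the saved PC over exactly one ARM
   instruction. */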
1025
1026
1027/*
1028  This wants to scan backwards until "where" points to an instruction
1029   whose major opcode is either 63 (double-float) or 59 (single-float)
1030*/
1031
1032OSStatus
1033handle_fpux_binop(ExceptionInformation *xp, pc where)
1034{
1035  OSStatus err = -1;
1036  opcode *there = (opcode *) where, instr, errnum = 0;
1037  return err;
1038}
1039
1040Boolean
1041handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
1042{
1043  unsigned 
1044    format = UUO_FORMAT(the_uuo);
1045  Boolean handled = false;
1046  int bump = 4;
1047  TCR *tcr = get_tcr(true);
1048
1049  switch (format) {
1050  case uuo_format_kernel_service:
1051    {
1052      TCR * target = (TCR *)xpGPR(xp,arg_z);
1053      int service = UUO_UNARY_field(the_uuo);
1054
1055      switch (service) {
1056      case error_propagate_suspend:
1057        handled = true;
1058        break;
1059      case error_interrupt:
1060        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
1061        handled = true;
1062        break;
1063      case error_suspend:
1064        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
1065        handled = true;
1066        break;
1067      case error_suspend_all:
1068        lisp_suspend_other_threads();
1069        handled = true;
1070        break;
1071      case error_resume:
1072        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
1073        handled = true;
1074        break;
1075      case error_resume_all:
1076        lisp_resume_other_threads();
1077        handled = true;
1078        break;
1079      case error_kill:
1080        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
1081        handled = true;
1082        break;
1083      case error_allocate_list:
1084        allocate_list(xp,tcr);
1085        handled = true;
1086        break;
1087      default:
1088        handled = false;
1089        break;
1090      }
1091      break;
1092    }
1093
1094  case uuo_format_unary:
1095    switch(UUO_UNARY_field(the_uuo)) {
1096    case 3:
1097      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
1098        handled = true;
1099        bump = 4;
1100        break;
1101      }
1102      /* fall in */
1103    default:
1104      handled = false;
1105      break;
1106
1107    }
1108    break;
1109
1110  case uuo_format_nullary:
1111    switch (UUOA_field(the_uuo)) {
1112    case 3:
1113      adjust_exception_pc(xp, bump);
1114      bump = 0;
1115      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
1116      handled = true;
1117      break;
1118
1119    case 4:
1120      tcr->interrupt_pending = 0;
1121      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0);
1122      handled = true;
1123      break;
1124    default:
1125      handled = false;
1126      break;
1127    }
1128    break;
1129
1130
1131  case uuo_format_error_lisptag:
1132  case uuo_format_error_fulltag:
1133  case uuo_format_error_xtype:
1134  case uuo_format_nullary_error:
1135  case uuo_format_unary_error:
1136  case uuo_format_binary_error:
1137    handled = handle_error(xp,0,the_uuo);
1138    break;
1139
1140  default:
1141    handled = false;
1142    bump = 0;
1143  }
1144 
1145  if (handled && bump) {
1146    adjust_exception_pc(xp, bump);
1147  }
1148  return handled;
1149}
1150
1151natural
1152register_codevector_contains_pc (natural lisp_function, pc where)
1153{
1154  natural code_vector, size;
1155
1156  if ((fulltag_of(lisp_function) == fulltag_misc) &&
1157      (header_subtag(header_of(lisp_function)) == subtag_function)) {
1158    code_vector = deref(lisp_function, 2);
1159    size = header_element_count(header_of(code_vector)) << 2;
1160    if ((untag(code_vector) < (natural)where) && 
1161        ((natural)where < (code_vector + size)))
1162      return(code_vector);
1163  }
1164
1165  return(0);
1166}
1167
1168Boolean
1169callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
1170{
1171  return callback_to_lisp(callback_macptr, xp, info,arg);
1172}
1173
1174Boolean
1175callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
1176                  natural arg1, natural arg2)
1177{
1178  natural  callback_ptr;
1179  area *a;
1180  natural fnreg = fn,  codevector, offset;
1181  pc where = xpPC(xp);
1182
1183  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1184  if (codevector == 0) {
1185    fnreg = nfn;
1186    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1187    if (codevector == 0) {
1188      fnreg = 0;
1189    }
1190  }
1191  if (codevector) {
1192    offset = (natural)where - (codevector - (fulltag_misc-node_size));
1193  } else {
1194    offset = (natural)where;
1195  }
1196                                                 
1197                                               
1198
1199  TCR *tcr = get_tcr(true);
1200
1201  /* Put the active stack pointer where .SPcallback expects it */
1202  a = tcr->cs_area;
1203  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));
1204
1205  /* Copy globals from the exception frame to tcr */
1206  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
1207  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
1208
1209
1210
1211  /* Call back.
1212     Lisp will handle trampolining through some code that
1213     will push lr/fn & pc/nfn stack frames for backtrace.
1214  */
1215  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1216  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
1217  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
1218  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1219
1220
1221
1222  /* Copy GC registers back into exception frame */
1223  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
1224  return true;
1225}
1226
1227area *
1228allocate_no_stack (natural size)
1229{
1230#ifdef SUPPORT_PRAGMA_UNUSED
1231#pragma unused(size)
1232#endif
1233
1234  return (area *) NULL;
1235}
1236
1237
1238
1239
1240
1241
1242/* callback to (symbol-value cmain) if it is a macptr,
1243   otherwise report cause and function name to console.
1244   Returns noErr if exception handled OK */
1245OSStatus
1246handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
1247{
1248  LispObj   cmain = nrs_CMAIN.vcell;
1249  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1250
1251}
1252
1253
1254
1255
1256void non_fatal_error( char *msg )
1257{
1258  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1259  fflush( dbgout );
1260}
1261
1262
1263
1264Boolean
1265handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
1266{
1267  LispObj   errdisp = nrs_ERRDISP.vcell;
1268
1269  if ((fulltag_of(errdisp) == fulltag_misc) &&
1270      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1271    /* errdisp is a macptr, we can call back to lisp */
1272    return callback_for_trap(errdisp, xp, arg1, arg2);
1273    }
1274
1275  return false;
1276}
1277               
1278
1279/*
1280   Current thread has all signals masked.  Before unmasking them,
1281   make it appear that the current thread has been suspended.
1282   (This is to handle the case where another thread is trying
283   to GC before this thread is able to seize the exception lock.)
1284*/
1285int
1286prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1287{
1288  int old_valence = tcr->valence;
1289
1290  tcr->pending_exception_context = context;
1291  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1292
1293  ALLOW_EXCEPTIONS(context);
1294  return old_valence;
1295} 
1296
1297void
1298wait_for_exception_lock_in_handler(TCR *tcr, 
1299                                   ExceptionInformation *context,
1300                                   xframe_list *xf)
1301{
1302
1303  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1304  xf->curr = context;
1305  xf->prev = tcr->xframe;
1306  tcr->xframe =  xf;
1307  tcr->pending_exception_context = NULL;
1308  tcr->valence = TCR_STATE_FOREIGN; 
1309}
1310
1311void
1312unlock_exception_lock_in_handler(TCR *tcr)
1313{
1314  tcr->pending_exception_context = tcr->xframe->curr;
1315  tcr->xframe = tcr->xframe->prev;
1316  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1317  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1318}
1319
1320/*
1321   If an interrupt is pending on exception exit, try to ensure
1322   that the thread sees it as soon as it's able to run.
1323*/
1324void
1325raise_pending_interrupt(TCR *tcr)
1326{
1327  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1328    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1329  }
1330}
1331
1332void
1333exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
1334{
1335  sigset_t mask;
1336  sigfillset(&mask);
1337 
1338  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1339  tcr->valence = old_valence;
1340  tcr->pending_exception_context = NULL;
1341  tcr->last_lisp_frame = old_last_lisp_frame;
1342}
1343
1344
1345void
1346signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
1347{
1348  xframe_list xframe_link;
1349
1350  if (!use_mach_exception_handling) {
1351   
1352    tcr = (TCR *) get_interrupt_tcr(false);
1353 
1354    /* The signal handler's entered with all signals (notably the
1355       thread_suspend signal) blocked.  Don't allow any other signals
1356       (notably the thread_suspend signal) to preempt us until we've
1357       set the TCR's xframe slot to include the current exception
1358       context.
1359    */
1360   
1361    old_last_lisp_frame = tcr->last_lisp_frame;
1362    tcr->last_lisp_frame = xpGPR(context,Rsp);
1363    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1364  }
1365
1366  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1367    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1368    pthread_kill(pthread_self(), thread_suspend_signal);
1369  }
1370
1371 
1372  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1373  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
1374    char msg[512];
1375    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1376    if (lisp_Debugger(context, info, signum, false, msg)) {
1377      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1378    }
1379  }
1380  unlock_exception_lock_in_handler(tcr);
1381
1382  /* This thread now looks like a thread that was suspended while
1383     executing lisp code.  If some other thread gets the exception
1384     lock and GCs, the context (this thread's suspend_context) will
1385     be updated.  (That's only of concern if it happens before we
1386     can return to the kernel/to the Mach exception handler).
1387  */
1388  if (!use_mach_exception_handling) {
1389    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
1390    raise_pending_interrupt(tcr);
1391  }
1392}
1393
1394/*
1395  If it looks like we're in the middle of an atomic operation, make
1396  it seem as if that operation is either complete or hasn't started
1397  yet.
1398
1399  The cases handled include:
1400
1401  a) storing into a newly-allocated lisp frame on the stack.
1402  b) marking a newly-allocated TSP frame as containing "raw" data.
1403  c) consing: the GC has its own ideas about how this should be
1404     handled, but other callers would be best advised to back
1405     up or move forward, according to whether we're in the middle
1406     of allocating a cons cell or allocating a uvector.
1407  d) a STMW to the vsp
1408  e) EGC write-barrier subprims.
1409*/
1410
1411extern opcode
1412  egc_write_barrier_start,
1413  egc_write_barrier_end, 
1414  egc_store_node_conditional, 
1415  egc_store_node_conditional_test,
1416  egc_set_hash_key,
1417  egc_gvset,
1418  egc_rplaca,
1419  egc_rplacd,
1420  egc_set_hash_key_conditional,
1421  egc_set_hash_key_conditional_test;
1422
1423
1424extern opcode ffcall_return_window, ffcall_return_window_end;
1425
1426void
1427pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1428{
1429  pc program_counter = xpPC(xp);
1430  opcode instr = *program_counter;
1431  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
1432  LispObj cur_allocptr = xpGPR(xp, allocptr);
1433  int allocptr_tag = fulltag_of(cur_allocptr);
1434 
1435
1436
1437  if ((program_counter < &egc_write_barrier_end) && 
1438      (program_counter >= &egc_write_barrier_start)) {
1439    LispObj *ea = 0, val = 0, root = 0;
1440    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1441    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1442
1443    if (program_counter >= &egc_set_hash_key_conditional) {
1444      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1445          ((program_counter == &egc_set_hash_key_conditional_test) &&
1446           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1447        return;
1448      }
1449      need_store = false;
1450      root = xpGPR(xp,arg_x);
1451      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1452      need_memoize_root = true;
1453    } else if (program_counter >= &egc_store_node_conditional) {
1454      if ((program_counter < &egc_store_node_conditional_test) ||
1455          ((program_counter == &egc_store_node_conditional_test) &&
1456           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1457        /* The conditional store either hasn't been attempted yet, or
1458           has failed.  No need to adjust the PC, or do memoization. */
1459        return;
1460      }
1461      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
1462      xpGPR(xp,arg_z) = t_value;
1463      need_store = false;
1464    } else if (program_counter >= &egc_set_hash_key) {
1465      root = xpGPR(xp,arg_x);
1466      val = xpGPR(xp,arg_z);
1467      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1468      need_memoize_root = true;
1469    } else if (program_counter >= &egc_gvset) {
1470      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1471      val = xpGPR(xp,arg_z);
1472    } else if (program_counter >= &egc_rplacd) {
1473      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1474      val = xpGPR(xp,arg_z);
1475    } else {                      /* egc_rplaca */
1476      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1477      val = xpGPR(xp,arg_z);
1478    }
1479    if (need_store) {
1480      *ea = val;
1481    }
1482    if (need_check_memo) {
1483      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1484      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1485          ((LispObj)ea < val)) {
1486        atomic_set_bit(refbits, bitnumber);
1487        if (need_memoize_root) {
1488          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1489          atomic_set_bit(refbits, bitnumber);
1490        }
1491      }
1492    }
1493    xpPC(xp) = xpLR(xp);
1494    return;
1495  }
1496
1497
1498 
1499  if (allocptr_tag != tag_fixnum) {
1500    signed_natural disp = allocptr_displacement(xp);
1501
1502    if (disp) {
1503      /* Being architecturally "at" the alloc trap doesn't tell
1504         us much (in particular, it doesn't tell us whether
1505         or not the thread has committed to taking the trap
1506         and is waiting for the exception lock (or waiting
1507         for the Mach exception thread to tell it how bad
1508         things are) or is about to execute a conditional
1509         trap.
1510         Regardless of which case applies, we want the
1511         other thread to take (or finish taking) the
1512         trap, and we don't want it to consider its
1513         current allocptr to be valid.
1514         The difference between this case (suspend other
1515         thread for GC) and the previous case (suspend
1516         current thread for interrupt) is solely a
1517         matter of what happens after we leave this
1518         function: some non-current thread will stay
1519         suspended until the GC finishes, then take
1520         (or start processing) the alloc trap.   The
1521         current thread will go off and do PROCESS-INTERRUPT
1522         or something, and may return from the interrupt
1523         and need to finish the allocation that got interrupted.
1524      */
1525
1526      if (alloc_disp) {
1527        *alloc_disp = disp;
1528        xpGPR(xp,allocptr) += disp;
1529        /* Leave the PC at the alloc trap.  When the interrupt
1530           handler returns, it'll decrement allocptr by disp
1531           and the trap may or may not be taken.
1532        */
1533      } else {
1534        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1535        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1536      }
1537    } else {
1538      /* If we're already past the alloc_trap, finish allocating
1539         the object. */
1540      if (allocptr_tag == fulltag_cons) {
1541        finish_allocating_cons(xp);
1542      } else {
1543        if (allocptr_tag == fulltag_misc) {
1544          finish_allocating_uvector(xp);
1545        } else {
1546          Bug(xp, "what's being allocated here ?");
1547        }
1548      }
1549      /* Whatever we finished allocating, reset allocptr/allocbase to
1550         VOID_ALLOCPTR */
1551      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
1552    }
1553    return;
1554  }
1555}
1556
1557void
1558interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1559{
1560  TCR *tcr = get_interrupt_tcr(false);
1561  if (tcr) {
1562    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
1563      tcr->interrupt_pending = 1 << fixnumshift;
1564    } else {
1565      LispObj cmain = nrs_CMAIN.vcell;
1566
1567      if ((fulltag_of(cmain) == fulltag_misc) &&
1568          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1569        /*
1570           This thread can (allegedly) take an interrupt now.
1571           It's tricky to do that if we're executing
1572           foreign code (especially Linuxthreads code, much
1573           of which isn't reentrant.)
1574           If we're unwinding the stack, we also want to defer
1575           the interrupt.
1576        */
1577        if ((tcr->valence != TCR_STATE_LISP) ||
1578            (tcr->unwinding != 0)) {
1579          tcr->interrupt_pending = 1 << fixnumshift;
1580        } else {
1581          xframe_list xframe_link;
1582          int old_valence;
1583          signed_natural disp=0;
1584          natural old_last_lisp_frame = tcr->last_lisp_frame;
1585         
1586          tcr->last_lisp_frame = xpGPR(context,Rsp);
1587          pc_luser_xp(context, tcr, &disp);
1588          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1589          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1590          handle_exception(signum, context, tcr, info, old_valence);
1591          if (disp) {
1592            xpGPR(context,allocptr) -= disp;
1593          }
1594          unlock_exception_lock_in_handler(tcr);
1595          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
1596        }
1597      }
1598    }
1599  }
1600#ifdef DARWIN
1601    DarwinSigReturn(context);
1602#endif
1603}
1604
1605
1606
1607void
1608install_signal_handler(int signo, void *handler)
1609{
1610  struct sigaction sa;
1611 
1612  sa.sa_sigaction = (void *)handler;
1613  sigfillset(&sa.sa_mask);
1614  sa.sa_flags = 
1615    0 /* SA_RESTART */
1616    | SA_SIGINFO
1617    ;
1618
1619  sigaction(signo, &sa, NULL);
1620}
1621
1622void
1623install_pmcl_exception_handlers()
1624{
1625#ifdef DARWIN
1626  extern Boolean use_mach_exception_handling;
1627#endif
1628
1629  Boolean install_signal_handlers_for_exceptions =
1630#ifdef DARWIN
1631    !use_mach_exception_handling
1632#else
1633    true
1634#endif
1635    ;
1636  if (install_signal_handlers_for_exceptions) {
1637    extern int no_sigtrap;
1638    install_signal_handler(SIGILL, (void *)signal_handler);
1639    if (no_sigtrap != 1) {
1640      install_signal_handler(SIGTRAP, (void *)signal_handler);
1641    }
1642    install_signal_handler(SIGBUS,  (void *)signal_handler);
1643    install_signal_handler(SIGSEGV, (void *)signal_handler);
1644    install_signal_handler(SIGFPE, (void *)signal_handler);
1645  }
1646 
1647  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1648                         (void *)interrupt_handler);
1649  signal(SIGPIPE, SIG_IGN);
1650}
1651
1652void
1653thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
1654{
1655  TCR *tcr = get_tcr(false);
1656  area *a;
1657  sigset_t mask;
1658 
1659  sigemptyset(&mask);
1660
1661  if (tcr) {
1662    tcr->valence = TCR_STATE_FOREIGN;
1663    a = tcr->vs_area;
1664    if (a) {
1665      a->active = a->high;
1666    }
1667    a = tcr->cs_area;
1668    if (a) {
1669      a->active = a->high;
1670    }
1671  }
1672 
1673  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1674  pthread_exit(NULL);
1675}
1676
1677void
1678thread_signal_setup()
1679{
1680  thread_suspend_signal = SIG_SUSPEND_THREAD;
1681  thread_kill_signal = SIG_KILL_THREAD;
1682
1683  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
1684  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
1685}
1686
1687
1688
1689void
1690unprotect_all_areas()
1691{
1692  protected_area_ptr p;
1693
1694  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1695    unprotect_area(p);
1696  }
1697}
1698
1699/*
1700  A binding subprim has just done "twlle limit_regno,idx_regno" and
1701  the trap's been taken.  Extend the tcr's tlb so that the index will
1702  be in bounds and the new limit will be on a page boundary, filling
1703  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1704  the tcr fields and the registers in the xp and return true if this
1705  all works, false otherwise.
1706
1707  Note that the tlb was allocated via malloc, so realloc can do some
1708  of the hard work.
1709*/
1710Boolean
1711extend_tcr_tlb(TCR *tcr, 
1712               ExceptionInformation *xp, 
1713               unsigned idx_regno)
1714{
1715  unsigned
1716    index = (unsigned) (xpGPR(xp,idx_regno)),
1717    old_limit = tcr->tlb_limit,
1718    new_limit = align_to_power_of_2(index+1,12),
1719    new_bytes = new_limit-old_limit;
1720  LispObj
1721    *old_tlb = tcr->tlb_pointer,
1722    *new_tlb = realloc(old_tlb, new_limit),
1723    *work;
1724
1725  if (new_tlb == NULL) {
1726    return false;
1727  }
1728 
1729  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1730
1731  while (new_bytes) {
1732    *work++ = no_thread_local_binding_marker;
1733    new_bytes -= sizeof(LispObj);
1734  }
1735  tcr->tlb_pointer = new_tlb;
1736  tcr->tlb_limit = new_limit;
1737  return true;
1738}
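/* Worked example (treating the index register's contents as a byte
   offset, as the realloc above does): with old_limit == 4096 and a
   trap at index 5000, new_limit = align_to_power_of_2(5001, 12) ==
   8192; the 4096 new bytes are filled with
   no_thread_local_binding_marker before the tcr fields are updated. */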
1739
1740
1741
1742void
1743exception_init()
1744{
1745  install_pmcl_exception_handlers();
1746}
1747
1748
1749
1750
1751
1752#ifdef DARWIN
1753
1754
1755#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1756#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1757
1758
1759
1760#define LISP_EXCEPTIONS_HANDLED_MASK \
1761 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1762
1763/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1764#define NUM_LISP_EXCEPTIONS_HANDLED 4
1765
1766typedef struct {
1767  int foreign_exception_port_count;
1768  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1769  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1770  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1771  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1772} MACH_foreign_exception_state;
1773
1774
1775
1776
1777/*
1778  Mach's exception mechanism works a little better than its signal
1779  mechanism (and, not incidentally, it gets along with GDB a lot
1780  better.)
1781
1782  Initially, we install an exception handler to handle each native
1783  thread's exceptions.  This process involves creating a distinguished
1784  thread which listens for kernel exception messages on a set of
1785  0 or more thread exception ports.  As threads are created, they're
1786  added to that port set; a thread's exception port is destroyed
1787  (and therefore removed from the port set) when the thread exits.
1788
1789  A few exceptions can be handled directly in the handler thread;
1790  others require that we resume the user thread (and that the
1791  exception thread resumes listening for exceptions.)  The user
1792  thread might eventually want to return to the original context
1793  (possibly modified somewhat.)
1794
1795  As it turns out, the simplest way to force the faulting user
1796  thread to handle its own exceptions is to do pretty much what
1797  signal() does: the exception handling thread sets up a sigcontext
1798  on the user thread's stack and forces the user thread to resume
1799  execution as if a signal handler had been called with that
1800  context as an argument.  We can use a distinguished UUO at a
1801  distinguished address to do something like sigreturn(); that'll
1802  have the effect of resuming the user thread's execution in
1803  the (pseudo-) signal context.
1804
1805  Since:
1806    a) we have miles of code in C and in Lisp that knows how to
1807    deal with Linux sigcontexts
1808    b) Linux sigcontexts contain a little more useful information
1809    (the DAR, DSISR, etc.) than their Darwin counterparts
1810    c) we have to create a sigcontext ourselves when calling out
1811    to the user thread: we aren't really generating a signal, just
1812    leveraging existing signal-handling code.
1813
1814  we create a Linux sigcontext struct.
1815
1816  Simple ?  Hopefully from the outside it is ...
1817
1818  We want the process of passing a thread's own context to it to
1819  appear to be atomic: in particular, we don't want the GC to suspend
1820  a thread that's had an exception but has not yet had its user-level
1821  exception handler called, and we don't want the thread's exception
1822  context to be modified by a GC while the Mach handler thread is
1823  copying it around.  On Linux (and on Jaguar), we avoid this issue
1824  because (a) the kernel sets up the user-level signal handler and
1825  (b) the signal handler blocks signals (including the signal used
1826  by the GC to suspend threads) until tcr->xframe is set up.
1827
1828  The GC and the Mach server thread therefore contend for the lock
1829  "mach_exception_lock".  The Mach server thread holds the lock
1830  when copying exception information between the kernel and the
1831  user thread; the GC holds this lock during most of its execution
1832  (delaying exception processing until it can be done without
1833  GC interference.)
1834
1835*/
1836
1837
1838#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
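/* Example: TRUNC_DOWN(sp, C_REDZONE_LEN, C_STK_ALIGN) first steps
   below the red zone and then rounds down to a C_STK_ALIGN-byte
   boundary; create_thread_context_frame() uses it below to carve the
   pseudo-sigcontext frame safely out of the interrupted thread's
   stack. */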
1839
1840void
1841fatal_mach_error(char *format, ...);
1842
1843#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
1844
1845
1846void
1847restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
1848{
1849  kern_return_t kret;
1850  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
1851
1852  /* Set the thread's FP state from the pseudosigcontext */
1853  kret = thread_set_state(thread,
1854                          ARM_FLOAT_STATE,
1855                          (thread_state_t)&(mc->__fs),
1856                          ARM_FLOAT_STATE_COUNT);
1857
1858  MACH_CHECK_ERROR("setting thread FP state", kret);
1859
1860  /* The thread'll be as good as new ... */
1861  kret = thread_set_state(thread, 
1862                          MACHINE_THREAD_STATE,
1863                          (thread_state_t)&(mc->__ss),
1864                          MACHINE_THREAD_STATE_COUNT);
1865  MACH_CHECK_ERROR("setting thread state", kret);
1866} 
1867
1868/* This code runs in the exception handling thread, in response
1869   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
1870   in response to a call to pseudo_sigreturn() from the specified
1871   user thread.)
1872   Find that context (the user thread's R3 points to it), then
1873   use that context to set the user thread's state.  When this
1874   function's caller returns, the Mach kernel will resume the
1875   user thread.
1876*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}
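
/* For orientation (a summary, not original source), the full
   pseudo-signal round trip is:
     1. the user thread faults and the kernel sends an exception
        message to the thread's exception port;
     2. catch_exception_raise() (below) calls setup_signal_frame(),
        pointing the thread at pseudo_signal_handler(), and resumes it;
     3. the handler eventually returns through the fake LR that was
        planted for it, i.e. to the pseudo_sigreturn UUO, which
        raises a second exception;
     4. catch_exception_raise() recognizes pseudo_sigreturn's address
        in the exception's code vector and calls do_pseudo_sigreturn()
        above, which restores the saved context. */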

ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread, 
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
 
  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;             /* the original read ts.__r1, the PPC
                                   stack pointer, which doesn't exist
                                   in arm_thread_state_t */
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  /* The PPC_FLOAT_STATE and PPC_EXCEPTION_STATE[64] flavors used here
     were residue from the PPC port; use the ARM flavors instead,
     matching restore_mach_thread_state() above. */
  thread_state_count = ARM_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   ARM_FLOAT_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);

  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);

  UC_MCONTEXT(pseudosigcontext) = mc;
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
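
/* The frame built above, from higher to lower addresses (a summary,
   not original source):

     original SP  <---------------------+  (saved as "backlink")
     red zone / alignment padding       |
     ExceptionInformation               |  <- returned pseudosigcontext
     mcontext                           |  <- its UC_MCONTEXT (mc)
     C parameter-save area              |
     linkage area, storing backlink ----+  <- *new_stack_top

   so the resumed thread sees a plausible C frame whose saved back
   pointer links past everything pushed here. */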

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a Linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  /*
     We've now created a sigcontext on the thread's stack.  Set things
     up so that we call the handler (with appropriate args) when the
     thread's resumed.  The argument registers follow the ARM calling
     convention (r0-r3); the original code used the PPC convention
     (r3-r6) and PPC's srr0/srr1, which don't exist in
     arm_thread_state_t.  The PPC code also cleared the MSR FE0/FE1
     bits here; ARM keeps its FP-exception enables in the FPSCR (VFP
     state), so there's nothing to mask in the general registers.
  */

  /* Start from the thread's saved register state, so that registers
     not explicitly set below keep their original values.  (The
     original code left them uninitialized.) */
  memmove(&ts, &(UC_MCONTEXT(pseudosigcontext)->__ss), sizeof(ts));
  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;

  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
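
/* A sanity check on the argument plumbing (illustrative, not original
   source): pseudo_signal_handler() below is declared as

     void pseudo_signal_handler(int signum, ExceptionInformation *context,
                                TCR *tcr, int old_valence);

   and under the ARM C calling convention those four arguments arrive
   in r0-r3, matching the ts.__r[0] ... ts.__r[3] assignments in
   setup_signal_frame() above. */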


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC
  /* This branch is unconverted residue from the PPC port, kept for
     reference: srr0/srr1 and the MSR FE0/FE1 bits have no ARM
     equivalents.  On ARM, the FP-exception enables live in the VFP
     FPSCR; see the sketch following this function. */
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.__srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and forces the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between returning from our
     interrupt handler and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.__r1,
      *next_frame = tos->backlink;

    if (tos == (next_frame - 1)) {
      ts.__srr0 = tos->savelr;
      ts.__r1 = (LispObj) next_frame;
    } else {
      ts.__srr0 += 4;
    }
  }
  thread_set_state(thread, 
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
                   PPC_THREAD_STATE_COUNT
#endif
                   );
#else  /* ARM */
  /* Minimal ARM version: just resume past the 4-byte UUO in
     enable_fp_exceptions()/disable_fp_exceptions().  Actually
     toggling the FPSCR enable bits would require rewriting the
     thread's VFP state (see the sketch below); the phony-lisp-frame
     hack in the PPC branch worked around pthread_kill() behavior
     specific to that port and isn't reproduced here. */
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count = MACHINE_THREAD_STATE_COUNT;

  thread_get_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   &thread_state_count);
  ts.__pc += 4;
  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#endif
  return 0;
}
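
/* A sketch of what a real FPSCR-based implementation might look like,
   guarded out because the flavor name, the field name, and the enable
   mask are assumptions, not taken from this source.  FPSCR bits 8-12
   (IOE/DZE/OFE/UFE/IXE) are the ARM FP exception-enable bits; the
   UUO-skipping logic above would still be needed. */
#if 0
static int
arm_set_fpscr_exception_enables(mach_port_t thread, Boolean enabled)
{
  arm_vfp_state_t fs;           /* hypothetical use of the VFP flavor */
  mach_msg_type_number_t count = ARM_VFP_STATE_COUNT;

  thread_get_state(thread, ARM_VFP_STATE, (thread_state_t)&fs, &count);
  if (enabled) {
    fs.__fpscr |= 0x1f00;       /* set IOE|DZE|OFE|UFE|IXE */
  } else {
    fs.__fpscr &= ~0x1f00;      /* clear all five enable bits */
  }
  thread_set_state(thread, ARM_VFP_STATE,
                   (thread_state_t)&fs, ARM_VFP_STATE_COUNT);
  return 0;
}
#endif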

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)  Here, the home-grown
  openmcl_exc_server() below plays exc_server()'s role.

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task, 
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  /* UUOs arrive as "undefined instruction" exceptions on ARM;
     EXC_ARM_UNDEFINED replaces the PPC port's EXC_PPC_UNIPL_INST
     here. */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      kret = do_pseudo_sigreturn(thread, tcr);
#if 0
      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
#endif
    } else if (code1 == (int)enable_fp_exceptions) {
      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else {
      kret = thread_set_fp_exceptions_enabled(thread, false);
    }
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;                  /* any non-KERN_SUCCESS value */
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_BREAKPOINT:        /* the PPC port checked for
                                   EXC_SOFTWARE with code EXC_PPC_TRAP;
                                   ARM breakpoint traps arrive as
                                   EXC_BREAKPOINT */
      signum = SIGTRAP;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif
    } else {
      kret = 17;                /* unhandled; replying with failure lets
                                   the kernel try task/host handlers */
    }
  }

  return kret;
}



typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
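
/* Background for the magic numbers below (a summary, not original
   source): in the standard Mach exc interface, msgh_id 2401 is
   exception_raise, 2402 is exception_raise_state, and 2403 is
   exception_raise_state_identity; a MIG reply conventionally uses the
   request's msgh_id + 100.  Since tcr_establish_exception_port()
   requests EXCEPTION_DEFAULT behavior, the kernel only ever sends
   2401 here. */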

boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */

  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}



mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc, 
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.

*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs  = 
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;  /* any errors from thread_set_exception_ports()
                           are ignored */
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}
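
/* How a port whose name is the TCR's address might be allocated (a
   sketch, under the assumption that this happens at thread-creation
   time elsewhere in these sources; mach_port_allocate_name() is the
   standard call for requesting a specific port name):

     kern_return_t kret =
       mach_port_allocate_name(mach_task_self(),
                               MACH_PORT_RIGHT_RECEIVE,
                               (mach_port_name_t)((natural)tcr));

   This is what lets TCR_TO_EXCEPTION_PORT()/TCR_FROM_EXCEPTION_PORT()
   map between the two with simple conversions. */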

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = 
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}


Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}
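
/* Why the thread_abort_safely() dance above (a summary, not original
   source): if the target is blocked in a system call, thread_suspend()
   leaves it suspended inside the kernel, where its user-level register
   state isn't yet meaningful.  thread_abort_safely() backs the thread
   out to a restartable point so that thread_get_state() returns a
   coherent user-mode context; when the abort can't be done safely, we
   resume and retry the whole sequence. */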

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];

  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift; /* lisp fixnum 1 */
    }
    thread_resume(mach_thread);
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
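
/* About the info.suspend_count == 1 test above (an interpretation,
   not original source): a count of exactly 1 means this function's
   thread_suspend() is the only suspension in effect.  If someone
   else (e.g. the GC) also has the thread suspended, its state can't
   safely be rewritten here, so we just record interrupt_pending and
   let the thread notice the interrupt later. */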

#endif