source: branches/arm/lisp-kernel/arm-exceptions.c @ 14117

Last change on this file since 14117 was 14117, checked in by gb, 9 years ago

Scheme for stack-overflow recovery (e.g., for re-protecting guard pages
after the stack is unwound past the point of overflow.)

A new TCR field, a new UUO, use of that UUO in nthrow subprims, handler
for the UUO.

File size: 68.1 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
50/* Some relatively recent kernels support this interface.
51   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
53*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
/* Ask the kernel (via prctl) for precise FP exception reporting.
   If the prctl is unsupported (older kernels), the comment above
   says we assume exceptions are already enabled and precise. */
void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

/* Turn FP exception reporting back off via the same prctl. */
void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
/* VM page size and its base-2 log.  Statically initialized to 4KB;
   presumably matches the platform's actual page size — confirm if
   porting to a target with larger pages. */
int
page_size = 4096;

int
log2_page_size = 12;
98
99
100
101
102
/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase, so the instruction that adjusted
       allocptr is 3 words back. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      /* allocptr was decremented by a register operand. */
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }
   
    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      /* allocptr was decremented by a small (low-byte) immediate. */
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      /* General ARM immediate: 8-bit value rotated right by twice
         the 4-bit rotate field (bits 8..11). */
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        /* ...possibly combined with a preceding low-byte subtract. */
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) == xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
/* Try to make BYTES_NEEDED bytes available at the allocation pointer,
   possibly running the (E)GC and/or growing the dynamic heap.  On
   success, leave allocptr displaced by DISP_FROM_ALLOCPTR (so it
   carries the object's tag) and return true; return false if the
   memory can't be obtained. */
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }
 
  return false;
}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
269
270void
271lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
272{
273  /* Couldn't allocate the object.  If it's smaller than some arbitrary
274     size (say 128K bytes), signal a "chronically out-of-memory" condition;
275     else signal a "allocation request failed" condition.
276  */
277  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
278  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
279}
280
/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    /* Walk over the freshly allocated block one dnode (cons) at a
       time, building the list from its tail: each cell links to the
       previously built cell (or NIL) and holds the initial element
       from arg_y. */
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347    /* else fall thru */
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
/* Counters recording GC requests that arrived while GC was inhibited
   (see gc_like_from_xp and handle_gc_trap). */
natural gc_deferred = 0, full_gc_deferred = 0;

/* Per-thread callback used by GC_TRAP_FUNCTION_FLASH_FREEZE via
   gc_like_from_xp: no per-thread work is needed, so it just
   reports success. */
signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}
368
/* Dispatch a GC-related service requested by lisp code via a gc
   trap.  The selector arrives in imm0 and an optional argument in
   imm1; results are returned in imm0 or arg_z as appropriate.
   Always "handles" the trap: advances the PC past it and returns
   true. */
Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable/disable the EGC; return the previous state (T or NIL)
       in arg_z. */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set the three generation thresholds from arg_x/arg_y/arg_z;
       return T. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      /* Round the requested threshold up to a whole number of heap
         segments. */
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* Make everything currently allocated "static": run the no-op
       flash_freeze through the stop-the-world machinery, then extend
       the tenured area's static dnodes to cover the (page-aligned)
       active region. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* All remaining selectors actually run a GC. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    /* Disable the EGC around a full GC; re-enabled below. */
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      /* Selector bits above GC request purify/impurify/
         save-application work in addition to the GC itself. */
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;
       
        /* Record the toplevel function, write the image, and exit
           on success; saving is fatal if it fails. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        /* Like FLASH_FREEZE, but after the full (compacting) GC. */
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  adjust_exception_pc(xp,4);
  return true;
}
503
504
505
/* Report a soft stack overflow to lisp; REG identifies the stack
   register (Rsp or vsp) whose stack overflowed. */
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
 
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with a by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;
 
  /* Only lower the limit if at least 16KB of protected area would
     remain; either way, (re)protect the guard region. */
  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}
531
/* Re-establish the soft protection limit for the stack named by
   STKREG (Rsp: control stack, vsp: value stack) after an overflow
   has been handled.  Note: 4096/16384 here presumably track
   page_size — confirm before changing either. */
void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    /* Keep at least 16KB between the soft and hard limits. */
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}
552
/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  /* Record the thread's allocation pointer, then rewind the value
     stack to what the topmost catch frame saved, and restart lisp. */
  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}
567
/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Round both the current frontier and the request up to the
     thread's allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        /* Couldn't grow by that much; halve the extra headroom and
           retry, giving up once it drops below 4MB. */
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  /* Hand the segment to the thread: the new frontier becomes both
     the area's active pointer and the thread's allocptr; the old
     frontier becomes its allocbase. */
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
610
611 
612void
613update_area_active (area **aptr, BytePtr value)
614{
615  area *a = *aptr;
616  for (; a; a = a->older) {
617    if ((a->low <= value) && (a->high >= value)) break;
618  };
619  if (a == NULL) Bug(NULL, "Can't find active area");
620  a->active = value;
621  *aptr = a;
622
623  for (a = a->younger; a; a = a->younger) {
624    a->active = a->high;
625  }
626}
627
628LispObj *
629tcr_frame_ptr(TCR *tcr)
630{
631  ExceptionInformation *xp;
632  LispObj *bp = NULL;
633
634  if (tcr->pending_exception_context)
635    xp = tcr->pending_exception_context;
636  else {
637    xp = tcr->suspend_context;
638  }
639  if (xp) {
640    bp = (LispObj *) xpGPR(xp, Rsp);
641  }
642  return bp;
643}
644
/* Bring TCR into a consistent state for the GC: make its stack
   areas' active pointers agree with the (suspended) thread's actual
   stack pointers, and fold any partially consumed allocation
   segment back into bytes_allocated.  XP is the thread's exception
   or suspend context, or NULL if the thread is in a foreign
   function call; IS_OTHER_TCR is true for threads other than the
   one driving the GC. */
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      /* Only an untagged allocptr describes a consumable segment. */
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}
677
/* The TCR of the thread currently driving a GC-like operation,
   or NULL when none is in progress. */
TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC is inhibited: negate a positive count to record that a GC
       was wanted while inhibited, note the deferral, and bail. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Choose a context describing where each other thread stopped,
     and normalize it too. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }
   


  result = fun(tcr, param);

  /* Clear every thread's gc_context before letting the world run. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
743
744
745
746/* Returns #bytes freed by invoking GC */
747
748signed_natural
749gc_from_tcr(TCR *tcr, signed_natural param)
750{
751  area *a;
752  BytePtr oldfree, newfree;
753  BytePtr oldend, newend;
754
755  a = active_dynamic_area;
756  oldend = a->high;
757  oldfree = a->active;
758  gc(tcr, param);
759  newfree = a->active;
760  newend = a->high;
761#if 0
762  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
763#endif
764  return ((oldfree-newfree)+(newend-oldend));
765}
766
767signed_natural
768gc_from_xp(ExceptionInformation *xp, signed_natural param)
769{
770  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
771
772  freeGCptrs();
773  return status;
774}
775
/* Run purify under the stop-the-world machinery; returns what
   purify returns. */
signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

/* Run impurify under the stop-the-world machinery; returns what
   impurify returns. */
signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
787
788
789
790
791
792
/* Write-fault handlers, indexed by the protected area's "why" code:
   one spurious-fault entry, then soft (guard page) and hard overflow
   handlers for the various stacks. */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
803
804
805Boolean
806is_write_fault(ExceptionInformation *xp, siginfo_t *info)
807{
808#ifdef LINUX
809  /* Based on experiments with a small sample size; need to R TFM. */
810  return ((xp->uc_mcontext.trap_no == 0xe) &&
811          ((xp->uc_mcontext.error_code & 0xfffffff7) == 0x817));
812#endif
813}
814
/* Figure out what a SIGSEGV/SIGBUS means: a probe of the thread's
   "safe reference" address, a fault inside touch_page(), a write
   into one of our protected areas (stack guard pages or the
   read-only section), or a genuine memory fault to be reported to
   lisp via the CMAIN callback.  Returns true if handled. */
Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  /* Prefer the siginfo fault address when available. */
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  /* An expected fault probing tcr->safe_ref_address: skip the
     faulting instruction and report 0 in imm0. */
  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  /* A fault inside touch_page(): divert to its failure exit. */
  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      /* Dispatch on why the area was protected (guard page, etc.). */
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      /* Writes into the read-only section just unprotect the page. */
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  /* Unrecognized fault: report it to lisp if we faulted in lisp
     code and the CMAIN callback is installed. */
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;
   
    if ((fulltag_of(cmain) == fulltag_misc) &&
      (header_subtag(header_of(cmain)) == subtag_macptr)) {
     
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}
873
874
875
876
877
/* Hard stack overflow: there's no guard space left to recover into,
   so restart the lisp process from its topmost catch frame. */
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
887
888extern area*
889allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
890
891extern area*
892allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
893
894
895
896
897
898
899Boolean
900lisp_frame_p(lisp_frame *spPtr)
901{
902  return (spPtr->marker == lisp_frame_marker);
903}
904
905
/* Diagnostic counter; not updated anywhere in this file. */
int ffcall_overflow_count = 0;

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

/* Soft overflow of the value stack: unprotect the guard pages,
   record that the vstack overflowed (so the guard can be
   re-protected after unwinding; see try_stack_overflow_recovery),
   and signal the lisp stack-overflow condition. */
OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  tcr->stack_overflowed |= STACK_OVERFLOW_VSTACK;
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}
927
928
929
OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  TCR *tcr = get_tcr(true);

  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
      */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  /* Control stack: same recovery pattern as the vstack case. */
  unprotect_area(prot_area);
  tcr->stack_overflowed |= STACK_OVERFLOW_CSTACK;
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}
947
/* A write fault in a protected area for which we have no recovery
   policy; decline to handle it. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}




     




/* FP exceptions aren't decoded/handled on this platform yet;
   always decline. */
Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}


/* Placeholder for emulating instructions the hardware lacks;
   nothing is emulated yet, so always decline. */
Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{

  return false;
}
981
982Boolean
983handle_exception(int xnum, 
984                 ExceptionInformation *xp, 
985                 TCR *tcr, 
986                 siginfo_t *info,
987                 int old_valence)
988{
989  pc program_counter;
990  opcode instruction = 0;
991
992  if (old_valence != TCR_STATE_LISP) {
993    return false;
994  }
995
996  program_counter = xpPC(xp);
997 
998  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
999    instruction = *program_counter;
1000  }
1001
1002  if (IS_ALLOC_TRAP(instruction)) {
1003    return handle_alloc_trap(xp, tcr);
1004  } else if ((xnum == SIGSEGV) ||
1005             (xnum == SIGBUS)) {
1006    return handle_protection_violation(xp, info, tcr, old_valence);
1007  } else if (xnum == SIGFPE) {
1008    return handle_sigfpe(xp, tcr);
1009  } else if ((xnum == SIGILL)) {
1010    if (IS_GC_TRAP(instruction)) {
1011      return handle_gc_trap(xp, tcr);
1012    } else if (IS_UUO(instruction)) {
1013      return handle_uuo(xp, info, instruction);
1014    } else {
1015      return handle_unimplemented_instruction(xp,instruction,tcr);
1016    }
1017  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1018    tcr->interrupt_pending = 0;
1019    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
1020    return true;
1021  }
1022
1023  return false;
1024}
1025
/* Advance the exception PC by DELTA bytes.  The PC is held as a
   pointer to 32-bit opcodes, so pointer arithmetic scales by 4;
   convert the byte count to an instruction count first. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1031
1032
1033/*
1034  This wants to scan backwards until "where" points to an instruction
1035   whose major opcode is either 63 (double-float) or 59 (single-float)
1036*/
1037
1038OSStatus
1039handle_fpux_binop(ExceptionInformation *xp, pc where)
1040{
1041  OSStatus err = -1;
1042  opcode *there = (opcode *) where, instr, errnum = 0;
1043  return err;
1044}
1045
1046Boolean
1047try_stack_overflow_recovery(ExceptionInformation *xp, TCR *tcr)
1048{
1049  area *a;
1050  protected_area_ptr soft;
1051
1052  if ((tcr->stack_overflowed) & STACK_OVERFLOW_CSTACK) {
1053    a = tcr->cs_area;
1054    soft = a->softprot;
1055    if (((BytePtr)xpGPR(xp,Rsp)-STACK_RECOVERY_MARGIN) >
1056        (soft->end)) {
1057      protect_area(soft);
1058      tcr->stack_overflowed &= ~STACK_OVERFLOW_CSTACK;
1059    }
1060  }
1061
1062  if ((tcr->stack_overflowed) & STACK_OVERFLOW_VSTACK) {
1063    a = tcr->vs_area;
1064    soft = a->softprot;
1065    if (((BytePtr)xpGPR(xp,vsp)-STACK_RECOVERY_MARGIN) >
1066        (soft->end)) {
1067      protect_area(soft);
1068      tcr->stack_overflowed &= ~STACK_OVERFLOW_VSTACK;
1069    }
1070  }
1071}
1072
/* Decode and handle a UUO (an illegal instruction used by lisp code
   to request kernel services and signal errors).  On success the PC
   is advanced by "bump" bytes (usually 4, past the UUO).  Returns
   true iff the UUO was recognized and handled. */
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo) 
{
  unsigned 
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      /* Thread-control services; the target thread's TCR is in arg_z. */
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      /* Grow the TCR's TLB to cover the index in the UUO's A field. */
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;

    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      /* Breakpoint: advance past the UUO first, then enter the
         kernel debugger. */
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      /* Deferred process interrupt. */
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, &bump);
      handled = true;
      break;
    case 6:
      /* Issued by nthrow subprims: maybe re-protect stack guard
         pages after unwinding past an overflow. */
      try_stack_overflow_recovery(xp,tcr);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;


  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    /* Error-signalling UUOs all funnel through handle_error. */
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }
 
  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}
1191
/* If LISP_FUNCTION is a function object, return its code vector when
   WHERE points into that code vector, else return 0.
   Used to report (function . offset) positions to lisp callbacks. */
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    /* Second node of a function object is its code vector. */
    code_vector = deref(lisp_function, 2);
    /* Element count is in 32-bit words; << 2 converts to bytes. */
    size = header_element_count(header_of(code_vector)) << 2;
    /* NOTE(review): the lower bound is untagged but the upper bound is
       the tagged pointer plus size; presumably intentional (code data
       starts past the header), but confirm the boundary cases. */
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
1208
/* Thin wrapper around callback_to_lisp; exists to give trap handlers a
   name that matches their intent.  Arguments are forwarded unchanged. */
Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
}
1214
/* Invoke the lisp callback designated by CALLBACK_MACPTR (a macptr
   wrapping a C-callable entry point), passing the exception context,
   two arguments, and the (function-register, offset) pair locating the
   faulting PC.  The exception lock is released around the call and
   re-acquired afterwards.  The callback's return value (a PC bump in
   bytes) is stored through bumpP when bumpP is non-NULL.
   Always returns true. */
Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = fn,  codevector, offset;
  pc where = xpPC(xp);
  int delta;

  /* Try to locate the PC in fn's code vector, then nfn's; if neither
     contains it, report register 0 and the raw PC. */
  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    /* Offset of the PC from the (tagged) start of the code vector. */
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  /* Drop the exception lock while lisp runs: the callback may cons,
     signal, or suspend, and must not hold the lock while doing so. */
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}
1270
1271area *
1272allocate_no_stack (natural size)
1273{
1274#ifdef SUPPORT_PRAGMA_UNUSED
1275#pragma unused(size)
1276#endif
1277
1278  return (area *) NULL;
1279}
1280
1281
1282
1283
1284
1285
1286/* callback to (symbol-value cmain) if it is a macptr,
1287   otherwise report cause and function name to console.
1288   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* NOTE(review): this body looks like a stub or a truncated merge:
     'cmain', 'tcr', 'the_trap', 'where' and 'info' are all unused, and
     control falls off the end of a non-void function (undefined
     behavior if any caller uses the OSStatus result).  Confirm against
     the canonical version of this file before relying on it. */
}
1296
1297
1298
1299
1300void non_fatal_error( char *msg )
1301{
1302  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1303  fflush( dbgout );
1304}
1305
1306
1307
1308Boolean
1309handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
1310{
1311  LispObj   errdisp = nrs_ERRDISP.vcell;
1312
1313  if ((fulltag_of(errdisp) == fulltag_misc) &&
1314      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1315    /* errdisp is a macptr, we can call back to lisp */
1316    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
1317    }
1318
1319  return false;
1320}
1321               
1322
1323/*
1324   Current thread has all signals masked.  Before unmasking them,
1325   make it appear that the current thread has been suspended.
1326   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1328*/
1329int
1330prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1331{
1332  int old_valence = tcr->valence;
1333
1334  tcr->pending_exception_context = context;
1335  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1336
1337  ALLOW_EXCEPTIONS(context);
1338  return old_valence;
1339} 
1340
/* Seize the global exception lock, then push CONTEXT onto the TCR's
   xframe list.  Once the context is reachable via tcr->xframe it no
   longer needs to live in pending_exception_context, and the thread
   can be marked as running foreign code.  The statement order matters:
   the context must be linked before the pending slot is cleared. */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1354
/* Inverse of wait_for_exception_lock_in_handler: pop the current
   context off the xframe list back into pending_exception_context,
   mark the thread as returning from an exception, and release the
   global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1363
1364/*
1365   If an interrupt is pending on exception exit, try to ensure
1366   that the thread sees it as soon as it's able to run.
1367*/
1368void
1369raise_pending_interrupt(TCR *tcr)
1370{
1371  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1372    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1373  }
1374}
1375
1376void
1377exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
1378{
1379  sigset_t mask;
1380  sigfillset(&mask);
1381 
1382  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1383  tcr->valence = old_valence;
1384  tcr->pending_exception_context = NULL;
1385  tcr->last_lisp_frame = old_last_lisp_frame;
1386}
1387
1388
/* Main synchronous-exception entry point.  When signal-based handling
   is in use, establishes the TCR/valence/xframe protocol around
   handle_exception; when Mach exception handling is in use, TCR,
   old_valence and old_last_lisp_frame arrive precomputed from the
   exception thread and the entry/exit bookkeeping is skipped.
   Unhandled exceptions drop into the kernel debugger. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
   
    tcr = (TCR *) get_interrupt_tcr(false);
 
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */
   
    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* A suspend request arrived while we couldn't honor it; re-raise the
     suspend signal now so it's serviced before the exception proper. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}
1437
1438#ifdef USE_SIGALTSTACK
/* Copy the signal context and siginfo from the alternate signal stack
   onto the interrupted thread's own stack (just below its Rsp), then
   transfer control to HANDLER on that stack via the assembly helper
   call_handler_on_main_stack.  The copied context's uc_stack/uc_link
   are cleared so nothing refers back to the altstack.
   NOTE(review): target_sp is decremented by raw struct sizes with no
   explicit alignment adjustment — presumably the sizes keep Rsp
   suitably aligned for ARM; confirm. */
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);
 
  BytePtr target_sp= (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}
1459 
/* SIGSEGV/SIGILL handler that runs on the alternate signal stack.
   For a fault in the control stack's guard region, unprotect the guard
   pages first (so the real handler has stack to run on); a fault below
   the hard limit is unrecoverable.  Then re-invoke signal_handler on
   the thread's main stack. */
void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr=get_tcr(true);
  if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;
   
    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        /* Fault was in the guard region: make it writable again. */
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
1479#endif
1480
1481/*
1482  If it looks like we're in the middle of an atomic operation, make
1483  it seem as if that operation is either complete or hasn't started
1484  yet.
1485
1486  The cases handled include:
1487
1488  a) storing into a newly-allocated lisp frame on the stack.
1489  b) marking a newly-allocated TSP frame as containing "raw" data.
1490  c) consing: the GC has its own ideas about how this should be
1491     handled, but other callers would be best advised to back
1492     up or move forward, according to whether we're in the middle
1493     of allocating a cons cell or allocating a uvector.
1494  d) a STMW to the vsp
1495  e) EGC write-barrier subprims.
1496*/
1497
1498extern opcode
1499  egc_write_barrier_start,
1500  egc_write_barrier_end, 
1501  egc_store_node_conditional, 
1502  egc_store_node_conditional_test,
1503  egc_set_hash_key,
1504  egc_gvset,
1505  egc_rplaca,
1506  egc_rplacd,
1507  egc_set_hash_key_conditional,
1508  egc_set_hash_key_conditional_test;
1509
1510
1511extern opcode ffcall_return_window, ffcall_return_window_end;
1512
/* Make an interrupted thread's context XP look as if any atomic
   operation in progress has either completed or not yet begun.
   Two families of interruption points are handled:
   - the EGC write-barrier subprims (finish the store/memoization by
     hand, then "return" by setting PC to LR);
   - the middle of consing (back allocation out, or finish it,
     depending on how far it got and on whether the caller supplied
     alloc_disp).
   Called with XP for a thread that is suspended or taking an
   interrupt; TCR is that thread's TCR. */
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;  /* NOTE(review): unused here */
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp)); /* NOTE(review): unused here */
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);
 


  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    /* Interrupted inside a write-barrier subprim: emulate the rest of
       the subprim (store + refbits memoization) and return to LR. */
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* Conditional store not attempted or failed: nothing to do. */
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      /* Store succeeded: report T as the subprim's value. */
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      /* Memoize old->new references: set the refbit for the stored-to
         dnode (and for the root object, for hash tables) when the
         target is in oldspace and the value is younger. */
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    /* The subprim is "done": resume at its return address. */
    xpPC(xp) = xpLR(xp);
    return;
  }


 
  if (allocptr_tag != tag_fixnum) {
    /* allocptr is tagged: the thread is somewhere in an allocation
       sequence. */
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}
1643
/* Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If interrupts are
   disabled (negative interrupt level), or the thread isn't running
   lisp code, or it's unwinding the stack, just record the interrupt as
   pending in the TCR; otherwise clean up any in-progress atomic
   operation (pc_luser_xp) and deliver the interrupt to lisp via
   handle_exception under the usual exception-lock protocol. */
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: remember it for later. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;
         
          tcr->last_lisp_frame = xpGPR(context,Rsp);
          /* Back out of / complete any atomic operation; disp records
             an allocptr adjustment that must be undone afterwards. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
1691
1692#ifdef USE_SIGALTSTACK
/* Altstack trampoline: bounce the process-interrupt signal back onto
   the thread's main stack and run interrupt_handler there. */
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
}
1698#endif
1699
1700
1701void
1702install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
1703{
1704  struct sigaction sa;
1705 
1706  sa.sa_sigaction = (void *)handler;
1707  sigfillset(&sa.sa_mask);
1708  sa.sa_flags = 
1709    0 /* SA_RESTART */
1710    | SA_SIGINFO
1711#ifdef USE_SIGALTSTACK
1712    | (on_altstack ? SA_ONSTACK : 0)
1713#endif
1714    ;
1715
1716  sigaction(signo, &sa, NULL);
1717}
1718
1719
/* Install the kernel's POSIX signal handlers.  SIGILL/SIGSEGV are only
   handled via signals when Mach exception handling isn't in use (i.e.
   everywhere but Darwin-with-Mach); SIGSEGV runs on the alternate
   stack so stack overflows can be serviced.  The process-interrupt
   signal is always handled, and SIGPIPE is ignored (broken pipes are
   reported as write errors instead). */
void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler),true, true);

  }
 
  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}
1744
1745#ifdef USE_SIGALTSTACK
1746void
1747setup_sigaltstack(area *a)
1748{
1749  stack_t stack;
1750#if 0
1751  stack.ss_sp = a->low;
1752  a->low += SIGSTKSZ*8;
1753#endif
1754  stack.ss_size = SIGSTKSZ*8;
1755  stack.ss_flags = 0;
1756  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
1757  if (sigaltstack(&stack, NULL) != 0) {
1758    perror("sigaltstack");
1759    exit(-1);
1760  }
1761}
1762#endif
1763
1764void
1765thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
1766{
1767  TCR *tcr = get_tcr(false);
1768  area *a;
1769  sigset_t mask;
1770 
1771  sigemptyset(&mask);
1772
1773  if (tcr) {
1774    tcr->valence = TCR_STATE_FOREIGN;
1775    a = tcr->vs_area;
1776    if (a) {
1777      a->active = a->high;
1778    }
1779    a = tcr->cs_area;
1780    if (a) {
1781      a->active = a->high;
1782    }
1783  }
1784 
1785  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1786  pthread_exit(NULL);
1787}
1788
1789#ifdef USE_SIGALTSTACK
/* Altstack trampoline: run thread_kill_handler on the thread's main
   stack. */
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
1795#endif
1796
/* Choose the signals used for thread suspension and thread kill and
   install their handlers.  Called once during startup. */
void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}
1806
1807
1808
/* Remove memory protection from every registered protected area and
   empty the global list (the list head is cleared before iterating).
   NOTE(review): p->next is read after unprotect_area(p); this is only
   safe if unprotect_area doesn't free or unlink p — confirm. */
void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}
1818
1819/*
1820  A binding subprim has just done "twlle limit_regno,idx_regno" and
1821  the trap's been taken.  Extend the tcr's tlb so that the index will
1822  be in bounds and the new limit will be on a page boundary, filling
1823  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1824  the tcr fields and the registers in the xp and return true if this
1825  all works, false otherwise.
1826
1827  Note that the tlb was allocated via malloc, so realloc can do some
1828  of the hard work.
1829*/
1830Boolean
1831extend_tcr_tlb(TCR *tcr, 
1832               ExceptionInformation *xp, 
1833               unsigned idx_regno)
1834{
1835  unsigned
1836    index = (unsigned) (xpGPR(xp,idx_regno)),
1837    old_limit = tcr->tlb_limit,
1838    new_limit = align_to_power_of_2(index+1,12),
1839    new_bytes = new_limit-old_limit;
1840  LispObj
1841    *old_tlb = tcr->tlb_pointer,
1842    *new_tlb = realloc(old_tlb, new_limit),
1843    *work;
1844
1845  if (new_tlb == NULL) {
1846    return false;
1847  }
1848 
1849  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1850
1851  while (new_bytes) {
1852    *work++ = no_thread_local_binding_marker;
1853    new_bytes -= sizeof(LispObj);
1854  }
1855  tcr->tlb_pointer = new_tlb;
1856  tcr->tlb_limit = new_limit;
1857  return true;
1858}
1859
1860
1861
/* One-time exception-subsystem initialization: install the POSIX
   signal handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
1867
1868
1869
1870
1871
1872#ifdef DARWIN
1873
1874
1875#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1876#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1877
1878
1879
1880#define LISP_EXCEPTIONS_HANDLED_MASK \
1881 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1882
1883/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1884#define NUM_LISP_EXCEPTIONS_HANDLED 4
1885
1886typedef struct {
1887  int foreign_exception_port_count;
1888  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1889  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1890  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1891  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1892} MACH_foreign_exception_state;
1893
1894
1895
1896
1897/*
1898  Mach's exception mechanism works a little better than its signal
1899  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
1901
1902  Initially, we install an exception handler to handle each native
1903  thread's exceptions.  This process involves creating a distinguished
1904  thread which listens for kernel exception messages on a set of
1905  0 or more thread exception ports.  As threads are created, they're
1906  added to that port set; a thread's exception port is destroyed
1907  (and therefore removed from the port set) when the thread exits.
1908
1909  A few exceptions can be handled directly in the handler thread;
1910  others require that we resume the user thread (and that the
1911  exception thread resumes listening for exceptions.)  The user
1912  thread might eventually want to return to the original context
1913  (possibly modified somewhat.)
1914
1915  As it turns out, the simplest way to force the faulting user
1916  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
1918  on the user thread's stack and forces the user thread to resume
1919  execution as if a signal handler had been called with that
1920  context as an argument.  We can use a distinguished UUO at a
1921  distinguished address to do something like sigreturn(); that'll
1922  have the effect of resuming the user thread's execution in
1923  the (pseudo-) signal context.
1924
1925  Since:
1926    a) we have miles of code in C and in Lisp that knows how to
1927    deal with Linux sigcontexts
1928    b) Linux sigcontexts contain a little more useful information
1929    (the DAR, DSISR, etc.) than their Darwin counterparts
1930    c) we have to create a sigcontext ourselves when calling out
1931    to the user thread: we aren't really generating a signal, just
1932    leveraging existing signal-handling code.
1933
1934  we create a Linux sigcontext struct.
1935
1936  Simple ?  Hopefully from the outside it is ...
1937
1938  We want the process of passing a thread's own context to it to
1939  appear to be atomic: in particular, we don't want the GC to suspend
1940  a thread that's had an exception but has not yet had its user-level
1941  exception handler called, and we don't want the thread's exception
1942  context to be modified by a GC while the Mach handler thread is
1943  copying it around.  On Linux (and on Jaguar), we avoid this issue
1944  because (a) the kernel sets up the user-level signal handler and
1945  (b) the signal handler blocks signals (including the signal used
1946  by the GC to suspend threads) until tcr->xframe is set up.
1947
1948  The GC and the Mach server thread therefore contend for the lock
1949  "mach_exception_lock".  The Mach server thread holds the lock
1950  when copying exception information between the kernel and the
1951  user thread; the GC holds this lock during most of its execution
1952  (delaying exception processing until it can be done without
1953  GC interference.)
1954
1955*/
1956
1957
1958#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
1959
1960void
1961fatal_mach_error(char *format, ...);
1962
1963#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
1964
1965
/* Runs in the Mach exception-handling thread: reload THREAD's VFP and
   general-purpose register state from the saved pseudosigcontext, so
   that when the kernel resumes the thread it continues in that
   context.  Fatal on any Mach error. */
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread, 
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
} 
1987
1988/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread).
1992   Find that context (the user thread's R3 points to it), then
1993   use that context to set the user thread's state.  When this
1994   function's caller returns, the Mach kernel will resume the
1995   user thread.
1996*/
1997
/* Runs in the Mach exception thread.  Complete a pseudo_sigreturn for
   TCR's thread: restore the thread's register state from the pending
   exception context saved earlier, mark the thread as back in lisp,
   and re-raise any deferred interrupt.  It's a bug to get here with no
   pending context. */
kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
} 
2020
/* Runs in the Mach exception thread.  Capture THREAD's register, VFP
   and exception state and lay it out as a (Linux-style) signal context
   on that thread's own stack, just below its current SP.  Returns the
   context; the adjusted stack top is stored through new_stack_top when
   non-NULL.  Fatal if the basic thread state can't be read.
   NOTE(review): the VFP and exception-state thread_get_state results
   are not checked, unlike the first call — confirm that's acceptable. */
ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread, 
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
 
  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;

  /* Reserve room for the context below the thread's current SP. */
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
2071
2072/*
2073  This code sets up the user thread so that it executes a "pseudo-signal
2074  handler" function when it resumes.  Create a linux sigcontext struct
2075  on the thread's stack and pass it as an argument to the pseudo-signal
2076  handler.
2077
2078  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2079  which will restore the thread's context.
2080
2081  If the handler invokes code that throws (or otherwise never sigreturn()'s
2082  to the context), that's fine.
2083
2084  Actually, check that: throw (and variants) may need to be careful and
2085  pop the tcr's xframe list until it's younger than any frame being
2086  entered.
2087*/
2088
/* Runs in the Mach exception thread.  Arrange for TCR's thread to call
   handler_address(signum, context, tcr, old_valence) when it next
   resumes: build a pseudo-sigcontext on the thread's stack, record it
   as the pending exception context, and point the thread's PC at the
   handler with LR set to pseudo_sigreturn so "returning" restores the
   saved context.  Always returns 0.
   NOTE(review): only __pc/__sp/__r[0..3]/__lr of ts are assigned
   before thread_set_state; the remaining register fields are whatever
   was on this thread's stack — confirm that's intended. */
int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
 

  /*
     It seems like we've created a  sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;


  thread_set_state(thread, 
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
2135
2136
/*
  Trampoline run on the user thread (via setup_signal_frame) for
  exceptions mapped to signals: forwards to the common signal_handler
  with a NULL siginfo and a zero final argument.
*/
void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
} 
2142
2143
/*
  Stub: enabling/disabling FP exceptions on another Mach thread is not
  implemented for this target.  Both arguments are ignored; always
  returns 0.
*/
int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}
2150
2151/*
2152  This function runs in the exception handling thread.  It's
2153  called (by this precise name) from the library function "exc_server()"
2154  when the thread's exception ports are set up.  (exc_server() is called
2155  via mach_msg_server(), which is a function that waits for and dispatches
2156  on exception messages from the Mach kernel.)
2157
2158  This checks to see if the exception was caused by a pseudo_sigreturn()
2159  UUO; if so, it arranges for the thread to have its state restored
2160  from the specified context.
2161
2162  Otherwise, it tries to map the exception to a signal number and
2163  arranges that the thread run a "pseudo signal handler" to handle
2164  the exception.
2165
2166  Some exceptions could and should be handled here directly.
2167*/
2168
2169kern_return_t
2170catch_exception_raise(mach_port_t exception_port,
2171                      mach_port_t thread,
2172                      mach_port_t task, 
2173                      exception_type_t exception,
2174                      exception_data_t code_vector,
2175                      mach_msg_type_number_t code_count)
2176{
2177  int signum = 0, code = *code_vector, code1;
2178  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2179  kern_return_t kret;
2180
2181#ifdef DEBUG_MACH_EXCEPTIONS
2182  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2183#endif
2184
2185  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2186    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2187  } 
2188  if ((exception == EXC_BAD_INSTRUCTION) &&
2189      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2190      (((code1 = code_vector[1]) == (int)pseudo_sigreturn))) {
2191    kret = do_pseudo_sigreturn(thread, tcr);
2192  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2193    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2194    kret = 17;
2195  } else {
2196    switch (exception) {
2197    case EXC_BAD_ACCESS:
2198      signum = SIGSEGV;
2199      break;
2200       
2201    case EXC_BAD_INSTRUCTION:
2202      signum = SIGILL;
2203      break;
2204     
2205      break;
2206     
2207    case EXC_ARITHMETIC:
2208      signum = SIGFPE;
2209      break;
2210
2211    default:
2212      break;
2213    }
2214    if (signum) {
2215      kret = setup_signal_frame(thread,
2216                                (void *)pseudo_signal_handler,
2217                                signum,
2218                                code,
2219                                tcr);
2220#if 0
2221      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2222#endif
2223
2224    } else {
2225      kret = 17;
2226    }
2227  }
2228
2229  return kret;
2230}
2231
2232
2233
/*
  In-memory layout of an incoming Mach exception-request message
  (presumably matching what the MIG-generated exc_server expects for
  msgh_id 2401 — see openmcl_exc_server, which rejects other ids).
  The fields between the "kernel processed data" markers are port
  descriptors filled in by the kernel.
*/
typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
2247
2248
/*
  Hand-rolled replacement for the MIG-generated exc_server(): given an
  incoming exception message "in", build the reply message "out" and
  dispatch to catch_exception_raise().  Returns TRUE when the message
  was dispatched, FALSE for an unrecognized msgh_id (reply carries
  MIG_BAD_ID in that case).  Intended to be passed to mach_msg_server().
*/
boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  /* Standard MIG reply header: echo the remote port, reply id is
     request id + 100. */
  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {    /* only exception_raise is supported */
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
2281
2282/*
2283  The initial function for an exception-handling thread.
2284*/
2285
2286void *
2287exception_handler_proc(void *arg)
2288{
2289  extern boolean_t exc_server();
2290  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2291
2292  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2293  /* Should never return. */
2294  abort();
2295}
2296
2297
2298
2299mach_port_t
2300mach_exception_port_set()
2301{
2302  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2303  kern_return_t kret; 
2304  if (__exception_port_set == MACH_PORT_NULL) {
2305    kret = mach_port_allocate(mach_task_self(),
2306                              MACH_PORT_RIGHT_PORT_SET,
2307                              &__exception_port_set);
2308    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2309    create_system_thread(0,
2310                         NULL,
2311                         exception_handler_proc, 
2312                         (void *)((natural)__exception_port_set));
2313  }
2314  return __exception_port_set;
2315}
2316
2317/*
2318  Setup a new thread to handle those exceptions specified by
2319  the mask "which".  This involves creating a special Mach
2320  message port, telling the Mach kernel to send exception
2321  messages for the calling thread to that port, and setting
2322  up a handler thread which listens for and responds to
2323  those messages.
2324
2325*/
2326
2327/*
2328  Establish the lisp thread's TCR as its exception port, and determine
2329  whether any other ports have been established by foreign code for
2330  exceptions that lisp cares about.
2331
2332  If this happens at all, it should happen on return from foreign
2333  code and on entry to lisp code via a callback.
2334
2335  This is a lot of trouble (and overhead) to support Java, or other
2336  embeddable systems that clobber their caller's thread exception ports.
2337 
2338*/
/*
  Install the lisp exception port (derived from the TCR) as "thread"'s
  exception port for all exception types lisp handles, saving whatever
  ports were previously installed into the TCR's
  MACH_foreign_exception_state.  The union of masks for saved ports
  that are neither the lisp port nor MACH_PORT_NULL is recorded in
  tcr->foreign_exception_status, so restore_foreign_exception_ports()
  can later reinstate them.  Returns the thread_swap_exception_ports()
  result.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  /* Atomically install lisp_port and retrieve the old ports/masks/
     behaviors/flavors into the fxs arrays; on return n is the number
     of entries actually written. */
  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}
2373
2374kern_return_t
2375tcr_establish_lisp_exception_port(TCR *tcr)
2376{
2377  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2378}
2379
2380/*
2381  Do this when calling out to or returning from foreign code, if
2382  any conflicting foreign exception ports were established when we
2383  last entered lisp code.
2384*/
2385kern_return_t
2386restore_foreign_exception_ports(TCR *tcr)
2387{
2388  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2389 
2390  if (m) {
2391    MACH_foreign_exception_state *fxs  = 
2392      (MACH_foreign_exception_state *) tcr->native_thread_info;
2393    int i, n = fxs->foreign_exception_port_count;
2394    exception_mask_t tm;
2395
2396    for (i = 0; i < n; i++) {
2397      if ((tm = fxs->masks[i]) & m) {
2398        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2399                                   tm,
2400                                   fxs->ports[i],
2401                                   fxs->behaviors[i],
2402                                   fxs->flavors[i]);
2403      }
2404    }
2405  }
2406}
2407                                   
2408
2409/*
2410  This assumes that a Mach port (to be used as the thread's exception port) whose
2411  "name" matches the TCR's 32-bit address has already been allocated.
2412*/
2413
/*
  Wire up Mach exception handling for the thread described by "tcr":
  give the thread's exception port (whose name is the TCR address —
  see comment above) a send right, install it as the thread's
  exception port (saving any foreign ports), and add it to the global
  exception port set serviced by the handler thread.  Returns the
  first failing kern_return_t, or KERN_SUCCESS.
*/
kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  /* The port needs a send right so the kernel can deliver to it. */
  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}
2438
2439void
2440darwin_exception_init(TCR *tcr)
2441{
2442  void tcr_monitor_exception_handling(TCR*, Boolean);
2443  kern_return_t kret;
2444  MACH_foreign_exception_state *fxs = 
2445    calloc(1, sizeof(MACH_foreign_exception_state));
2446 
2447  tcr->native_thread_info = (void *) fxs;
2448
2449  if ((kret = setup_mach_exception_handling(tcr))
2450      != KERN_SUCCESS) {
2451    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2452    terminate_lisp();
2453  }
2454}
2455
2456/*
2457  The tcr is the "name" of the corresponding thread's exception port.
2458  Destroying the port should remove it from all port sets of which it's
2459  a member (notably, the exception port set.)
2460*/
2461void
2462darwin_exception_cleanup(TCR *tcr)
2463{
2464  void *fxs = tcr->native_thread_info;
2465  extern Boolean use_mach_exception_handling;
2466
2467  if (fxs) {
2468    tcr->native_thread_info = NULL;
2469    free(fxs);
2470  }
2471  if (use_mach_exception_handling) {
2472    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2473    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2474  }
2475}
2476
2477
2478Boolean
2479suspend_mach_thread(mach_port_t mach_thread)
2480{
2481  kern_return_t status;
2482  Boolean aborted = false;
2483 
2484  do {
2485    aborted = false;
2486    status = thread_suspend(mach_thread);
2487    if (status == KERN_SUCCESS) {
2488      status = thread_abort_safely(mach_thread);
2489      if (status == KERN_SUCCESS) {
2490        aborted = true;
2491      } else {
2492        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2493        thread_resume(mach_thread);
2494      }
2495    } else {
2496      return false;
2497    }
2498  } while (! aborted);
2499  return true;
2500}
2501
2502/*
2503  Only do this if pthread_kill indicated that the pthread isn't
2504  listening to signals anymore, as can happen as soon as pthread_exit()
2505  is called on Darwin.  The thread could still call out to lisp as it
2506  is exiting, so we need another way to suspend it in this case.
2507*/
2508Boolean
2509mach_suspend_tcr(TCR *tcr)
2510{
2511  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2512  ExceptionInformation *pseudosigcontext;
2513  Boolean result = false;
2514 
2515  result = suspend_mach_thread(mach_thread);
2516  if (result) {
2517    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2518    pseudosigcontext->uc_onstack = 0;
2519    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2520    tcr->suspend_context = pseudosigcontext;
2521  }
2522  return result;
2523}
2524
/*
  Undo mach_suspend_tcr(): restore the thread's register state from
  the saved suspend context (clearing tcr->suspend_context first) and
  resume the thread.
*/
void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
 
  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}
2544
/*
  printf-style formatter for unrecoverable Mach errors: format the
  message into a fixed-size buffer and hand it to Fatal(), which does
  not return.
*/
void
fatal_mach_error(char *format, ...)
{
  char buf[512];
  va_list ap;

  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);

  Fatal("Mach error", buf);
}
2558
/*
  Trampoline run on the user thread (via setup_signal_frame) for
  cross-thread interrupts: forwards to the common interrupt_handler
  with a NULL siginfo.
*/
void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}
2564
/*
  Interrupt the lisp thread described by "target" using Mach
  primitives: suspend it, and if it is running lisp code, not
  unwinding, and interruptible (TCR_INTERRUPT_LEVEL >= 0), redirect
  it to pseudo_interrupt_handler via setup_signal_frame().  Otherwise
  just record a pending interrupt in target->interrupt_pending for
  later delivery.  The thread is resumed before returning.  Holds
  TCR_AREA_LOCK around the whole operation.  Always returns 0.
*/
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info; 
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      /* Only redirect if ours is the sole suspension — a deeper
         suspend count means someone else has the thread stopped. */
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      /* Couldn't deliver now: leave a (fixnum 1) pending-interrupt
         marker for the thread to notice later. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);
   
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
2606
2607#endif
Note: See TracBrowser for help on using the repository browser.