source: trunk/source/lisp-kernel/arm-exceptions.c @ 14754

Last change on this file since 14754 was 14754, checked in by gb, 9 years ago

In pc_luser_xp(), recognize that the alloc_trap is now immediately
preceded by a branch around it; the COMPARE_ALLOCPTR_TO_RM that we're
looking for precedes the branch. Note that the LinuxARM heap image
in the trunk currently does things this way; older images may not.

This seems to fix ticket:855 (the test case from ticket:717 runs
to completion, and other things that cons hysterically while creating
threads for no apparent reason - like SLIME - will hopefully not
be affected by this.)

/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#ifndef ANDROID
#include <fpu_control.h>
#include <linux/prctl.h>
#endif
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "threads.h"

#ifdef ANDROID
#define pthread_sigmask(how,in,out) rt_sigprocmask(how,in,out,8)
#endif

#ifdef LINUX

void
enable_fp_exceptions()
{
}

void
disable_fp_exceptions()
{
}
#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

int page_size = 4096;

int log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;
  int delta = -3;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    if (IS_BRANCH_AROUND_ALLOC_TRAP(program_counter[-1])) {
      delta = -4;
    }
    prev_instr = program_counter[delta];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[delta-1];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
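
/* For orientation, the inline allocation sequence being decoded above
   looks roughly like the sketch below.  This is illustrative only: the
   exact mnemonics and encodings are whatever the IS_... predicates in
   the arm headers match, and (per the change that introduced the
   branch) older heap images may lack the branch entirely:

       sub   allocptr, allocptr, ...     ; decrement allocptr (1-3 instrs)
       ldr   rX, [rcontext, #tcr.allocbase]
       cmp   allocptr, rX                ; "COMPARE_ALLOCPTR_TO_RM"
       bhs   L1                          ; newer images: skip trap if room
       uuo_alloc_trap                    ; taken when the segment's exhausted
   L1: ...                               ; init car/cdr or header, clear tag

   delta == -3 (or -4 when the branch is present) indexes back from the
   trap to the decrementing instruction. */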

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
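
/* In instruction terms, the loop above walks something like (a sketch;
   register numbers and offsets are whatever the IS_... predicates match):

       str   rD, [allocptr, #...]          ; IS_SET_ALLOCPTR_CAR_RD
       str   rD, [allocptr, #...]          ; IS_SET_ALLOCPTR_CDR_RD
       mov   rD, allocptr                  ; IS_SET_ALLOCPTR_RESULT_RD
       bic   allocptr, allocptr, #tagmask  ; IS_CLR_ALLOCPTR_TAG

   The loop resumes at the interrupted PC, so only the instructions that
   hadn't yet executed are emulated. */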

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
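  /* allocptr moves downward through the current segment as objects are
     allocated, so the bytes consumed since the last sync are
     (last - current). */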
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
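
/* The loop above amounts to (make-list nconses :initial-element initial):
   cells are filled from the low end of the freshly-allocated block
   upward, each new cell pointing at the previously-filled one, so the
   cell filled last (the highest one) ends up heading the list in arg_z. */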

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  natural cur_allocptr, bytes_needed = 0;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC while GC was disabled.  That failed,
         so try a full GC now. */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
  }

  adjust_exception_pc(xp,4);
  return true;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack whose pointer is in "reg" just hit its soft overflow
     limit (its guard page has already been unprotected by the caller).
     Signal an error_stack_overflow condition for that stack.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
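
/* Worked example of the sizing arithmetic above (hypothetical numbers):
   with a 64KB allocation quantum (log2_allocation_quantum == 16),
   oldlimit == 0x2001000 and need == 40000 give
     newlimit = align(0x2001000, 64K) + align(40000, 64K)
              = 0x2010000 + 0x10000 = 0x2020000.
   The new segment runs from oldlimit up to newlimit; the thread then
   allocates downward from allocptr == newlimit toward
   save_allocbase == oldlimit. */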

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0) {
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
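  /* Freed = how far the free pointer dropped, plus any growth of the
     area's high end (the heap may have been resized during the GC). */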
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

protection_handler *protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};
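
/* The table above is indexed by a protected_area's "why" field
   (e.g. kVSPsoftguard); see handle_protection_violation() and
   do_soft_stack_overflow() below. */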
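/* On ARM, bit 11 (0x800) of the fault status word delivered with the
   exception is the WnR ("write, not read") bit, hence the mask below. */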
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {

      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}

int ffcall_overflow_count = 0;

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}

Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
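  /* delta is a byte count; xpPC() is an opcode (32-bit word) pointer,
     so the shift converts bytes to words and pointer arithmetic scales
     it back up. */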
  xpPC(xp) += (delta >> 2);
}

/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}

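/* UUOs are undefined-instruction encodings that compiled lisp code uses
   as traps; they arrive here via SIGILL (see handle_exception above),
   and UUO_FORMAT() selects among the encodings handled below. */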
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info, natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info, arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = Rfn, codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}
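
/* The lisp-side callback receives (xp, arg1, arg2, fnreg, offset) --
   fnreg/offset locate the trapping PC relative to the containing
   function's code vector, when one can be found -- and returns a byte
   count by which the caller should bump the exception PC (see the use
   of "bump" in handle_uuo above). */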

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Nothing is actually handled here; return "not handled" rather than
     falling off the end of a value-returning function. */
  return -1;
}

void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
#ifndef ANDROID
  sigset_t mask;
  sigfillset(&mask);
#else
  int mask [] = {0,0};
#endif

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}

void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}

void
sigill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;

  if (IS_UUO(instr)) {
    natural psr = xpPSR(xp);
    Boolean opcode_matched_condition = false,
      flip = ((instr & (1<<28)) != 0);

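    /* Decode the UUO's condition: bits 31:29 select the base condition
       (Z, C, N, V, C&~Z, N==V, (N==V)&~Z, always) and bit 28 inverts
       it, mirroring the ARM condition-code pairs EQ/NE, CS/CC, MI/PL,
       VS/VC, HI/LS, GE/LT, GT/LE, AL.  If the condition doesn't hold,
       the UUO is a no-op and we just step over it. */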
    switch (instr >> 29) {
    case 0:
      opcode_matched_condition = ((psr & PSR_Z_MASK) != 0);
      break;
    case 1:
      opcode_matched_condition = ((psr & PSR_C_MASK) != 0);
      break;
    case 2:
      opcode_matched_condition = ((psr & PSR_N_MASK) != 0);
      break;
    case 3:
      opcode_matched_condition = ((psr & PSR_V_MASK) != 0);
      break;
    case 4:
      opcode_matched_condition = (((psr & PSR_C_MASK) != 0) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 5:
      opcode_matched_condition = (((psr & PSR_N_MASK) != 0) ==
                                  ((psr & PSR_V_MASK) != 0));
      break;
    case 6:
      opcode_matched_condition = ((((psr & PSR_N_MASK) != 0) ==
                                   ((psr & PSR_V_MASK) != 0)) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 7:
      opcode_matched_condition = true;
      flip = false;
      break;
    }
    if (flip) {
      opcode_matched_condition = !opcode_matched_condition;
    }
    if (!opcode_matched_condition) {
      adjust_exception_pc(xp,4);
      return;
    }
  }
  signal_handler(signum,info,xp, NULL, 0, 0);
}

#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);

  BytePtr target_sp = (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);

  if (signo == SIGBUS) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;
    if (((BytePtr)truncate_to_power_of_2(addr,log2_page_size)) == a->softlimit) {
      if (mmap(a->softlimit,
               page_size,
               PROT_READ|PROT_WRITE|PROT_EXEC,
               MAP_PRIVATE|MAP_ANON|MAP_FIXED,
               -1,
               0) == a->softlimit) {
        return;
      }
    }
  } else if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;

extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea = ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are), or is about to execute a conditional
         trap.)
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        Boolean ok = false;
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR + disp;
        instr = program_counter[-1];
        if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
          instr = program_counter[-2];
          if (IS_COMPARE_ALLOCPTR_TO_RM(instr)){
            xpGPR(xp,RM_field(instr)) = VOID_ALLOCPTR;
            ok = true;
          }
        }
        if (ok) {
          /* Clear the carry bit, so that the trap will be taken. */
          xpPSR(xp) &= ~PSR_C_MASK;
        } else {
          Bug(NULL, "unexpected instruction preceding alloc trap.");
        }
      }
    } else {
      /* we may be before or after the alloc trap.  If before, set
         allocptr to VOID_ALLOCPTR and back up to the start of the
         instruction sequence; if after, finish the allocation. */
      Boolean before_alloc_trap = false;

      if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_COMPARE_ALLOCPTR_TO_RM(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_LOAD_RD_FROM_ALLOCBASE(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_SUB_HI_FROM_ALLOCPTR(instr)) {
        before_alloc_trap = true;
        --program_counter;
      }
      if (before_alloc_trap) {
        xpPC(xp) = program_counter;
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      } else {
        /* If we're already past the alloc_trap, finish allocating
           the object. */
        if (allocptr_tag == fulltag_cons) {
          finish_allocating_cons(xp);
        } else {
          if (allocptr_tag == fulltag_misc) {
            finish_allocating_uvector(xp);
          } else {
            Bug(xp, "what's being allocated here ?");
          }
        }
        /* Whatever we finished allocating, reset allocptr/allocbase to
           VOID_ALLOCPTR */
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      }
    }
    return;
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

#ifdef USE_SIGALTSTACK
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0), interrupt_handler);
}
#endif


void
install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_NODEFER
    | SA_SIGINFO
#ifdef USE_SIGALTSTACK
    | (on_altstack ? SA_ONSTACK : 0)
#endif
    ;

  sigaction(signo, &sa, NULL);
}

void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)sigill_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler), true, true);
    install_signal_handler(SIGBUS, (void *)ALTSTACK(signal_handler), true, true);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
#ifndef ANDROID
  sigset_t mask;

  sigemptyset(&mask);
#else
  int mask[] = {0,0};
#endif

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}

void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}
1909
/*
  A binding subprim has just trapped because a thread-local-binding
  index is out of bounds.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and return true if this all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
1921Boolean
1922extend_tcr_tlb(TCR *tcr, 
1923               ExceptionInformation *xp, 
1924               unsigned idx_regno)
1925{
1926  unsigned
1927    index = (unsigned) (xpGPR(xp,idx_regno)),
1928    old_limit = tcr->tlb_limit,
1929    new_limit = align_to_power_of_2(index+1,12),
1930    new_bytes = new_limit-old_limit;
1931  LispObj
1932    *old_tlb = tcr->tlb_pointer,
1933    *new_tlb = realloc(old_tlb, new_limit),
1934    *work;
1935
1936  if (new_tlb == NULL) {
1937    return false;
1938  }
1939 
1940  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1941
1942  while (new_bytes) {
1943    *work++ = no_thread_local_binding_marker;
1944    new_bytes -= sizeof(LispObj);
1945  }
1946  tcr->tlb_pointer = new_tlb;
1947  tcr->tlb_limit = new_limit;
1948  return true;
1949}
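
/* A minimal, illustrative sketch (not the actual trap dispatch in
   this file) of how a handler might use extend_tcr_tlb(); the
   function name and the already-decoded "idx_regno" argument here
   are hypothetical. */
#if 0
static Boolean
handle_tlb_bounds_trap_example(TCR *tcr, ExceptionInformation *xp,
                               unsigned idx_regno)
{
  if (extend_tcr_tlb(tcr, xp, idx_regno)) {
    /* The tlb now covers the index (its limit rounded up to a 4K
       page boundary); resuming at the trapping instruction will
       pass the bounds check. */
    return true;
  }
  return false;                 /* realloc() failed */
}
#endif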
1950
1951
1952
1953void
1954exception_init()
1955{
1956  install_pmcl_exception_handlers();
1957}
1958
1959
1960
1961
1962
1963#ifdef DARWIN
1964
1965
1966#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1967#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1968
1969
1970
1971#define LISP_EXCEPTIONS_HANDLED_MASK \
1972 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1973
1974/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1975#define NUM_LISP_EXCEPTIONS_HANDLED 4
1976
1977typedef struct {
1978  int foreign_exception_port_count;
1979  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1980  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1981  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1982  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1983} MACH_foreign_exception_state;
1984
1985
1986
1987
1988/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
1992
1993  Initially, we install an exception handler to handle each native
1994  thread's exceptions.  This process involves creating a distinguished
1995  thread which listens for kernel exception messages on a set of
1996  0 or more thread exception ports.  As threads are created, they're
1997  added to that port set; a thread's exception port is destroyed
1998  (and therefore removed from the port set) when the thread exits.
1999
2000  A few exceptions can be handled directly in the handler thread;
2001  others require that we resume the user thread (and that the
2002  exception thread resumes listening for exceptions.)  The user
2003  thread might eventually want to return to the original context
2004  (possibly modified somewhat.)
2005
2006  As it turns out, the simplest way to force the faulting user
2007  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
2009  on the user thread's stack and forces the user thread to resume
2010  execution as if a signal handler had been called with that
2011  context as an argument.  We can use a distinguished UUO at a
2012  distinguished address to do something like sigreturn(); that'll
2013  have the effect of resuming the user thread's execution in
2014  the (pseudo-) signal context.
2015
2016  Since:
2017    a) we have miles of code in C and in Lisp that knows how to
2018    deal with Linux sigcontexts
2019    b) Linux sigcontexts contain a little more useful information
2020    (the DAR, DSISR, etc.) than their Darwin counterparts
2021    c) we have to create a sigcontext ourselves when calling out
2022    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code,
2024
2025  we create a Linux sigcontext struct.
2026
  Simple?  Hopefully from the outside it is ...
2028
2029  We want the process of passing a thread's own context to it to
2030  appear to be atomic: in particular, we don't want the GC to suspend
2031  a thread that's had an exception but has not yet had its user-level
2032  exception handler called, and we don't want the thread's exception
2033  context to be modified by a GC while the Mach handler thread is
2034  copying it around.  On Linux (and on Jaguar), we avoid this issue
2035  because (a) the kernel sets up the user-level signal handler and
2036  (b) the signal handler blocks signals (including the signal used
2037  by the GC to suspend threads) until tcr->xframe is set up.
2038
2039  The GC and the Mach server thread therefore contend for the lock
2040  "mach_exception_lock".  The Mach server thread holds the lock
2041  when copying exception information between the kernel and the
2042  user thread; the GC holds this lock during most of its execution
2043  (delaying exception processing until it can be done without
2044  GC interference.)
2045
2046*/
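
/* As a reading aid (not compiled code), the pieces defined below fit
   together roughly like this:

   setup_mach_exception_handling(tcr)   - route a thread's exceptions
     -> mach_exception_port_set()       - lazily make port set + server thread
          -> exception_handler_proc()   - loop in mach_msg_server()
               -> openmcl_exc_server()  - decode the kernel's message
                    -> catch_exception_raise()
                         -> setup_signal_frame()  - build pseudo sigcontext,
                                                    point thread at handler
   ... user thread runs pseudo_signal_handler(), then the
   pseudo_sigreturn UUO ...
     -> do_pseudo_sigreturn() -> restore_mach_thread_state()
*/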
2047
2048
2049#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
2050
2051void
2052fatal_mach_error(char *format, ...);
2053
#define MACH_CHECK_ERROR(context,x) do {if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", (context), (x));}} while (0)
2055
2056
2057void
2058restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2059{
2060  kern_return_t kret;
2061  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);
2062
2063  /* Set the thread's FP state from the pseudosigcontext */
2064  kret = thread_set_state(thread,
2065                          ARM_VFP_STATE,
2066                          (thread_state_t)&(mc->__fs),
2067                          ARM_VFP_STATE_COUNT);
2068
2069  MACH_CHECK_ERROR("setting thread FP state", kret);
2070
2071  /* The thread'll be as good as new ... */
2072  kret = thread_set_state(thread, 
2073                          MACHINE_THREAD_STATE,
2074                          (thread_state_t)&(mc->__ss),
2075                          MACHINE_THREAD_STATE_COUNT);
2076  MACH_CHECK_ERROR("setting thread state", kret);
2077} 
2078
/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (i.e.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
2083   Find that context (the user thread's R3 points to it), then
2084   use that context to set the user thread's state.  When this
2085   function's caller returns, the Mach kernel will resume the
2086   user thread.
2087*/
2088
2089kern_return_t
2090do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2091{
2092  ExceptionInformation *xp;
2093
2094#ifdef DEBUG_MACH_EXCEPTIONS
2095  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2096#endif
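  /* Pop the saved last_lisp_frame that setup_signal_frame() pushed
     just below the pseudosigcontext. */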
2097  tcr->last_lisp_frame = *((natural *)(tcr->last_lisp_frame));
2098  xp = tcr->pending_exception_context;
2099  if (xp) {
2100    tcr->pending_exception_context = NULL;
2101    tcr->valence = TCR_STATE_LISP;
2102    restore_mach_thread_state(thread, xp);
2103    raise_pending_interrupt(tcr);
2104  } else {
2105    Bug(NULL, "no xp here!\n");
2106  }
2107#ifdef DEBUG_MACH_EXCEPTIONS
2108  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2109#endif
2110  return KERN_SUCCESS;
2111} 
2112
2113ExceptionInformation *
2114create_thread_context_frame(mach_port_t thread, 
2115                            natural *new_stack_top)
2116{
2117  arm_thread_state_t ts;
2118  mach_msg_type_number_t thread_state_count;
2119  kern_return_t result;
2120  ExceptionInformation *pseudosigcontext;
2121  _STRUCT_MCONTEXT *mc;
2122  natural stackp, backlink;
2123
2124  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2125  result = thread_get_state(thread, 
2126                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
2127                            (thread_state_t)&ts,
2128                            &thread_state_count);
2129 
2130  if (result != KERN_SUCCESS) {
2131    get_tcr(true);
2132    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2133  }
2134  stackp = ts.__sp;
2135  backlink = stackp;
2136
2137  stackp -= sizeof(*pseudosigcontext);
2138  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2139
2140  stackp -= sizeof(*mc);
2141  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
2142  memmove(&(mc->__ss),&ts,sizeof(ts));
2143
2144  thread_state_count = ARM_VFP_STATE_COUNT;
2145  thread_get_state(thread,
2146                   ARM_VFP_STATE,
2147                   (thread_state_t)&(mc->__fs),
2148                   &thread_state_count);
2149
2150
2151  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
2152  thread_get_state(thread,
2153                   ARM_EXCEPTION_STATE,
2154                   (thread_state_t)&(mc->__es),
2155                   &thread_state_count);
2156
2157
2158  UC_MCONTEXT(pseudosigcontext) = mc;
2159  if (new_stack_top) {
2160    *new_stack_top = stackp;
2161  }
2162  return pseudosigcontext;
2163}
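
/* The frame built above, on the user thread's (downward-growing)
   stack:

      original __sp ->  +----------------------+
                        | ExceptionInformation |  (pseudosigcontext)
                        +----------------------+
                        | _STRUCT_MCONTEXT     |  (__ss, __fs, __es)
      *new_stack_top -> +----------------------+

   setup_signal_frame() then pushes tcr->last_lisp_frame just below
   this. */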
2164
2165/*
2166  This code sets up the user thread so that it executes a "pseudo-signal
2167  handler" function when it resumes.  Create a linux sigcontext struct
2168  on the thread's stack and pass it as an argument to the pseudo-signal
2169  handler.
2170
2171  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2172  which will restore the thread's context.
2173
2174  If the handler invokes code that throws (or otherwise never sigreturn()'s
2175  to the context), that's fine.
2176
2177  Actually, check that: throw (and variants) may need to be careful and
2178  pop the tcr's xframe list until it's younger than any frame being
2179  entered.
2180*/
2181
2182int
2183setup_signal_frame(mach_port_t thread,
2184                   void *handler_address,
2185                   int signum,
2186                   int code,
2187                   TCR *tcr)
2188{
2189  arm_thread_state_t ts;
2190  ExceptionInformation *pseudosigcontext;
2191  int old_valence = tcr->valence;
2192  natural stackp, *pstackp;
2193
2194#ifdef DEBUG_MACH_EXCEPTIONS
2195  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2196#endif
2197  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2198  pstackp = (natural *)stackp;
2199  *--pstackp = tcr->last_lisp_frame;
2200  stackp = (natural)pstackp;
2201  tcr->last_lisp_frame = stackp;
2202  pseudosigcontext->uc_onstack = 0;
2203  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2204  pseudosigcontext->uc_mcsize = ARM_MCONTEXT_SIZE;
2205  tcr->pending_exception_context = pseudosigcontext;
2206  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2207 
2208
2209  /*
     We've now created a sigcontext on the thread's
2211     stack.  Set things up so that we call the handler (with appropriate
2212     args) when the thread's resumed.
2213  */
2214
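  /* Under the ARM calling convention, r0-r3 below become the four
     arguments of pseudo_signal_handler(); lr makes the handler
     "return" into the pseudo_sigreturn UUO, which restores the
     saved context. */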
2215  ts.__pc = (natural) handler_address;
2216  ts.__sp = stackp;
2217  ts.__r[0] = signum;
2218  ts.__r[1] = (natural)pseudosigcontext;
2219  ts.__r[2] = (natural)tcr;
2220  ts.__r[3] = (natural)old_valence;
2221  ts.__lr = (natural)pseudo_sigreturn;
2222  ts.__cpsr = xpPSR(pseudosigcontext);
2223
2224
2225  thread_set_state(thread, 
2226                   MACHINE_THREAD_STATE,
2227                   (thread_state_t)&ts,
2228                   MACHINE_THREAD_STATE_COUNT);
2229#ifdef DEBUG_MACH_EXCEPTIONS
2230  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2231#endif
2232  return 0;
2233}
2234
2235
2236void
2237pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2238{
2239  signal_handler(signum, NULL, context, tcr, old_valence, 0);
2240} 
2241
2242
2243int
2244thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2245{
2246  /* Likely hopeless. */
2247  return 0;
2248}
2249
2250/*
2251  This function runs in the exception handling thread.  It's
  called (by this precise name) from exc_server()-style dispatch code
  (here, from openmcl_exc_server(), below), which is invoked via
  mach_msg_server(), a function that waits for and dispatches
  on exception messages from the Mach kernel.
2256
2257  This checks to see if the exception was caused by a pseudo_sigreturn()
2258  UUO; if so, it arranges for the thread to have its state restored
2259  from the specified context.
2260
2261  Otherwise, it tries to map the exception to a signal number and
2262  arranges that the thread run a "pseudo signal handler" to handle
2263  the exception.
2264
2265  Some exceptions could and should be handled here directly.
2266*/
2267
2268kern_return_t
2269catch_exception_raise(mach_port_t exception_port,
2270                      mach_port_t thread,
2271                      mach_port_t task, 
2272                      exception_type_t exception,
2273                      exception_data_t code_vector,
2274                      mach_msg_type_number_t code_count)
2275{
2276  int signum = 0, code = *code_vector;
2277  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2278  kern_return_t kret;
2279
2280#ifdef DEBUG_MACH_EXCEPTIONS
2281  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2282#endif
2283
2284  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2285    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2286  } 
2287  /* On the ARM, code_vector[1] contains the undefined instruction
2288     in this case, not its address.  */
2289  if ((exception == EXC_BAD_INSTRUCTION) &&
2290      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2291      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
2292    kret = do_pseudo_sigreturn(thread, tcr);
2293  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2294    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
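    /* Any non-KERN_SUCCESS reply tells the kernel that we didn't
       handle the exception, so it can be propagated to other
       (e.g. foreign or debugger) handlers. */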
2295    kret = 17;
2296  } else {
2297    switch (exception) {
2298    case EXC_BAD_ACCESS:
2299      signum = SIGSEGV;
2300      break;
2301       
2302    case EXC_BAD_INSTRUCTION:
2303      signum = SIGILL;
2304      break;
     
2308    case EXC_ARITHMETIC:
2309      signum = SIGFPE;
2310      break;
2311
2312    default:
2313      break;
2314    }
2315    if (signum) {
2316      kret = setup_signal_frame(thread,
2317                                (void *)pseudo_signal_handler,
2318                                signum,
2319                                code,
2320                                tcr);
2321#if 0
2322      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2323#endif
2324
2325    } else {
2326      kret = 17;
2327    }
2328  }
2329
2330  return kret;
2331}
2332
2333
2334
2335typedef struct {
2336  mach_msg_header_t Head;
2337  /* start of the kernel processed data */
2338  mach_msg_body_t msgh_body;
2339  mach_msg_port_descriptor_t thread;
2340  mach_msg_port_descriptor_t task;
2341  /* end of the kernel processed data */
2342  NDR_record_t NDR;
2343  exception_type_t exception;
2344  mach_msg_type_number_t codeCnt;
2345  integer_t code[2];
2346  mach_msg_trailer_t trailer;
2347} exceptionRequest;
2348
2349
2350boolean_t
2351openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2352{
2353  static NDR_record_t _NDR = {0};
2354  kern_return_t handled;
2355  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2356  exceptionRequest *req = (exceptionRequest *) in;
2357
2358  reply->NDR = _NDR;
2359
2360  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2361  out->msgh_remote_port = in->msgh_remote_port;
2362  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2363  out->msgh_local_port = MACH_PORT_NULL;
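  /* MIG convention: a reply's msgh_id is the request's msgh_id plus
     100. */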
2364  out->msgh_id = in->msgh_id+100;
2365
2366  /* Could handle other exception flavors in the range 2401-2403 */
2367
2368
2369  if (in->msgh_id != 2401) {
2370    reply->RetCode = MIG_BAD_ID;
2371    return FALSE;
2372  }
2373  handled = catch_exception_raise(req->Head.msgh_local_port,
2374                                  req->thread.name,
2375                                  req->task.name,
2376                                  req->exception,
2377                                  req->code,
2378                                  req->codeCnt);
2379  reply->RetCode = handled;
2380  return TRUE;
2381}
2382
2383/*
2384  The initial function for an exception-handling thread.
2385*/
2386
2387void *
2388exception_handler_proc(void *arg)
2389{
2391  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2392
2393  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2394  /* Should never return. */
2395  abort();
2396}
2397
2398
2399
2400mach_port_t
2401mach_exception_port_set()
2402{
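  /* Lazily create the port set (and the dedicated exception-handling
     thread that serves it) the first time we're called. */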
2403  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2404  kern_return_t kret; 
2405  if (__exception_port_set == MACH_PORT_NULL) {
2406    kret = mach_port_allocate(mach_task_self(),
2407                              MACH_PORT_RIGHT_PORT_SET,
2408                              &__exception_port_set);
2409    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2410    create_system_thread(0,
2411                         NULL,
2412                         exception_handler_proc, 
2413                         (void *)((natural)__exception_port_set));
2414  }
2415  return __exception_port_set;
2416}
2417
2418/*
2419  Setup a new thread to handle those exceptions specified by
2420  the mask "which".  This involves creating a special Mach
2421  message port, telling the Mach kernel to send exception
2422  messages for the calling thread to that port, and setting
2423  up a handler thread which listens for and responds to
2424  those messages.
2425
2426*/
2427
2428/*
2429  Establish the lisp thread's TCR as its exception port, and determine
2430  whether any other ports have been established by foreign code for
2431  exceptions that lisp cares about.
2432
2433  If this happens at all, it should happen on return from foreign
2434  code and on entry to lisp code via a callback.
2435
2436  This is a lot of trouble (and overhead) to support Java, or other
2437  embeddable systems that clobber their caller's thread exception ports.
2438 
2439*/
2440kern_return_t
2441tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2442{
2443  kern_return_t kret;
2444  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2445  int i;
2446  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2447  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2448  exception_mask_t mask = 0;
2449
2450  kret = thread_swap_exception_ports(thread,
2451                                     LISP_EXCEPTIONS_HANDLED_MASK,
2452                                     lisp_port,
2453                                     EXCEPTION_DEFAULT,
2454                                     THREAD_STATE_NONE,
2455                                     fxs->masks,
2456                                     &n,
2457                                     fxs->ports,
2458                                     fxs->behaviors,
2459                                     fxs->flavors);
2460  if (kret == KERN_SUCCESS) {
2461    fxs->foreign_exception_port_count = n;
2462    for (i = 0; i < n; i ++) {
2463      foreign_port = fxs->ports[i];
2464
2465      if ((foreign_port != lisp_port) &&
2466          (foreign_port != MACH_PORT_NULL)) {
2467        mask |= fxs->masks[i];
2468      }
2469    }
2470    tcr->foreign_exception_status = (int) mask;
2471  }
2472  return kret;
2473}
2474
2475kern_return_t
2476tcr_establish_lisp_exception_port(TCR *tcr)
2477{
2478  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2479}
2480
2481/*
2482  Do this when calling out to or returning from foreign code, if
2483  any conflicting foreign exception ports were established when we
2484  last entered lisp code.
2485*/
2486kern_return_t
2487restore_foreign_exception_ports(TCR *tcr)
2488{
2489  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2490 
2491  if (m) {
2492    MACH_foreign_exception_state *fxs  = 
2493      (MACH_foreign_exception_state *) tcr->native_thread_info;
2494    int i, n = fxs->foreign_exception_port_count;
2495    exception_mask_t tm;
2496
2497    for (i = 0; i < n; i++) {
2498      if ((tm = fxs->masks[i]) & m) {
2499        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2500                                   tm,
2501                                   fxs->ports[i],
2502                                   fxs->behaviors[i],
2503                                   fxs->flavors[i]);
2504      }
2505    }
  }
  return KERN_SUCCESS;
}
2509
2510/*
2511  This assumes that a Mach port (to be used as the thread's exception port) whose
2512  "name" matches the TCR's 32-bit address has already been allocated.
2513*/
2514
2515kern_return_t
2516setup_mach_exception_handling(TCR *tcr)
2517{
2518  mach_port_t
2519    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2520    task_self = mach_task_self();
2521  kern_return_t kret;
2522
2523  kret = mach_port_insert_right(task_self,
2524                                thread_exception_port,
2525                                thread_exception_port,
2526                                MACH_MSG_TYPE_MAKE_SEND);
2527  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2528
2529  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2530  if (kret == KERN_SUCCESS) {
2531    mach_port_t exception_port_set = mach_exception_port_set();
2532
2533    kret = mach_port_move_member(task_self,
2534                                 thread_exception_port,
2535                                 exception_port_set);
2536  }
2537  return kret;
2538}
2539
2540void
2541darwin_exception_init(TCR *tcr)
2542{
2543  void tcr_monitor_exception_handling(TCR*, Boolean);
2544  kern_return_t kret;
2545  MACH_foreign_exception_state *fxs = 
2546    calloc(1, sizeof(MACH_foreign_exception_state));
2547 
2548  tcr->native_thread_info = (void *) fxs;
2549
2550  if ((kret = setup_mach_exception_handling(tcr))
2551      != KERN_SUCCESS) {
2552    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2553    terminate_lisp();
2554  }
2555}
2556
2557/*
2558  The tcr is the "name" of the corresponding thread's exception port.
2559  Destroying the port should remove it from all port sets of which it's
2560  a member (notably, the exception port set.)
2561*/
2562void
2563darwin_exception_cleanup(TCR *tcr)
2564{
2565  void *fxs = tcr->native_thread_info;
2566  extern Boolean use_mach_exception_handling;
2567
2568  if (fxs) {
2569    tcr->native_thread_info = NULL;
2570    free(fxs);
2571  }
2572  if (use_mach_exception_handling) {
2573    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2574    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2575  }
2576}
2577
2578
2579Boolean
2580suspend_mach_thread(mach_port_t mach_thread)
2581{
2582  kern_return_t status;
2583  Boolean aborted = false;
2584 
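  /* Suspend the thread, then use thread_abort_safely() to back any
     interruptible kernel operation it's blocked in out to a clean
     restart point; if that can't be done safely, resume the thread
     and retry the whole sequence. */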
2585  do {
2586    aborted = false;
2587    status = thread_suspend(mach_thread);
2588    if (status == KERN_SUCCESS) {
2589      status = thread_abort_safely(mach_thread);
2590      if (status == KERN_SUCCESS) {
2591        aborted = true;
2592      } else {
2593        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2594        thread_resume(mach_thread);
2595      }
2596    } else {
2597      return false;
2598    }
2599  } while (! aborted);
2600  return true;
2601}
2602
2603/*
2604  Only do this if pthread_kill indicated that the pthread isn't
2605  listening to signals anymore, as can happen as soon as pthread_exit()
2606  is called on Darwin.  The thread could still call out to lisp as it
2607  is exiting, so we need another way to suspend it in this case.
2608*/
2609Boolean
2610mach_suspend_tcr(TCR *tcr)
2611{
2612  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2613  ExceptionInformation *pseudosigcontext;
2614  Boolean result = false;
2615 
2616  result = suspend_mach_thread(mach_thread);
2617  if (result) {
2618    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2619    pseudosigcontext->uc_onstack = 0;
2620    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2621    tcr->suspend_context = pseudosigcontext;
2622  }
2623  return result;
2624}
2625
2626void
2627mach_resume_tcr(TCR *tcr)
2628{
2629  ExceptionInformation *xp;
2630  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2631 
2632  xp = tcr->suspend_context;
2633#ifdef DEBUG_MACH_EXCEPTIONS
2634  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2635          tcr, tcr->pending_exception_context);
2636#endif
2637  tcr->suspend_context = NULL;
2638  restore_mach_thread_state(mach_thread, xp);
2639#ifdef DEBUG_MACH_EXCEPTIONS
2640  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2641          tcr, tcr->pending_exception_context);
2642#endif
2643  thread_resume(mach_thread);
2644}
2645
2646void
2647fatal_mach_error(char *format, ...)
2648{
2649  va_list args;
2650  char s[512];
2651 
2652
2653  va_start(args, format);
2654  vsnprintf(s, sizeof(s),format, args);
2655  va_end(args);
2656
2657  Fatal("Mach error", s);
2658}
2659
2660void
2661pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2662{
2663  interrupt_handler(signum, NULL, context);
2664}
2665
2666int
2667mach_raise_thread_interrupt(TCR *target)
2668{
2669  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2670  kern_return_t kret;
2671  Boolean result = false;
2672  TCR *current = get_tcr(false);
2673  thread_basic_info_data_t info; 
2674  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2675
2676  LOCK(lisp_global(TCR_AREA_LOCK), current);
2677
2678  if (suspend_mach_thread(mach_thread)) {
2679    if (thread_info(mach_thread,
2680                    THREAD_BASIC_INFO,
2681                    (thread_info_t)&info,
2682                    &info_count) == KERN_SUCCESS) {
2683      if (info.suspend_count == 1) {
2684        if ((target->valence == TCR_STATE_LISP) &&
2685            (!target->unwinding) &&
2686            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2687          kret = setup_signal_frame(mach_thread,
2688                                    (void *)pseudo_interrupt_handler,
2689                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2690                                    0,
2691                                    target);
2692          if (kret == KERN_SUCCESS) {
2693            result = true;
2694          }
2695        }
2696      }
2697    }
2698    if (! result) {
2699      target->interrupt_pending = 1 << fixnumshift;
2700    }
2701    thread_resume(mach_thread);
2702   
2703  }
2704  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2705  return 0;
2706}
2707
2708#endif