source: trunk/source/lisp-kernel/arm-exceptions.c @ 14791

Last change on this file since 14791 was 14791, checked in by gb, 9 years ago

Don't use swp instructions: they're deprecated and may be disabled
and/or improperly emulated on some Linux kernels. (And they're generally
somewhat slow.)

Support the alternative instruction sequences (which involve loading a
PC into a register not expected to contain one) in pc_luser_xp().

File size: 71.7 KB
/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/
18
19#include "lisp.h"
20#include "lisp-exceptions.h"
21#include "lisp_globals.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
28#include <stdio.h>
29#ifdef LINUX
30#include <strings.h>
31#include <sys/mman.h>
32#ifndef ANDROID
33#include <fpu_control.h>
34#include <linux/prctl.h>
35#endif
36#endif
37
38#ifdef DARWIN
39#include <sys/mman.h>
40#ifndef SA_NODEFER
41#define SA_NODEFER 0
42#endif
43#include <sysexits.h>
44
45
46/* a distinguished UUO at a distinguished address */
47extern void pseudo_sigreturn(ExceptionInformation *);
48#endif
49
50
51#include "threads.h"
52
53#ifdef ANDROID
54#define pthread_sigmask(how,in,out) rt_sigprocmask(how,in,out,8)
55#endif
56
57#ifdef LINUX
58
59void
60enable_fp_exceptions()
61{
62}
63
64void
65disable_fp_exceptions()
66{
67}
68#endif

/*
  Handle exceptions.

*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);



int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;
  int delta = -3;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    if (IS_BRANCH_AROUND_ALLOC_TRAP(program_counter[-1])) {
      delta = -4;
    }
    prev_instr = program_counter[delta];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[delta-1];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
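
/* Illustrative sketch only (the authoritative encodings are the IS_*
   predicates used above): the compiled allocation sequence that ends
   in an alloc trap looks roughly like

       sub    allocptr, allocptr, #bytes    ; decrement allocptr
       ldr    allocbase, [rcontext, #tcr.save_allocbase]
       cmp    allocptr, allocbase
       uuo_alloc_trap                       ; raises SIGILL if we lost

   so the displacement can be recovered by decoding the sub a fixed
   distance before the trap (one instruction further back when a
   branch-around is present).  Register names and exact operand forms
   here are assumptions for exposition. */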


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
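
/* For exposition, an assumed (not compiler-verified) sketch of the
   post-trap sequence that finish_allocating_cons() emulates:

       str    rX, [allocptr, #cons.cdr]     ; initialize the cdr
       str    rY, [allocptr, #cons.car]     ; initialize the car
       mov    result, allocptr              ; publish the tagged cons
       bic    allocptr, allocptr, #tagmask  ; clear the tag; emulation stops here

   Stepping one instruction at a time until the tag-clearing
   instruction has been emulated makes an interrupted consing sequence
   complete atomically from lisp's point of view. */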

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}
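
/* To make the fallback ladder above concrete (sizes assumed): if a
   thread needs a 1MB uvector, we first try to carve a segment out of
   the existing dynamic area; if that fails and more than 1MB has been
   allocated so far, we untenure everything and do a full GC, then
   retry; only the second retry is allowed to extend the heap itself.
   Every successful path ends by biasing allocptr with
   disp_from_allocptr, so the interrupted allocation sequence resumes
   with a correctly tagged pointer. */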

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
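
/* Worked example (addresses made up): allocation moves allocptr
   downward, so if the thread last synchronized at last = 0x20005000
   and has since consed down to current = 0x20004f00, then
   last-current = 0x100 (256 bytes) is credited to
   tcr->bytes_allocated.  A cur_allocptr of VOID_ALLOCPTR means "no
   active allocation region" and contributes nothing. */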

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
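
/* Illustrative layout: each cons occupies one dnode, and the loop
   walks the freshly allocated block from its low end upward, chaining
   each cell's cdr to the cell below it.  For nconses = 3:

       cell0: { cdr = NIL,   car = initial }   ; lowest address, list tail
       cell1: { cdr = cell0, car = initial }
       cell2: { cdr = cell1, car = initial }   ; highest address, list head

   so arg_z ends up holding cell2, the head of a proper 3-element list. */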

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

    /* else fall thru */
  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}
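
/* Concrete instance (tag values assumed for exposition; the real ones
   come from the architecture headers): for a cons with sizeof(cons)
   == 8 and, say, fulltag_cons == 1, disp = -8 + 1 = -7.  For a
   uvector tagged fulltag_misc, allocptr_displacement() decodes the
   decrement from the instruction stream, and bytes_needed =
   (-disp) + fulltag_misc recovers the raw object size.  On success
   the PC is advanced over the 4-byte trap instruction, as if the trap
   had never fired. */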

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just hit its soft limit.  Signal a
     stack-overflow condition; the caller has already unprotected the
     soft guard area, so error handling can proceed until all stacks
     are back under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
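
/* Worked example of the sizing arithmetic (numbers assumed): with a
   64KB allocation quantum (log2_allocation_quantum == 16),
   oldlimit = 0x123456 rounds up to 0x130000 and need = 0x2800 rounds
   up to 0x10000, giving newlimit = 0x140000.  The new segment spans
   oldlimit..newlimit; allocptr starts at the top (newlimit) and
   consing decrements it toward save_allocbase (oldlimit), taking the
   alloc trap when it would cross. */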


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}



/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}




protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {

      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}




OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */



Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;



/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}



OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}



Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}


Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{

  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

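/* Note on the arithmetic below: pc values are pointers to 32-bit
   opcodes, so C pointer arithmetic already scales by 4; the byte
   count "delta" is therefore converted to a word count first. */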
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}
Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;

    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;


  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}
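
/* Dispatch summary (descriptive, derived from the cases above): a UUO
   is an architecturally-undefined ARM instruction whose fields encode
   a format and operands.  Kernel-service UUOs perform thread control
   (interrupt/suspend/resume/kill) or large-list allocation on lisp's
   behalf; unary format 3 grows the thread-local-binding table via
   extend_tcr_tlb(); the error formats call back into lisp through
   handle_error(), which can adjust "bump" so that continuable errors
   resume at the correct PC. */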

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = Rfn,  codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }



  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}
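
/* Sketch of the callback protocol used above (the callback itself
   lives in lisp; this C-visible shape is an assumption written out
   for exposition, matching the cast at the call site):

       typedef int (*lisp_trap_callback)(ExceptionInformation *xp,
                                         natural arg1, natural arg2,
                                         natural fnreg, natural offset);

   fnreg/offset locate the trapping PC relative to a lisp function's
   code vector when one can be found, letting lisp report the error
   against a function instead of a raw address; the returned delta
   tells the caller how far to bump the PC for continuable traps. */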

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}




/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

}




void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}



Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
#ifndef ANDROID
  sigset_t mask;
  sigfillset(&mask);
#else
  int mask [] = {0,0};
#endif

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}


void
sigill_handler(int signum, siginfo_t *info, ExceptionInformation  *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;

  if (IS_UUO(instr)) {
    natural psr = xpPSR(xp);
    Boolean opcode_matched_condition = false,
      flip = ((instr & (1<<28)) != 0);


    switch (instr >> 29) {
    case 0:
      opcode_matched_condition = ((psr & PSR_Z_MASK) != 0);
      break;
    case 1:
      opcode_matched_condition = ((psr & PSR_C_MASK) != 0);
      break;
    case 2:
      opcode_matched_condition = ((psr & PSR_N_MASK) != 0);
      break;
    case 3:
      opcode_matched_condition = ((psr & PSR_V_MASK) != 0);
      break;
    case 4:
      opcode_matched_condition = (((psr & PSR_C_MASK) != 0) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 5:
      opcode_matched_condition = (((psr & PSR_N_MASK) != 0) ==
                                  ((psr & PSR_V_MASK) != 0));
      break;
    case 6:
      opcode_matched_condition = ((((psr & PSR_N_MASK) != 0) ==
                                   ((psr & PSR_V_MASK) != 0)) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 7:
      opcode_matched_condition = true;
      flip = false;
      break;
    }
    if (flip) {
      opcode_matched_condition = !opcode_matched_condition;
    }
    if (!opcode_matched_condition) {
      adjust_exception_pc(xp,4);
      return;
    }
  }
  signal_handler(signum,info,xp, NULL, 0, 0);
}
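
/* The switch above re-evaluates the condition field of a conditional
   UUO in software, since the CPU raised SIGILL without testing it.
   Decoded, instr bits 31:29 select a base condition and bit 28
   inverts it, mirroring the ARM condition-code scheme:

       0: EQ (Z set)      4: HI (C set and Z clear)
       1: CS (C set)      5: GE (N == V)
       2: MI (N set)      6: GT (N == V and Z clear)
       3: VS (V set)      7: AL (always; never inverted)

   A UUO whose condition fails is simply skipped (PC += 4); one whose
   condition holds is handled as a real exception. */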


#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);

  BytePtr target_sp= (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr=get_tcr(true);

  if (signo == SIGBUS) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;
    if (((BytePtr)truncate_to_power_of_2(addr,log2_page_size)) == a->softlimit) {
      if (mmap(a->softlimit,
               page_size,
               PROT_READ|PROT_WRITE|PROT_EXEC,
               MAP_PRIVATE|MAP_ANON|MAP_FIXED,
               -1,
               0) == a->softlimit) {
        return;
      }
    }
  } else if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test,
  swap_lr_lisp_frame_temp0,
  swap_lr_lisp_frame_arg_z;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);



  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }



  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        Boolean ok = false;
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR + disp;
        instr = program_counter[-1];
        if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
          instr = program_counter[-2];
          if (IS_COMPARE_ALLOCPTR_TO_RM(instr)){
            xpGPR(xp,RM_field(instr)) = VOID_ALLOCPTR;
            ok = true;
          }
        }
        if (ok) {
          /* Clear the carry bit, so that the trap will be taken. */
          xpPSR(xp) &= ~PSR_C_MASK;
        } else {
          Bug(NULL, "unexpected instruction preceding alloc trap.");
        }
      }
    } else {
      /* we may be before or after the alloc trap.  If before, set
         allocptr to VOID_ALLOCPTR and back up to the start of the
         instruction sequence; if after, finish the allocation. */
      Boolean before_alloc_trap = false;

      if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_COMPARE_ALLOCPTR_TO_RM(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_LOAD_RD_FROM_ALLOCBASE(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_SUB_HI_FROM_ALLOCPTR(instr)) {
        before_alloc_trap = true;
        --program_counter;
      }
      if (before_alloc_trap) {
        xpPC(xp) = program_counter;
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      } else {
        /* If we're already past the alloc_trap, finish allocating
           the object. */
        if (allocptr_tag == fulltag_cons) {
          finish_allocating_cons(xp);
        } else {
          if (allocptr_tag == fulltag_misc) {
            finish_allocating_uvector(xp);
          } else {
            Bug(xp, "what's being allocated here ?");
          }
        }
        /* Whatever we finished allocating, reset allocptr/allocbase to
           VOID_ALLOCPTR */
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      }
      return;
    }
    return;
  }
  {
    lisp_frame *swap_frame = NULL;
    pc base = &swap_lr_lisp_frame_temp0;

    if ((program_counter > base)             /* sic */
        && (program_counter < (base+3))) {
      swap_frame = (lisp_frame *)xpGPR(xp,temp0);
    } else {
      base = &swap_lr_lisp_frame_arg_z;
      if ((program_counter > base) && (program_counter < (base+3))) {
        swap_frame = (lisp_frame *)xpGPR(xp,arg_z);
      }
    }
    if (swap_frame) {
      if (program_counter == (base+1)) {
        swap_frame->savelr = xpGPR(xp,Rlr);
      }
      xpGPR(xp,Rlr) = xpGPR(xp,imm0);
      xpPC(xp) = base+3;
      return;
    }
  }
}
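
/* The final block above supports the swp-free swap_lr_lisp_frame_*
   subprims described in the change note at the top of this page: for
   a few instructions, a saved return address (a lisp PC) is in
   transit through imm0 while temp0 or arg_z addresses the frame whose
   savelr slot is being exchanged with lr.  If a thread is suspended
   inside that window, we complete the exchange by hand (doing the
   savelr store if it hadn't happened yet) and resume just past the
   window. */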

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

#ifdef USE_SIGALTSTACK
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
}
#endif


void
install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_NODEFER
    | SA_SIGINFO
#ifdef USE_SIGALTSTACK
    | (on_altstack ? SA_ONSTACK : 0)
#endif
    ;

  sigaction(signo, &sa, NULL);
}


void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)sigill_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler), true, true);
    install_signal_handler(SIGBUS, (void *)ALTSTACK(signal_handler), true, true);

  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
#ifndef ANDROID
  sigset_t mask;

  sigemptyset(&mask);
#else
  int mask[] = {0,0};
#endif

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}



void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}
1934
/*
  A binding subprim has just trapped because the tlb index in
  idx_regno is out of bounds (the ARM analogue of the PPC's
  "twlle limit_regno,idx_regno").  Extend the tcr's tlb so that the
  index will be in bounds and the new limit will be on a page
  boundary, filling in the new page(s) with
  'no_thread_local_binding_marker'.  Update the tcr fields and
  return true if this all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
1946Boolean
1947extend_tcr_tlb(TCR *tcr, 
1948               ExceptionInformation *xp, 
1949               unsigned idx_regno)
1950{
1951  unsigned
1952    index = (unsigned) (xpGPR(xp,idx_regno)),
1953    old_limit = tcr->tlb_limit,
1954    new_limit = align_to_power_of_2(index+1,12),
1955    new_bytes = new_limit-old_limit;
1956  LispObj
1957    *old_tlb = tcr->tlb_pointer,
1958    *new_tlb = realloc(old_tlb, new_limit),
1959    *work;
1960
1961  if (new_tlb == NULL) {
1962    return false;
1963  }
1964 
1965  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1966
1967  while (new_bytes) {
1968    *work++ = no_thread_local_binding_marker;
1969    new_bytes -= sizeof(LispObj);
1970  }
1971  tcr->tlb_pointer = new_tlb;
1972  tcr->tlb_limit = new_limit;
1973  return true;
1974}
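
/*
  A worked example of the arithmetic above, assuming that
  align_to_power_of_2(n,12) rounds n up to the next multiple of
  4096 and that a LispObj is 4 bytes: with index = 5000 and
  old_limit = 4096, new_limit becomes 8192, so the tlb grows by one
  page and the 1024 new slots are filled with
  no_thread_local_binding_marker.
*/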
1975
1976
1977
1978void
1979exception_init()
1980{
1981  install_pmcl_exception_handlers();
1982}
1983
1984
1985
1986
1987
1988#ifdef DARWIN
1989
1990
1991#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1992#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
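
/* A TCR's address does double duty as the Mach port name of the
   owning thread's exception port; see setup_mach_exception_handling()
   below. */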
1993
1994
1995
1996#define LISP_EXCEPTIONS_HANDLED_MASK \
1997 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1998
1999/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2000#define NUM_LISP_EXCEPTIONS_HANDLED 4
2001
2002typedef struct {
2003  int foreign_exception_port_count;
2004  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2005  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2006  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2007  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2008} MACH_foreign_exception_state;
2009
2010
2011
2012
2013/*
2014  Mach's exception mechanism works a little better than its signal
2015  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
2017
2018  Initially, we install an exception handler to handle each native
2019  thread's exceptions.  This process involves creating a distinguished
2020  thread which listens for kernel exception messages on a set of
2021  0 or more thread exception ports.  As threads are created, they're
2022  added to that port set; a thread's exception port is destroyed
2023  (and therefore removed from the port set) when the thread exits.
2024
2025  A few exceptions can be handled directly in the handler thread;
2026  others require that we resume the user thread (and that the
2027  exception thread resumes listening for exceptions.)  The user
2028  thread might eventually want to return to the original context
2029  (possibly modified somewhat.)
2030
2031  As it turns out, the simplest way to force the faulting user
2032  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
2034  on the user thread's stack and forces the user thread to resume
2035  execution as if a signal handler had been called with that
2036  context as an argument.  We can use a distinguished UUO at a
2037  distinguished address to do something like sigreturn(); that'll
2038  have the effect of resuming the user thread's execution in
2039  the (pseudo-) signal context.
2040
  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts,
    b) Linux sigcontexts contain a little more useful information
    (on the PPC, the DAR, DSISR, etc.) than their Darwin
    counterparts, and
    c) we have to create a sigcontext ourselves when calling out
    to the user thread (we aren't really generating a signal, just
    leveraging existing signal-handling code),

  we create a Linux sigcontext struct.
2051
  Simple?  Hopefully from the outside it is ...
2053
2054  We want the process of passing a thread's own context to it to
2055  appear to be atomic: in particular, we don't want the GC to suspend
2056  a thread that's had an exception but has not yet had its user-level
2057  exception handler called, and we don't want the thread's exception
2058  context to be modified by a GC while the Mach handler thread is
2059  copying it around.  On Linux (and on Jaguar), we avoid this issue
2060  because (a) the kernel sets up the user-level signal handler and
2061  (b) the signal handler blocks signals (including the signal used
2062  by the GC to suspend threads) until tcr->xframe is set up.
2063
2064  The GC and the Mach server thread therefore contend for the lock
2065  "mach_exception_lock".  The Mach server thread holds the lock
2066  when copying exception information between the kernel and the
2067  user thread; the GC holds this lock during most of its execution
2068  (delaying exception processing until it can be done without
2069  GC interference.)
2070
2071*/
2072
2073
2074#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
2075
2076void
2077fatal_mach_error(char *format, ...);
2078
#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", (context), (x));} } while (0)
2080
2081
2082void
2083restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2084{
2085  kern_return_t kret;
2086  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);
2087
2088  /* Set the thread's FP state from the pseudosigcontext */
2089  kret = thread_set_state(thread,
2090                          ARM_VFP_STATE,
2091                          (thread_state_t)&(mc->__fs),
2092                          ARM_VFP_STATE_COUNT);
2093
2094  MACH_CHECK_ERROR("setting thread FP state", kret);
2095
2096  /* The thread'll be as good as new ... */
2097  kret = thread_set_state(thread, 
2098                          MACHINE_THREAD_STATE,
2099                          (thread_state_t)&(mc->__ss),
2100                          MACHINE_THREAD_STATE_COUNT);
2101  MACH_CHECK_ERROR("setting thread state", kret);
2102} 
2103
/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (it's saved in the tcr's
   pending_exception_context), then use it to set the user thread's
   state.  When this function's caller returns, the Mach kernel
   will resume the user thread.
*/
2113
2114kern_return_t
2115do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2116{
2117  ExceptionInformation *xp;
2118
2119#ifdef DEBUG_MACH_EXCEPTIONS
2120  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2121#endif
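  /* tcr->last_lisp_frame currently points at the stack word where
     setup_signal_frame() saved the previous value; reload it. */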
2122  tcr->last_lisp_frame = *((natural *)(tcr->last_lisp_frame));
2123  xp = tcr->pending_exception_context;
2124  if (xp) {
2125    tcr->pending_exception_context = NULL;
2126    tcr->valence = TCR_STATE_LISP;
2127    restore_mach_thread_state(thread, xp);
2128    raise_pending_interrupt(tcr);
2129  } else {
2130    Bug(NULL, "no xp here!\n");
2131  }
2132#ifdef DEBUG_MACH_EXCEPTIONS
2133  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2134#endif
2135  return KERN_SUCCESS;
2136} 
2137
2138ExceptionInformation *
2139create_thread_context_frame(mach_port_t thread, 
2140                            natural *new_stack_top)
2141{
2142  arm_thread_state_t ts;
2143  mach_msg_type_number_t thread_state_count;
2144  kern_return_t result;
2145  ExceptionInformation *pseudosigcontext;
2146  _STRUCT_MCONTEXT *mc;
  natural stackp;
2148
2149  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2150  result = thread_get_state(thread, 
2151                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
2152                            (thread_state_t)&ts,
2153                            &thread_state_count);
2154 
2155  if (result != KERN_SUCCESS) {
2156    get_tcr(true);
2157    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2158  }
  stackp = ts.__sp;
2161
2162  stackp -= sizeof(*pseudosigcontext);
2163  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2164
2165  stackp -= sizeof(*mc);
2166  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
2167  memmove(&(mc->__ss),&ts,sizeof(ts));
2168
2169  thread_state_count = ARM_VFP_STATE_COUNT;
2170  thread_get_state(thread,
2171                   ARM_VFP_STATE,
2172                   (thread_state_t)&(mc->__fs),
2173                   &thread_state_count);
2174
2175
2176  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
2177  thread_get_state(thread,
2178                   ARM_EXCEPTION_STATE,
2179                   (thread_state_t)&(mc->__es),
2180                   &thread_state_count);
2181
2182
2183  UC_MCONTEXT(pseudosigcontext) = mc;
2184  if (new_stack_top) {
2185    *new_stack_top = stackp;
2186  }
2187  return pseudosigcontext;
2188}
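
/*
  A rough sketch of what the code above builds on the user thread's
  stack (higher addresses first):

     interrupted frames         <- ts.__sp on entry
     ExceptionInformation       (the pseudosigcontext)
     _STRUCT_MCONTEXT           <- *new_stack_top
*/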
2189
2190/*
2191  This code sets up the user thread so that it executes a "pseudo-signal
2192  handler" function when it resumes.  Create a linux sigcontext struct
2193  on the thread's stack and pass it as an argument to the pseudo-signal
2194  handler.
2195
2196  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2197  which will restore the thread's context.
2198
2199  If the handler invokes code that throws (or otherwise never sigreturn()'s
2200  to the context), that's fine.
2201
2202  Actually, check that: throw (and variants) may need to be careful and
2203  pop the tcr's xframe list until it's younger than any frame being
2204  entered.
2205*/
2206
2207int
2208setup_signal_frame(mach_port_t thread,
2209                   void *handler_address,
2210                   int signum,
2211                   int code,
2212                   TCR *tcr)
2213{
2214  arm_thread_state_t ts;
2215  ExceptionInformation *pseudosigcontext;
2216  int old_valence = tcr->valence;
2217  natural stackp, *pstackp;
2218
2219#ifdef DEBUG_MACH_EXCEPTIONS
2220  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2221#endif
2222  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2223  pstackp = (natural *)stackp;
2224  *--pstackp = tcr->last_lisp_frame;
2225  stackp = (natural)pstackp;
2226  tcr->last_lisp_frame = stackp;
2227  pseudosigcontext->uc_onstack = 0;
2228  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2229  pseudosigcontext->uc_mcsize = ARM_MCONTEXT_SIZE;
2230  tcr->pending_exception_context = pseudosigcontext;
2231  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2232 
2233
  /*
     We've now created a sigcontext on the thread's stack.  Set
     things up so that we call the handler (with appropriate args)
     when the thread's resumed.
  */
2239
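  /* Per the ARM procedure call standard, r0-r3 carry the first
     four arguments, so the thread will resume as if the handler
     had been called as handler(signum, pseudosigcontext, tcr,
     old_valence), with pseudo_sigreturn as its return address. */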
2240  ts.__pc = (natural) handler_address;
2241  ts.__sp = stackp;
2242  ts.__r[0] = signum;
2243  ts.__r[1] = (natural)pseudosigcontext;
2244  ts.__r[2] = (natural)tcr;
2245  ts.__r[3] = (natural)old_valence;
2246  ts.__lr = (natural)pseudo_sigreturn;
2247  ts.__cpsr = xpPSR(pseudosigcontext);
2248
2249
2250  thread_set_state(thread, 
2251                   MACHINE_THREAD_STATE,
2252                   (thread_state_t)&ts,
2253                   MACHINE_THREAD_STATE_COUNT);
2254#ifdef DEBUG_MACH_EXCEPTIONS
2255  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2256#endif
2257  return 0;
2258}
2259
2260
2261void
2262pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2263{
2264  signal_handler(signum, NULL, context, tcr, old_valence, 0);
2265} 
2266
2267
2268int
2269thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2270{
2271  /* Likely hopeless. */
2272  return 0;
2273}
2274
/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function
  "exc_server()", or, as here, from openmcl_exc_server(), when an
  exception message arrives on one of the thread exception ports.
  (The server function is invoked via mach_msg_server(), which
  waits for and dispatches on exception messages from the Mach
  kernel.)
2281
2282  This checks to see if the exception was caused by a pseudo_sigreturn()
2283  UUO; if so, it arranges for the thread to have its state restored
2284  from the specified context.
2285
2286  Otherwise, it tries to map the exception to a signal number and
2287  arranges that the thread run a "pseudo signal handler" to handle
2288  the exception.
2289
2290  Some exceptions could and should be handled here directly.
2291*/
2292
2293kern_return_t
2294catch_exception_raise(mach_port_t exception_port,
2295                      mach_port_t thread,
2296                      mach_port_t task, 
2297                      exception_type_t exception,
2298                      exception_data_t code_vector,
2299                      mach_msg_type_number_t code_count)
2300{
2301  int signum = 0, code = *code_vector;
2302  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2303  kern_return_t kret;
2304
2305#ifdef DEBUG_MACH_EXCEPTIONS
2306  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2307#endif
2308
2309  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2310    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2311  } 
2312  /* On the ARM, code_vector[1] contains the undefined instruction
2313     in this case, not its address.  */
2314  if ((exception == EXC_BAD_INSTRUCTION) &&
2315      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2316      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
2317    kret = do_pseudo_sigreturn(thread, tcr);
2318  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2319    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
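    /* 17 is presumably just an arbitrary non-KERN_SUCCESS value:
       returning it makes openmcl_exc_server() report the exception
       as unhandled, leaving it to be propagated to any foreign
       handler. */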
2320    kret = 17;
2321  } else {
2322    switch (exception) {
2323    case EXC_BAD_ACCESS:
2324      signum = SIGSEGV;
2325      break;
2326       
    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

2333    case EXC_ARITHMETIC:
2334      signum = SIGFPE;
2335      break;
2336
2337    default:
2338      break;
2339    }
2340    if (signum) {
2341      kret = setup_signal_frame(thread,
2342                                (void *)pseudo_signal_handler,
2343                                signum,
2344                                code,
2345                                tcr);
2346#if 0
2347      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2348#endif
2349
2350    } else {
2351      kret = 17;
2352    }
2353  }
2354
2355  return kret;
2356}
2357
2358
2359
2360typedef struct {
2361  mach_msg_header_t Head;
2362  /* start of the kernel processed data */
2363  mach_msg_body_t msgh_body;
2364  mach_msg_port_descriptor_t thread;
2365  mach_msg_port_descriptor_t task;
2366  /* end of the kernel processed data */
2367  NDR_record_t NDR;
2368  exception_type_t exception;
2369  mach_msg_type_number_t codeCnt;
2370  integer_t code[2];
2371  mach_msg_trailer_t trailer;
2372} exceptionRequest;
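
/* A hand-rolled copy of the MIG-generated request message for
   exception_raise (msgh_id 2401), decoded by openmcl_exc_server()
   below in place of the library's exc_server(). */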
2373
2374
2375boolean_t
2376openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2377{
2378  static NDR_record_t _NDR = {0};
2379  kern_return_t handled;
2380  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2381  exceptionRequest *req = (exceptionRequest *) in;
2382
2383  reply->NDR = _NDR;
2384
2385  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2386  out->msgh_remote_port = in->msgh_remote_port;
2387  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
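  /* header + NDR record + RetCode, i.e. the layout of a
     mig_reply_error_t */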
2388  out->msgh_local_port = MACH_PORT_NULL;
2389  out->msgh_id = in->msgh_id+100;
2390
2391  /* Could handle other exception flavors in the range 2401-2403 */
2392
2393
2394  if (in->msgh_id != 2401) {
2395    reply->RetCode = MIG_BAD_ID;
2396    return FALSE;
2397  }
2398  handled = catch_exception_raise(req->Head.msgh_local_port,
2399                                  req->thread.name,
2400                                  req->task.name,
2401                                  req->exception,
2402                                  req->code,
2403                                  req->codeCnt);
2404  reply->RetCode = handled;
2405  return TRUE;
2406}
2407
2408/*
2409  The initial function for an exception-handling thread.
2410*/
2411
2412void *
2413exception_handler_proc(void *arg)
2414{
2416  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2417
2418  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2419  /* Should never return. */
2420  abort();
2421}
2422
2423
2424
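/* Return the port set on which the handler thread listens for all
   lisp threads' exception messages, creating it (and that thread)
   on the first call. */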
2425mach_port_t
2426mach_exception_port_set()
2427{
2428  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2429  kern_return_t kret; 
2430  if (__exception_port_set == MACH_PORT_NULL) {
2431    kret = mach_port_allocate(mach_task_self(),
2432                              MACH_PORT_RIGHT_PORT_SET,
2433                              &__exception_port_set);
2434    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2435    create_system_thread(0,
2436                         NULL,
2437                         exception_handler_proc, 
2438                         (void *)((natural)__exception_port_set));
2439  }
2440  return __exception_port_set;
2441}
2442
/*
  Set up exception handling for the calling thread.  This involves
  creating a special Mach message port, telling the Mach kernel to
  send exception messages for the calling thread to that port, and
  setting up a handler thread which listens for and responds to
  those messages.
*/
2452
2453/*
2454  Establish the lisp thread's TCR as its exception port, and determine
2455  whether any other ports have been established by foreign code for
2456  exceptions that lisp cares about.
2457
2458  If this happens at all, it should happen on return from foreign
2459  code and on entry to lisp code via a callback.
2460
2461  This is a lot of trouble (and overhead) to support Java, or other
2462  embeddable systems that clobber their caller's thread exception ports.
2463 
2464*/
2465kern_return_t
2466tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2467{
2468  kern_return_t kret;
2469  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2470  int i;
2471  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2472  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2473  exception_mask_t mask = 0;
2474
2475  kret = thread_swap_exception_ports(thread,
2476                                     LISP_EXCEPTIONS_HANDLED_MASK,
2477                                     lisp_port,
2478                                     EXCEPTION_DEFAULT,
2479                                     THREAD_STATE_NONE,
2480                                     fxs->masks,
2481                                     &n,
2482                                     fxs->ports,
2483                                     fxs->behaviors,
2484                                     fxs->flavors);
2485  if (kret == KERN_SUCCESS) {
2486    fxs->foreign_exception_port_count = n;
2487    for (i = 0; i < n; i ++) {
2488      foreign_port = fxs->ports[i];
2489
2490      if ((foreign_port != lisp_port) &&
2491          (foreign_port != MACH_PORT_NULL)) {
2492        mask |= fxs->masks[i];
2493      }
2494    }
2495    tcr->foreign_exception_status = (int) mask;
2496  }
2497  return kret;
2498}
2499
2500kern_return_t
2501tcr_establish_lisp_exception_port(TCR *tcr)
2502{
2503  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2504}
2505
2506/*
2507  Do this when calling out to or returning from foreign code, if
2508  any conflicting foreign exception ports were established when we
2509  last entered lisp code.
2510*/
2511kern_return_t
2512restore_foreign_exception_ports(TCR *tcr)
2513{
2514  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2515 
2516  if (m) {
2517    MACH_foreign_exception_state *fxs  = 
2518      (MACH_foreign_exception_state *) tcr->native_thread_info;
2519    int i, n = fxs->foreign_exception_port_count;
2520    exception_mask_t tm;
2521
2522    for (i = 0; i < n; i++) {
2523      if ((tm = fxs->masks[i]) & m) {
2524        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2525                                   tm,
2526                                   fxs->ports[i],
2527                                   fxs->behaviors[i],
2528                                   fxs->flavors[i]);
2529      }
2530    }
  }
  return KERN_SUCCESS;
}
2533                                   
2534
2535/*
2536  This assumes that a Mach port (to be used as the thread's exception port) whose
2537  "name" matches the TCR's 32-bit address has already been allocated.
2538*/
2539
2540kern_return_t
2541setup_mach_exception_handling(TCR *tcr)
2542{
2543  mach_port_t
2544    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2545    task_self = mach_task_self();
2546  kern_return_t kret;
2547
2548  kret = mach_port_insert_right(task_self,
2549                                thread_exception_port,
2550                                thread_exception_port,
2551                                MACH_MSG_TYPE_MAKE_SEND);
2552  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2553
2554  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2555  if (kret == KERN_SUCCESS) {
2556    mach_port_t exception_port_set = mach_exception_port_set();
2557
2558    kret = mach_port_move_member(task_self,
2559                                 thread_exception_port,
2560                                 exception_port_set);
2561  }
2562  return kret;
2563}
2564
2565void
2566darwin_exception_init(TCR *tcr)
2567{
2568  void tcr_monitor_exception_handling(TCR*, Boolean);
2569  kern_return_t kret;
2570  MACH_foreign_exception_state *fxs = 
2571    calloc(1, sizeof(MACH_foreign_exception_state));
2572 
2573  tcr->native_thread_info = (void *) fxs;
2574
2575  if ((kret = setup_mach_exception_handling(tcr))
2576      != KERN_SUCCESS) {
2577    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2578    terminate_lisp();
2579  }
2580}
2581
2582/*
2583  The tcr is the "name" of the corresponding thread's exception port.
2584  Destroying the port should remove it from all port sets of which it's
2585  a member (notably, the exception port set.)
2586*/
2587void
2588darwin_exception_cleanup(TCR *tcr)
2589{
2590  void *fxs = tcr->native_thread_info;
2591  extern Boolean use_mach_exception_handling;
2592
2593  if (fxs) {
2594    tcr->native_thread_info = NULL;
2595    free(fxs);
2596  }
2597  if (use_mach_exception_handling) {
2598    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2599    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2600  }
2601}
2602
2603
2604Boolean
2605suspend_mach_thread(mach_port_t mach_thread)
2606{
2607  kern_return_t status;
2608  Boolean aborted = false;
2609 
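  /* thread_abort_safely() should force the suspended thread out of
     any interruptible trap so that its saved user-level state is
     consistent; if the abort can't be done safely, resume the
     thread and try the whole suspend again. */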
2610  do {
2611    aborted = false;
2612    status = thread_suspend(mach_thread);
2613    if (status == KERN_SUCCESS) {
2614      status = thread_abort_safely(mach_thread);
2615      if (status == KERN_SUCCESS) {
2616        aborted = true;
2617      } else {
2618        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2619        thread_resume(mach_thread);
2620      }
2621    } else {
2622      return false;
2623    }
2624  } while (! aborted);
2625  return true;
2626}
2627
2628/*
2629  Only do this if pthread_kill indicated that the pthread isn't
2630  listening to signals anymore, as can happen as soon as pthread_exit()
2631  is called on Darwin.  The thread could still call out to lisp as it
2632  is exiting, so we need another way to suspend it in this case.
2633*/
2634Boolean
2635mach_suspend_tcr(TCR *tcr)
2636{
2637  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2638  ExceptionInformation *pseudosigcontext;
2639  Boolean result = false;
2640 
2641  result = suspend_mach_thread(mach_thread);
2642  if (result) {
2643    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2644    pseudosigcontext->uc_onstack = 0;
2645    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2646    tcr->suspend_context = pseudosigcontext;
2647  }
2648  return result;
2649}
2650
2651void
2652mach_resume_tcr(TCR *tcr)
2653{
2654  ExceptionInformation *xp;
2655  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2656 
2657  xp = tcr->suspend_context;
2658#ifdef DEBUG_MACH_EXCEPTIONS
2659  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2660          tcr, tcr->pending_exception_context);
2661#endif
2662  tcr->suspend_context = NULL;
2663  restore_mach_thread_state(mach_thread, xp);
2664#ifdef DEBUG_MACH_EXCEPTIONS
2665  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2666          tcr, tcr->pending_exception_context);
2667#endif
2668  thread_resume(mach_thread);
2669}
2670
2671void
2672fatal_mach_error(char *format, ...)
2673{
2674  va_list args;
2675  char s[512];
2676 
2677
2678  va_start(args, format);
2679  vsnprintf(s, sizeof(s),format, args);
2680  va_end(args);
2681
2682  Fatal("Mach error", s);
2683}
2684
2685void
2686pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2687{
2688  interrupt_handler(signum, NULL, context);
2689}
2690
2691int
2692mach_raise_thread_interrupt(TCR *target)
2693{
2694  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2695  kern_return_t kret;
2696  Boolean result = false;
2697  TCR *current = get_tcr(false);
2698  thread_basic_info_data_t info; 
2699  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2700
2701  LOCK(lisp_global(TCR_AREA_LOCK), current);
2702
2703  if (suspend_mach_thread(mach_thread)) {
2704    if (thread_info(mach_thread,
2705                    THREAD_BASIC_INFO,
2706                    (thread_info_t)&info,
2707                    &info_count) == KERN_SUCCESS) {
2708      if (info.suspend_count == 1) {
2709        if ((target->valence == TCR_STATE_LISP) &&
2710            (!target->unwinding) &&
2711            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2712          kret = setup_signal_frame(mach_thread,
2713                                    (void *)pseudo_interrupt_handler,
2714                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2715                                    0,
2716                                    target);
2717          if (kret == KERN_SUCCESS) {
2718            result = true;
2719          }
2720        }
2721      }
2722    }
2723    if (! result) {
2724      target->interrupt_pending = 1 << fixnumshift;
2725    }
2726    thread_resume(mach_thread);
2727   
2728  }
2729  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2730  return 0;
2731}
2732
2733#endif