source: trunk/source/lisp-kernel/arm-exceptions.c @ 14807

Last change on this file since 14807 was 14807, checked in by gb, 8 years ago

Define and export the functions ALLOW-HEAP-ALLOCATION and
HEAP-ALLOCATION-ALLOWED-P and the condition type ALLOCATION-DISABLED.

(ALLOW-HEAP-ALLOCATION arg): when ARG is NIL, causes any subsequent
attempt to heap-allocate lisp memory to signal (as if by CERROR)
an ALLOCATION-DISABLED condition.  (Allocation is re-enabled globally
at the point where the error is signaled.)  Continuing from the CERROR
restarts the allocation attempt.

This is intended to help verify that code that's not expected to
cons doesn't do so.

(This is only implemented on the ARM at the moment, but the intent
is that it be supported on all platforms.)

Note that calling (ALLOW-HEAP-ALLOCATION NIL) in the REPL CERRORs
immediately, since the REPL will cons to create the new value of CL:/.
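
For illustration, a minimal sketch (not part of this changeset; the
helper name is hypothetical) of using the new functions to verify
that a thunk doesn't cons:

  (defun call-assuming-no-consing (thunk)
    (unwind-protect
        (progn (allow-heap-allocation nil)
               (funcall thunk))
      (allow-heap-allocation t)))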

File size: 73.2 KB

/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#ifndef ANDROID
#include <fpu_control.h>
#include <linux/prctl.h>
#endif
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "threads.h"

#ifdef ANDROID
#define pthread_sigmask(how,in,out) rt_sigprocmask(how,in,out,8)
#endif

#ifdef LINUX

void
enable_fp_exceptions()
{
}

void
disable_fp_exceptions()
{
}
#endif

/* Handle exceptions. */

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

Boolean allocation_enabled = true;

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
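/*
  For illustration only (a sketch, not a verbatim disassembly): a
  typical allocation sequence ends with something like

      sub   allocptr, allocptr, #<size>   ; decrement allocptr by the object's size
      ldr   allocbase, [rcontext, #tcr.allocbase]
      cmp   allocptr, allocbase
      uuo_alloc_trap                      ; taken if the segment is exhausted

  so the decrementing instruction normally sits 3 words before the
  trap (4, if a branch-around was emitted), which is what "delta"
  below encodes.
*/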
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;
  int delta = -3;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    if (IS_BRANCH_AROUND_ALLOC_TRAP(program_counter[-1])) {
      delta = -4;
    }
    prev_instr = program_counter[delta];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[delta-1];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
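/*
  A sketch of the kind of sequence being emulated here (register
  choices and encodings are hypothetical):

      str   rx, [allocptr, #<cdr-offset>]    ; IS_SET_ALLOCPTR_CDR_RD
      str   ry, [allocptr, #<car-offset>]    ; IS_SET_ALLOCPTR_CAR_RD
      mov   rz, allocptr                     ; IS_SET_ALLOCPTR_RESULT_RD
      bic   allocptr, allocptr, #fulltagmask ; IS_CLR_ALLOCPTR_TAG

  The loop below consumes one such instruction per iteration and
  stops after emulating the tag-clearing instruction.
*/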
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
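/* This differs from the cons case above only in that the store being
   emulated initializes the uvector's header word (via header_of)
   rather than car/cdr slots, and any unrecognized instruction is
   treated as a bug rather than as a result-register assignment. */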
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/
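/* The loop below initializes the new cells in ascending address
   order: each cell's first word (deref(current,0) -- the cdr,
   assuming the kernel's usual cons layout) is chained to the
   previously initialized cell, and the second word (the car) gets
   the initial element, so the highest-addressed cell becomes the
   head of the returned list. */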

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current += dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr) -= fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  natural cur_allocptr, bytes_needed = 0;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  if (!allocation_enabled) {
    /* Back up before the alloc_trap, then let pc_luser_xp() back
       up some more. */
    xpPC(xp) -= 1;
    pc_luser_xp(xp,tcr, NULL);
    allocation_enabled = true;
    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
    handle_error(xp, error_allocation_disabled,0,NULL);
    return true;
  }

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
    switch(arg) {
    case 0: /* disable if allocation enabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        TCR *other_tcr;
        ExceptionInformation *other_context;
        suspend_other_threads(true);
        normalize_tcr(xp,tcr,false);
        for (other_tcr=tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
          other_context = other_tcr->pending_exception_context;
          if (other_context == NULL) {
            other_context = other_tcr->suspend_context;
          }
          normalize_tcr(other_context, other_tcr, true);
        }
        allocation_enabled = false;
        xpGPR(xp, arg_z) = t_value;
        resume_other_threads(true);
      }
      break;

    case 1:                     /* enable if disabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (!allocation_enabled) {
        allocation_enabled = true;
        xpGPR(xp, arg_z) = t_value;
      }
      break;

    default:
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        xpGPR(xp, arg_z) = t_value;
      }
      break;
    }
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* A stack just overflowed.  Signal an error_stack_overflow condition
     so that things can unwind until all stacks are well under their
     overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area a by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/
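/* A worked example (numbers assumed for illustration): with a 64KB
   allocation quantum (log2_allocation_quantum == 16), oldlimit ==
   0x12345 and need == 40KB (0xa000) give
       newlimit = 0x20000 + 0x10000 = 0x30000
   i.e. both the current active pointer and the request are rounded
   up to the quantum before the new segment limit is computed. */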

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context) {
    xp = tcr->pending_exception_context;
  } else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */
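/* GC_INHIBIT_COUNT is positive while GC is inhibited; storing its
   negation (as gc_like_from_xp does below) appears to mark the fact
   that a GC was requested during the inhibited region, so the code
   that re-enables GC can notice the deferral and run the postponed
   collection. */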

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns #bytes freed by invoking GC */
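/* "Freed" is computed as (oldfree-newfree)+(newend-oldend): the
   space recovered within the dynamic area, adjusted by any growth
   or shrinkage of the area itself during the collection. */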

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

protection_handler *
protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}

int ffcall_overflow_count = 0;

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;

  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}

Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}
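/* xpPC is a pointer to 4-byte opcodes, so a delta expressed in bytes
   is scaled down by 4 before being added. */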
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}

/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/
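/* (Major opcodes 59/63 are PowerPC encodings; the comment above was
   evidently inherited from the PPC port.  On ARM, the function below
   is currently a stub that reports the exception as unhandled.) */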

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;

  return err;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info, natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info, arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural callback_ptr;
  area *a;
  natural fnreg = Rfn, codevector, offset;
  pc where = xpPC(xp);
  int delta;
  TCR *tcr = get_tcr(true);

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* The body of this function is evidently unfinished in this
     revision; report the trap as unhandled rather than falling off
     the end of a value-returning function. */
  return -1;
}

void
non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
#ifndef ANDROID
  sigset_t mask;
  sigfillset(&mask);
#else
  int mask [] = {0,0};
#endif

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}

void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler.)
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}

void
sigill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;

  if (IS_UUO(instr)) {
    natural psr = xpPSR(xp);
    Boolean opcode_matched_condition = false,
      flip = ((instr & (1<<28)) != 0);

    switch (instr >> 29) {
    case 0:
      opcode_matched_condition = ((psr & PSR_Z_MASK) != 0);
      break;
    case 1:
      opcode_matched_condition = ((psr & PSR_C_MASK) != 0);
      break;
    case 2:
      opcode_matched_condition = ((psr & PSR_N_MASK) != 0);
      break;
    case 3:
      opcode_matched_condition = ((psr & PSR_V_MASK) != 0);
      break;
    case 4:
      opcode_matched_condition = (((psr & PSR_C_MASK) != 0) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 5:
      opcode_matched_condition = (((psr & PSR_N_MASK) != 0) ==
                                  ((psr & PSR_V_MASK) != 0));
      break;
    case 6:
      opcode_matched_condition = ((((psr & PSR_N_MASK) != 0) ==
                                   ((psr & PSR_V_MASK) != 0)) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 7:
      opcode_matched_condition = true;
      flip = false;
      break;
    }
    if (flip) {
      opcode_matched_condition = !opcode_matched_condition;
    }
    if (!opcode_matched_condition) {
      adjust_exception_pc(xp,4);
      return;
    }
  }
  signal_handler(signum,info,xp, NULL, 0, 0);
}

#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);

  BytePtr target_sp = (BytePtr)xpGPR(xp,Rsp);

  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);

  if (signo == SIGBUS) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if (((BytePtr)truncate_to_power_of_2(addr,log2_page_size)) == a->softlimit) {
      if (mmap(a->softlimit,
               page_size,
               PROT_READ|PROT_WRITE|PROT_EXEC,
               MAP_PRIVATE|MAP_ANON|MAP_FIXED,
               -1,
               0) == a->softlimit) {
        return;
      }
    }
  } else if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/
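/* Case (e) is representative of the approach: if a thread is stopped
   inside one of the write-barrier subprims after the store but before
   (or at) the memoization, the code below performs the store and/or
   sets the relevant refbits itself, then returns through LR exactly
   as the subprim would have. */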

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test,
  swap_lr_lisp_frame_temp0,
  swap_lr_lisp_frame_arg_z;

extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea = ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) -= disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        Boolean ok = false;
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr - disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR + disp;
        instr = program_counter[-1];
        if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
          instr = program_counter[-2];
          if (IS_COMPARE_ALLOCPTR_TO_RM(instr)){
            xpGPR(xp,RM_field(instr)) = VOID_ALLOCPTR;
            ok = true;
          }
        }
        if (ok) {
          /* Clear the carry bit, so that the trap will be taken. */
          xpPSR(xp) &= ~PSR_C_MASK;
        } else {
          Bug(NULL, "unexpected instruction preceding alloc trap.");
        }
      }
    } else {
      /* we may be before or after the alloc trap.  If before, set
         allocptr to VOID_ALLOCPTR and back up to the start of the
         instruction sequence; if after, finish the allocation. */
      Boolean before_alloc_trap = false;

      if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_COMPARE_ALLOCPTR_TO_RM(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_LOAD_RD_FROM_ALLOCBASE(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_SUB_HI_FROM_ALLOCPTR(instr)) {
        before_alloc_trap = true;
        --program_counter;
      }
      if (before_alloc_trap) {
        xpPC(xp) = program_counter;
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      } else {
        /* If we're already past the alloc_trap, finish allocating
           the object. */
        if (allocptr_tag == fulltag_cons) {
          finish_allocating_cons(xp);
        } else {
          if (allocptr_tag == fulltag_misc) {
            finish_allocating_uvector(xp);
          } else {
            Bug(xp, "what's being allocated here ?");
          }
        }
        /* Whatever we finished allocating, reset allocptr/allocbase to
           VOID_ALLOCPTR */
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      }
      return;
    }
    return;
  }
  {
    lisp_frame *swap_frame = NULL;
    pc base = &swap_lr_lisp_frame_temp0;

    if ((program_counter > base)             /* sic */
        && (program_counter < (base+3))) {
      swap_frame = (lisp_frame *)xpGPR(xp,temp0);
    } else {
      base = &swap_lr_lisp_frame_arg_z;
      if ((program_counter > base) && (program_counter < (base+3))) {
        swap_frame = (lisp_frame *)xpGPR(xp,arg_z);
      }
    }
    if (swap_frame) {
      if (program_counter == (base+1)) {
        swap_frame->savelr = xpGPR(xp,Rlr);
      }
      xpGPR(xp,Rlr) = xpGPR(xp,imm0);
      xpPC(xp) = base+3;
      return;
    }
  }
}
1803
1804void
1805interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1806{
1807  TCR *tcr = get_interrupt_tcr(false);
1808  if (tcr) {
1809    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
1810      tcr->interrupt_pending = 1 << fixnumshift;
1811    } else {
1812      LispObj cmain = nrs_CMAIN.vcell;
1813
1814      if ((fulltag_of(cmain) == fulltag_misc) &&
1815          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1816        /*
1817           This thread can (allegedly) take an interrupt now.
1818           It's tricky to do that if we're executing
1819           foreign code (especially Linuxthreads code, much
1820           of which isn't reentrant.)
1821           If we're unwinding the stack, we also want to defer
1822           the interrupt.
1823        */
1824        if ((tcr->valence != TCR_STATE_LISP) ||
1825            (tcr->unwinding != 0)) {
1826          tcr->interrupt_pending = 1 << fixnumshift;
1827        } else {
1828          xframe_list xframe_link;
1829          int old_valence;
1830          signed_natural disp=0;
1831          natural old_last_lisp_frame = tcr->last_lisp_frame;
1832         
1833          tcr->last_lisp_frame = xpGPR(context,Rsp);
1834          pc_luser_xp(context, tcr, &disp);
1835          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1836          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1837          handle_exception(signum, context, tcr, info, old_valence);
1838          if (disp) {
1839            xpGPR(context,allocptr) -= disp;
1840          }
1841          unlock_exception_lock_in_handler(tcr);
1842          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
1843        }
1844      }
1845    }
1846  }
1847#ifdef DARWIN
1848    DarwinSigReturn(context);
1849#endif
1850}
1851
1852#ifdef USE_SIGALTSTACK
1853void
1854altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1855{
1856  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
1857}
1858#endif
1859
1860
void
install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
{
  struct sigaction sa;

  sigfillset(&sa.sa_mask);

  sa.sa_sigaction = (void *)handler;
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_NODEFER
    | SA_SIGINFO
#ifdef USE_SIGALTSTACK
    | (on_altstack ? SA_ONSTACK : 0)
#endif
    ;

  sigaction(signo, &sa, NULL);
}
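
/* An illustrative sketch (kept under "#if 0" so it isn't compiled):
   a self-contained use of the same sigaction flags that
   install_signal_handler() sets up above.  SA_SIGINFO selects the
   three-argument handler signature (which is why the runtime passes
   handlers around as void *), SA_NODEFER leaves the signal unblocked
   during its own handler, and the filled sa_mask blocks everything
   else while the handler runs.  demo_handler and demo_install are
   hypothetical names, not part of the runtime. */
#if 0
#include <signal.h>
#include <stdio.h>

static void
demo_handler(int signo, siginfo_t *info, void *uap)
{
  /* With SA_SIGINFO, info->si_addr holds the faulting address for
     SIGSEGV/SIGBUS; uap points at the ucontext_t that this file
     treats as an ExceptionInformation.  (fprintf isn't
     async-signal-safe; it's fine for a sketch.) */
  fprintf(stderr, "signal %d at address %p\n", signo, info->si_addr);
}

static void
demo_install(void)
{
  struct sigaction sa;

  sigfillset(&sa.sa_mask);
  sa.sa_sigaction = demo_handler;
  sa.sa_flags = SA_NODEFER | SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
}
#endif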


void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)sigill_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler), true, true);
    install_signal_handler(SIGBUS, (void *)ALTSTACK(signal_handler), true, true);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif
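
/* Illustrative sketch, not compiled: passing NULL as the new stack
   makes sigaltstack() report the stack that setup_sigaltstack()
   installed, which is an easy way to verify that the mmap'd region
   took effect.  demo_show_altstack is a hypothetical name. */
#if 0
static void
demo_show_altstack(void)
{
  stack_t current;

  if (sigaltstack(NULL, &current) == 0) {
    fprintf(dbgout, "altstack at %p, size %lu, %s\n",
            current.ss_sp,
            (unsigned long)current.ss_size,
            (current.ss_flags & SS_DISABLE) ? "disabled" : "enabled");
  }
}
#endif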

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
#ifndef ANDROID
  sigset_t mask;

  sigemptyset(&mask);
#else
  int mask[] = {0,0};
#endif

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}
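
/* align_to_power_of_2() isn't defined in this file.  From its use in
   extend_tcr_tlb() it plausibly rounds its first argument up to a
   multiple of 2 raised to the second argument (here 2^12, the page
   size), so that the new tlb limit lands on a page boundary:

     round_up_log2(1, 12)    => 4096
     round_up_log2(4096, 12) => 4096
     round_up_log2(4097, 12) => 8192

   A minimal sketch under that assumption; round_up_log2 is a
   hypothetical name, shown only to make the arithmetic concrete. */
#if 0
static natural
round_up_log2(natural n, unsigned log2_align)
{
  natural mask = (((natural)1) << log2_align) - 1;

  return (n + mask) & ~mask;
}
#endif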


void
exception_init()
{
  install_pmcl_exception_handlers();
}


#ifdef DARWIN

#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))

#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

typedef struct {
  int foreign_exception_port_count;
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;

/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resumes listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code,

  we create a Linux sigcontext struct.

  Simple?  Hopefully, from the outside, it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)

  A condensed sketch of this round trip follows this comment.
*/
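
/* The round trip described above, condensed and expressed in terms
   of the functions defined later in this file.  Exposition only,
   not compiled code. */
#if 0
/* 1. The kernel sends an exception message; mach_msg_server() (in
      exception_handler_proc) hands it to openmcl_exc_server(), which
      calls catch_exception_raise() on the handler thread.           */
/* 2. catch_exception_raise() maps the Mach exception to a signal
      number and calls setup_signal_frame(), which builds a
      pseudo-sigcontext on the user thread's stack, points the
      thread's PC at pseudo_signal_handler, and sets LR to
      pseudo_sigreturn.                                              */
/* 3. The user thread resumes, runs the (pseudo-) signal handler,
      and "returns" by executing the pseudo_sigreturn UUO, which
      raises a fresh EXC_BAD_INSTRUCTION.                            */
/* 4. catch_exception_raise() recognizes PSEUDO_SIGRETURN_UUO and
      calls do_pseudo_sigreturn(), which restores the saved thread
      state; the thread continues where it originally faulted.       */
#endif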


#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (tcr->pending_exception_context points to it),
   then use that context to set the user thread's state.  When this
   function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  /* Pop the saved last_lisp_frame that setup_signal_frame() pushed. */
  tcr->last_lisp_frame = *((natural *)(tcr->last_lisp_frame));
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}

ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;

  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp -= sizeof(*mc);
  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);

  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);

  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
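
/* The frame built above, sketched with higher addresses at the top.
   This just restates the two "stackp -=" steps; setup_signal_frame()
   later pushes tcr->last_lisp_frame immediately below *new_stack_top.

     backlink (thread's original __sp) -> +--------------------------+
                                          | ExceptionInformation     |
                                          | (the pseudosigcontext)   |
                                          +--------------------------+
                                          | _STRUCT_MCONTEXT         |
                                          | (__ss, __fs, __es)       |
     *new_stack_top ------------------->  +--------------------------+
*/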

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp, *pstackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pstackp = (natural *)stackp;
  *--pstackp = tcr->last_lisp_frame;
  stackp = (natural)pstackp;
  tcr->last_lisp_frame = stackp;
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  pseudosigcontext->uc_mcsize = ARM_MCONTEXT_SIZE;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  /*
     It seems like we've created a sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;
  ts.__cpsr = xpPSR(pseudosigcontext);

  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  /* On the ARM, code_vector[1] contains the undefined instruction
     in this case, not its address.  */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
    kret = do_pseudo_sigreturn(thread, tcr);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif
    } else {
      kret = 17;
    }
  }

  return kret;
}


typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;


boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */

  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
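
/* The message-id convention assumed above, spelled out.  Request ids
   2401-2403 correspond to the three exception-message flavors
   (exception_raise, exception_raise_state,
   exception_raise_state_identity), and a MIG reply conventionally
   uses the request id plus 100 -- hence out->msgh_id = in->msgh_id
   + 100 above.  The enum is illustrative, not part of the runtime. */
#if 0
enum {
  MSG_ID_EXCEPTION_RAISE                = 2401,
  MSG_ID_EXCEPTION_RAISE_STATE          = 2402,
  MSG_ID_EXCEPTION_RAISE_STATE_IDENTITY = 2403,
  MSG_ID_MIG_REPLY_OFFSET               = 100
};
#endif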

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}


mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Setup a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.
*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  /* The function is declared kern_return_t but previously fell off
     the end; return success explicitly. */
  return KERN_SUCCESS;
}

/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}


Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];

  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif