source: release/1.7/source/lisp-kernel/arm-exceptions.c @ 15267


/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#ifndef ANDROID
#include <fpu_control.h>
#include <linux/prctl.h>
#endif
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "threads.h"

#ifdef ANDROID
#define pthread_sigmask(how,in,out) rt_sigprocmask(how,in,out,8)
#endif

#ifdef LINUX

void
enable_fp_exceptions()
{
}

void
disable_fp_exceptions()
{
}
#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

Boolean allocation_enabled = true;

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;
  int delta = -3;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    if (IS_BRANCH_AROUND_ALLOC_TRAP(program_counter[-1])) {
      delta = -4;
    }
    prev_instr = program_counter[delta];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[delta-1];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
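
/* Illustration, not code from this file: the inline allocation
   sequence that allocptr_displacement() decodes looks schematically
   like

       sub   allocptr, allocptr, <amount>    ; decrement allocptr
       ldr   rX, [rcontext, #tcr.allocbase]  ; load the allocation limit
       cmp   allocptr, rX
       bhi   L1                              ; optional branch around trap
       uuo_alloc_trap
   L1:

   With the PC at the trap, program_counter[-3] (or [-4] when the
   branch-around is present) is the sub, and its operand field encodes
   the amount.  The mnemonics and field names above are a sketch of the
   pattern the IS_* predicates match, not exact assembler output. */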

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
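
/* Illustration, not code from this file: the window being emulated
   looks schematically like

       str   rY, [allocptr, #cons.cdr]         ; initialize cdr
       str   rZ, [allocptr, #cons.car]         ; initialize car
       mov   rRESULT, allocptr                 ; result := tagged cons
       bic   allocptr, allocptr, #fulltagmask  ; clear tag; loop stops here

   Each emulated store goes through the untagged pointer c, and the
   saved PC is left just past the tag-clearing instruction, so resuming
   the thread can't redo any part of the allocation. */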

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}
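
/* The corresponding uvector window (again a sketch, not code from
   this file) is just a header store, an optional result assignment,
   and the tag-clearing instruction:

       str   rHEADER, [allocptr, #misc_header_offset]
       mov   rRESULT, allocptr
       bic   allocptr, allocptr, #fulltagmask

   Anything else in this window is unexpected, hence the Bug() call. */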

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
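
/* Worked example with assumed values: allocation moves allocptr
   downward, so if tcr->last_allocptr was 0x11000 and cur_allocptr is
   now 0x10f80, then last-current = 0x80 and 128 bytes are added to
   tcr->bytes_allocated. */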

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
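
/* Illustration with assumed values (the chaining above stores prev in
   deref(current,0), so field 0 must be the cdr for the result to be a
   proper list): allocating 3 conses with initial element X builds the
   chain back-to-front:

     1st dnode: cdr = nil,       car = X   ; the last cell of the list
     2nd dnode: cdr = 1st dnode, car = X
     3rd dnode: cdr = 2nd dnode, car = X   ; returned in arg_z

   so arg_z ends up holding the 3-element list (X X X). */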

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  if (!allocation_enabled) {
    /* Back up before the alloc_trap, then let pc_luser_xp() back
       up some more. */
    xpPC(xp)-=1;
    pc_luser_xp(xp,tcr, NULL);
    allocation_enabled = true;
    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
    handle_error(xp, error_allocation_disabled,0,NULL);
    return true;
  }

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
    switch(arg) {
    case 0: /* disable if allocation enabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        TCR *other_tcr;
        ExceptionInformation *other_context;
        suspend_other_threads(true);
        normalize_tcr(xp,tcr,false);
        for (other_tcr=tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
          other_context = other_tcr->pending_exception_context;
          if (other_context == NULL) {
            other_context = other_tcr->suspend_context;
          }
          normalize_tcr(other_context, other_tcr, true);
        }
        allocation_enabled = false;
        xpGPR(xp, arg_z) = t_value;
        resume_other_threads(true);
      }
      break;

    case 1:                     /* enable if disabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (!allocation_enabled) {
        allocation_enabled = true;
        xpGPR(xp, arg_z) = t_value;
      }
      break;

    default:
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        xpGPR(xp, arg_z) = t_value;
      }
      break;
    }
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just overflowed its soft limit.
     Signal an error so that lisp can unwind until all stacks are
     well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area a by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
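
/* Worked example with assumed values: if a->active is 0x10000100,
   need is 0x300, and the allocation quantum is 16KB (2^14), then
   newlimit = align_to_power_of_2(0x10000100,14)
            + align_to_power_of_2(0x300,14)
            = 0x10004000 + 0x4000 = 0x10008000.
   align_to_power_of_2(n,p) is assumed here to round n up to the next
   multiple of 1<<p, i.e. (n + (1<<p) - 1) & ~((1<<p) - 1). */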

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

protection_handler
*protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}

int ffcall_overflow_count = 0;

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}

Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
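
/* Note: delta is a byte count, but xpPC() designates an opcode (32-bit
   word) pointer, so the shift converts bytes to instructions;
   adjust_exception_pc(xp,4) advances the PC by exactly one
   instruction. */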

/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
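
/* Illustration: code_vector is a tagged (fulltag_misc) pointer and
   size is the header's element count times 4 bytes per instruction,
   so the test asks whether the raw PC lies strictly inside the code
   vector's data.  A zero return means "where isn't in this function's
   code", in which case the caller below tries another register. */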

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = Rfn,  codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

}

void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
#ifndef ANDROID
  sigset_t mask;
  sigfillset(&mask);
#else
  int mask [] = {0,0};
#endif

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}

void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if (!handle_exception(signum, context, tcr, info, old_valence)) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}

void
sigill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;

  if (IS_UUO(instr)) {
    natural psr = xpPSR(xp);
    Boolean opcode_matched_condition = false,
      flip = ((instr & (1<<28)) != 0);

    switch (instr >> 29) {
    case 0:
      opcode_matched_condition = ((psr & PSR_Z_MASK) != 0);
      break;
    case 1:
      opcode_matched_condition = ((psr & PSR_C_MASK) != 0);
      break;
    case 2:
      opcode_matched_condition = ((psr & PSR_N_MASK) != 0);
      break;
    case 3:
      opcode_matched_condition = ((psr & PSR_V_MASK) != 0);
      break;
    case 4:
      opcode_matched_condition = (((psr & PSR_C_MASK) != 0) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 5:
      opcode_matched_condition = (((psr & PSR_N_MASK) != 0) ==
                                  ((psr & PSR_V_MASK) != 0));
      break;
    case 6:
      opcode_matched_condition = ((((psr & PSR_N_MASK) != 0) ==
                                   ((psr & PSR_V_MASK) != 0)) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 7:
      opcode_matched_condition = true;
      flip = false;
      break;
    }
    if (flip) {
      opcode_matched_condition = !opcode_matched_condition;
    }
    if (!opcode_matched_condition) {
      adjust_exception_pc(xp,4);
      return;
    }
  }
  signal_handler(signum,info,xp, NULL, 0, 0);
}

#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);

  BytePtr target_sp = (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);

  if (signo == SIGBUS) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;
    if (((BytePtr)truncate_to_power_of_2(addr,log2_page_size)) == a->softlimit) {
      if (mmap(a->softlimit,
               page_size,
               PROT_READ|PROT_WRITE|PROT_EXEC,
               MAP_PRIVATE|MAP_ANON|MAP_FIXED,
               -1,
               0) == a->softlimit) {
        return;
      }
    }
  } else if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/
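
/* For example: if a thread is interrupted with its PC inside the
   [egc_write_barrier_start, egc_write_barrier_end) window,
   pc_luser_xp() below finishes the interrupted store and/or
   memoization by hand and then sets the PC to the return address in
   lr, so no caller ever observes a half-done write barrier. */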

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test,
  swap_lr_lisp_frame_temp0,
  swap_lr_lisp_frame_arg_z;

extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea = ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) -= disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        Boolean ok = false;
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr - disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR + disp;
        instr = program_counter[-1];
        if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
          instr = program_counter[-2];
          if (IS_COMPARE_ALLOCPTR_TO_RM(instr)){
            xpGPR(xp,RM_field(instr)) = VOID_ALLOCPTR;
            ok = true;
          }
        }
        if (ok) {
          /* Clear the carry bit, so that the trap will be taken. */
          xpPSR(xp) &= ~PSR_C_MASK;
        } else {
          Bug(NULL, "unexpected instruction preceding alloc trap.");
        }
      }
    } else {
      /* we may be before or after the alloc trap.  If before, set
         allocptr to VOID_ALLOCPTR and back up to the start of the
         instruction sequence; if after, finish the allocation. */
      Boolean before_alloc_trap = false;

      if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_COMPARE_ALLOCPTR_TO_RM(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_LOAD_RD_FROM_ALLOCBASE(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_SUB_HI_FROM_ALLOCPTR(instr)) {
        before_alloc_trap = true;
        --program_counter;
      }
      if (before_alloc_trap) {
        xpPC(xp) = program_counter;
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      } else {
        /* If we're already past the alloc_trap, finish allocating
           the object. */
        if (allocptr_tag == fulltag_cons) {
          finish_allocating_cons(xp);
        } else {
          if (allocptr_tag == fulltag_misc) {
            finish_allocating_uvector(xp);
          } else {
            Bug(xp, "what's being allocated here ?");
          }
        }
        /* Whatever we finished allocating, reset allocptr/allocbase to
           VOID_ALLOCPTR */
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      }
      return;
    }
    return;
  }
  {
    lisp_frame *swap_frame = NULL;
    pc base = &swap_lr_lisp_frame_temp0;

    if ((program_counter > base)             /* sic */
        && (program_counter < (base+3))) {
      swap_frame = (lisp_frame *)xpGPR(xp,temp0);
    } else {
      base = &swap_lr_lisp_frame_arg_z;
      if ((program_counter > base) && (program_counter < (base+3))) {
        swap_frame = (lisp_frame *)xpGPR(xp,arg_z);
      }
    }
    if (swap_frame) {
      if (program_counter == (base+1)) {
        swap_frame->savelr = xpGPR(xp,Rlr);
      }
      xpGPR(xp,Rlr) = xpGPR(xp,imm0);
      xpPC(xp) = base+3;
      return;
    }
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

#ifdef USE_SIGALTSTACK
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
}
#endif

void
install_signal_handler(int signo, void *handler, unsigned flags)
{
  struct sigaction sa;
  int err;

  sigfillset(&sa.sa_mask);
  sa.sa_sigaction = (void *)handler;
  sa.sa_flags = SA_SIGINFO;

#ifdef ANDROID
  sa.sa_flags |= SA_NODEFER;
#endif
#ifdef USE_SIGALTSTACK
  if (flags & ON_ALTSTACK)
    sa.sa_flags |= SA_ONSTACK;
#endif
  if (flags & RESTART_SYSCALLS)
    sa.sa_flags |= SA_RESTART;
  if (flags & RESERVE_FOR_LISP) {
    extern sigset_t user_signals_reserved;
    sigaddset(&user_signals_reserved, signo);
  }

  err = sigaction(signo, &sa, NULL);
  if (err) {
    perror("sigaction");
    exit(1);
  }
}


void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)sigill_handler, RESERVE_FOR_LISP);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler),
                           RESERVE_FOR_LISP|ON_ALTSTACK);
    install_signal_handler(SIGBUS, (void *)ALTSTACK(signal_handler),
                           RESERVE_FOR_LISP|ON_ALTSTACK);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, RESERVE_FOR_LISP);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,
                     MAP_ANON|MAP_PRIVATE,-1,0);
  if (stack.ss_sp == MAP_FAILED) {
    perror("mmap");
    exit(-1);
  }
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif

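/* Handle SIG_KILL_THREAD: mark the TCR as being in foreign code,
   reset its value and control stack areas, unblock all signals,
   and exit the thread. */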
void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
#ifndef ANDROID
  sigset_t mask;

  sigemptyset(&mask);
#else
  int mask[] = {0,0};
#endif

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler,
                         RESERVE_FOR_LISP);
}


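/* Unprotect every area on the AllProtectedAreas list, emptying the
   list in the process. */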
void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just trapped because a thread-local binding
  index is out of bounds (on the PPC, this was a
  "twlle limit_regno,idx_regno" trap).  Extend the tcr's tlb so that
  the index will be in bounds and the new limit will be on a page
  boundary, filling in the new page(s) with
  'no_thread_local_binding_marker'.  Update the tcr fields and the
  registers in the xp and return true if this all works, false
  otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
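/* For example (assuming 4K pages and 4-byte LispObjs, as on 32-bit
   ARM): if the old limit is 4096 bytes and the index register holds
   5000, new_limit becomes 8192 and the 1024 words of the newly
   allocated page are filled with no_thread_local_binding_marker. */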
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}


void
exception_init()
{
  install_pmcl_exception_handlers();
}


#ifdef DARWIN

#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))

#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

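/* A record of the exception ports that foreign code had established
   for the exceptions lisp handles, so that they can be restored by
   restore_foreign_exception_ports() when calling back out to
   foreign code. */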
typedef struct {
  int                    foreign_exception_port_count;
  exception_mask_t       masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t            ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t   behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;


/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resumes listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code.

  we create a Linux sigcontext struct.

  Simple?  Hopefully, from the outside, it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)
*/


#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}

void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (the TCR's pending_exception_context points
   to it), then use that context to set the user thread's state.
   When this function's caller returns, the Mach kernel will resume
   the user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  tcr->last_lisp_frame = *((natural *)(tcr->last_lisp_frame));
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}

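/* Capture the target thread's GPR, VFP, and exception state into a
   pseudo sigcontext allocated just below the thread's current stack
   pointer.  Return that context; if new_stack_top is non-NULL, store
   the new (lower) stack pointer there. */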
ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;

  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp -= sizeof(*mc);
  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);

  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);

  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a Linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp, *pstackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pstackp = (natural *)stackp;
  *--pstackp = tcr->last_lisp_frame;
  stackp = (natural)pstackp;
  tcr->last_lisp_frame = stackp;
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  pseudosigcontext->uc_mcsize = ARM_MCONTEXT_SIZE;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  /*
     We've now created a sigcontext on the thread's stack.  Set
     things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

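  /* Per the ARM calling convention, r0-r3 carry the handler's four
     arguments; lr is set to the pseudo_sigreturn UUO so that
     "returning" from the handler restores the saved context. */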
  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;
  ts.__cpsr = xpPSR(pseudosigcontext);

  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}

void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
}

int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from openmcl_exc_server(), our
  stand-in for the library function "exc_server()", when the
  thread's exception ports are set up.  (openmcl_exc_server() is
  called via mach_msg_server(), which is a function that waits for
  and dispatches on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  /* On the ARM, code_vector[1] contains the undefined instruction
     in this case, not its address.  */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
    kret = do_pseudo_sigreturn(thread, tcr);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif
    } else {
      kret = 17;
    }
  }

  return kret;
}


typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;

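/* A minimal, hand-rolled stand-in for the MIG-generated exc_server():
   accept only msgh_id 2401 (exception_raise, EXCEPTION_DEFAULT
   behavior), pass the request to catch_exception_raise(), and build
   the reply message by hand. */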
boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */

  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}

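/* Return the exception port set, creating it (and the dedicated
   exception-handling thread that services it) on first use. */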
mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.
*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}

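/* Suspend a thread via the Mach interface.  thread_abort_safely()
   backs the thread out of any interrupted system call so that its
   state can be safely inspected and modified; if that fails, resume
   the thread and try the whole thing again. */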
Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];

  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

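/* Asynchronously interrupt a lisp thread without using signals.
   Suspend the target; if it's suspended exactly once (i.e., not
   also stopped by the GC or another operation), is running lisp
   code, isn't unwinding, and has interrupts enabled, arrange for it
   to call pseudo_interrupt_handler() on resumption.  Otherwise just
   note that an interrupt is pending. */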
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif