source: branches/arm/lisp-kernel/arm-exceptions.c @ 14074

Last change: r14074, checked in by gb

handle_uuo(): pass more kinds of UUOs to handle_error().
Make sure that uuo_format_ternary is #defined.

/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"


#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif
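
/* Illustration (not part of the kernel): how a caller might bracket a
   region of foreign floating-point code with the helpers above.  This
   is a hedged sketch; "some_foreign_float_code" is hypothetical. */
#if 0
static void
foreign_float_call_example(void)
{
  disable_fp_exceptions();    /* foreign code may expect masked FP traps */
  some_foreign_float_code();  /* hypothetical foreign call */
  enable_fp_exceptions();     /* restore "precise" trapping for lisp */
}
#endif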

/* Handle exceptions. */

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    prev_instr = program_counter[-3];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[-4];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
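
/* Worked example (illustrative only): an ARM data-processing immediate
   is an 8-bit value rotated right by twice the 4-bit rotate field,
   which is what ror(prev_instr&0xff,(prev_instr&0xf00)>>7) computes
   above.  The sketch below assumes ror() rotates a 32-bit natural. */
#if 0
static natural
decode_arm_immediate_example(opcode instr)
{
  natural imm8 = instr & 0xff;         /* bits 7:0 */
  natural rot = (instr & 0xf00) >> 7;  /* bits 11:8, doubled */

  /* e.g. imm8 = 0x01, rot = 2  =>  0x40000000 */
  return ror(imm8, rot);
}
#endif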


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
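
/* Illustration (hedged, not the compiler's exact output): the window
   emulated above typically looks something like

       str  rY, [allocptr, #cons.cdr]          ; IS_SET_ALLOCPTR_CDR_RD
       str  rX, [allocptr, #cons.car]          ; IS_SET_ALLOCPTR_CAR_RD
       mov  rZ, allocptr                       ; IS_SET_ALLOCPTR_RESULT_RD
       bic  allocptr, allocptr, #fulltag_cons  ; IS_CLR_ALLOCPTR_TAG

   The emulation performs the same stores through "c" and stops once
   the tag-clearing instruction has been accounted for. */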

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
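  /* Allocation decrements allocptr, so the bytes consumed since the
     last update are (last - current). */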
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
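
/* Hedged sketch (plain C structs, not lisp heap objects) of the
   chaining done by the loop above, assuming the usual CCL cons layout
   (cdr in the first word): each cell's cdr points at the previous
   cell, so the last cell written becomes the head of the list.  The
   struct and names here are illustrative only. */
#if 0
typedef struct example_cons {
  struct example_cons *cdr;   /* like deref(current,0) */
  long car;                   /* like deref(current,1) */
} example_cons;

static example_cons *
chain_example(example_cons *cells, int n, long initial)
{
  example_cons *prev = NULL, *current;
  int i;

  for (current = cells, i = 0; i < n; prev = current, current++, i++) {
    current->cdr = prev;      /* deref(current,0) = prev */
    current->car = initial;   /* deref(current,1) = initial */
  }
  return prev;                /* the last cell is the head */
}
#endif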

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  natural cur_allocptr, bytes_needed = 0;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/re-enable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* One of the stacks just overflowed: "reg" identifies which.
     Signal a lisp error so that the thread unwinds until all of its
     stacks are back under their overflow limits. */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}
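
/* Worked example (illustrative): assuming align_to_power_of_2(n,k)
   rounds n up to the next multiple of 2^k, a 64KB allocation quantum
   (log2_allocation_quantum == 16), oldlimit == 0x30001234 and
   need == 40000 give

     align_to_power_of_2(0x30001234, 16) == 0x30010000
     align_to_power_of_2(40000, 16)      == 0x10000
     newlimit                            == 0x30020000  */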

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context) {
    xp = tcr->pending_exception_context;
  } else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0) {
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler *protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
#ifdef LINUX
  /* Based on experiments with a small sample size; need to R TFM. */
  return ((xp->uc_mcontext.trap_no == 0xe) &&
          ((xp->uc_mcontext.error_code & 0xfffffff7) == 0x817));
#else
  /* Assumption: only the LINUX signal path reaches this; other ports
     should supply a real test.  Default to false so the fault is not
     misclassified as a write. */
  return false;
#endif
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */


Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Unprotect the guard page and signal an error_stack_overflow
     condition. */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}


Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
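  /* "delta" is a byte count; xpPC() is an opcode (32-bit word) pointer,
     so advance it by delta/4 words. */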
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
  (On ARM this is currently a stub that just returns an error.)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch (UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall through */
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, &bump);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;

  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
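    /* As of change 14074, all of these error-style UUO formats
       (including uuo_format_ternary) are passed along to
       handle_error(). */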
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info, natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info, arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural callback_ptr;
  area *a;
  natural fnreg = fn, codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }

  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Assumption: this entry point is vestigial in the ARM port (traps
     arrive via handle_uuo() instead), so there's nothing to do here
     beyond returning noErr. */
  return noErr;
}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}


Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK, &mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_last_lisp_frame = tcr->last_lisp_frame;
    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if (!handle_exception(signum, context, tcr, info, old_valence)) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, (natural)xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}

#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *, void *, void *);

  BytePtr target_sp = (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(true);
  if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);

  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea = ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.)
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.  The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
      } else {
        if (allocptr_tag == fulltag_misc) {
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
    }
    return;
  }
}
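
/* pc_luser_xp() is used both on behalf of a suspended thread (from
   normalize_tcr(), above) and on the current thread's own behalf just
   before handling an interrupt (from interrupt_handler(), below). */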

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp = 0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

#ifdef USE_SIGALTSTACK
void
altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0), interrupt_handler);
}
#endif


void
install_signal_handler(int signo, void *handler, Boolean system_p, Boolean on_altstack)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_SIGINFO
#ifdef USE_SIGALTSTACK
    | (on_altstack ? SA_ONSTACK : 0)
#endif
    ;

  sigaction(signo, &sa, NULL);
}


void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler), true, true);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler, true, false);
  signal(SIGPIPE, SIG_IGN);
}

#ifdef USE_SIGALTSTACK
void
setup_sigaltstack(area *a)
{
  stack_t stack;
#if 0
  stack.ss_sp = a->low;
  a->low += SIGSTKSZ*8;
#endif
  stack.ss_size = SIGSTKSZ*8;
  stack.ss_flags = 0;
  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (sigaltstack(&stack, NULL) != 0) {
    perror("sigaltstack");
    exit(-1);
  }
}
#endif

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
  sigset_t mask;

  sigemptyset(&mask);

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}

#ifdef USE_SIGALTSTACK
void
altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
}
#endif

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
}


void
unprotect_all_areas()
{
  protected_area_ptr p;

  for (p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  return true;
}
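
/* Worked example (illustrative): with an old tlb_limit of 4096 and a
   trapped index of 5000, align_to_power_of_2(5001, 12) rounds up to
   the next 4096-byte boundary, so new_limit == 8192 and the 4096 new
   bytes are filled with no_thread_local_binding_marker. */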
1831
1832
1833
1834void
1835exception_init()
1836{
1837  install_pmcl_exception_handlers();
1838}
1839
1840
1841
1842
1843
1844#ifdef DARWIN
1845
1846
1847#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1848#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1849
1850
1851
1852#define LISP_EXCEPTIONS_HANDLED_MASK \
1853 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1854
1855/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1856#define NUM_LISP_EXCEPTIONS_HANDLED 4
1857
1858typedef struct {
1859  int foreign_exception_port_count;
1860  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1861  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1862  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1863  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1864} MACH_foreign_exception_state;
1865
1866
1867
1868
/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resume listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts,
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts, and
    c) we have to create a sigcontext ourselves when calling out
    to the user thread (we aren't really generating a signal, just
    leveraging existing signal-handling code),

  we create a Linux sigcontext struct.

  Simple?  Hopefully from the outside it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)

*/
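
/* A minimal sketch (an illustration, not the actual implementation)
   of the contention described above, treating mach_exception_lock as
   a pthread mutex purely for the sake of the sketch: */
#if 0
  /* Mach server thread, per kernel exception message: */
  pthread_mutex_lock(&mach_exception_lock);
  /* copy thread state between the kernel and the faulting thread;
     build the pseudo-signal frame */
  pthread_mutex_unlock(&mach_exception_lock);

  /* GC: hold the same lock for most of a collection, deferring
     exception processing until the GC can no longer interfere. */
  pthread_mutex_lock(&mach_exception_lock);
  /* ... collect ... */
  pthread_mutex_unlock(&mach_exception_lock);
#endif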


#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          ARM_VFP_STATE,
                          (thread_state_t)&(mc->__fs),
                          ARM_VFP_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->__ss),
                          MACHINE_THREAD_STATE_COUNT);
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (tcr->pending_exception_context points to it),
   then use it to set the user thread's state.  When this
   function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}

ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  _STRUCT_MCONTEXT *mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;

  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  /* Reserve separate space for the mcontext below the ucontext;
     without this decrement, the ucontext and the mcontext would
     overlap, and the register state copied below would clobber
     the ucontext's fields. */
  stackp -= sizeof(*mc);
  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_VFP_STATE_COUNT;
  thread_get_state(thread,
                   ARM_VFP_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
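
/* For illustration, the frame built above on the user thread's
   (downward-growing) stack; "backlink" is the original __sp:

     backlink ->      +------------------------------------+
                      | ucontext ("pseudosigcontext")      |
     mc,              +------------------------------------+
     *new_stack_top ->| mcontext: __ss (GPRs), __fs (VFP), |
                      |           __es (exception state)   |
                      +------------------------------------+
*/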

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a Linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     We've now created a sigcontext on the thread's stack.  Set things
     up so that we call the handler (with appropriate args) when the
     thread's resumed.
  */

  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  ts.__r[0] = signum;
  ts.__r[1] = (natural)pseudosigcontext;
  ts.__r[2] = (natural)tcr;
  ts.__r[3] = (natural)old_valence;
  ts.__lr = (natural)pseudo_sigreturn;


  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
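
/* For illustration: given the register state forged above, the resumed
   thread behaves roughly as if it had been entered like this (a sketch
   only; control actually arrives via the forged pc, and the forged lr
   makes the handler "return" into pseudo_sigreturn): */
#if 0
  pseudo_signal_handler(signum, pseudosigcontext, tcr, old_valence);
  /* "Returning" reaches the distinguished pseudo_sigreturn UUO; the
     exception thread fields that UUO and calls do_pseudo_sigreturn(),
     restoring the state saved in pseudosigcontext. */
#endif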


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence, 0);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
  /* Likely hopeless. */
  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.  In this file, the
  hand-rolled openmcl_exc_server() below plays exc_server()'s role.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      ((code1 = code_vector[1]) == (int)pseudo_sigreturn)) {
    kret = do_pseudo_sigreturn(thread, tcr);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;                  /* any non-KERN_SUCCESS value: decline,
                                   and let the exception propagate */
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      kret = 17;                /* decline */
    }
  }

  return kret;
}



typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
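
/* This layout is assumed to mirror the MIG-generated request message
   for exception_raise (msgh_id 2401), roughly what a generated
   __Request__exception_raise_t looks like; openmcl_exc_server() below
   decodes it by hand rather than relying on MIG stubs. */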


boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */

  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}



mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.

*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}
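
/* A sketch of the port allocation that setup_mach_exception_handling()
   assumes has already happened (elsewhere, around thread-creation
   time): allocate a receive right whose name is the TCR's address.
   mach_port_allocate_name() is the Mach call that lets the caller
   choose the name; this block is an illustration, not code this
   file runs. */
#if 0
  kret = mach_port_allocate_name(mach_task_self(),
                                 MACH_PORT_RIGHT_RECEIVE,
                                 TCR_TO_EXCEPTION_PORT(tcr));
  MACH_CHECK_ERROR("allocating named exception port", kret);
#endif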

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}


Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}
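
/* A sketch of the caller logic anticipated by the comment above
   mach_suspend_tcr() (it lives in the thread manager, not here):
   fall back to Mach-level suspension only when pthread_kill() reports
   that the thread is no longer signalable.  The signal name and the
   TCR field used here are assumptions, for illustration only. */
#if 0
  if (pthread_kill((pthread_t)(natural)tcr->osid, thread_suspend_signal) == ESRCH) {
    mach_suspend_tcr(tcr);
  }
#endif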

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];


  va_start(args, format);
  vsnprintf(s, sizeof(s), format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      /* Couldn't deliver the interrupt now; leave it pending
         (as the fixnum 1) for the target to notice later. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif