source: branches/arm/lisp-kernel/arm-exceptions.c @ 13825

Last change on this file since 13825 was 13825, checked in by gb, 11 years ago

handle_sigfpe: return type Boolean, return false for now.
Missing break when handling kernel-service UUOs.

File size: 67.4 KB
Line 
1/*
2   Copyright (C) 2010 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include <ctype.h>
21#include <stdio.h>
22#include <stddef.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <stdio.h>
27#ifdef LINUX
28#include <strings.h>
29#include <sys/mman.h>
30#include <fpu_control.h>
31#include <linux/prctl.h>
32#endif
33
34#ifdef DARWIN
35#include <sys/mman.h>
36#ifndef SA_NODEFER
37#define SA_NODEFER 0
38#endif
39#include <sysexits.h>
40
41/* a distinguished UUO at a distinguished address */
42extern void pseudo_sigreturn(ExceptionInformation *);
43#endif
44
45
46#include "Threads.h"
47
48
49#ifdef LINUX
50/* Some relatively recent kernels support this interface.
51   If this prctl isn't supported, assume that we're always
52   running with excptions enabled and "precise".
53*/
54#ifndef PR_SET_FPEXC
55#define PR_SET_FPEXC 12
56#endif
57#ifndef PR_FP_EXC_DISABLED
58#define PR_FP_EXC_DISABLED 0
59#endif
60#ifndef PR_FP_EXC_PRECISE
61#define PR_FP_EXC_PRECISE 3
62#endif
63
64void
65enable_fp_exceptions()
66{
67  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
68}
69
70void
71disable_fp_exceptions()
72{
73  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
74}
75
76#endif
77
78/*
79  Handle exceptions.
80
81*/
82
83extern LispObj lisp_nil;
84
85extern natural lisp_heap_gc_threshold;
86extern Boolean grow_dynamic_area(natural);
87
88
89
90
91
92
93int
94page_size = 4096;
95
96int
97log2_page_size = 12;
98
99
100
101
102
103/*
104  If the PC is pointing to an allocation trap, the previous instruction
105  must have decremented allocptr.  Return the non-zero amount by which
106  allocptr was decremented.
107*/
108signed_natural
109allocptr_displacement(ExceptionInformation *xp)
110{
111  pc program_counter = xpPC(xp);
112  opcode instr = *program_counter, prev_instr;
113
114  if (IS_ALLOC_TRAP(instr)) {
115    /* The alloc trap must have been preceded by a cmp and a
116       load from tcr.allocbase. */
117    prev_instr = program_counter[-3];
118
119    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
120      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
121    }
122   
123    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
124      return -((signed_natural)(prev_instr & 0xff));
125    }
126
127    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
128      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);
129
130      instr = program_counter[-4];
131      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
132        return -((signed_natural)(disp | (instr & 0xff)));
133      }
134    }
135    Bug(xp, "Can't determine allocation displacement");
136  }
137  return 0;
138}
139
140
141/*
142  A cons cell's been successfully allocated, but the allocptr's
143  still tagged (as fulltag_cons, of course.)  Emulate any instructions
144  that might follow the allocation (stores to the car or cdr, an
145  assignment to the "result" gpr) that take place while the allocptr's
146  tag is non-zero, advancing over each such instruction.  When we're
147  done, the cons cell will be allocated and initialized, the result
148  register will point to it, the allocptr will be untagged, and
149  the PC will point past the instruction that clears the allocptr's
150  tag.
151*/
152void
153finish_allocating_cons(ExceptionInformation *xp)
154{
155  pc program_counter = xpPC(xp);
156  opcode instr;
157  LispObj cur_allocptr = xpGPR(xp, allocptr);
158  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
159  int target_reg;
160
161  while (1) {
162    instr = *program_counter++;
163
164    if (IS_CLR_ALLOCPTR_TAG(instr)) {
165      xpGPR(xp, allocptr) = untag(cur_allocptr);
166      xpPC(xp) = program_counter;
167      return;
168    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
169      c->car = xpGPR(xp,RD_field(instr));
170    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
171      c->cdr = xpGPR(xp,RD_field(instr));
172    } else {
173      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
174      xpGPR(xp,RD_field(instr)) = cur_allocptr;
175    }
176  }
177}
178
179/*
180  We were interrupted in the process of allocating a uvector; we
181  survived the allocation trap, and allocptr is tagged as fulltag_misc.
182  Emulate any instructions which store a header into the uvector,
183  assign the value of allocptr to some other register, and clear
184  allocptr's tag.  Don't expect/allow any other instructions in
185  this environment.
186*/
187void
188finish_allocating_uvector(ExceptionInformation *xp)
189{
190  pc program_counter = xpPC(xp);
191  opcode instr;
192  LispObj cur_allocptr = xpGPR(xp, allocptr);
193  int target_reg;
194
195  while (1) {
196    instr = *program_counter++;
197    if (IS_CLR_ALLOCPTR_TAG(instr)) {
198      xpGPR(xp, allocptr) = untag(cur_allocptr);
199      xpPC(xp) = program_counter;
200      return;
201    }
202    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
203      header_of(cur_allocptr) == xpGPR(xp,RD_field(instr));
204    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
205      xpGPR(xp,RD_field(instr)) = cur_allocptr;
206    } else {
207      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
208    }
209  }
210}
211
212
213Boolean
214allocate_object(ExceptionInformation *xp,
215                natural bytes_needed, 
216                signed_natural disp_from_allocptr,
217                TCR *tcr)
218{
219  area *a = active_dynamic_area;
220
221  /* Maybe do an EGC */
222  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
223    if (((a->active)-(a->low)) >= a->threshold) {
224      gc_from_xp(xp, 0L);
225    }
226  }
227
228  /* Life is pretty simple if we can simply grab a segment
229     without extending the heap.
230  */
231  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
232    xpGPR(xp, allocptr) += disp_from_allocptr;
233    return true;
234  }
235 
236  /* It doesn't make sense to try a full GC if the object
237     we're trying to allocate is larger than everything
238     allocated so far.
239  */
240  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
241    untenure_from_area(tenured_area); /* force a full GC */
242    gc_from_xp(xp, 0L);
243  }
244 
245  /* Try again, growing the heap if necessary */
246  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
247    xpGPR(xp, allocptr) += disp_from_allocptr;
248    return true;
249  }
250 
251  return false;
252}
253
254#ifndef XNOMEM
255#define XNOMEM 10
256#endif
257
258void
259update_bytes_allocated(TCR* tcr, void *cur_allocptr)
260{
261  BytePtr
262    last = (BytePtr) tcr->last_allocptr, 
263    current = (BytePtr) cur_allocptr;
264  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
265    tcr->bytes_allocated += last-current;
266  }
267  tcr->last_allocptr = 0;
268}
269
270void
271lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
272{
273  /* Couldn't allocate the object.  If it's smaller than some arbitrary
274     size (say 128K bytes), signal a "chronically out-of-memory" condition;
275     else signal a "allocation request failed" condition.
276  */
277  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
278  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0);
279}
280
281/*
282  Allocate a large list, where "large" means "large enough to
283  possibly trigger the EGC several times if this was done
284  by individually allocating each CONS."  The number of
285  ocnses in question is in arg_z; on successful return,
286  the list will be in arg_z
287*/
288
289Boolean
290allocate_list(ExceptionInformation *xp, TCR *tcr)
291{
292  natural
293    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
294    bytes_needed = (nconses << dnode_shift);
295  LispObj
296    prev = lisp_nil,
297    current,
298    initial = xpGPR(xp,arg_y);
299
300  if (nconses == 0) {
301    /* Silly case */
302    xpGPR(xp,arg_z) = lisp_nil;
303    xpGPR(xp,allocptr) = lisp_nil;
304    return true;
305  }
306  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
307  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
308    for (current = xpGPR(xp,allocptr);
309         nconses;
310         prev = current, current+= dnode_size, nconses--) {
311      deref(current,0) = prev;
312      deref(current,1) = initial;
313    }
314    xpGPR(xp,arg_z) = prev;
315    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
316    xpGPR(xp,allocptr)-=fulltag_cons;
317  } else {
318    lisp_allocation_failure(xp,tcr,bytes_needed);
319  }
320  return true;
321}
322
323Boolean
324handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
325{
326  pc program_counter;
327  natural cur_allocptr, bytes_needed = 0;
328  opcode prev_instr;
329  signed_natural disp = 0;
330  unsigned allocptr_tag;
331
332  cur_allocptr = xpGPR(xp,allocptr);
333
334  allocptr_tag = fulltag_of(cur_allocptr);
335
336  switch (allocptr_tag) {
337  case fulltag_cons:
338    bytes_needed = sizeof(cons);
339    disp = -sizeof(cons) + fulltag_cons;
340    break;
341
342  case fulltag_misc:
343    disp = allocptr_displacement(xp);
344    bytes_needed = (-disp) + fulltag_misc;
345    break;
346
347    /* else fall thru */
348  default:
349    return false;
350  }
351
352  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
353  if (allocate_object(xp, bytes_needed, disp, tcr)) {
354    adjust_exception_pc(xp,4);
355    return true;
356  }
357  lisp_allocation_failure(xp,tcr,bytes_needed);
358  return true;
359}
360
361natural gc_deferred = 0, full_gc_deferred = 0;
362
363signed_natural
364flash_freeze(TCR *tcr, signed_natural param)
365{
366  return 0;
367}
368
369Boolean
370handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
371{
372  LispObj
373    selector = xpGPR(xp,imm0), 
374    arg = xpGPR(xp,imm1);
375  area *a = active_dynamic_area;
376  Boolean egc_was_enabled = (a->older != NULL);
377  natural gc_previously_deferred = gc_deferred;
378
379
380  switch (selector) {
381  case GC_TRAP_FUNCTION_EGC_CONTROL:
382    egc_control(arg != 0, a->active);
383    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
384    break;
385
386  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
387    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
388    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
389    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
390    xpGPR(xp,arg_z) = lisp_nil+t_offset;
391    break;
392
393  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
394    if (((signed_natural) arg) > 0) {
395      lisp_heap_gc_threshold = 
396        align_to_power_of_2((arg-1) +
397                            (heap_segment_size - 1),
398                            log2_heap_segment_size);
399    }
400    /* fall through */
401  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
402    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
403    break;
404
405  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
406    /*  Try to put the current threshold in effect.  This may
407        need to disable/reenable the EGC. */
408    untenure_from_area(tenured_area);
409    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
410    if (egc_was_enabled) {
411      if ((a->high - a->active) >= a->threshold) {
412        tenure_to_area(tenured_area);
413      }
414    }
415    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
416    break;
417
418  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
419    ensure_static_conses(xp,tcr,32768);
420    break;
421
422  case GC_TRAP_FUNCTION_FLASH_FREEZE:
423    untenure_from_area(tenured_area);
424    gc_like_from_xp(xp,flash_freeze,0);
425    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
426    tenured_area->static_dnodes = area_dnode(a->active, a->low);
427    if (egc_was_enabled) {
428      tenure_to_area(tenured_area);
429    }
430    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
431    break;
432
433  default:
434    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
435
436    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
437      if (!full_gc_deferred) {
438        gc_from_xp(xp, 0L);
439        break;
440      }
441      /* Tried to do a full GC when gc was disabled.  That failed,
442         so try full GC now */
443      selector = GC_TRAP_FUNCTION_GC;
444    }
445   
446    if (egc_was_enabled) {
447      egc_control(false, (BytePtr) a->active);
448    }
449    gc_from_xp(xp, 0L);
450    if (gc_deferred > gc_previously_deferred) {
451      full_gc_deferred = 1;
452    } else {
453      full_gc_deferred = 0;
454    }
455    if (selector > GC_TRAP_FUNCTION_GC) {
456      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
457        impurify_from_xp(xp, 0L);
458        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
459        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
460        gc_from_xp(xp, 0L);
461      }
462      if (selector & GC_TRAP_FUNCTION_PURIFY) {
463        purify_from_xp(xp, 0L);
464        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
465        gc_from_xp(xp, 0L);
466      }
467      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
468        OSErr err;
469        extern OSErr save_application(unsigned, Boolean);
470        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
471        area *vsarea = tcr->vs_area;
472       
473        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
474        err = save_application(arg, egc_was_enabled);
475        if (err == noErr) {
476          _exit(0);
477        }
478        fatal_oserr(": save_application", err);
479      }
480      switch (selector) {
481
482
483      case GC_TRAP_FUNCTION_FREEZE:
484        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
485        tenured_area->static_dnodes = area_dnode(a->active, a->low);
486        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
487        break;
488      default:
489        break;
490      }
491    }
492   
493    if (egc_was_enabled) {
494      egc_control(true, NULL);
495    }
496    break;
497   
498  }
499
500  adjust_exception_pc(xp,4);
501  return true;
502}
503
504
505
506void
507signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
508{
509  /* The cstack just overflowed.  Force the current thread's
510     control stack to do so until all stacks are well under their overflow
511     limits.
512  */
513
514#if 0
515  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
516#endif
517  handle_error(xp, error_stack_overflow, reg);
518}
519
520/*
521  Lower (move toward 0) the "end" of the soft protected area associated
522  with a by a page, if we can.
523*/
524
525void
526adjust_soft_protection_limit(area *a)
527{
528  char *proposed_new_soft_limit = a->softlimit - 4096;
529  protected_area_ptr p = a->softprot;
530 
531  if (proposed_new_soft_limit >= (p->start+16384)) {
532    p->end = proposed_new_soft_limit;
533    p->protsize = p->end-p->start;
534    a->softlimit = proposed_new_soft_limit;
535  }
536  protect_area(p);
537}
538
539void
540restore_soft_stack_limit(unsigned stkreg)
541{
542  area *a;
543  TCR *tcr = get_tcr(true);
544
545  switch (stkreg) {
546  case Rsp:
547    a = tcr->cs_area;
548    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
549      a->softlimit -= 4096;
550    }
551    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
552    break;
553  case vsp:
554    a = tcr->vs_area;
555    adjust_soft_protection_limit(a);
556    break;
557  }
558}
559
560/* Maybe this'll work someday.  We may have to do something to
561   make the thread look like it's not handling an exception */
562void
563reset_lisp_process(ExceptionInformation *xp)
564{
565  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
566  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));
567
568  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
569
570  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
571
572  start_lisp(tcr, 1);
573}
574
575/*
576  This doesn't GC; it returns true if it made enough room, false
577  otherwise.
578  If "extend" is true, it can try to extend the dynamic area to
579  satisfy the request.
580*/
581
582Boolean
583new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
584{
585  area *a;
586  natural newlimit, oldlimit;
587  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
588
589  a  = active_dynamic_area;
590  oldlimit = (natural) a->active;
591  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
592              align_to_power_of_2(need, log2_allocation_quantum));
593  if (newlimit > (natural) (a->high)) {
594    if (extend) {
595      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
596      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
597      do {
598        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
599          break;
600        }
601        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
602        if (extend_by < 4<<20) {
603          return false;
604        }
605      } while (1);
606    } else {
607      return false;
608    }
609  }
610  a->active = (BytePtr) newlimit;
611  tcr->last_allocptr = (void *)newlimit;
612  xpGPR(xp,allocptr) = (LispObj) newlimit;
613  tcr->save_allocbase = (void*) oldlimit;
614
615  return true;
616}
617
618 
619void
620update_area_active (area **aptr, BytePtr value)
621{
622  area *a = *aptr;
623  for (; a; a = a->older) {
624    if ((a->low <= value) && (a->high >= value)) break;
625  };
626  if (a == NULL) Bug(NULL, "Can't find active area");
627  a->active = value;
628  *aptr = a;
629
630  for (a = a->younger; a; a = a->younger) {
631    a->active = a->high;
632  }
633}
634
635LispObj *
636tcr_frame_ptr(TCR *tcr)
637{
638  ExceptionInformation *xp;
639  LispObj *bp = NULL;
640
641  if (tcr->pending_exception_context)
642    xp = tcr->pending_exception_context;
643  else {
644    xp = tcr->suspend_context;
645  }
646  if (xp) {
647    bp = (LispObj *) xpGPR(xp, Rsp);
648  }
649  return bp;
650}
651
652void
653normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
654{
655  void *cur_allocptr = NULL;
656  LispObj freeptr = 0;
657
658  if (xp) {
659    if (is_other_tcr) {
660      pc_luser_xp(xp, tcr, NULL);
661      freeptr = xpGPR(xp, allocptr);
662      if (fulltag_of(freeptr) == 0){
663        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
664      }
665    }
666    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
667    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
668  } else {
669    /* In ff-call.  No need to update cs_area */
670    cur_allocptr = (void *) (tcr->save_allocptr);
671    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
672  }
673
674
675  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
676  if (cur_allocptr) {
677    update_bytes_allocated(tcr, cur_allocptr);
678    if (freeptr) {
679      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
680    }
681  }
682}
683
684TCR *gc_tcr = NULL;
685
686/* Suspend and "normalize" other tcrs, then call a gc-like function
687   in that context.  Resume the other tcrs, then return what the
688   function returned */
689
690signed_natural
691gc_like_from_xp(ExceptionInformation *xp, 
692                signed_natural(*fun)(TCR *, signed_natural), 
693                signed_natural param)
694{
695  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
696  int result;
697  signed_natural inhibit;
698
699  suspend_other_threads(true);
700  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
701  if (inhibit != 0) {
702    if (inhibit > 0) {
703      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
704    }
705    resume_other_threads(true);
706    gc_deferred++;
707    return 0;
708  }
709  gc_deferred = 0;
710
711  gc_tcr = tcr;
712
713  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
714
715  normalize_tcr(xp, tcr, false);
716
717
718  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
719    if (other_tcr->pending_exception_context) {
720      other_tcr->gc_context = other_tcr->pending_exception_context;
721    } else if (other_tcr->valence == TCR_STATE_LISP) {
722      other_tcr->gc_context = other_tcr->suspend_context;
723    } else {
724      /* no pending exception, didn't suspend in lisp state:
725         must have executed a synchronous ff-call.
726      */
727      other_tcr->gc_context = NULL;
728    }
729    normalize_tcr(other_tcr->gc_context, other_tcr, true);
730  }
731   
732
733
734  result = fun(tcr, param);
735
736  other_tcr = tcr;
737  do {
738    other_tcr->gc_context = NULL;
739    other_tcr = other_tcr->next;
740  } while (other_tcr != tcr);
741
742  gc_tcr = NULL;
743
744  resume_other_threads(true);
745
746  return result;
747
748}
749
750
751
752/* Returns #bytes freed by invoking GC */
753
754signed_natural
755gc_from_tcr(TCR *tcr, signed_natural param)
756{
757  area *a;
758  BytePtr oldfree, newfree;
759  BytePtr oldend, newend;
760
761  a = active_dynamic_area;
762  oldend = a->high;
763  oldfree = a->active;
764  gc(tcr, param);
765  newfree = a->active;
766  newend = a->high;
767#if 0
768  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
769#endif
770  return ((oldfree-newfree)+(newend-oldend));
771}
772
773signed_natural
774gc_from_xp(ExceptionInformation *xp, signed_natural param)
775{
776  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
777
778  freeGCptrs();
779  return status;
780}
781
782signed_natural
783purify_from_xp(ExceptionInformation *xp, signed_natural param)
784{
785  return gc_like_from_xp(xp, purify, param);
786}
787
788signed_natural
789impurify_from_xp(ExceptionInformation *xp, signed_natural param)
790{
791  return gc_like_from_xp(xp, impurify, param);
792}
793
794
795
796
797
798
799protection_handler
800 * protection_handlers[] = {
801   do_spurious_wp_fault,
802   do_soft_stack_overflow,
803   do_soft_stack_overflow,
804   do_soft_stack_overflow,
805   do_hard_stack_overflow,   
806   do_hard_stack_overflow,
807   do_hard_stack_overflow
808   };
809
810
811Boolean
812is_write_fault(ExceptionInformation *xp, siginfo_t *info)
813{
814#ifdef LINUX
815  /* Based on experiments with a small sample size; need to R TFM. */
816  return ((xp->uc_mcontext.trap_no == 0xe) &&
817          (xp->uc_mcontext.error_code == 0x817));
818#endif
819}
820
821Boolean
822handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
823{
824  BytePtr addr;
825  protected_area_ptr area;
826  protection_handler *handler;
827  extern Boolean touch_page(void *);
828  extern void touch_page_end(void);
829
830  if (info) {
831    addr = (BytePtr)(info->si_addr);
832  } else {
833    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
834  }
835
836  if (addr && (addr == tcr->safe_ref_address)) {
837    adjust_exception_pc(xp,4);
838
839    xpGPR(xp,imm0) = 0;
840    return true;
841  }
842
843  if (xpPC(xp) == (pc)touch_page) {
844    xpGPR(xp,imm0) = 0;
845    xpPC(xp) = (pc)touch_page_end;
846    return true;
847  }
848
849
850  if (is_write_fault(xp,info)) {
851    area = find_protected_area(addr);
852    if (area != NULL) {
853      handler = protection_handlers[area->why];
854      return handler(xp, area, addr);
855    } else {
856      if ((addr >= readonly_area->low) &&
857          (addr < readonly_area->active)) {
858        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
859                        page_size);
860        return true;
861      }
862    }
863  }
864  if (old_valence == TCR_STATE_LISP) {
865    LispObj cmain = nrs_CMAIN.vcell;
866   
867    if ((fulltag_of(cmain) == fulltag_misc) &&
868      (header_subtag(header_of(cmain)) == subtag_macptr)) {
869     
870      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr);
871    }
872  }
873  return false;
874}
875
876
877
878
879
880OSStatus
881do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
882{
883#ifdef SUPPORT_PRAGMA_UNUSED
884#pragma unused(area,addr)
885#endif
886  reset_lisp_process(xp);
887  return -1;
888}
889
890extern area*
891allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
892
893extern area*
894allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
895
896
897
898
899
900
901Boolean
902lisp_frame_p(lisp_frame *spPtr)
903{
904  return (spPtr->marker == lisp_frame_marker);
905}
906
907
908int ffcall_overflow_count = 0;
909
910
911
912
913
914
915/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
916  the current value of VSP (TSP) or an older area.  */
917
918OSStatus
919do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
920{
921  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
922  area *a = tcr->vs_area;
923  protected_area_ptr vsp_soft = a->softprot;
924  unprotect_area(vsp_soft);
925  signal_stack_soft_overflow(xp,vsp);
926  return 0;
927}
928
929
930
931OSStatus
932do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
933{
934  /* Trying to write into a guard page on the vstack or tstack.
935     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
936     signal an error_stack_overflow condition.
937      */
938  do_vsp_overflow(xp,addr);
939}
940
941OSStatus
942do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
943{
944#ifdef SUPPORT_PRAGMA_UNUSED
945#pragma unused(xp,area,addr)
946#endif
947  return -1;
948}
949
950
951
952
953     
954
955
956
957
958
959Boolean
960handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
961{
962  return false;
963}
964
965   
966
967
968/* This only tries to implement the "optional" fsqrt and fsqrts
969   instructions, which were generally implemented on IBM hardware
970   but generally not available on Motorola/Freescale systems.
971*/               
972OSStatus
973handle_unimplemented_instruction(ExceptionInformation *xp,
974                                 opcode instruction,
975                                 TCR *tcr)
976{
977
978  return -1;
979}
980
981Boolean
982handle_exception(int xnum, 
983                 ExceptionInformation *xp, 
984                 TCR *tcr, 
985                 siginfo_t *info,
986                 int old_valence)
987{
988  pc program_counter;
989  opcode instruction = 0;
990
991
992  program_counter = xpPC(xp);
993 
994  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
995    instruction = *program_counter;
996  }
997
998  if (IS_ALLOC_TRAP(instruction)) {
999    return handle_alloc_trap(xp, tcr);
1000  } else if ((xnum == SIGSEGV) ||
1001             (xnum == SIGBUS)) {
1002    return handle_protection_violation(xp, info, tcr, old_valence);
1003  } else if (xnum == SIGFPE) {
1004    return handle_sigfpe(xp, tcr);
1005  } else if ((xnum == SIGILL)) {
1006    if (IS_GC_TRAP(instruction)) {
1007      return handle_gc_trap(xp, tcr);
1008    } else if (IS_UUO(instruction)) {
1009      return handle_uuo(xp, instruction);
1010    } else {
1011      return handle_unimplemented_instruction(xp,instruction,tcr);
1012    }
1013  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1014    tcr->interrupt_pending = 0;
1015    callback_for_trap(nrs_CMAIN.vcell, xp, xnum, 0);
1016    return true;
1017  }
1018
1019  return false;
1020}
1021
1022void
1023adjust_exception_pc(ExceptionInformation *xp, int delta)
1024{
1025  xpPC(xp) += (delta >> 2);
1026}
1027
1028
1029/*
1030  This wants to scan backwards until "where" points to an instruction
1031   whose major opcode is either 63 (double-float) or 59 (single-float)
1032*/
1033
1034OSStatus
1035handle_fpux_binop(ExceptionInformation *xp, pc where)
1036{
1037  OSStatus err = -1;
1038  opcode *there = (opcode *) where, instr, errnum = 0;
1039  return err;
1040}
1041
1042Boolean
1043handle_uuo(ExceptionInformation *xp, opcode the_uuo) 
1044{
1045  unsigned 
1046    format = UUO_FORMAT(the_uuo);
1047  Boolean handled = false;
1048  int bump = 4;
1049  TCR *tcr = get_tcr(true);
1050
1051  switch (format) {
1052  case uuo_format_kernel_service:
1053    {
1054      TCR * target = (TCR *)xpGPR(xp,arg_z);
1055      int service = UUO_UNARY_field(the_uuo);
1056
1057      switch (service) {
1058      case error_propagate_suspend:
1059        handled = true;
1060        break;
1061      case error_interrupt:
1062        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
1063        handled = true;
1064        break;
1065      case error_suspend:
1066        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
1067        handled = true;
1068        break;
1069      case error_suspend_all:
1070        lisp_suspend_other_threads();
1071        handled = true;
1072        break;
1073      case error_resume:
1074        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
1075        handled = true;
1076        break;
1077      case error_resume_all:
1078        lisp_resume_other_threads();
1079        handled = true;
1080        break;
1081      case error_kill:
1082        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
1083        handled = true;
1084        break;
1085      case error_allocate_list:
1086        allocate_list(xp,tcr);
1087        handled = true;
1088        break;
1089      default:
1090        handled = false;
1091        break;
1092      }
1093      break;
1094    }
1095
1096  case uuo_format_unary:
1097    switch(UUO_UNARY_field(the_uuo)) {
1098    case 3:
1099      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
1100        handled = true;
1101        bump = 4;
1102        break;
1103      }
1104      /* fall in */
1105    default:
1106      handled = false;
1107      break;
1108
1109    }
1110    break;
1111
1112  case uuo_format_nullary:
1113    switch (UUOA_field(the_uuo)) {
1114    case 4:
1115      tcr->interrupt_pending = 0;
1116      callback_for_trap(nrs_CMAIN.vcell, xp, SIGNAL_FOR_PROCESS_INTERRUPT, 0);
1117      handled = true;
1118      break;
1119    default:
1120      handled = false;
1121      break;
1122    }
1123
1124  case uuo_format_error_lisptag:
1125  case uuo_format_error_fulltag:
1126  case uuo_format_error_xtype:
1127  case uuo_format_nullary_error:
1128  case uuo_format_unary_error:
1129  case uuo_format_binary_error:
1130    handled = handle_error(xp,0,the_uuo);
1131    break;
1132
1133  default:
1134    handled = false;
1135    bump = 0;
1136  }
1137 
1138  if (handled && bump) {
1139    adjust_exception_pc(xp, bump);
1140  }
1141  return handled;
1142}
1143
1144natural
1145register_codevector_contains_pc (natural lisp_function, pc where)
1146{
1147  natural code_vector, size;
1148
1149  if ((fulltag_of(lisp_function) == fulltag_misc) &&
1150      (header_subtag(header_of(lisp_function)) == subtag_function)) {
1151    code_vector = deref(lisp_function, 2);
1152    size = header_element_count(header_of(code_vector)) << 2;
1153    if ((untag(code_vector) < (natural)where) && 
1154        ((natural)where < (code_vector + size)))
1155      return(code_vector);
1156  }
1157
1158  return(0);
1159}
1160
1161int
1162callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg)
1163{
1164  return callback_to_lisp(callback_macptr, xp, info,arg);
1165}
1166
1167int
1168callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
1169                  natural arg1, natural arg2)
1170{
1171  natural  callback_ptr;
1172  area *a;
1173  natural fnreg = fn,  codevector, offset;
1174  pc where = xpPC(xp);
1175
1176  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1177  if (codevector == 0) {
1178    fnreg = nfn;
1179    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
1180  }
1181  offset = (natural)where - codevector;
1182                                                 
1183                                               
1184
1185  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1186
1187  /* Put the active stack pointer where .SPcallback expects it */
1188  a = tcr->cs_area;
1189  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));
1190
1191  /* Copy globals from the exception frame to tcr */
1192  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
1193  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
1194
1195
1196
1197  /* Call back.
1198     Lisp will handle trampolining through some code that
1199     will push lr/fn & pc/nfn stack frames for backtrace.
1200  */
1201  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1202  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
1203  ((void (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
1204  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1205
1206
1207
1208  /* Copy GC registers back into exception frame */
1209  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
1210}
1211
1212area *
1213allocate_no_stack (natural size)
1214{
1215#ifdef SUPPORT_PRAGMA_UNUSED
1216#pragma unused(size)
1217#endif
1218
1219  return (area *) NULL;
1220}
1221
1222
1223
1224
1225
1226
1227/* callback to (symbol-value cmain) if it is a macptr,
1228   otherwise report cause and function name to console.
1229   Returns noErr if exception handled OK */
1230OSStatus
1231handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
1232{
1233  LispObj   cmain = nrs_CMAIN.vcell;
1234  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1235
1236}
1237
1238
1239
1240
1241void non_fatal_error( char *msg )
1242{
1243  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1244  fflush( dbgout );
1245}
1246
1247
1248
1249Boolean
1250handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2)
1251{
1252  LispObj   errdisp = nrs_ERRDISP.vcell;
1253
1254  if ((fulltag_of(errdisp) == fulltag_misc) &&
1255      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1256    /* errdisp is a macptr, we can call back to lisp */
1257    return callback_for_trap(errdisp, xp, arg1, arg2);
1258    }
1259
1260  return false;
1261}
1262               
1263
1264/*
1265   Current thread has all signals masked.  Before unmasking them,
1266   make it appear that the current thread has been suspended.
1267   (This is to handle the case where another thread is trying
1268   to GC before this thread is able to sieze the exception lock.)
1269*/
1270int
1271prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1272{
1273  int old_valence = tcr->valence;
1274
1275  tcr->pending_exception_context = context;
1276  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1277
1278  ALLOW_EXCEPTIONS(context);
1279  return old_valence;
1280} 
1281
1282void
1283wait_for_exception_lock_in_handler(TCR *tcr, 
1284                                   ExceptionInformation *context,
1285                                   xframe_list *xf)
1286{
1287
1288  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1289  xf->curr = context;
1290  xf->prev = tcr->xframe;
1291  tcr->xframe =  xf;
1292  tcr->pending_exception_context = NULL;
1293  tcr->valence = TCR_STATE_FOREIGN; 
1294}
1295
1296void
1297unlock_exception_lock_in_handler(TCR *tcr)
1298{
1299  tcr->pending_exception_context = tcr->xframe->curr;
1300  tcr->xframe = tcr->xframe->prev;
1301  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1302  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1303}
1304
1305/*
1306   If an interrupt is pending on exception exit, try to ensure
1307   that the thread sees it as soon as it's able to run.
1308*/
1309void
1310raise_pending_interrupt(TCR *tcr)
1311{
1312  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1313    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1314  }
1315}
1316
1317void
1318exit_signal_handler(TCR *tcr, int old_valence)
1319{
1320  sigset_t mask;
1321  sigfillset(&mask);
1322 
1323  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1324  tcr->valence = old_valence;
1325  tcr->pending_exception_context = NULL;
1326}
1327
1328
1329void
1330signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1331{
1332  xframe_list xframe_link;
1333
1334  if (!use_mach_exception_handling) {
1335   
1336    tcr = (TCR *) get_interrupt_tcr(false);
1337 
1338    /* The signal handler's entered with all signals (notably the
1339       thread_suspend signal) blocked.  Don't allow any other signals
1340       (notably the thread_suspend signal) to preempt us until we've
1341       set the TCR's xframe slot to include the current exception
1342       context.
1343    */
1344   
1345    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1346  }
1347
1348  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1349    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1350    pthread_kill(pthread_self(), thread_suspend_signal);
1351  }
1352
1353 
1354  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1355  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
1356    char msg[512];
1357    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1358    if (lisp_Debugger(context, info, signum, false, msg)) {
1359      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1360    }
1361  }
1362
1363  unlock_exception_lock_in_handler(tcr);
1364
1365  /* This thread now looks like a thread that was suspended while
1366     executing lisp code.  If some other thread gets the exception
1367     lock and GCs, the context (this thread's suspend_context) will
1368     be updated.  (That's only of concern if it happens before we
1369     can return to the kernel/to the Mach exception handler).
1370  */
1371  if (!use_mach_exception_handling) {
1372    exit_signal_handler(tcr, old_valence);
1373    raise_pending_interrupt(tcr);
1374  }
1375}
1376
1377/*
1378  If it looks like we're in the middle of an atomic operation, make
1379  it seem as if that operation is either complete or hasn't started
1380  yet.
1381
1382  The cases handled include:
1383
1384  a) storing into a newly-allocated lisp frame on the stack.
1385  b) marking a newly-allocated TSP frame as containing "raw" data.
1386  c) consing: the GC has its own ideas about how this should be
1387     handled, but other callers would be best advised to back
1388     up or move forward, according to whether we're in the middle
1389     of allocating a cons cell or allocating a uvector.
1390  d) a STMW to the vsp
1391  e) EGC write-barrier subprims.
1392*/
1393
1394extern opcode
1395  egc_write_barrier_start,
1396  egc_write_barrier_end, 
1397  egc_store_node_conditional, 
1398  egc_store_node_conditional_test,
1399  egc_set_hash_key,
1400  egc_gvset,
1401  egc_rplaca,
1402  egc_rplacd,
1403  egc_set_hash_key_conditional,
1404  egc_set_hash_key_conditional_test;
1405
1406
1407extern opcode ffcall_return_window, ffcall_return_window_end;
1408
1409void
1410pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1411{
1412  pc program_counter = xpPC(xp);
1413  opcode instr = *program_counter;
1414  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
1415  LispObj cur_allocptr = xpGPR(xp, allocptr);
1416  int allocptr_tag = fulltag_of(cur_allocptr);
1417 
1418
1419
1420  if ((program_counter < &egc_write_barrier_end) && 
1421      (program_counter >= &egc_write_barrier_start)) {
1422    LispObj *ea = 0, val = 0, root = 0;
1423    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1424    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1425
1426    if (program_counter >= &egc_set_hash_key_conditional) {
1427      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1428          ((program_counter == &egc_set_hash_key_conditional_test) &&
1429           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1430        return;
1431      }
1432      need_store = false;
1433      root = xpGPR(xp,arg_x);
1434      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1435      need_memoize_root = true;
1436    } else if (program_counter >= &egc_store_node_conditional) {
1437      if ((program_counter < &egc_store_node_conditional_test) ||
1438          ((program_counter == &egc_store_node_conditional_test) &&
1439           (! (xpPSR(xp) & PSR_Z_MASK)))) {
1440        /* The conditional store either hasn't been attempted yet, or
1441           has failed.  No need to adjust the PC, or do memoization. */
1442        return;
1443      }
1444      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
1445      xpGPR(xp,arg_z) = t_value;
1446      need_store = false;
1447    } else if (program_counter >= &egc_set_hash_key) {
1448      root = xpGPR(xp,arg_x);
1449      val = xpGPR(xp,arg_z);
1450      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1451      need_memoize_root = true;
1452    } else if (program_counter >= &egc_gvset) {
1453      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1454      val = xpGPR(xp,arg_z);
1455    } else if (program_counter >= &egc_rplacd) {
1456      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1457      val = xpGPR(xp,arg_z);
1458    } else {                      /* egc_rplaca */
1459      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1460      val = xpGPR(xp,arg_z);
1461    }
1462    if (need_store) {
1463      *ea = val;
1464    }
1465    if (need_check_memo) {
1466      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1467      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1468          ((LispObj)ea < val)) {
1469        atomic_set_bit(refbits, bitnumber);
1470        if (need_memoize_root) {
1471          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1472          atomic_set_bit(refbits, bitnumber);
1473        }
1474      }
1475    }
1476    xpPC(xp) = xpLR(xp);
1477    return;
1478  }
1479
1480
1481 
1482  if (allocptr_tag != tag_fixnum) {
1483    signed_natural disp = allocptr_displacement(xp);
1484
1485    if (disp) {
1486      /* Being architecturally "at" the alloc trap doesn't tell
1487         us much (in particular, it doesn't tell us whether
1488         or not the thread has committed to taking the trap
1489         and is waiting for the exception lock (or waiting
1490         for the Mach exception thread to tell it how bad
1491         things are) or is about to execute a conditional
1492         trap.
1493         Regardless of which case applies, we want the
1494         other thread to take (or finish taking) the
1495         trap, and we don't want it to consider its
1496         current allocptr to be valid.
1497         The difference between this case (suspend other
1498         thread for GC) and the previous case (suspend
1499         current thread for interrupt) is solely a
1500         matter of what happens after we leave this
1501         function: some non-current thread will stay
1502         suspended until the GC finishes, then take
1503         (or start processing) the alloc trap.   The
1504         current thread will go off and do PROCESS-INTERRUPT
1505         or something, and may return from the interrupt
1506         and need to finish the allocation that got interrupted.
1507      */
1508
1509      if (alloc_disp) {
1510        *alloc_disp = disp;
1511        xpGPR(xp,allocptr) += disp;
1512        /* Leave the PC at the alloc trap.  When the interrupt
1513           handler returns, it'll decrement allocptr by disp
1514           and the trap may or may not be taken.
1515        */
1516      } else {
1517        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1518        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1519      }
1520    } else {
1521      /* If we're already past the alloc_trap, finish allocating
1522         the object. */
1523      if (allocptr_tag == fulltag_cons) {
1524        finish_allocating_cons(xp);
1525      } else {
1526        if (allocptr_tag == fulltag_misc) {
1527          finish_allocating_uvector(xp);
1528        } else {
1529          Bug(xp, "what's being allocated here ?");
1530        }
1531      }
1532      /* Whatever we finished allocating, reset allocptr/allocbase to
1533         VOID_ALLOCPTR */
1534      xpGPR(xp,allocptr) = VOID_ALLOCPTR;
1535    }
1536    return;
1537  }
1538}
1539
1540void
1541interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1542{
1543  TCR *tcr = get_interrupt_tcr(false);
1544  if (tcr) {
1545    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
1546      tcr->interrupt_pending = 1 << fixnumshift;
1547    } else {
1548      LispObj cmain = nrs_CMAIN.vcell;
1549
1550      if ((fulltag_of(cmain) == fulltag_misc) &&
1551          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1552        /*
1553           This thread can (allegedly) take an interrupt now.
1554           It's tricky to do that if we're executing
1555           foreign code (especially Linuxthreads code, much
1556           of which isn't reentrant.)
1557           If we're unwinding the stack, we also want to defer
1558           the interrupt.
1559        */
1560        if ((tcr->valence != TCR_STATE_LISP) ||
1561            (tcr->unwinding != 0)) {
1562          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
1563        } else {
1564          xframe_list xframe_link;
1565          int old_valence;
1566          signed_natural disp=0;
1567         
1568          pc_luser_xp(context, tcr, &disp);
1569          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1570          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1571          handle_exception(signum, context, tcr, info, old_valence);
1572          if (disp) {
1573            xpGPR(context,allocptr) -= disp;
1574          }
1575          unlock_exception_lock_in_handler(tcr);
1576          exit_signal_handler(tcr, old_valence);
1577        }
1578      }
1579    }
1580  }
1581#ifdef DARWIN
1582    DarwinSigReturn(context);
1583#endif
1584}
1585
1586
1587
1588void
1589install_signal_handler(int signo, void *handler)
1590{
1591  struct sigaction sa;
1592 
1593  sa.sa_sigaction = (void *)handler;
1594  sigfillset(&sa.sa_mask);
1595  sa.sa_flags = 
1596    0 /* SA_RESTART */
1597    | SA_SIGINFO
1598    ;
1599
1600  sigaction(signo, &sa, NULL);
1601}
1602
1603void
1604install_pmcl_exception_handlers()
1605{
1606#ifdef DARWIN
1607  extern Boolean use_mach_exception_handling;
1608#endif
1609
1610  Boolean install_signal_handlers_for_exceptions =
1611#ifdef DARWIN
1612    !use_mach_exception_handling
1613#else
1614    true
1615#endif
1616    ;
1617  if (install_signal_handlers_for_exceptions) {
1618    extern int no_sigtrap;
1619    install_signal_handler(SIGILL, (void *)signal_handler);
1620    if (no_sigtrap != 1) {
1621      install_signal_handler(SIGTRAP, (void *)signal_handler);
1622    }
1623    install_signal_handler(SIGBUS,  (void *)signal_handler);
1624    install_signal_handler(SIGSEGV, (void *)signal_handler);
1625    install_signal_handler(SIGFPE, (void *)signal_handler);
1626  }
1627 
1628  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1629                         (void *)interrupt_handler);
1630  signal(SIGPIPE, SIG_IGN);
1631}
1632
1633void
1634thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
1635{
1636  TCR *tcr = get_tcr(false);
1637  area *a;
1638  sigset_t mask;
1639 
1640  sigemptyset(&mask);
1641
1642  if (tcr) {
1643    tcr->valence = TCR_STATE_FOREIGN;
1644    a = tcr->vs_area;
1645    if (a) {
1646      a->active = a->high;
1647    }
1648    a = tcr->cs_area;
1649    if (a) {
1650      a->active = a->high;
1651    }
1652  }
1653 
1654  pthread_sigmask(SIG_SETMASK,&mask,NULL);
1655  pthread_exit(NULL);
1656}
1657
1658void
1659thread_signal_setup()
1660{
1661  thread_suspend_signal = SIG_SUSPEND_THREAD;
1662  thread_kill_signal = SIG_KILL_THREAD;
1663
1664  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
1665  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
1666}
1667
1668
1669
1670void
1671unprotect_all_areas()
1672{
1673  protected_area_ptr p;
1674
1675  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
1676    unprotect_area(p);
1677  }
1678}
1679
1680/*
1681  A binding subprim has just done "twlle limit_regno,idx_regno" and
1682  the trap's been taken.  Extend the tcr's tlb so that the index will
1683  be in bounds and the new limit will be on a page boundary, filling
1684  in the new page(s) with 'no_thread_local_binding_marker'.  Update
1685  the tcr fields and the registers in the xp and return true if this
1686  all works, false otherwise.
1687
1688  Note that the tlb was allocated via malloc, so realloc can do some
1689  of the hard work.
1690*/
1691Boolean
1692extend_tcr_tlb(TCR *tcr, 
1693               ExceptionInformation *xp, 
1694               unsigned idx_regno)
1695{
1696  unsigned
1697    index = (unsigned) (xpGPR(xp,idx_regno)),
1698    old_limit = tcr->tlb_limit,
1699    new_limit = align_to_power_of_2(index+1,12),
1700    new_bytes = new_limit-old_limit;
1701  LispObj
1702    *old_tlb = tcr->tlb_pointer,
1703    *new_tlb = realloc(old_tlb, new_limit),
1704    *work;
1705
1706  if (new_tlb == NULL) {
1707    return false;
1708  }
1709 
1710  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1711
1712  while (new_bytes) {
1713    *work++ = no_thread_local_binding_marker;
1714    new_bytes -= sizeof(LispObj);
1715  }
1716  tcr->tlb_pointer = new_tlb;
1717  tcr->tlb_limit = new_limit;
1718  return true;
1719}
1720
1721
1722
1723void
1724exception_init()
1725{
1726  install_pmcl_exception_handlers();
1727}
1728
1729
1730
1731
1732
1733#ifdef DARWIN
1734
1735
1736#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
1737#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
1738
1739
1740
1741#define LISP_EXCEPTIONS_HANDLED_MASK \
1742 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
1743
1744/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
1745#define NUM_LISP_EXCEPTIONS_HANDLED 4
1746
1747typedef struct {
1748  int foreign_exception_port_count;
1749  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
1750  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
1751  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
1752  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
1753} MACH_foreign_exception_state;
1754
1755
1756
1757
1758/*
1759  Mach's exception mechanism works a little better than its signal
1760  mechanism (and, not incidentally, it gets along with GDB a lot
1761  better.
1762
1763  Initially, we install an exception handler to handle each native
1764  thread's exceptions.  This process involves creating a distinguished
1765  thread which listens for kernel exception messages on a set of
1766  0 or more thread exception ports.  As threads are created, they're
1767  added to that port set; a thread's exception port is destroyed
1768  (and therefore removed from the port set) when the thread exits.
1769
1770  A few exceptions can be handled directly in the handler thread;
1771  others require that we resume the user thread (and that the
1772  exception thread resumes listening for exceptions.)  The user
1773  thread might eventually want to return to the original context
1774  (possibly modified somewhat.)
1775
1776  As it turns out, the simplest way to force the faulting user
1777  thread to handle its own exceptions is to do pretty much what
1778  signal() does: the exception handlng thread sets up a sigcontext
1779  on the user thread's stack and forces the user thread to resume
1780  execution as if a signal handler had been called with that
1781  context as an argument.  We can use a distinguished UUO at a
1782  distinguished address to do something like sigreturn(); that'll
1783  have the effect of resuming the user thread's execution in
1784  the (pseudo-) signal context.
1785
1786  Since:
1787    a) we have miles of code in C and in Lisp that knows how to
1788    deal with Linux sigcontexts
1789    b) Linux sigcontexts contain a little more useful information
1790    (the DAR, DSISR, etc.) than their Darwin counterparts
1791    c) we have to create a sigcontext ourselves when calling out
1792    to the user thread: we aren't really generating a signal, just
1793    leveraging existing signal-handling code.
1794
1795  we create a Linux sigcontext struct.
1796
1797  Simple ?  Hopefully from the outside it is ...
1798
1799  We want the process of passing a thread's own context to it to
1800  appear to be atomic: in particular, we don't want the GC to suspend
1801  a thread that's had an exception but has not yet had its user-level
1802  exception handler called, and we don't want the thread's exception
1803  context to be modified by a GC while the Mach handler thread is
1804  copying it around.  On Linux (and on Jaguar), we avoid this issue
1805  because (a) the kernel sets up the user-level signal handler and
1806  (b) the signal handler blocks signals (including the signal used
1807  by the GC to suspend threads) until tcr->xframe is set up.
1808
1809  The GC and the Mach server thread therefore contend for the lock
1810  "mach_exception_lock".  The Mach server thread holds the lock
1811  when copying exception information between the kernel and the
1812  user thread; the GC holds this lock during most of its execution
1813  (delaying exception processing until it can be done without
1814  GC interference.)
1815
1816*/
1817
1818
1819#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
1820
1821void
1822fatal_mach_error(char *format, ...);
1823
1824#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
1825
1826
1827void
1828restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
1829{
1830  kern_return_t kret;
1831  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
1832
1833  /* Set the thread's FP state from the pseudosigcontext */
1834  kret = thread_set_state(thread,
1835                          ARM_FLOAT_STATE,
1836                          (thread_state_t)&(mc->__fs),
1837                          ARM_FLOAT_STATE_COUNT);
1838
1839  MACH_CHECK_ERROR("setting thread FP state", kret);
1840
1841  /* The thread'll be as good as new ... */
1842  kret = thread_set_state(thread, 
1843                          MACHINE_THREAD_STATE,
1844                          (thread_state_t)&(mc->__ss),
1845                          MACHINE_THREAD_STATE_COUNT);
1846  MACH_CHECK_ERROR("setting thread state", kret);
1847} 
1848
1849/* This code runs in the exception handling thread, in response
1850   to an attempt to execute the UU0 at "pseudo_sigreturn" (e.g.,
1851   in response to a call to pseudo_sigreturn() from the specified
1852   user thread.
1853   Find that context (the user thread's R3 points to it), then
1854   use that context to set the user thread's state.  When this
1855   function's caller returns, the Mach kernel will resume the
1856   user thread.
1857*/
1858
1859kern_return_t
1860do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
1861{
1862  ExceptionInformation *xp;
1863
1864#ifdef DEBUG_MACH_EXCEPTIONS
1865  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
1866#endif
1867  xp = tcr->pending_exception_context;
1868  if (xp) {
1869    tcr->pending_exception_context = NULL;
1870    tcr->valence = TCR_STATE_LISP;
1871    restore_mach_thread_state(thread, xp);
1872    raise_pending_interrupt(tcr);
1873  } else {
1874    Bug(NULL, "no xp here!\n");
1875  }
1876#ifdef DEBUG_MACH_EXCEPTIONS
1877  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
1878#endif
1879  return KERN_SUCCESS;
1880} 
1881
ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
  arm_thread_state_t ts;
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.__sp;
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  memmove(&(mc->__ss),&ts,sizeof(ts));

  thread_state_count = ARM_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   ARM_FLOAT_STATE,
                   (thread_state_t)&(mc->__fs),
                   &thread_state_count);


  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
  thread_get_state(thread,
                   ARM_EXCEPTION_STATE,
                   (thread_state_t)&(mc->__es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
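
/*
  Roughly, the frame built above looks like this (higher addresses at
  the top; exact offsets depend on C_REDZONE_LEN, C_PARAMSAVE_LEN,
  C_LINKAGE_LEN, and C_STK_ALIGN):

    thread's SP ------> +----------------------------+
                        |  red zone / alignment pad  |
    pseudosigcontext -> +----------------------------+
                        |   ExceptionInformation     |
    mc ---------------> +----------------------------+
                        |  machine context (ss/fs/es)|
                        +----------------------------+
                        |  C parameter save area     |
    *new_stack_top ---> +----------------------------+
                        |  backlink -> original SP   |
                        +----------------------------+
*/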

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a Linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
  arm_thread_state_t ts;
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     We've now created a sigcontext on the thread's stack.  Set things
     up so that we call the handler (with appropriate args) when the
     thread's resumed.
  */

  ts.__pc = (natural) handler_address;
  ts.__sp = stackp;
  /* The handler's four arguments are passed in r0-r3, per the ARM
     procedure call standard. */
  ts.__r[0] = (natural) signum;
  ts.__r[1] = (natural) pseudosigcontext;
  ts.__r[2] = (natural) tcr;
  ts.__r[3] = (natural) old_valence;
  /* "Returning" from the handler executes the pseudo_sigreturn UUO. */
  ts.__lr = (natural) pseudo_sigreturn;
  ts.__cpsr = UC_MCONTEXT(pseudosigcontext)->__ss.__cpsr;

  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
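
/*
  When the thread is resumed, it behaves as if it had called

    pseudo_signal_handler(signum, pseudosigcontext, tcr, old_valence);

  with a return address of pseudo_sigreturn(), whose UUO brings us back
  into do_pseudo_sigreturn() above.
*/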


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.__srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and forces the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between returning from our
     interrupt handler and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.__r1,
      *next_frame = tos->backlink;

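    /* A phony frame pushed by the pthread_kill() handler is exactly
       one lisp_frame in size, so its backlink points just past it;
       that's what identifies it as discardable here. */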
    if (tos == (next_frame - 1)) {
      ts.__srr0 = tos->savelr;
      ts.__r1 = (LispObj) next_frame;
    } else {
      ts.__srr0 += 4;
    }
  }
  thread_set_state(thread,
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
                   PPC_THREAD_STATE_COUNT
#endif
                   );

  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up; here, the hand-rolled
  openmcl_exc_server() below calls it instead.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_ARM_UNDEFINED) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      kret = do_pseudo_sigreturn(thread, tcr);
#if 0
      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
#endif

    } else if (code1 == (int)enable_fp_exceptions) {
      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else kret = thread_set_fp_exceptions_enabled(thread, false);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
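    /* Any value other than KERN_SUCCESS tells the kernel that the
       exception wasn't handled here, so it will offer it to other
       (e.g., foreign) exception handlers. */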
    kret = 17;
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_BREAKPOINT:
      signum = SIGTRAP;
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      kret = 17;
    }
  }

  return kret;
}



typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
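
/* This mirrors the MIG-generated request message for exception_raise
   (msgh_id 2401); openmcl_exc_server() below decodes it by hand rather
   than relying on the MIG-generated exc_server(). */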


boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
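
/*
  Per MIG convention, a reply's msgh_id is the request's msgh_id plus
  100, and a RetCode other than KERN_SUCCESS causes the kernel to treat
  the exception as unhandled and look for another handler for it.
*/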

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}



mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception port set",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}
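
/*
  The port set and the handler thread that listens to it are created
  lazily, on the first call; a single exception thread thus serves all
  lisp threads, whose individual exception ports are moved into this
  set by setup_mach_exception_handling() below.
*/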

/*
  Set up a new thread to handle those exceptions specified by
  LISP_EXCEPTIONS_HANDLED_MASK.  This involves creating a special
  Mach message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.
*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.
*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}
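
/*
  The right named by the TCR is a receive right; inserting a send right
  under the same name lets the kernel send exception messages to it,
  and moving it into the port set lets the single handler thread
  receive messages on behalf of every lisp thread.
*/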

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}

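/* thread_abort_safely() interrupts a thread's in-progress kernel
   operation (e.g., a mach_msg() send) only when it can do so without
   leaving the thread in an inconsistent state; when it can't, the
   loop below resumes the thread and retries the suspend/abort. */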
Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];


  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif