source: trunk/source/lisp-kernel/ppc-exceptions.c @ 14438

Last change on this file since 14438 was 14438, checked in by gb, 9 years ago

enable_fp_exceptions() just before callback in callback_to_lisp().

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 88.5 KB
/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
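/* In the PowerPC's big-endian bit numbering (bit 0 is the MSB of a
   32-bit word), FE0 is MSR bit 20 and FE1 is bit 23, so the masks
   above work out to 0x800 and 0x100 respectively.  FE0/FE1 together
   select the floating-point exception mode (disabled vs. precise). */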
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
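/* For illustration, the two forms recognized above are:

     subf allocptr,rA,allocptr  ; allocptr -= GPR[rA]; the displacement
                                ; is the value currently in rA
     addi allocptr,allocptr,-d  ; the displacement is d, recovered by
                                ; negating the sign-extended low 16 bits

   Any other instruction preceding the allocation trap is a bug. */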


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
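/* Sketch of the window being emulated (steps, not a verbatim compiler
   listing):

     <decrement allocptr by (sizeof(cons) - fulltag_cons)>
     <allocation trap>              ; ALLOC_TRAP_INSTRUCTION
     <store into cdr via allocptr>  ; STORE_CDR_ALLOCPTR_INSTRUCTION
     <store into car via allocptr>  ; STORE_CAR_ALLOCPTR_INSTRUCTION
     <result register := allocptr>  ; still tagged as fulltag_cons
     <clear allocptr's tag>         ; UNTAG_ALLOCPTR_INSTRUCTION

   If the thread is interrupted anywhere in this window, the loop above
   performs the remaining steps by hand and advances the PC past the
   untagging instruction. */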

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr & STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can just grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
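/* Note that allocation proceeds downward: if the thread's allocptr
   moved from "last" down to "current", it consumed (last - current)
   bytes.  VOID_ALLOCPTR means "no allocation region is active", so
   there's nothing to account for in that case. */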

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
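/* The loop above builds the list in place: each cons is one dnode, and
   cells are initialized from the lowest address upward.  Each cell's
   link word (word 0, the cdr) points to the previously initialized
   cell and word 1 (the car) gets the initial element.  Since "prev"
   starts out as NIL, the first cell initialized becomes the final cdr,
   and the last cell initialized ends up as the head of a proper list
   of length nconses, returned in arg_z. */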

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}
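/* Worked example of the fulltag_misc arithmetic: to allocate a uvector
   of n bytes, compiled code decrements allocptr by (n - fulltag_misc),
   leaving it tagged as fulltag_misc.  We therefore see
   disp = -(n - fulltag_misc), and bytes_needed = (-disp) + fulltag_misc
   recovers n. */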

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when GC was disabled.  That failed,
         so try a full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack indicated by "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition; the thread will keep
     trapping until all stacks are back well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
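/* Example of the sizing arithmetic (assuming, for illustration, a 64KB
   allocation quantum, i.e. log2_allocation_quantum == 16): with
   oldlimit == 0x30001000 and need == 0x5000, the new limit is
   align_to_power_of_2(0x30001000,16) + align_to_power_of_2(0x5000,16)
   == 0x30010000 + 0x10000 == 0x30020000.  If that exceeds a->high and
   extension is allowed, the heap is grown by the shortfall plus
   lisp_heap_gc_threshold, halving the extra amount on each failed
   attempt until it drops below 4MB. */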


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context) {
    xp = tcr->pending_exception_context;
  } else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, sp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area.  (Note that xp is NULL
       here, so we can't print a PC.) */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr);
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler *protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux, and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
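/* Background on the non-siginfo test above: in the PowerPC's IBM-style
   bit numbering, (1 << 25) in a 32-bit word is DSISR bit 6, which the
   architecture sets when the faulting access was a store; trap type
   0x300 is the data storage interrupt (DSI).  The Darwin constant is
   written as 0x0300/0x100 (i.e., 3), reflecting how xpTRAP reports the
   exception type there. */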

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to which the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread would try the write again after it
  resumes, the page would be re-protected, and we'd have taken this
  fault twice.)  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
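/* Decoding example for the check above: a 32-bit "stw rs,d(ra)" has
   major opcode 36 in its top 6 bits, and "stwx rs,ra,rb" is major
   opcode 31 with extended opcode 151 (the same encodings listed in the
   disabled fallback in is_write_fault).  A store whose source register
   is at or above fn and holds a tagged cons/uvector pointer into the
   ephemeral generations is performed here by hand, so the EGC will see
   the intergenerational reference even if it runs before the thread
   retries the store. */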


OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l = xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}


int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* The rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
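/* "delta" is a byte count, but xpPC is an (opcode *), so pointer
   arithmetic is in 4-byte words: adjust_exception_pc(xp, 4) advances
   the saved PC by exactly one instruction. */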


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
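/* UUO field layout assumed by the decoding above: the minor opcode
   comes from UUO_MINOR(), bits 11-15 (counting from the LSB) name a
   register operand (rb), and bits 16-25 carry a 10-bit error number,
   which is why rb is masked with 0x1f and errnum with 0x3ff. */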

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the PC itself.
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
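/* Example of the index computation (addresses hypothetical): if fn's
   code vector is at #x100000, its first instruction is at
   #x100000 + misc_data_offset, so a trap at #x100040 yields
   index = (#x100040 - (#x100000 + misc_data_offset)) >> 2, the word
   offset of the trap within the code vector.  Passing a register
   number plus an index (rather than a raw PC) lets lisp reconstruct
   the location even if the GC moves the function. */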

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

#ifdef DARWIN
  enable_fp_exceptions();
#endif

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* Callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

/* Decide, from a trap instruction's TO field and operands, whether
   the trap is conditional (i.e., might not be taken). */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI): unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let the
                                   debugger have it */
}
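/* For example: "tweq r3,r3" always traps (TO_EQ set, identical
   registers), so it's unconditional; "twlt r3,r4" can't trap when the
   operands are equal (no TO_EQ), so it's conditional; and a TO mask of
   TO_LT|TO_EQ|TO_GT covers every signed ordering, so it's
   unconditional too. */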

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}
1802
1803void
1804wait_for_exception_lock_in_handler(TCR *tcr, 
1805                                   ExceptionInformation *context,
1806                                   xframe_list *xf)
1807{
1808
1809  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1810#ifdef DEBUG
1811  fprintf(dbgout, "0x%x has exception lock\n", tcr);
1812#endif
1813  xf->curr = context;
1814  xf->prev = tcr->xframe;
1815  tcr->xframe =  xf;
1816  tcr->pending_exception_context = NULL;
1817  tcr->valence = TCR_STATE_FOREIGN; 
1818}
1819
1820void
1821unlock_exception_lock_in_handler(TCR *tcr)
1822{
1823  tcr->pending_exception_context = tcr->xframe->curr;
1824  tcr->xframe = tcr->xframe->prev;
1825  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1826#ifdef DEBUG
1827  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
1828#endif
1829  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1830}
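
/* A sketch of how the helpers above are meant to be paired inside a
   handler (signal_handler below follows this shape).  Here "tcr" and
   "context" are assumed to name the current TCR and exception
   context, and the commented-out call is a hypothetical stand-in for
   the handler body. */
#if 0
{
  xframe_list xl;
  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  wait_for_exception_lock_in_handler(tcr, context, &xl);
  /* ... handle the exception here ... */
  unlock_exception_lock_in_handler(tcr);
  exit_signal_handler(tcr, old_valence);
}
#endif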
1831
1832/*
1833   If an interrupt is pending on exception exit, try to ensure
1834   that the thread sees it as soon as it's able to run.
1835*/
1836void
1837raise_pending_interrupt(TCR *tcr)
1838{
1839  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1840    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1841  }
1842}
1843
1844void
1845exit_signal_handler(TCR *tcr, int old_valence)
1846{
1847  sigset_t mask;
1848  sigfillset(&mask);
1849 
1850  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1851  tcr->valence = old_valence;
1852  tcr->pending_exception_context = NULL;
1853}
1854
1855
1856void
1857signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1858{
1859  xframe_list xframe_link;
1860
1861  if (!use_mach_exception_handling) {
1862   
1863    tcr = (TCR *) get_interrupt_tcr(false);
1864 
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signal
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */
1871   
1872    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1873  }
1874
1875  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1876    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1877    pthread_kill(pthread_self(), thread_suspend_signal);
1878  }
1879
1880 
1881  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1882  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1883    char msg[512];
1884    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1885    if (lisp_Debugger(context, info, signum, false, msg)) {
1886      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1887    }
1888  }
1889
1890  unlock_exception_lock_in_handler(tcr);
1891
1892  /* This thread now looks like a thread that was suspended while
1893     executing lisp code.  If some other thread gets the exception
1894     lock and GCs, the context (this thread's suspend_context) will
1895     be updated.  (That's only of concern if it happens before we
1896     can return to the kernel/to the Mach exception handler).
1897  */
1898  if (!use_mach_exception_handling) {
1899    exit_signal_handler(tcr, old_valence);
1900    raise_pending_interrupt(tcr);
1901  }
1902}
1903
1904/*
1905  If it looks like we're in the middle of an atomic operation, make
1906  it seem as if that operation is either complete or hasn't started
1907  yet.
1908
1909  The cases handled include:
1910
1911  a) storing into a newly-allocated lisp frame on the stack.
1912  b) marking a newly-allocated TSP frame as containing "raw" data.
1913  c) consing: the GC has its own ideas about how this should be
1914     handled, but other callers would be best advised to back
1915     up or move forward, according to whether we're in the middle
1916     of allocating a cons cell or allocating a uvector.
1917  d) a STMW to the vsp
1918  e) EGC write-barrier subprims.
1919*/
1920
1921extern opcode
1922  egc_write_barrier_start,
1923  egc_write_barrier_end, 
1924  egc_store_node_conditional, 
1925  egc_store_node_conditional_test,
1926  egc_set_hash_key,
1927  egc_gvset,
1928  egc_rplaca,
1929  egc_rplacd,
1930  egc_set_hash_key_conditional,
1931  egc_set_hash_key_conditional_test;
1932
1933
1934extern opcode ffcall_return_window, ffcall_return_window_end;
1935
1936void
1937pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1938{
1939  pc program_counter = xpPC(xp);
1940  opcode instr = *program_counter;
1941  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1942  LispObj cur_allocptr = xpGPR(xp, allocptr);
1943  int allocptr_tag = fulltag_of(cur_allocptr);
1944 
1945
1946
1947  if ((program_counter < &egc_write_barrier_end) && 
1948      (program_counter >= &egc_write_barrier_start)) {
1949    LispObj *ea = 0, val = 0, root = 0;
1950    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1951    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1952
1953    if (program_counter >= &egc_set_hash_key_conditional) {
1954      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1955          ((program_counter == &egc_set_hash_key_conditional_test) &&
1956           (! (xpCCR(xp) & 0x20000000)))) {
1957        return;
1958      }
1959      need_store = false;
1960      root = xpGPR(xp,arg_x);
1961      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1962      need_memoize_root = true;
1963    } else if (program_counter >= &egc_store_node_conditional) {
1964      if ((program_counter < &egc_store_node_conditional_test) ||
1965          ((program_counter == &egc_store_node_conditional_test) &&
1966           (! (xpCCR(xp) & 0x20000000)))) {
1967        /* The conditional store either hasn't been attempted yet, or
1968           has failed.  No need to adjust the PC, or do memoization. */
1969        return;
1970      }
1971      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1972      xpGPR(xp,arg_z) = t_value;
1973      need_store = false;
1974    } else if (program_counter >= &egc_set_hash_key) {
1975      root = xpGPR(xp,arg_x);
1976      val = xpGPR(xp,arg_z);
1977      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1978      need_memoize_root = true;
1979    } else if (program_counter >= &egc_gvset) {
1980      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1981      val = xpGPR(xp,arg_z);
1982    } else if (program_counter >= &egc_rplacd) {
1983      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1984      val = xpGPR(xp,arg_z);
1985    } else {                      /* egc_rplaca */
1986      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1987      val = xpGPR(xp,arg_z);
1988    }
1989    if (need_store) {
1990      *ea = val;
1991    }
1992    if (need_check_memo) {
1993      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1994      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1995          ((LispObj)ea < val)) {
1996        atomic_set_bit(refbits, bitnumber);
1997        if (need_memoize_root) {
1998          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1999          atomic_set_bit(refbits, bitnumber);
2000        }
2001      }
2002    }
2003    set_xpPC(xp, xpLR(xp));
2004    return;
2005  }
2006
2007
2008  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
2009    LispObj tsp_val = xpGPR(xp,tsp);
2010   
2011    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
2012    adjust_exception_pc(xp, 4);
2013    return;
2014  }
2015 
2016  if (frame->backlink == (frame+1)) {
2017    if (
2018#ifdef PPC64
2019        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
2020        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
2021#else
2022        (major_opcode_p(instr, major_opcode_STW)) && 
2023#endif
2024        (RA_field(instr) == sp) &&
2025        /* There are a few places in the runtime that store into
2026           a previously-allocated frame atop the stack when
2027           throwing values around.  We only care about the case
2028           where the frame was newly allocated, in which case
2029           there must have been a CREATE_LISP_FRAME_INSTRUCTION
2030           a few instructions before the current program counter.
2031           (The whole point here is that a newly allocated frame
2032           might contain random values that we don't want the
2033           GC to see; a previously allocated frame should already
2034           be completely initialized.)
2035        */
2036        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
2037         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
2038         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
2039#ifdef PPC64
2040      int disp = DS_field(instr);
2041#else     
2042      int disp = D_field(instr);
2043#endif
2044
2045
2046      if (disp < (4*node_size)) {
2047#if 0
2048        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
2049#endif
2050        frame->savevsp = 0;
2051        if (disp < (3*node_size)) {
2052          frame->savelr = 0;
2053          if (disp == node_size) {
2054            frame->savefn = 0;
2055          }
2056        }
2057      }
2058      return;
2059    }
2060  }
2061
2062  if (allocptr_tag != tag_fixnum) {
2063    signed_natural disp = allocptr_displacement(xp);
2064
2065    if (disp) {
2066      /* Being architecturally "at" the alloc trap doesn't tell
2067         us much (in particular, it doesn't tell us whether
2068         or not the thread has committed to taking the trap
2069         and is waiting for the exception lock (or waiting
2070         for the Mach exception thread to tell it how bad
2071         things are) or is about to execute a conditional
         trap.)
2073         Regardless of which case applies, we want the
2074         other thread to take (or finish taking) the
2075         trap, and we don't want it to consider its
2076         current allocptr to be valid.
2077         The difference between this case (suspend other
2078         thread for GC) and the previous case (suspend
2079         current thread for interrupt) is solely a
2080         matter of what happens after we leave this
2081         function: some non-current thread will stay
2082         suspended until the GC finishes, then take
2083         (or start processing) the alloc trap.   The
2084         current thread will go off and do PROCESS-INTERRUPT
2085         or something, and may return from the interrupt
2086         and need to finish the allocation that got interrupted.
2087      */
2088
2089      if (alloc_disp) {
2090        *alloc_disp = disp;
2091        xpGPR(xp,allocptr) += disp;
2092        /* Leave the PC at the alloc trap.  When the interrupt
2093           handler returns, it'll decrement allocptr by disp
2094           and the trap may or may not be taken.
2095        */
2096      } else {
2097        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2098        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2099        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2100      }
2101    } else {
2102#ifdef DEBUG
2103      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2104#endif
2105      /* If we're already past the alloc_trap, finish allocating
2106         the object. */
2107      if (allocptr_tag == fulltag_cons) {
2108        finish_allocating_cons(xp);
2109#ifdef DEBUG
2110          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
2111                  tcr);
2112#endif
2113      } else {
2114        if (allocptr_tag == fulltag_misc) {
2115#ifdef DEBUG
2116          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
2117                  tcr);
2118#endif
2119          finish_allocating_uvector(xp);
2120        } else {
2121          Bug(xp, "what's being allocated here ?");
2122        }
2123      }
2124      /* Whatever we finished allocating, reset allocptr/allocbase to
2125         VOID_ALLOCPTR */
2126      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2127    }
2128    return;
2129  }
2130
2131  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2132    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2133    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2134#if 0
2135        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2136#endif
2137
2138    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2139      deref(frame,idx) = 0;
2140    }
2141    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2142    return;
2143  }
2144
#ifndef PPC64
2146  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2147      (RA_field(instr) == vsp)) {
2148    int r;
2149    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2150   
2151    for (r = RS_field(instr); r <= 31; r++) {
2152      *vspptr++ = xpGPR(xp,r);
2153    }
2154    adjust_exception_pc(xp, 4);
2155  }
2156#endif
2157}
2158
2159void
2160interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2161{
2162  TCR *tcr = get_interrupt_tcr(false);
2163  if (tcr) {
2164    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2165      tcr->interrupt_pending = 1 << fixnumshift;
2166    } else {
2167      LispObj cmain = nrs_CMAIN.vcell;
2168
2169      if ((fulltag_of(cmain) == fulltag_misc) &&
2170          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2171        /*
2172           This thread can (allegedly) take an interrupt now.
2173           It's tricky to do that if we're executing
2174           foreign code (especially Linuxthreads code, much
2175           of which isn't reentrant.)
2176           If we're unwinding the stack, we also want to defer
2177           the interrupt.
2178        */
2179        if ((tcr->valence != TCR_STATE_LISP) ||
2180            (tcr->unwinding != 0)) {
2181          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2182        } else {
2183          xframe_list xframe_link;
2184          int old_valence;
2185          signed_natural disp=0;
2186         
2187          pc_luser_xp(context, tcr, &disp);
2188          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2189          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2190#ifdef DEBUG
2191          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
2192#endif
2193          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2194          if (disp) {
2195            xpGPR(context,allocptr) -= disp;
2196          }
2197          unlock_exception_lock_in_handler(tcr);
2198#ifdef DEBUG
2199          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
2200#endif
2201          exit_signal_handler(tcr, old_valence);
2202        }
2203      }
2204    }
2205  }
2206#ifdef DARWIN
2207    DarwinSigReturn(context);
2208#endif
2209}
2210
2211
2212
2213void
2214install_signal_handler(int signo, void *handler, Boolean system, Boolean on_altstack)
2215{
2216  struct sigaction sa;
2217 
2218  sa.sa_sigaction = (void *)handler;
2219  sigfillset(&sa.sa_mask);
2220  sa.sa_flags = 
2221    0 /* SA_RESTART */
2222    | SA_SIGINFO
2223#ifdef DARWIN
2224#ifdef PPC64
2225    | SA_64REGSET
2226#endif
2227#endif
2228    ;
2229
2230  sigaction(signo, &sa, NULL);
2231  if (system) {
2232    extern sigset_t user_signals_reserved;
2233    sigaddset(&user_signals_reserved, signo);
2234  }
2235
2236}
2237
2238void
2239install_pmcl_exception_handlers()
2240{
2241#ifdef DARWIN
2242  extern Boolean use_mach_exception_handling;
2243#endif
2244
2245  Boolean install_signal_handlers_for_exceptions =
2246#ifdef DARWIN
2247    !use_mach_exception_handling
2248#else
2249    true
2250#endif
2251    ;
2252  if (install_signal_handlers_for_exceptions) {
2253    extern int no_sigtrap;
2254    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
2255    if (no_sigtrap != 1) {
2256      install_signal_handler(SIGTRAP, (void *)signal_handler, true, false);
2257    }
2258    install_signal_handler(SIGBUS,  (void *)signal_handler, true, false);
2259    install_signal_handler(SIGSEGV, (void *)signal_handler, true, false);
2260    install_signal_handler(SIGFPE, (void *)signal_handler, true, false);
2261  }
2262 
2263  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2264                         (void *)interrupt_handler, true, false);
2265  signal(SIGPIPE, SIG_IGN);
2266}
2267
2268void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2270{
2271  TCR *tcr = get_tcr(false);
2272  area *a;
2273  sigset_t mask;
2274 
2275  sigemptyset(&mask);
2276
2277  if (tcr) {
2278    tcr->valence = TCR_STATE_FOREIGN;
2279    a = tcr->vs_area;
2280    if (a) {
2281      a->active = a->high;
2282    }
2283    a = tcr->ts_area;
2284    if (a) {
2285      a->active = a->high;
2286    }
2287    a = tcr->cs_area;
2288    if (a) {
2289      a->active = a->high;
2290    }
2291  }
2292 
2293  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2294  pthread_exit(NULL);
2295}
2296
2297void
2298thread_signal_setup()
2299{
2300  thread_suspend_signal = SIG_SUSPEND_THREAD;
2301  thread_kill_signal = SIG_KILL_THREAD;
2302
2303  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
2304  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
2305}
2306
2307
2308
2309void
2310unprotect_all_areas()
2311{
2312  protected_area_ptr p;
2313
2314  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2315    unprotect_area(p);
2316  }
2317}
2318
2319/*
2320  A binding subprim has just done "twlle limit_regno,idx_regno" and
2321  the trap's been taken.  Extend the tcr's tlb so that the index will
2322  be in bounds and the new limit will be on a page boundary, filling
2323  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2324  the tcr fields and the registers in the xp and return true if this
2325  all works, false otherwise.
2326
2327  Note that the tlb was allocated via malloc, so realloc can do some
2328  of the hard work.
2329*/
2330Boolean
2331extend_tcr_tlb(TCR *tcr, 
2332               ExceptionInformation *xp, 
2333               unsigned limit_regno,
2334               unsigned idx_regno)
2335{
2336  unsigned
2337    index = (unsigned) (xpGPR(xp,idx_regno)),
2338    old_limit = tcr->tlb_limit,
2339    new_limit = align_to_power_of_2(index+1,12),
2340    new_bytes = new_limit-old_limit;
2341  LispObj
2342    *old_tlb = tcr->tlb_pointer,
2343    *new_tlb = realloc(old_tlb, new_limit),
2344    *work;
2345
2346  if (new_tlb == NULL) {
2347    return false;
2348  }
2349 
2350  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2351
2352  while (new_bytes) {
2353    *work++ = no_thread_local_binding_marker;
2354    new_bytes -= sizeof(LispObj);
2355  }
2356  tcr->tlb_pointer = new_tlb;
2357  tcr->tlb_limit = new_limit;
2358  xpGPR(xp, limit_regno) = new_limit;
2359  return true;
2360}
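
/* Worked example of the growth arithmetic above (the numbers are
   illustrative, assuming align_to_power_of_2(x,12) rounds x up to a
   multiple of 4096): if the faulting index is 5000 and tlb_limit is
   4096, then align_to_power_of_2(5001,12) = 8192, so new_limit is
   8192, new_bytes is 4096, and that new page is filled with
   no_thread_local_binding_marker before the registers are updated. */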
2361
2362
2363
2364void
2365exception_init()
2366{
2367  install_pmcl_exception_handlers();
2368}
2369
2370
2371
2372
2373
2374#ifdef DARWIN
2375
2376
2377#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2378#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2379
2380
2381
2382#define LISP_EXCEPTIONS_HANDLED_MASK \
2383 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2384
2385/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2386#define NUM_LISP_EXCEPTIONS_HANDLED 4
2387
2388typedef struct {
2389  int foreign_exception_port_count;
2390  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2391  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2392  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2393  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2394} MACH_foreign_exception_state;
2395
2396
2397
2398
2399/*
2400  Mach's exception mechanism works a little better than its signal
2401  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
2403
2404  Initially, we install an exception handler to handle each native
2405  thread's exceptions.  This process involves creating a distinguished
2406  thread which listens for kernel exception messages on a set of
2407  0 or more thread exception ports.  As threads are created, they're
2408  added to that port set; a thread's exception port is destroyed
2409  (and therefore removed from the port set) when the thread exits.
2410
2411  A few exceptions can be handled directly in the handler thread;
2412  others require that we resume the user thread (and that the
2413  exception thread resumes listening for exceptions.)  The user
2414  thread might eventually want to return to the original context
2415  (possibly modified somewhat.)
2416
2417  As it turns out, the simplest way to force the faulting user
2418  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
2420  on the user thread's stack and forces the user thread to resume
2421  execution as if a signal handler had been called with that
2422  context as an argument.  We can use a distinguished UUO at a
2423  distinguished address to do something like sigreturn(); that'll
2424  have the effect of resuming the user thread's execution in
2425  the (pseudo-) signal context.
2426
2427  Since:
2428    a) we have miles of code in C and in Lisp that knows how to
2429    deal with Linux sigcontexts
2430    b) Linux sigcontexts contain a little more useful information
2431    (the DAR, DSISR, etc.) than their Darwin counterparts
2432    c) we have to create a sigcontext ourselves when calling out
2433    to the user thread: we aren't really generating a signal, just
2434    leveraging existing signal-handling code.
2435
2436  we create a Linux sigcontext struct.
2437
  Simple?  Hopefully from the outside it is ...
2439
2440  We want the process of passing a thread's own context to it to
2441  appear to be atomic: in particular, we don't want the GC to suspend
2442  a thread that's had an exception but has not yet had its user-level
2443  exception handler called, and we don't want the thread's exception
2444  context to be modified by a GC while the Mach handler thread is
2445  copying it around.  On Linux (and on Jaguar), we avoid this issue
2446  because (a) the kernel sets up the user-level signal handler and
2447  (b) the signal handler blocks signals (including the signal used
2448  by the GC to suspend threads) until tcr->xframe is set up.
2449
2450  The GC and the Mach server thread therefore contend for the lock
2451  "mach_exception_lock".  The Mach server thread holds the lock
2452  when copying exception information between the kernel and the
2453  user thread; the GC holds this lock during most of its execution
2454  (delaying exception processing until it can be done without
2455  GC interference.)
2456
2457*/
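
/* Schematically (a summary of the comment above, in terms of the
   functions defined below):

     user thread                 kernel              exception thread
     -----------                 ------              ----------------
     trap/fault             -->  exception msg  -->  catch_exception_raise()
                                                     setup_signal_frame()
     pseudo_signal_handler() <-  thread_set_state <- (pseudo sigcontext built)
        ...
     pseudo_sigreturn() UUO -->  exception msg  -->  do_pseudo_sigreturn()
     original context resumed <- thread_set_state <- restore_mach_thread_state()
*/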
2458
2459#ifdef PPC64
2460#define C_REDZONE_LEN           320
2461#define C_STK_ALIGN             32
2462#else
2463#define C_REDZONE_LEN           224
2464#define C_STK_ALIGN             16
2465#endif
2466#define C_PARAMSAVE_LEN         64
2467#define C_LINKAGE_LEN           48
2468
2469#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
2470
2471void
2472fatal_mach_error(char *format, ...);
2473
2474#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2475
2476
2477void
2478restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2479{
2480  kern_return_t kret;
2481  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2482
2483  /* Set the thread's FP state from the pseudosigcontext */
2484  kret = thread_set_state(thread,
2485                          PPC_FLOAT_STATE,
2486                          (thread_state_t)&(mc->__fs),
2487                          PPC_FLOAT_STATE_COUNT);
2488
2489  MACH_CHECK_ERROR("setting thread FP state", kret);
2490
2491  /* The thread'll be as good as new ... */
2492#ifdef PPC64
2493  kret = thread_set_state(thread,
2494                          PPC_THREAD_STATE64,
2495                          (thread_state_t)&(mc->__ss),
2496                          PPC_THREAD_STATE64_COUNT);
2497#else
2498  kret = thread_set_state(thread, 
2499                          MACHINE_THREAD_STATE,
2500                          (thread_state_t)&(mc->__ss),
2501                          MACHINE_THREAD_STATE_COUNT);
2502#endif
2503  MACH_CHECK_ERROR("setting thread state", kret);
2504} 
2505
/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (i.e.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
2510   Find that context (the user thread's R3 points to it), then
2511   use that context to set the user thread's state.  When this
2512   function's caller returns, the Mach kernel will resume the
2513   user thread.
2514*/
2515
2516kern_return_t
2517do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2518{
2519  ExceptionInformation *xp;
2520
2521#ifdef DEBUG_MACH_EXCEPTIONS
2522  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2523#endif
2524  xp = tcr->pending_exception_context;
2525  if (xp) {
2526    tcr->pending_exception_context = NULL;
2527    tcr->valence = TCR_STATE_LISP;
2528    restore_mach_thread_state(thread, xp);
2529    raise_pending_interrupt(tcr);
2530  } else {
2531    Bug(NULL, "no xp here!\n");
2532  }
2533#ifdef DEBUG_MACH_EXCEPTIONS
2534  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2535#endif
2536  return KERN_SUCCESS;
2537} 
2538
2539ExceptionInformation *
2540create_thread_context_frame(mach_port_t thread, 
2541                            natural *new_stack_top)
2542{
2543#ifdef PPC64
2544  ppc_thread_state64_t ts;
2545#else
2546  ppc_thread_state_t ts;
2547#endif
2548  mach_msg_type_number_t thread_state_count;
2549  kern_return_t result;
2550  ExceptionInformation *pseudosigcontext;
2551  MCONTEXT_T mc;
2552  natural stackp, backlink;
2553
2554#ifdef PPC64
2555  thread_state_count = PPC_THREAD_STATE64_COUNT;
2556  result = thread_get_state(thread,
2557                            PPC_THREAD_STATE64,
2558                            (thread_state_t)&ts,
2559                            &thread_state_count);
2560#else
2561  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2562  result = thread_get_state(thread, 
2563                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2564                            (thread_state_t)&ts,
2565                            &thread_state_count);
2566#endif
2567 
2568  if (result != KERN_SUCCESS) {
2569    get_tcr(true);
2570    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2571  }
2572  stackp = ts.__r1;
2573  backlink = stackp;
2574  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2575  stackp -= sizeof(*pseudosigcontext);
2576  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2577
2578  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2579  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2580  memmove(&(mc->__ss),&ts,sizeof(ts));
2581
2582  thread_state_count = PPC_FLOAT_STATE_COUNT;
2583  thread_get_state(thread,
2584                   PPC_FLOAT_STATE,
2585                   (thread_state_t)&(mc->__fs),
2586                   &thread_state_count);
2587
2588
2589#ifdef PPC64
2590  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2591#else
2592  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2593#endif
2594  thread_get_state(thread,
2595#ifdef PPC64
2596                   PPC_EXCEPTION_STATE64,
2597#else
2598                   PPC_EXCEPTION_STATE,
2599#endif
2600                   (thread_state_t)&(mc->__es),
2601                   &thread_state_count);
2602
2603
2604  UC_MCONTEXT(pseudosigcontext) = mc;
2605  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2606  stackp -= C_LINKAGE_LEN;
2607  *(natural *)ptr_from_lispobj(stackp) = backlink;
2608  if (new_stack_top) {
2609    *new_stack_top = stackp;
2610  }
2611  return pseudosigcontext;
2612}
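
/* The frame carved out above, sketched from higher to lower addresses
   (the sizes are the C_* constants defined earlier):

     ts.__r1                  original stack pointer
     [C_REDZONE_LEN]          red zone, skipped over
     pseudosigcontext         the ExceptionInformation
     mc                       MCONTEXT_T, aligned to C_STK_ALIGN
     [C_PARAMSAVE_LEN]        C parameter-save area
     [C_LINKAGE_LEN]          linkage area; its first word is the
                              backlink to the original stack pointer
     <-- *new_stack_top */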
2613
2614/*
2615  This code sets up the user thread so that it executes a "pseudo-signal
2616  handler" function when it resumes.  Create a linux sigcontext struct
2617  on the thread's stack and pass it as an argument to the pseudo-signal
2618  handler.
2619
2620  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2621  which will restore the thread's context.
2622
2623  If the handler invokes code that throws (or otherwise never sigreturn()'s
2624  to the context), that's fine.
2625
2626  Actually, check that: throw (and variants) may need to be careful and
2627  pop the tcr's xframe list until it's younger than any frame being
2628  entered.
2629*/
2630
2631int
2632setup_signal_frame(mach_port_t thread,
2633                   void *handler_address,
2634                   int signum,
2635                   int code,
2636                   TCR *tcr)
2637{
2638#ifdef PPC64
2639  ppc_thread_state64_t ts;
2640#else
2641  ppc_thread_state_t ts;
2642#endif
2643  ExceptionInformation *pseudosigcontext;
2644  int old_valence = tcr->valence;
2645  natural stackp;
2646
2647#ifdef DEBUG_MACH_EXCEPTIONS
2648  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2649#endif
2650  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2651  pseudosigcontext->uc_onstack = 0;
2652  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2653  tcr->pending_exception_context = pseudosigcontext;
2654  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2655 
2656
  /*
     At this point we've created a sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */
2662
2663  ts.__srr0 = (natural) handler_address;
2664  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2665  ts.__r1 = stackp;
2666  ts.__r3 = signum;
2667  ts.__r4 = (natural)pseudosigcontext;
2668  ts.__r5 = (natural)tcr;
2669  ts.__r6 = (natural)old_valence;
2670  ts.__lr = (natural)pseudo_sigreturn;
2671
2672
2673#ifdef PPC64
2674  ts.__r13 = xpGPR(pseudosigcontext,13);
2675  thread_set_state(thread,
2676                   PPC_THREAD_STATE64,
2677                   (thread_state_t)&ts,
2678                   PPC_THREAD_STATE64_COUNT);
2679#else
2680  thread_set_state(thread, 
2681                   MACHINE_THREAD_STATE,
2682                   (thread_state_t)&ts,
2683                   MACHINE_THREAD_STATE_COUNT);
2684#endif
2685#ifdef DEBUG_MACH_EXCEPTIONS
2686  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2687#endif
2688  return 0;
2689}
2690
2691
2692void
2693pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2694{
2695  signal_handler(signum, NULL, context, tcr, old_valence);
2696} 
2697
2698
2699int
2700thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2701{
2702#ifdef PPC64
2703  ppc_thread_state64_t ts;
2704#else
2705  ppc_thread_state_t ts;
2706#endif
2707  mach_msg_type_number_t thread_state_count;
2708
2709#ifdef PPC64
2710  thread_state_count = PPC_THREAD_STATE64_COUNT;
2711#else
2712  thread_state_count = PPC_THREAD_STATE_COUNT;
2713#endif
2714  thread_get_state(thread, 
2715#ifdef PPC64
2716                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2717#else
2718                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2719#endif
2720                   (thread_state_t)&ts,
2721                   &thread_state_count);
2722  if (enabled) {
2723    ts.__srr1 |= MSR_FE0_FE1_MASK;
2724  } else {
2725    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2726  }
2727 
2728  ts.__srr0 += 4;
2729  thread_set_state(thread, 
2730#ifdef PPC64
2731                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2732#else
2733                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2734#endif
2735                   (thread_state_t)&ts,
2736#ifdef PPC64
2737                   PPC_THREAD_STATE64_COUNT
2738#else
2739                   PPC_THREAD_STATE_COUNT
2740#endif
2741                   );
2742
2743  return 0;
2744}
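
/* Note: the function above is reached (see catch_exception_raise
   below) when lisp code executes the enable_fp_exceptions or
   disable_fp_exceptions UUO.  Since the UUO itself shouldn't be
   re-executed, the "ts.__srr0 += 4" above advances the resume PC
   past it. */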
2745
2746/*
2747  This function runs in the exception handling thread.  It's
2748  called (by this precise name) from the library function "exc_server()"
2749  when the thread's exception ports are set up.  (exc_server() is called
2750  via mach_msg_server(), which is a function that waits for and dispatches
2751  on exception messages from the Mach kernel.)
2752
2753  This checks to see if the exception was caused by a pseudo_sigreturn()
2754  UUO; if so, it arranges for the thread to have its state restored
2755  from the specified context.
2756
2757  Otherwise, it tries to map the exception to a signal number and
2758  arranges that the thread run a "pseudo signal handler" to handle
2759  the exception.
2760
2761  Some exceptions could and should be handled here directly.
2762*/
2763
2764kern_return_t
2765catch_exception_raise(mach_port_t exception_port,
2766                      mach_port_t thread,
2767                      mach_port_t task, 
2768                      exception_type_t exception,
2769                      exception_data_t code_vector,
2770                      mach_msg_type_number_t code_count)
2771{
2772  int signum = 0, code = *code_vector, code1;
2773  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2774  kern_return_t kret;
2775
2776#ifdef DEBUG_MACH_EXCEPTIONS
2777  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2778#endif
2779
2780  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2781    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2782  } 
2783  if ((exception == EXC_BAD_INSTRUCTION) &&
2784      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2785      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2786       (code1 == (int)enable_fp_exceptions) ||
2787       (code1 == (int)disable_fp_exceptions))) {
2788    if (code1 == (int)pseudo_sigreturn) {
2789      kret = do_pseudo_sigreturn(thread, tcr);
2790#if 0
2791      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2792#endif
2793       
2794    } else if (code1 == (int)enable_fp_exceptions) {
2795      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else kret = thread_set_fp_exceptions_enabled(thread, false);
2797  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2798    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2799    kret = 17;
2800  } else {
2801    switch (exception) {
2802    case EXC_BAD_ACCESS:
2803      signum = SIGSEGV;
2804      break;
2805       
2806    case EXC_BAD_INSTRUCTION:
2807      signum = SIGILL;
2808      break;
2809     
2810    case EXC_SOFTWARE:
2811      if (code == EXC_PPC_TRAP) {
2812        signum = SIGTRAP;
2813      }
2814      break;
2815     
2816    case EXC_ARITHMETIC:
2817      signum = SIGFPE;
2818      break;
2819
2820    default:
2821      break;
2822    }
2823    if (signum) {
2824      kret = setup_signal_frame(thread,
2825                                (void *)pseudo_signal_handler,
2826                                signum,
2827                                code,
2828                                tcr);
2829#if 0
2830      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2831#endif
2832
2833    } else {
2834      kret = 17;
2835    }
2836  }
2837
2838  return kret;
2839}
2840
2841
2842
2843typedef struct {
2844  mach_msg_header_t Head;
2845  /* start of the kernel processed data */
2846  mach_msg_body_t msgh_body;
2847  mach_msg_port_descriptor_t thread;
2848  mach_msg_port_descriptor_t task;
2849  /* end of the kernel processed data */
2850  NDR_record_t NDR;
2851  exception_type_t exception;
2852  mach_msg_type_number_t codeCnt;
2853  integer_t code[2];
2854  mach_msg_trailer_t trailer;
2855} exceptionRequest;
2856
2857
2858boolean_t
2859openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2860{
2861  static NDR_record_t _NDR = {0};
2862  kern_return_t handled;
2863  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2864  exceptionRequest *req = (exceptionRequest *) in;
2865
2866  reply->NDR = _NDR;
2867
2868  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2869  out->msgh_remote_port = in->msgh_remote_port;
2870  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2871  out->msgh_local_port = MACH_PORT_NULL;
2872  out->msgh_id = in->msgh_id+100;
2873
2874  /* Could handle other exception flavors in the range 2401-2403 */
2875
2876
2877  if (in->msgh_id != 2401) {
2878    reply->RetCode = MIG_BAD_ID;
2879    return FALSE;
2880  }
2881  handled = catch_exception_raise(req->Head.msgh_local_port,
2882                                  req->thread.name,
2883                                  req->task.name,
2884                                  req->exception,
2885                                  req->code,
2886                                  req->codeCnt);
2887  reply->RetCode = handled;
2888  return TRUE;
2889}
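
/* The reply construction above follows the MIG convention: a reply's
   msgh_id is the request's msgh_id + 100, and RetCode carries the
   handler's result (MIG_BAD_ID for an unrecognized request id). */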
2890
2891/*
2892  The initial function for an exception-handling thread.
2893*/
2894
2895void *
2896exception_handler_proc(void *arg)
2897{
2898  extern boolean_t exc_server();
2899  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2900
2901  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2902  /* Should never return. */
2903  abort();
2904}
2905
2906
2907
2908mach_port_t
2909mach_exception_port_set()
2910{
2911  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2912  kern_return_t kret; 
2913  if (__exception_port_set == MACH_PORT_NULL) {
2914    kret = mach_port_allocate(mach_task_self(),
2915                              MACH_PORT_RIGHT_PORT_SET,
2916                              &__exception_port_set);
2917    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2918    create_system_thread(0,
2919                         NULL,
2920                         exception_handler_proc, 
2921                         (void *)((natural)__exception_port_set));
2922  }
2923  return __exception_port_set;
2924}
2925
2926/*
  Set up a new thread to handle those exceptions specified by
2928  the mask "which".  This involves creating a special Mach
2929  message port, telling the Mach kernel to send exception
2930  messages for the calling thread to that port, and setting
2931  up a handler thread which listens for and responds to
2932  those messages.
2933
2934*/
2935
2936/*
2937  Establish the lisp thread's TCR as its exception port, and determine
2938  whether any other ports have been established by foreign code for
2939  exceptions that lisp cares about.
2940
2941  If this happens at all, it should happen on return from foreign
2942  code and on entry to lisp code via a callback.
2943
2944  This is a lot of trouble (and overhead) to support Java, or other
2945  embeddable systems that clobber their caller's thread exception ports.
2946 
2947*/
2948kern_return_t
2949tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2950{
2951  kern_return_t kret;
2952  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2953  int i;
2954  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2955  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2956  exception_mask_t mask = 0;
2957
2958  kret = thread_swap_exception_ports(thread,
2959                                     LISP_EXCEPTIONS_HANDLED_MASK,
2960                                     lisp_port,
2961                                     EXCEPTION_DEFAULT,
2962                                     THREAD_STATE_NONE,
2963                                     fxs->masks,
2964                                     &n,
2965                                     fxs->ports,
2966                                     fxs->behaviors,
2967                                     fxs->flavors);
2968  if (kret == KERN_SUCCESS) {
2969    fxs->foreign_exception_port_count = n;
2970    for (i = 0; i < n; i ++) {
2971      foreign_port = fxs->ports[i];
2972
2973      if ((foreign_port != lisp_port) &&
2974          (foreign_port != MACH_PORT_NULL)) {
2975        mask |= fxs->masks[i];
2976      }
2977    }
2978    tcr->foreign_exception_status = (int) mask;
2979  }
2980  return kret;
2981}
2982
2983kern_return_t
2984tcr_establish_lisp_exception_port(TCR *tcr)
2985{
2986  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2987}
2988
2989/*
2990  Do this when calling out to or returning from foreign code, if
2991  any conflicting foreign exception ports were established when we
2992  last entered lisp code.
2993*/
2994kern_return_t
2995restore_foreign_exception_ports(TCR *tcr)
2996{
2997  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2998 
2999  if (m) {
3000    MACH_foreign_exception_state *fxs  = 
3001      (MACH_foreign_exception_state *) tcr->native_thread_info;
3002    int i, n = fxs->foreign_exception_port_count;
3003    exception_mask_t tm;
3004
3005    for (i = 0; i < n; i++) {
3006      if ((tm = fxs->masks[i]) & m) {
3007        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
3008                                   tm,
3009                                   fxs->ports[i],
3010                                   fxs->behaviors[i],
3011                                   fxs->flavors[i]);
3012      }
    }
  }
  return KERN_SUCCESS;
}
3016                                   
3017
3018/*
3019  This assumes that a Mach port (to be used as the thread's exception port) whose
3020  "name" matches the TCR's 32-bit address has already been allocated.
3021*/
3022
3023kern_return_t
3024setup_mach_exception_handling(TCR *tcr)
3025{
3026  mach_port_t
3027    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
3028    task_self = mach_task_self();
3029  kern_return_t kret;
3030
3031  kret = mach_port_insert_right(task_self,
3032                                thread_exception_port,
3033                                thread_exception_port,
3034                                MACH_MSG_TYPE_MAKE_SEND);
3035  MACH_CHECK_ERROR("adding send right to exception_port",kret);
3036
3037  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3038  if (kret == KERN_SUCCESS) {
3039    mach_port_t exception_port_set = mach_exception_port_set();
3040
3041    kret = mach_port_move_member(task_self,
3042                                 thread_exception_port,
3043                                 exception_port_set);
3044  }
3045  return kret;
3046}
3047
3048void
3049darwin_exception_init(TCR *tcr)
3050{
3051  void tcr_monitor_exception_handling(TCR*, Boolean);
3052  kern_return_t kret;
3053  MACH_foreign_exception_state *fxs = 
3054    calloc(1, sizeof(MACH_foreign_exception_state));
3055 
3056  tcr->native_thread_info = (void *) fxs;
3057
3058  if ((kret = setup_mach_exception_handling(tcr))
3059      != KERN_SUCCESS) {
3060    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
3061    terminate_lisp();
3062  }
3063}
3064
3065/*
3066  The tcr is the "name" of the corresponding thread's exception port.
3067  Destroying the port should remove it from all port sets of which it's
3068  a member (notably, the exception port set.)
3069*/
3070void
3071darwin_exception_cleanup(TCR *tcr)
3072{
3073  void *fxs = tcr->native_thread_info;
3074  extern Boolean use_mach_exception_handling;
3075
3076  if (fxs) {
3077    tcr->native_thread_info = NULL;
3078    free(fxs);
3079  }
3080  if (use_mach_exception_handling) {
3081    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3082    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3083  }
3084}
3085
3086
3087Boolean
3088suspend_mach_thread(mach_port_t mach_thread)
3089{
3090  kern_return_t status;
3091  Boolean aborted = false;
3092 
3093  do {
3094    aborted = false;
3095    status = thread_suspend(mach_thread);
3096    if (status == KERN_SUCCESS) {
3097      status = thread_abort_safely(mach_thread);
3098      if (status == KERN_SUCCESS) {
3099        aborted = true;
3100      } else {
3101        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
3102        thread_resume(mach_thread);
3103      }
3104    } else {
3105      return false;
3106    }
3107  } while (! aborted);
3108  return true;
3109}
3110
3111/*
3112  Only do this if pthread_kill indicated that the pthread isn't
3113  listening to signals anymore, as can happen as soon as pthread_exit()
3114  is called on Darwin.  The thread could still call out to lisp as it
3115  is exiting, so we need another way to suspend it in this case.
3116*/
3117Boolean
3118mach_suspend_tcr(TCR *tcr)
3119{
3120  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3121  ExceptionInformation *pseudosigcontext;
3122  Boolean result = false;
3123 
3124  result = suspend_mach_thread(mach_thread);
3125  if (result) {
3126    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3127    pseudosigcontext->uc_onstack = 0;
3128    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3129    tcr->suspend_context = pseudosigcontext;
3130  }
3131  return result;
3132}
3133
3134void
3135mach_resume_tcr(TCR *tcr)
3136{
3137  ExceptionInformation *xp;
3138  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3139 
3140  xp = tcr->suspend_context;
3141#ifdef DEBUG_MACH_EXCEPTIONS
3142  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3143          tcr, tcr->pending_exception_context);
3144#endif
3145  tcr->suspend_context = NULL;
3146  restore_mach_thread_state(mach_thread, xp);
3147#ifdef DEBUG_MACH_EXCEPTIONS
3148  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3149          tcr, tcr->pending_exception_context);
3150#endif
3151  thread_resume(mach_thread);
3152}
3153
3154void
3155fatal_mach_error(char *format, ...)
3156{
3157  va_list args;
3158  char s[512];
3159 
3160
3161  va_start(args, format);
3162  vsnprintf(s, sizeof(s),format, args);
3163  va_end(args);
3164
3165  Fatal("Mach error", s);
3166}
3167
3168void
3169pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3170{
3171  interrupt_handler(signum, NULL, context);
3172}
3173
3174int
3175mach_raise_thread_interrupt(TCR *target)
3176{
3177  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3178  kern_return_t kret;
3179  Boolean result = false;
3180  TCR *current = get_tcr(false);
3181  thread_basic_info_data_t info; 
3182  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3183
3184  LOCK(lisp_global(TCR_AREA_LOCK), current);
3185
3186  if (suspend_mach_thread(mach_thread)) {
3187    if (thread_info(mach_thread,
3188                    THREAD_BASIC_INFO,
3189                    (thread_info_t)&info,
3190                    &info_count) == KERN_SUCCESS) {
3191      if (info.suspend_count == 1) {
3192        if ((target->valence == TCR_STATE_LISP) &&
3193            (!target->unwinding) &&
3194            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3195          kret = setup_signal_frame(mach_thread,
3196                                    (void *)pseudo_interrupt_handler,
3197                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3198                                    0,
3199                                    target);
3200          if (kret == KERN_SUCCESS) {
3201            result = true;
3202          }
3203        }
3204      }
3205    }
3206    if (! result) {
3207      target->interrupt_pending = 1 << fixnumshift;
3208    }
3209    thread_resume(mach_thread);
3210   
3211  }
3212  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3213  return 0;
3214}
3215
3216#endif