source: trunk/source/lisp-kernel/ppc-exceptions.c @ 11523

Last change on this file since 11523 was 11523, checked in by gb, 11 years ago

x86 support for FLASH-FREEZE.
Use signed_natural in x86 gc-like functions.
RECURSIVE-LOCK-WHOSTATE and the RWLOCK-WHOSTATE functions: use
WITH-STANDARD-IO-SYNTAX when consing up the string. Do that in
higher-level code, to avoid early refs to the CL-USER package.
(In general, other things similar to RECURSIVE-LOCK-WHOSTATE are
suspect, in that they call (FORMAT NIL ...) in a random environment
where things like *PRINT-READABLY* may be in effect. There are
probably other cases of this.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 89.0 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
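
/* Illustrative sketch only: the two instruction forms recognized
   above, assuming match_instr() is a simple masked comparison,
   roughly ((instr & mask) == target):

     subf allocptr,rA,allocptr   ; allocptr -= rA: the displacement is
                                 ; whatever rA holds, i.e. xpGPR(xp,rA)
     addi allocptr,allocptr,-N   ; allocptr -= N: the low 16 bits of the
                                 ; opcode hold the signed immediate -N,
                                 ; so -((short)prev_instr) recovers N.
*/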

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
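
/* A hypothetical instruction sequence of the kind emulated above
   (mnemonics and offsets are illustrative, not taken from the
   compiler's actual output).  On entry the allocation trap has been
   survived and allocptr is still tagged fulltag_cons:

     stw rY,cdr_offset(allocptr)  ; matches STORE_CDR_ALLOCPTR_INSTRUCTION
     stw rX,car_offset(allocptr)  ; matches STORE_CAR_ALLOCPTR_INSTRUCTION
     mr  rZ,allocptr              ; "assignment" case: result reg gets the cons
     <untag>                      ; UNTAG_ALLOCPTR_INSTRUCTION clears the tag

   The loop above performs each store into the cons cell directly and
   advances the saved PC until the untagging instruction is reached. */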

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can grab a new segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
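
/* Worked example of the accounting above: the allocator bumps
   allocptr downward through the current segment, so if
   tcr->last_allocptr was 0x100020 and the thread's allocptr is now
   0x100000, the thread consed 0x20 bytes since the last update and
   bytes_allocated grows by (last - current) = 0x20.  A cur_allocptr
   of VOID_ALLOCPTR means "no active segment" and is ignored. */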

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
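
/* Sketch of the loop above, assuming 3 conses and that the first
   word of each dnode is the cdr (deref(current,0)) and the second
   the car (deref(current,1)):

     allocptr -> [ cdr: NIL | car: initial ]   ; linked first
                 [ cdr:  v  | car: initial ]   ; points one dnode down
     arg_z ----> [ cdr:  v  | car: initial ]   ; linked last, returned

   Each iteration links the current cell's cdr to the previously
   initialized cell, so the cdr chain runs back down to NIL and the
   highest dnode becomes the head of the list returned in arg_z. */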

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* We tried to do a full GC while GC was disabled and that
         failed, so do the full GC now. */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
        release_readonly_area();
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}
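
/* Sketch of the selector convention assumed above: lisp puts a
   GC_TRAP_FUNCTION_* selector in imm0 (and any argument in imm1)
   before executing GC_TRAP_INSTRUCTION.  Selectors above
   GC_TRAP_FUNCTION_GC are treated as bit masks, so, e.g., a caller
   wanting "purify, then save an image" would set both the
   GC_TRAP_FUNCTION_PURIFY and GC_TRAP_FUNCTION_SAVE_APPLICATION bits
   and both of the corresponding branches above run after the full GC. */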

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* A stack just overflowed.  Signal the condition to lisp; we stay
     in "soft overflow" mode until all stacks are well under their
     overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
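
/* Worked example of the extension loop above (figures illustrative):
   with a 16MB lisp_heap_gc_threshold, a failing resize_dynamic_heap()
   is retried with extend_by values of 8MB and then 4MB; once the
   halved value drops below the 4<<20 floor the function gives up and
   returns false, and the caller falls back to GC or signals an
   allocation failure. */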

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0) {
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.)  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to which the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}

int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
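
/* Example of the test above (register numbers illustrative): for
   "stw rN,0(r4)" where rN is a node register (>= fn) holding a
   tagged cons or uvector whose address lies above tenured_area->high
   (i.e., in some ephemeral generation), the store is performed here
   and true is returned; a store of a fixnum or of a tenured object
   returns false, and the caller can simply unprotect the page as the
   comment above describes. */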

OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

int
altivec_present = 1;

/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but typically not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* The rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
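
/* Note on the arithmetic above: xpPC() denotes a pc (an opcode *,
   i.e. a pointer to 32-bit instruction words), so C pointer
   arithmetic already scales by 4.  Adding (delta >> 2) therefore
   advances the PC by delta bytes; adjust_exception_pc(xp, 4) skips
   exactly one instruction. */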

/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;

  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself.
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
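
/* Example of the two encodings above (values illustrative): if
   "where" points into fn's code vector, lisp sees register_number ==
   fn and index == ((natural)where - (code_vector + misc_data_offset))
   >> 2, a word index it can use to locate the PC within that
   function.  If the PC is in neither fn's nor nfn's code vector, lisp
   sees register_number == 0 and the raw PC itself as the index. */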

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* Callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);       
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}

/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}

void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* The main opcode.  */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI): unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let the
                                   debugger have it */
}
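
/* Worked examples of the rules above, using standard PPC trap
   mnemonics (illustrative):

     tweq r3,r3            ; TO_EQ set, ra == rb: always traps
                           ;   -> 0 (unconditional)
     twgti nargs,0         ; EQ bit clear: never traps when equal
                           ;   -> 1 (conditional)
     tw TO_LT|TO_GT|TO_EQ,r3,r4
                           ; traps on <, =, and >: always traps
                           ;   -> 0 (unconditional)
*/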
OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
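
/* The TCR valence transitions implied by the three functions above
   (a summary of the code, not new behavior): a thread takes an
   exception in TCR_STATE_LISP, is marked TCR_STATE_EXCEPTION_WAIT
   while it waits for the exception lock, runs the handler proper as
   TCR_STATE_FOREIGN (so that the GC uses the saved context, not the
   live registers, as the thread's state), and is marked
   TCR_STATE_EXCEPTION_RETURN when the lock is released, until
   exit_signal_handler() restores whatever valence it had on entry. */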

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signal
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }


  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if (noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence)) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel or to the Mach exception handler.)
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/
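
/* How those cases map onto the code in pc_luser_xp() below (a road
   map, not new behavior): case (e) is recognized by PC ranges between
   the egc_* labels declared below; case (b) by the
   MARK_TSP_FRAME_INSTRUCTION opcode; case (a) by a store relative to
   sp into a frame created a few instructions earlier; case (c) by a
   non-fixnum tag in allocptr; and case (d) by an stmw whose RA field
   is vsp. */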

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);



  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpCCR(xp) & 0x20000000)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpCCR(xp) & 0x20000000)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural bitnumber = area_dnode(ea, lisp_global(HEAP_START));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(HEAP_START));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    set_xpPC(xp, xpLR(xp));
    return;
  }


  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
    LispObj tsp_val = xpGPR(xp,tsp);

    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
    adjust_exception_pc(xp, 4);
    return;
  }

  if (frame->backlink == (frame+1)) {
    if (
#ifdef PPC64
        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
#else
        (major_opcode_p(instr, major_opcode_STW)) &&
#endif
        (RA_field(instr) == sp) &&
        /* There are a few places in the runtime that store into
           a previously-allocated frame atop the stack when
           throwing values around.  We only care about the case
           where the frame was newly allocated, in which case
           there must have been a CREATE_LISP_FRAME_INSTRUCTION
           a few instructions before the current program counter.
           (The whole point here is that a newly allocated frame
           might contain random values that we don't want the
           GC to see; a previously allocated frame should already
           be completely initialized.)
        */
        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
#ifdef PPC64
      int disp = DS_field(instr);
#else
      int disp = D_field(instr);
#endif


      if (disp < (4*node_size)) {
#if 0
        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
#endif
        frame->savevsp = 0;
        if (disp < (3*node_size)) {
          frame->savelr = 0;
          if (disp == node_size) {
            frame->savefn = 0;
          }
        }
      }
      return;
    }
  }

  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.)
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
#ifdef DEBUG
      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
#endif
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
#ifdef DEBUG
        fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
                tcr);
#endif
      } else {
        if (allocptr_tag == fulltag_misc) {
#ifdef DEBUG
          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
                  tcr);
#endif
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    }
    return;
  }

  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
#if 0
    fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
#endif

    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
      deref(frame,idx) = 0;
    }
    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
    return;
  }

#ifndef PPC64
  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
      (RA_field(instr) == vsp)) {
    int r;
    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));

    for (r = RS_field(instr); r <= 31; r++) {
      *vspptr++ = xpGPR(xp,r);
    }
    adjust_exception_pc(xp, 4);
  }
#endif
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;

          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
#ifdef DEBUG
          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
#endif
          PMCL_exception_handler(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
#ifdef DEBUG
          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
#endif
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}



void
install_signal_handler(int signo, void *handler)
{
  struct sigaction sa;

  sa.sa_sigaction = (void *)handler;
  sigfillset(&sa.sa_mask);
  sa.sa_flags =
    0 /* SA_RESTART */
    | SA_SIGINFO
#ifdef DARWIN
#ifdef PPC64
    | SA_64REGSET
#endif
#endif
    ;

  sigaction(signo, &sa, NULL);
}

void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    extern int no_sigtrap;
    install_signal_handler(SIGILL, (void *)signal_handler);
    if (no_sigtrap != 1) {
      install_signal_handler(SIGTRAP, (void *)signal_handler);
    }
    install_signal_handler(SIGBUS,  (void *)signal_handler);
    install_signal_handler(SIGSEGV, (void *)signal_handler);
    install_signal_handler(SIGFPE, (void *)signal_handler);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
                         (void *)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}

void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr = get_tcr(false);
  area *a;
  sigset_t mask;

  sigemptyset(&mask);

  if (tcr) {
    tcr->valence = TCR_STATE_FOREIGN;
    a = tcr->vs_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->ts_area;
    if (a) {
      a->active = a->high;
    }
    a = tcr->cs_area;
    if (a) {
      a->active = a->high;
    }
  }

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}

void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
}



void
unprotect_all_areas()
{
  protected_area_ptr p;

  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
    unprotect_area(p);
  }
}

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned limit_regno,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  xpGPR(xp, limit_regno) = new_limit;
  return true;
}
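
/* A worked example (illustrative): suppose tlb_limit is currently
   0x1000 bytes and a binding subprim traps with a byte index of
   0x10f0 in idx_regno.  align_to_power_of_2(0x10f1, 12) rounds up to
   the next 4K page boundary, 0x2000, so realloc grows the block by
   0x1000 bytes and the loop above fills those bytes, one LispObj at
   a time, with no_thread_local_binding_marker. */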



void
exception_init()
{
  install_pmcl_exception_handlers();
}




#ifdef DARWIN


#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))



#define LISP_EXCEPTIONS_HANDLED_MASK \
 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)

/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
#define NUM_LISP_EXCEPTIONS_HANDLED 4

typedef struct {
  int foreign_exception_port_count;
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;




/*
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)

  Initially, we install an exception handler to handle each native
  thread's exceptions.  This process involves creating a distinguished
  thread which listens for kernel exception messages on a set of
  0 or more thread exception ports.  As threads are created, they're
  added to that port set; a thread's exception port is destroyed
  (and therefore removed from the port set) when the thread exits.

  A few exceptions can be handled directly in the handler thread;
  others require that we resume the user thread (and that the
  exception thread resume listening for exceptions.)  The user
  thread might eventually want to return to the original context
  (possibly modified somewhat.)

  As it turns out, the simplest way to force the faulting user
  thread to handle its own exceptions is to do pretty much what
  signal() does: the exception handling thread sets up a sigcontext
  on the user thread's stack and forces the user thread to resume
  execution as if a signal handler had been called with that
  context as an argument.  We can use a distinguished UUO at a
  distinguished address to do something like sigreturn(); that'll
  have the effect of resuming the user thread's execution in
  the (pseudo-) signal context.

  Since:
    a) we have miles of code in C and in Lisp that knows how to
    deal with Linux sigcontexts
    b) Linux sigcontexts contain a little more useful information
    (the DAR, DSISR, etc.) than their Darwin counterparts
    c) we have to create a sigcontext ourselves when calling out
    to the user thread: we aren't really generating a signal, just
    leveraging existing signal-handling code.

  we create a Linux sigcontext struct.

  Simple?  Hopefully from the outside it is ...

  We want the process of passing a thread's own context to it to
  appear to be atomic: in particular, we don't want the GC to suspend
  a thread that's had an exception but has not yet had its user-level
  exception handler called, and we don't want the thread's exception
  context to be modified by a GC while the Mach handler thread is
  copying it around.  On Linux (and on Jaguar), we avoid this issue
  because (a) the kernel sets up the user-level signal handler and
  (b) the signal handler blocks signals (including the signal used
  by the GC to suspend threads) until tcr->xframe is set up.

  The GC and the Mach server thread therefore contend for the lock
  "mach_exception_lock".  The Mach server thread holds the lock
  when copying exception information between the kernel and the
  user thread; the GC holds this lock during most of its execution
  (delaying exception processing until it can be done without
  GC interference.)

*/

#ifdef PPC64
#define C_REDZONE_LEN           320
#define C_STK_ALIGN             32
#else
#define C_REDZONE_LEN           224
#define C_STK_ALIGN             16
#endif
#define C_PARAMSAVE_LEN         64
#define C_LINKAGE_LEN           48

#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
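
/* TRUNC_DOWN(a,b,c) drops a stack pointer by at least b bytes and
   rounds the result down to a multiple of c.  A worked example
   (32-bit, illustrative): TRUNC_DOWN(0xbffff8c4, C_REDZONE_LEN,
   C_STK_ALIGN) = ((0xbffff8c4 - 224) / 16) * 16 = 0xbffff7e0,
   leaving the 224-byte red zone intact and the result 16-byte
   aligned. */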

void
fatal_mach_error(char *format, ...);

#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}


void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
                          PPC_FLOAT_STATE,
                          (thread_state_t)&(mc->fs),
                          PPC_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
#ifdef PPC64
  kret = thread_set_state(thread,
                          PPC_THREAD_STATE64,
                          (thread_state_t)&(mc->ss),
                          PPC_THREAD_STATE64_COUNT);
#else
  kret = thread_set_state(thread,
                          MACHINE_THREAD_STATE,
                          (thread_state_t)&(mc->ss),
                          MACHINE_THREAD_STATE_COUNT);
#endif
  MACH_CHECK_ERROR("setting thread state", kret);
}

/* This code runs in the exception handling thread, in response
   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
   in response to a call to pseudo_sigreturn() from the specified
   user thread.)
   Find that context (the user thread's R3 points to it), then
   use that context to set the user thread's state.  When this
   function's caller returns, the Mach kernel will resume the
   user thread.
*/

kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}

ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
                            natural *new_stack_top)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
  result = thread_get_state(thread,
                            PPC_THREAD_STATE64,
                            (thread_state_t)&ts,
                            &thread_state_count);
#else
  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
#endif

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.r1;
  backlink = stackp;
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  memmove(&(mc->ss),&ts,sizeof(ts));

  thread_state_count = PPC_FLOAT_STATE_COUNT;
  thread_get_state(thread,
                   PPC_FLOAT_STATE,
                   (thread_state_t)&(mc->fs),
                   &thread_state_count);


#ifdef PPC64
  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
#else
  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
                   PPC_EXCEPTION_STATE64,
#else
                   PPC_EXCEPTION_STATE,
#endif
                   (thread_state_t)&(mc->es),
                   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
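
/* The frame laid out above, from the interrupted thread's original
   r1 downwards (a summary of the code, not new behavior):

     original r1
     [C_REDZONE_LEN-byte red zone, skipped and realigned]
     [ExceptionInformation (the pseudosigcontext)]
     [machine context: ss/fs/es, pointed to by UC_MCONTEXT(...)]
     [C_PARAMSAVE_LEN-byte parameter save area]
     [C_LINKAGE_LEN-byte linkage area, whose first word holds the
      backlink to the original r1]
     new_stack_top

   so the user thread resumes on a well-formed C stack whose backlink
   chain still reaches its old frames. */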

/*
  This code sets up the user thread so that it executes a "pseudo-signal
  handler" function when it resumes.  Create a linux sigcontext struct
  on the thread's stack and pass it as an argument to the pseudo-signal
  handler.

  Things are set up so that the handler "returns to" pseudo_sigreturn(),
  which will restore the thread's context.

  If the handler invokes code that throws (or otherwise never sigreturn()'s
  to the context), that's fine.

  Actually, check that: throw (and variants) may need to be careful and
  pop the tcr's xframe list until it's younger than any frame being
  entered.
*/

int
setup_signal_frame(mach_port_t thread,
                   void *handler_address,
                   int signum,
                   int code,
                   TCR *tcr)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
#endif
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     We've created a sigcontext on the thread's stack.  Set things up
     so that we call the handler (with appropriate args) when the
     thread's resumed.
  */

  ts.srr0 = (natural) handler_address;
  ts.srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
  ts.r1 = stackp;
  ts.r3 = signum;
  ts.r4 = (natural)pseudosigcontext;
  ts.r5 = (natural)tcr;
  ts.r6 = (natural)old_valence;
  ts.lr = (natural)pseudo_sigreturn;


#ifdef PPC64
  ts.r13 = xpGPR(pseudosigcontext,13);
  thread_set_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&ts,
                   PPC_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread,
                   MACHINE_THREAD_STATE,
                   (thread_state_t)&ts,
                   MACHINE_THREAD_STATE_COUNT);
#endif
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
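
/* The register assignments above line up with the pseudo-signal
   handlers defined below: r3, r4, r5, and r6 become the signum,
   context, tcr, and old_valence arguments of pseudo_signal_handler(),
   and lr is aimed at the pseudo_sigreturn UUO, so that returning
   from the handler restores the saved context via
   do_pseudo_sigreturn(). */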


void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence);
}


int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
                   &thread_state_count);
  if (enabled) {
    ts.srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
     Hack-o-rama warning (isn't it about time for such a warning?):
     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
     Our handler for lisp's use of pthread_kill() pushes a phony
     lisp frame on the stack and forces the context to resume at
     the UUO in enable_fp_exceptions(); the "saveLR" field of that
     lisp frame contains the -real- address that process_interrupt
     should have returned to, and the fact that it's in a lisp
     frame should convince the GC to notice that address if it
     runs in the tiny time window between returning from our
     interrupt handler and ... here.
     If the top frame on the stack is a lisp frame, discard it
     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
     just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.r1,
      *next_frame = tos->backlink;

    if (tos == (next_frame -1)) {
      ts.srr0 = tos->savelr;
      ts.r1 = (LispObj) next_frame;
    } else {
      ts.srr0 += 4;
    }
  }
  thread_set_state(thread,
#ifdef PPC64
                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
#else
                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
#endif
                   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
                   PPC_THREAD_STATE_COUNT
#endif
                   );

  return 0;
}

/*
  This function runs in the exception handling thread.  It's
  called (by this precise name) from the library function "exc_server()"
  when the thread's exception ports are set up.  (exc_server() is called
  via mach_msg_server(), which is a function that waits for and dispatches
  on exception messages from the Mach kernel.)

  This checks to see if the exception was caused by a pseudo_sigreturn()
  UUO; if so, it arranges for the thread to have its state restored
  from the specified context.

  Otherwise, it tries to map the exception to a signal number and
  arranges that the thread run a "pseudo signal handler" to handle
  the exception.

  Some exceptions could and should be handled here directly.
*/

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      kret = do_pseudo_sigreturn(thread, tcr);
#if 0
      fprintf(stderr, "Exception return in 0x%x\n",tcr);
#endif

    } else if (code1 == (int)enable_fp_exceptions) {
      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else {
      kret = thread_set_fp_exceptions_enabled(thread, false);
    }
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    kret = 17;
  } else {
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_SOFTWARE:
      if (code == EXC_PPC_TRAP) {
        signum = SIGTRAP;
      }
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
                                (void *)pseudo_signal_handler,
                                signum,
                                code,
                                tcr);
#if 0
      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      kret = 17;
    }
  }

  return kret;
}



typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;


boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
                                  req->thread.name,
                                  req->task.name,
                                  req->exception,
                                  req->code,
                                  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
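
/* Message id 2401 is the "exception_raise" request in the standard
   Mach exc.defs protocol, and a reply's id is the request's id plus
   100, which is what the msgh_id adjustment above produces.  (Ids
   2402 and 2403 are the "raise_state" and "raise_state_identity"
   flavors that the comment above alludes to.) */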

/*
  The initial function for an exception-handling thread.
*/

void *
exception_handler_proc(void *arg)
{
  extern boolean_t exc_server();
  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);

  mach_msg_server(openmcl_exc_server, 2048, p, 0);
  /* Should never return. */
  abort();
}



mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {
    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  the mask "which".  This involves creating a special Mach
  message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.

*/

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE,
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(stderr, "Couldn't set up exception handler - error = %d\n", kret);
    terminate_lisp();
  }
  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}


Boolean
suspend_mach_thread(mach_port_t mach_thread)
{
  kern_return_t status;
  Boolean aborted = false;

  do {
    aborted = false;
    status = thread_suspend(mach_thread);
    if (status == KERN_SUCCESS) {
      status = thread_abort_safely(mach_thread);
      if (status == KERN_SUCCESS) {
        aborted = true;
      } else {
        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
        thread_resume(mach_thread);
      }
    } else {
      return false;
    }
  } while (! aborted);
  return true;
}

/*
  Only do this if pthread_kill indicated that the pthread isn't
  listening to signals anymore, as can happen as soon as pthread_exit()
  is called on Darwin.  The thread could still call out to lisp as it
  is exiting, so we need another way to suspend it in this case.
*/
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}

void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
          tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}

void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];


  va_start(args, format);
  vsnprintf(s, sizeof(s),format, args);
  va_end(args);

  Fatal("Mach error", s);
}

void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}

int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
                    THREAD_BASIC_INFO,
                    (thread_info_t)&info,
                    &info_count) == KERN_SUCCESS) {
      if (info.suspend_count == 1) {
        if ((target->valence == TCR_STATE_LISP) &&
            (!target->unwinding) &&
            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
          kret = setup_signal_frame(mach_thread,
                                    (void *)pseudo_interrupt_handler,
                                    SIGNAL_FOR_PROCESS_INTERRUPT,
                                    0,
                                    target);
          if (kret == KERN_SUCCESS) {
            result = true;
          }
        }
      }
    }
    if (! result) {
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}

#endif