source: trunk/source/lisp-kernel/ppc-exceptions.c @ 11501

Last change on this file since 11501 was 11501, checked in by gb, 12 years ago

Try to use non-conflicting, platform-dependent signals instead of
SIGQUIT.  (Darwin uses SIGEMT, which doesn't seem to be raised
by the kernel on x86 or PPC; other platforms can use user-defined,
possibly "realtime", signals.)

Rename kernel things that had 'quit' in their names and had to
do with terminating threads to instead have 'thread_kill' in
their names (e.g. quit_handler -> thread_kill_handler.)

In the x86 GC, at least in mark_root and the recursive case of
rmark, check the dnode against gc_area_dnode before mapping
a TRA to the containing function.  (This keeps us from crashing
in those cases if we see a garbage root that's tagged as a TRA,
but that fixes the symptom and not the problem that would cause
such a garbage root to appear.)  This is x86-specific; the
PPC ports don't use TRAs.

Save lisp_heap_gc_threshold, the EGC enable state, and the
sizes of the ephemeral generations in the image and restore
them on startup. (The -T option - which sets the global
GC threshold from the command line - overrides any value
set in the image; more accurately, it does that if the value
of the -T argument isn't the default GC threshold size.)
This is probably the right idea, but it's an incompatible
change; people who override the defaults at build or startup
time may need to change how their systems are built and
initialized.
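
(Illustrative example, not part of the change itself: starting such an image
with something like "ppccl -T 100M" would override the saved threshold, while
omitting -T, or passing exactly the default threshold size, would leave the
image's saved value in effect.  The executable name and argument syntax here
are hypothetical and vary by platform build.)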

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.1 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL. 

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence. 

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.

*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);






int
page_size = 4096;

int
log2_page_size = 12;





/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
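
/* Illustrative note (not part of the original source): the two patterns
   matched above correspond to allocation sequences like

       subf allocptr,rN,allocptr    ; variable-sized: decrement by rN's value
       addi allocptr,allocptr,-N    ; fixed-sized: decrement by a constant

   so the displacement is either the value of the RA register or the
   negation of the addi's signed 16-bit immediate. */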


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
   
    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
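
/* Illustrative note (not part of the original source): the loop above
   emulates a typical consing sequence, something like

       stw rX,<car-offset>(allocptr)   ; initialize the car
       stw rY,<cdr-offset>(allocptr)   ; initialize the cdr
       mr  rZ,allocptr                 ; result register gets the tagged cons
       <untag allocptr>                ; clear allocptr's fulltag_cons bits

   where the store offsets are relative to the still-tagged allocptr and
   the exact encodings are those matched by the masks above. */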

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }
 
  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
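
/* Note (added for clarity): allocation decrements allocptr through each
   heap segment (see allocptr_displacement above), so "last - current"
   is the number of bytes consumed since last_allocptr was recorded. */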

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
        release_readonly_area();
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  adjust_exception_pc(xp,4);
  return 0;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* One of the stacks just overflowed its soft limit.  Signal an
     error_stack_overflow condition for the stack indicated by "reg";
     the stack's soft guard area stays unprotected until all stacks
     are well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with a by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;
 
  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
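
/* Worked example (illustrative, assuming a 128KB allocation quantum,
   i.e. log2_allocation_quantum == 17): with oldlimit == 0x30010 and
   need == 0x20000, the two align_to_power_of_2 calls yield 0x40000 and
   0x20000, so newlimit == 0x60000.  If that exceeds a->high, we either
   fail (extend false) or try to grow the heap, halving (and re-aligning)
   extend_by after each failed resize until it drops below 4MB. */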


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

int
gc_like_from_xp(ExceptionInformation *xp, 
                int(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
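
/* Note (added for clarity): GC_INHIBIT_COUNT is positive while GC is
   inhibited; storing its negation above records that a GC was requested
   during inhibition (and gc_deferred is bumped), so the deferred GC can
   be honored once inhibition is lifted. */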


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}
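
/* Illustrative example: if the GC moved a->active back by 10MB and left
   a->high unchanged, this returns 10MB; any growth or shrinkage of
   a->high during the GC is added to (or subtracted from) that count. */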

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}






protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.)  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
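
/* Note (added for clarity): in the fallback case above, (1 << 25) tests
   the DSISR's "store operation" bit (bit 6 in IBM big-endian numbering),
   and 0x300 is the PPC data-storage-interrupt vector offset; Darwin
   reports the trap type scaled by 0x100, hence the division. */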

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}





OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel  from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
 
}


int ffcall_overflow_count = 0;
/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif




/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
      */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
   } else {
    return do_vsp_overflow(xp, addr);
   }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that the case of two or more threads writing to the same
  page (before either of them can run this handler) is benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);
   
    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);
     
      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}








OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

   
int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/               
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}
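
/* Decoding note (added for clarity): A-form FPU instructions carry their
   extended opcode in bits 1-5, so the mask ((1 << 6) - 2) == 0x3e selects
   those bits while ignoring bit 0 (the Rc bit), and (22 << 1) == 0x2c is
   the extended opcode of fsqrt/fsqrts. */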

OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;


  program_counter = xpPC(xp);
 
  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
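
/* Note (added for clarity): xpPC() designates an opcode (32-bit word)
   pointer, so the byte-sized delta is scaled down by 4 before the
   pointer arithmetic above. */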


/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;
 
  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
 
  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */
 
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;
 
  return err;

}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }
 
  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap. Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}






/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:
     
     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.
     
     b) If we're already in "yellow zone" mode, then:
     
     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.
     
     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.
     
     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;
     
    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);       
        signal_stack_soft_overflow(xp, sp);
      }
    }
   
    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Decide whether a trap instruction is conditional (i.e., might not trap).  */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
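
/* Illustrative examples (not part of the original source): a trap
   immediate like TRI_instruction(TO_GT,nargs,0) has only TO_GT set (no
   TO_EQ bit), so it is conditional; an instruction with all TO bits set
   and identical register operands (the canonical unconditional trap)
   always fires and is classified as unconditional here. */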

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
    }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
} 

void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);
 
  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {
   
    tcr = (TCR *) get_interrupt_tcr(false);
 
1777    /* The signal handler's entered with all signals (notably the
1778       thread_suspend signal) blocked.  Don't allow any of them to
1779       preempt us until we've
1780       set the TCR's xframe slot to include the current exception
1781       context.
1782    */
1783   
1784    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1785  }
1786
1787  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1788    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1789    pthread_kill(pthread_self(), thread_suspend_signal);
1790  }
1791
1792 
1793  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1794  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1795    char msg[512];
1796    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1797    if (lisp_Debugger(context, info, signum, false, msg)) {
1798      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1799    }
1800  }
1801
1802  unlock_exception_lock_in_handler(tcr);
1803
1804  /* This thread now looks like a thread that was suspended while
1805     executing lisp code.  If some other thread gets the exception
1806     lock and GCs, the context (this thread's suspend_context) will
1807     be updated.  (That's only of concern if it happens before we
1808     can return to the kernel/to the Mach exception handler).
1809  */
1810  if (!use_mach_exception_handling) {
1811    exit_signal_handler(tcr, old_valence);
1812    raise_pending_interrupt(tcr);
1813  }
1814}
1815
1816/*
1817  If it looks like we're in the middle of an atomic operation, make
1818  it seem as if that operation is either complete or hasn't started
1819  yet.
1820
1821  The cases handled include:
1822
1823  a) storing into a newly-allocated lisp frame on the stack.
1824  b) marking a newly-allocated TSP frame as containing "raw" data.
1825  c) consing: the GC has its own ideas about how this should be
1826     handled, but other callers would be best advised to back
1827     up or move forward, according to whether we're in the middle
1828     of allocating a cons cell or allocating a uvector.
1829  d) a STMW to the vsp
1830  e) EGC write-barrier subprims.
1831*/
1832
1833extern opcode
1834  egc_write_barrier_start,
1835  egc_write_barrier_end, 
1836  egc_store_node_conditional, 
1837  egc_store_node_conditional_test,
1838  egc_set_hash_key,
1839  egc_gvset,
1840  egc_rplaca,
1841  egc_rplacd,
1842  egc_set_hash_key_conditional,
1843  egc_set_hash_key_conditional_test;
1844
1845
1846extern opcode ffcall_return_window, ffcall_return_window_end;
1847
1848void
1849pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1850{
1851  pc program_counter = xpPC(xp);
1852  opcode instr = *program_counter;
1853  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1854  LispObj cur_allocptr = xpGPR(xp, allocptr);
1855  int allocptr_tag = fulltag_of(cur_allocptr);
1856 
1857
1858
1859  if ((program_counter < &egc_write_barrier_end) && 
1860      (program_counter >= &egc_write_barrier_start)) {
1861    LispObj *ea = 0, val = 0, root = 0;
1862    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1863    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1864
1865    if (program_counter >= &egc_set_hash_key_conditional) {
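      /* 0x20000000 is the EQ bit of cr0: if it's still clear at the
         test, the store-conditional presumably hasn't succeeded (the
         store_node_conditional case below spells this out). */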
1866      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1867          ((program_counter == &egc_set_hash_key_conditional_test) &&
1868           (! (xpCCR(xp) & 0x20000000)))) {
1869        return;
1870      }
1871      need_store = false;
1872      root = xpGPR(xp,arg_x);
1873      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1874      need_memoize_root = true;
1875    } else if (program_counter >= &egc_store_node_conditional) {
1876      if ((program_counter < &egc_store_node_conditional_test) ||
1877          ((program_counter == &egc_store_node_conditional_test) &&
1878           (! (xpCCR(xp) & 0x20000000)))) {
1879        /* The conditional store either hasn't been attempted yet, or
1880           has failed.  No need to adjust the PC, or do memoization. */
1881        return;
1882      }
1883      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1884      xpGPR(xp,arg_z) = t_value;
1885      need_store = false;
1886    } else if (program_counter >= &egc_set_hash_key) {
1887      root = xpGPR(xp,arg_x);
1888      val = xpGPR(xp,arg_z);
1889      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1890      need_memoize_root = true;
1891    } else if (program_counter >= &egc_gvset) {
1892      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1893      val = xpGPR(xp,arg_z);
1894    } else if (program_counter >= &egc_rplacd) {
1895      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1896      val = xpGPR(xp,arg_z);
1897    } else {                      /* egc_rplaca */
1898      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1899      val = xpGPR(xp,arg_z);
1900    }
1901    if (need_store) {
1902      *ea = val;
1903    }
1904    if (need_check_memo) {
1905      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1906      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1907          ((LispObj)ea < val)) {
1908        atomic_set_bit(refbits, bitnumber);
1909        if (need_memoize_root) {
1910          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1911          atomic_set_bit(refbits, bitnumber);
1912        }
1913      }
1914    }
1915    set_xpPC(xp, xpLR(xp));
1916    return;
1917  }
1918
1919
1920  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
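    /* Case (b) above: finish marking the newly-allocated TSP frame as
       containing raw data, then skip over the marking instruction. */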
1921    LispObj tsp_val = xpGPR(xp,tsp);
1922   
1923    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1924    adjust_exception_pc(xp, 4);
1925    return;
1926  }
1927 
1928  if (frame->backlink == (frame+1)) {
1929    if (
1930#ifdef PPC64
1931        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1932        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1933#else
1934        (major_opcode_p(instr, major_opcode_STW)) && 
1935#endif
1936        (RA_field(instr) == sp) &&
1937        /* There are a few places in the runtime that store into
1938           a previously-allocated frame atop the stack when
1939           throwing values around.  We only care about the case
1940           where the frame was newly allocated, in which case
1941           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1942           a few instructions before the current program counter.
1943           (The whole point here is that a newly allocated frame
1944           might contain random values that we don't want the
1945           GC to see; a previously allocated frame should already
1946           be completely initialized.)
1947        */
1948        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1949         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1950         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1951#ifdef PPC64
1952      int disp = DS_field(instr);
1953#else     
1954      int disp = D_field(instr);
1955#endif
1956
1957
1958      if (disp < (4*node_size)) {
1959#if 0
1960        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1961#endif
1962        frame->savevsp = 0;
1963        if (disp < (3*node_size)) {
1964          frame->savelr = 0;
1965          if (disp == node_size) {
1966            frame->savefn = 0;
1967          }
1968        }
1969      }
1970      return;
1971    }
1972  }
1973
1974  if (allocptr_tag != tag_fixnum) {
1975    signed_natural disp = allocptr_displacement(xp);
1976
1977    if (disp) {
1978      /* Being architecturally "at" the alloc trap doesn't tell
1979         us much (in particular, it doesn't tell us whether
1980         or not the thread has committed to taking the trap
1981         and is waiting for the exception lock (or waiting
1982         for the Mach exception thread to tell it how bad
1983         things are) or is about to execute a conditional
1984         trap.)
1985         Regardless of which case applies, we want the
1986         other thread to take (or finish taking) the
1987         trap, and we don't want it to consider its
1988         current allocptr to be valid.
1989         The difference between this case (suspend other
1990         thread for GC) and the previous case (suspend
1991         current thread for interrupt) is solely a
1992         matter of what happens after we leave this
1993         function: some non-current thread will stay
1994         suspended until the GC finishes, then take
1995         (or start processing) the alloc trap.   The
1996         current thread will go off and do PROCESS-INTERRUPT
1997         or something, and may return from the interrupt
1998         and need to finish the allocation that got interrupted.
1999      */
2000
2001      if (alloc_disp) {
2002        *alloc_disp = disp;
2003        xpGPR(xp,allocptr) += disp;
2004        /* Leave the PC at the alloc trap.  When the interrupt
2005           handler returns, it'll decrement allocptr by disp
2006           and the trap may or may not be taken.
2007        */
2008      } else {
2009        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2010        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2011        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2012      }
2013    } else {
2014#ifdef DEBUG
2015      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2016#endif
2017      /* If we're already past the alloc_trap, finish allocating
2018         the object. */
2019      if (allocptr_tag == fulltag_cons) {
2020        finish_allocating_cons(xp);
2021#ifdef DEBUG
2022        fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
2023                tcr);
2024#endif
2025      } else {
2026        if (allocptr_tag == fulltag_misc) {
2027#ifdef DEBUG
2028          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
2029                  tcr);
2030#endif
2031          finish_allocating_uvector(xp);
2032        } else {
2033          Bug(xp, "what's being allocated here ?");
2034        }
2035      }
2036      /* Whatever we finished allocating, reset allocptr/allocbase to
2037         VOID_ALLOCPTR */
2038      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2039    }
2040    return;
2041  }
2042
2043  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2044    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
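    /* D_field(instr) is presumably the byte displacement of the first
       catch_frame slot not yet stored into, relative to the tagged
       frame pointer; adding the tag back and shifting converts it to
       a slot index, so the loop below can zero the remaining slots. */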
2045    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2046#if 0
2047        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2048#endif
2049
2050    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2051      deref(frame,idx) = 0;
2052    }
2053    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2054    return;
2055  }
2056
2057#ifndef PPC64
2058  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2059      (RA_field(instr) == vsp)) {
2060    int r;
2061    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2062   
2063    for (r = RS_field(instr); r <= 31; r++) {
2064      *vspptr++ = xpGPR(xp,r);
2065    }
2066    adjust_exception_pc(xp, 4);
2067  }
2068#endif
2069}
2070
2071void
2072interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2073{
2074  TCR *tcr = get_interrupt_tcr(false);
2075  if (tcr) {
2076    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2077      tcr->interrupt_pending = 1 << fixnumshift;
2078    } else {
2079      LispObj cmain = nrs_CMAIN.vcell;
2080
2081      if ((fulltag_of(cmain) == fulltag_misc) &&
2082          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2083        /*
2084           This thread can (allegedly) take an interrupt now.
2085           It's tricky to do that if we're executing
2086           foreign code (especially Linuxthreads code, much
2087           of which isn't reentrant.)
2088           If we're unwinding the stack, we also want to defer
2089           the interrupt.
2090        */
2091        if ((tcr->valence != TCR_STATE_LISP) ||
2092            (tcr->unwinding != 0)) {
2093          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2094        } else {
2095          xframe_list xframe_link;
2096          int old_valence;
2097          signed_natural disp=0;
2098         
2099          pc_luser_xp(context, tcr, &disp);
2100          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2101          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2102#ifdef DEBUG
2103          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2104#endif
2105          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2106          if (disp) {
2107            xpGPR(context,allocptr) -= disp;
2108          }
2109          unlock_exception_lock_in_handler(tcr);
2110#ifdef DEBUG
2111          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2112#endif
2113          exit_signal_handler(tcr, old_valence);
2114        }
2115      }
2116    }
2117  }
2118#ifdef DARWIN
2119    DarwinSigReturn(context);
2120#endif
2121}
2122
2123
2124
2125void
2126install_signal_handler(int signo, void *handler)
2127{
2128  struct sigaction sa;
2129 
2130  sa.sa_sigaction = (void *)handler;
2131  sigfillset(&sa.sa_mask);
2132  sa.sa_flags = 
2133    0 /* SA_RESTART */
2134    | SA_SIGINFO
2135#ifdef DARWIN
2136#ifdef PPC64
2137    | SA_64REGSET
2138#endif
2139#endif
2140    ;
2141
2142  sigaction(signo, &sa, NULL);
2143}
2144
2145void
2146install_pmcl_exception_handlers()
2147{
2148#ifdef DARWIN
2149  extern Boolean use_mach_exception_handling;
2150#endif
2151
2152  Boolean install_signal_handlers_for_exceptions =
2153#ifdef DARWIN
2154    !use_mach_exception_handling
2155#else
2156    true
2157#endif
2158    ;
2159  if (install_signal_handlers_for_exceptions) {
2160    extern int no_sigtrap;
2161    install_signal_handler(SIGILL, (void *)signal_handler);
2162    if (no_sigtrap != 1) {
2163      install_signal_handler(SIGTRAP, (void *)signal_handler);
2164    }
2165    install_signal_handler(SIGBUS,  (void *)signal_handler);
2166    install_signal_handler(SIGSEGV, (void *)signal_handler);
2167    install_signal_handler(SIGFPE, (void *)signal_handler);
2168  }
2169 
2170  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2171                         (void *)interrupt_handler);
2172  signal(SIGPIPE, SIG_IGN);
2173}
2174
2175void
2176thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2177{
2178  TCR *tcr = get_tcr(false);
2179  area *a;
2180  sigset_t mask;
2181 
2182  sigemptyset(&mask);
2183
2184  if (tcr) {
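    /* Mark the dying thread as foreign and empty its stack areas, so
       the GC won't scan their stale contents. */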
2185    tcr->valence = TCR_STATE_FOREIGN;
2186    a = tcr->vs_area;
2187    if (a) {
2188      a->active = a->high;
2189    }
2190    a = tcr->ts_area;
2191    if (a) {
2192      a->active = a->high;
2193    }
2194    a = tcr->cs_area;
2195    if (a) {
2196      a->active = a->high;
2197    }
2198  }
2199 
2200  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2201  pthread_exit(NULL);
2202}
2203
2204void
2205thread_signal_setup()
2206{
2207  thread_suspend_signal = SIG_SUSPEND_THREAD;
2208  thread_kill_signal = SIG_KILL_THREAD;
2209
2210  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2211  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
2212}
2213
2214
2215
2216void
2217unprotect_all_areas()
2218{
2219  protected_area_ptr p;
2220
2221  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2222    unprotect_area(p);
2223  }
2224}
2225
2226/*
2227  A binding subprim has just done "twlle limit_regno,idx_regno" and
2228  the trap's been taken.  Extend the tcr's tlb so that the index will
2229  be in bounds and the new limit will be on a page boundary, filling
2230  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2231  the tcr fields and the registers in the xp and return true if this
2232  all works, false otherwise.
2233
2234  Note that the tlb was allocated via malloc, so realloc can do some
2235  of the hard work.
2236*/
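/* A worked example, with hypothetical values and assuming that
   align_to_power_of_2(n,12) rounds n up to a multiple of 2^12: if the
   trapping index is 0x1008 and tcr->tlb_limit is 0x1000, then
   new_limit = align_to_power_of_2(0x1009,12) = 0x2000, so realloc
   grows the tlb by 0x1000 bytes and each new LispObj slot is filled
   with no_thread_local_binding_marker. */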
2237Boolean
2238extend_tcr_tlb(TCR *tcr, 
2239               ExceptionInformation *xp, 
2240               unsigned limit_regno,
2241               unsigned idx_regno)
2242{
2243  unsigned
2244    index = (unsigned) (xpGPR(xp,idx_regno)),
2245    old_limit = tcr->tlb_limit,
2246    new_limit = align_to_power_of_2(index+1,12),
2247    new_bytes = new_limit-old_limit;
2248  LispObj
2249    *old_tlb = tcr->tlb_pointer,
2250    *new_tlb = realloc(old_tlb, new_limit),
2251    *work;
2252
2253  if (new_tlb == NULL) {
2254    return false;
2255  }
2256 
2257  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2258
2259  while (new_bytes) {
2260    *work++ = no_thread_local_binding_marker;
2261    new_bytes -= sizeof(LispObj);
2262  }
2263  tcr->tlb_pointer = new_tlb;
2264  tcr->tlb_limit = new_limit;
2265  xpGPR(xp, limit_regno) = new_limit;
2266  return true;
2267}
2268
2269
2270
2271void
2272exception_init()
2273{
2274  install_pmcl_exception_handlers();
2275}
2276
2277
2278
2279
2280
2281#ifdef DARWIN
2282
2283
2284#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2285#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2286
2287
2288
2289#define LISP_EXCEPTIONS_HANDLED_MASK \
2290 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2291
2292/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2293#define NUM_LISP_EXCEPTIONS_HANDLED 4
2294
2295typedef struct {
2296  int foreign_exception_port_count;
2297  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2298  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2299  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2300  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2301} MACH_foreign_exception_state;
2302
2303
2304
2305
2306/*
2307  Mach's exception mechanism works a little better than its signal
2308  mechanism (and, not incidentally, it gets along with GDB a lot
2309  better.)
2310
2311  Initially, we install an exception handler to handle each native
2312  thread's exceptions.  This process involves creating a distinguished
2313  thread which listens for kernel exception messages on a set of
2314  0 or more thread exception ports.  As threads are created, they're
2315  added to that port set; a thread's exception port is destroyed
2316  (and therefore removed from the port set) when the thread exits.
2317
2318  A few exceptions can be handled directly in the handler thread;
2319  others require that we resume the user thread (and that the
2320  exception thread resumes listening for exceptions.)  The user
2321  thread might eventually want to return to the original context
2322  (possibly modified somewhat.)
2323
2324  As it turns out, the simplest way to force the faulting user
2325  thread to handle its own exceptions is to do pretty much what
2326  signal() does: the exception handling thread sets up a sigcontext
2327  on the user thread's stack and forces the user thread to resume
2328  execution as if a signal handler had been called with that
2329  context as an argument.  We can use a distinguished UUO at a
2330  distinguished address to do something like sigreturn(); that'll
2331  have the effect of resuming the user thread's execution in
2332  the (pseudo-) signal context.
2333
2334  Since:
2335    a) we have miles of code in C and in Lisp that knows how to
2336    deal with Linux sigcontexts
2337    b) Linux sigcontexts contain a little more useful information
2338    (the DAR, DSISR, etc.) than their Darwin counterparts
2339    c) we have to create a sigcontext ourselves when calling out
2340    to the user thread: we aren't really generating a signal, just
2341    leveraging existing signal-handling code.
2342
2343  we create a Linux sigcontext struct.
2344
2345  Simple ?  Hopefully from the outside it is ...
2346
2347  We want the process of passing a thread's own context to it to
2348  appear to be atomic: in particular, we don't want the GC to suspend
2349  a thread that's had an exception but has not yet had its user-level
2350  exception handler called, and we don't want the thread's exception
2351  context to be modified by a GC while the Mach handler thread is
2352  copying it around.  On Linux (and on Jaguar), we avoid this issue
2353  because (a) the kernel sets up the user-level signal handler and
2354  (b) the signal handler blocks signals (including the signal used
2355  by the GC to suspend threads) until tcr->xframe is set up.
2356
2357  The GC and the Mach server thread therefore contend for the lock
2358  "mach_exception_lock".  The Mach server thread holds the lock
2359  when copying exception information between the kernel and the
2360  user thread; the GC holds this lock during most of its execution
2361  (delaying exception processing until it can be done without
2362  GC interference.)
2363
2364*/
2365
2366#ifdef PPC64
2367#define C_REDZONE_LEN           320
2368#define C_STK_ALIGN             32
2369#else
2370#define C_REDZONE_LEN           224
2371#define C_STK_ALIGN             16
2372#endif
2373#define C_PARAMSAVE_LEN         64
2374#define C_LINKAGE_LEN           48
2375
2376#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
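/* E.g. (hypothetical value): TRUNC_DOWN(0x1234, C_REDZONE_LEN, C_STK_ALIGN)
   on 32-bit is ((0x1234 - 224) / 16) * 16 = 0x1150: back up past the
   red zone, then round down to the C stack alignment. */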
2377
2378void
2379fatal_mach_error(char *format, ...);
2380
2381#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2382
2383
2384void
2385restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2386{
2387  kern_return_t kret;
2388  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2389
2390  /* Set the thread's FP state from the pseudosigcontext */
2391  kret = thread_set_state(thread,
2392                          PPC_FLOAT_STATE,
2393                          (thread_state_t)&(mc->fs),
2394                          PPC_FLOAT_STATE_COUNT);
2395
2396  MACH_CHECK_ERROR("setting thread FP state", kret);
2397
2398  /* The thread'll be as good as new ... */
2399#ifdef PPC64
2400  kret = thread_set_state(thread,
2401                          PPC_THREAD_STATE64,
2402                          (thread_state_t)&(mc->ss),
2403                          PPC_THREAD_STATE64_COUNT);
2404#else
2405  kret = thread_set_state(thread, 
2406                          MACHINE_THREAD_STATE,
2407                          (thread_state_t)&(mc->ss),
2408                          MACHINE_THREAD_STATE_COUNT);
2409#endif
2410  MACH_CHECK_ERROR("setting thread state", kret);
2411} 
2412
2413/* This code runs in the exception handling thread, in response
2414   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2415   in response to a call to pseudo_sigreturn() from the specified
2416   user thread.)
2417   Find that context (the user thread's R3 points to it), then
2418   use that context to set the user thread's state.  When this
2419   function's caller returns, the Mach kernel will resume the
2420   user thread.
2421*/
2422
2423kern_return_t
2424do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2425{
2426  ExceptionInformation *xp;
2427
2428#ifdef DEBUG_MACH_EXCEPTIONS
2429  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2430#endif
2431  xp = tcr->pending_exception_context;
2432  if (xp) {
2433    tcr->pending_exception_context = NULL;
2434    tcr->valence = TCR_STATE_LISP;
2435    restore_mach_thread_state(thread, xp);
2436    raise_pending_interrupt(tcr);
2437  } else {
2438    Bug(NULL, "no xp here!\n");
2439  }
2440#ifdef DEBUG_MACH_EXCEPTIONS
2441  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2442#endif
2443  return KERN_SUCCESS;
2444} 
2445
2446ExceptionInformation *
2447create_thread_context_frame(mach_port_t thread, 
2448                            natural *new_stack_top)
2449{
2450#ifdef PPC64
2451  ppc_thread_state64_t ts;
2452#else
2453  ppc_thread_state_t ts;
2454#endif
2455  mach_msg_type_number_t thread_state_count;
2456  kern_return_t result;
2457  ExceptionInformation *pseudosigcontext;
2458  MCONTEXT_T mc;
2459  natural stackp, backlink;
2460
2461#ifdef PPC64
2462  thread_state_count = PPC_THREAD_STATE64_COUNT;
2463  result = thread_get_state(thread,
2464                            PPC_THREAD_STATE64,
2465                            (thread_state_t)&ts,
2466                            &thread_state_count);
2467#else
2468  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2469  result = thread_get_state(thread, 
2470                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2471                            (thread_state_t)&ts,
2472                            &thread_state_count);
2473#endif
2474 
2475  if (result != KERN_SUCCESS) {
2476    get_tcr(true);
2477    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2478  }
2479  stackp = ts.r1;
2480  backlink = stackp;
2481  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2482  stackp -= sizeof(*pseudosigcontext);
2483  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2484
2485  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2486  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2487  memmove(&(mc->ss),&ts,sizeof(ts));
2488
2489  thread_state_count = PPC_FLOAT_STATE_COUNT;
2490  thread_get_state(thread,
2491                   PPC_FLOAT_STATE,
2492                   (thread_state_t)&(mc->fs),
2493                   &thread_state_count);
2494
2495
2496#ifdef PPC64
2497  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2498#else
2499  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2500#endif
2501  thread_get_state(thread,
2502#ifdef PPC64
2503                   PPC_EXCEPTION_STATE64,
2504#else
2505                   PPC_EXCEPTION_STATE,
2506#endif
2507                   (thread_state_t)&(mc->es),
2508                   &thread_state_count);
2509
2510
2511  UC_MCONTEXT(pseudosigcontext) = mc;
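  /* Carve out a parameter-save area and a linkage area for a dummy C
     frame below the sigcontext, and store a backlink to the original
     r1 so that the stack remains walkable. */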
2512  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2513  stackp -= C_LINKAGE_LEN;
2514  *(natural *)ptr_from_lispobj(stackp) = backlink;
2515  if (new_stack_top) {
2516    *new_stack_top = stackp;
2517  }
2518  return pseudosigcontext;
2519}
2520
2521/*
2522  This code sets up the user thread so that it executes a "pseudo-signal
2523  handler" function when it resumes.  Create a linux sigcontext struct
2524  on the thread's stack and pass it as an argument to the pseudo-signal
2525  handler.
2526
2527  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2528  which will restore the thread's context.
2529
2530  If the handler invokes code that throws (or otherwise never sigreturn()'s
2531  to the context), that's fine.
2532
2533  Actually, check that: throw (and variants) may need to be careful and
2534  pop the tcr's xframe list until it's younger than any frame being
2535  entered.
2536*/
2537
2538int
2539setup_signal_frame(mach_port_t thread,
2540                   void *handler_address,
2541                   int signum,
2542                   int code,
2543                   TCR *tcr)
2544{
2545#ifdef PPC64
2546  ppc_thread_state64_t ts;
2547#else
2548  ppc_thread_state_t ts;
2549#endif
2550  ExceptionInformation *pseudosigcontext;
2551  int old_valence = tcr->valence;
2552  natural stackp;
2553
2554#ifdef DEBUG_MACH_EXCEPTIONS
2555  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2556#endif
2557  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2558  pseudosigcontext->uc_onstack = 0;
2559  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2560  tcr->pending_exception_context = pseudosigcontext;
2561  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2562 
2563
2564  /*
2565     We've now created a sigcontext on the thread's
2566     stack.  Set things up so that we call the handler (with appropriate
2567     args) when the thread's resumed.
2568  */
2569
2570  ts.srr0 = (natural) handler_address;
2571  ts.srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2572  ts.r1 = stackp;
2573  ts.r3 = signum;
2574  ts.r4 = (natural)pseudosigcontext;
2575  ts.r5 = (natural)tcr;
2576  ts.r6 = (natural)old_valence;
2577  ts.lr = (natural)pseudo_sigreturn;
2578
2579
2580#ifdef PPC64
2581  ts.r13 = xpGPR(pseudosigcontext,13);
2582  thread_set_state(thread,
2583                   PPC_THREAD_STATE64,
2584                   (thread_state_t)&ts,
2585                   PPC_THREAD_STATE64_COUNT);
2586#else
2587  thread_set_state(thread, 
2588                   MACHINE_THREAD_STATE,
2589                   (thread_state_t)&ts,
2590                   MACHINE_THREAD_STATE_COUNT);
2591#endif
2592#ifdef DEBUG_MACH_EXCEPTIONS
2593  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2594#endif
2595  return 0;
2596}
2597
2598
2599void
2600pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2601{
2602  signal_handler(signum, NULL, context, tcr, old_valence);
2603} 
2604
2605
2606int
2607thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2608{
2609#ifdef PPC64
2610  ppc_thread_state64_t ts;
2611#else
2612  ppc_thread_state_t ts;
2613#endif
2614  mach_msg_type_number_t thread_state_count;
2615
2616#ifdef PPC64
2617  thread_state_count = PPC_THREAD_STATE64_COUNT;
2618#else
2619  thread_state_count = PPC_THREAD_STATE_COUNT;
2620#endif
2621  thread_get_state(thread, 
2622#ifdef PPC64
2623                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2624#else
2625                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2626#endif
2627                   (thread_state_t)&ts,
2628                   &thread_state_count);
2629  if (enabled) {
2630    ts.srr1 |= MSR_FE0_FE1_MASK;
2631  } else {
2632    ts.srr1 &= ~MSR_FE0_FE1_MASK;
2633  }
2634  /*
2635     Hack-o-rama warning (isn't it about time for such a warning?):
2636     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2637     Our handler for lisp's use of pthread_kill() pushes a phony
2638     lisp frame on the stack and forces the context to resume at
2639     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2640     lisp frame contains the -real- address that process_interrupt
2641     should have returned to, and the fact that it's in a lisp
2642     frame should convince the GC to notice that address if it
2643     runs in the tiny time window between returning from our
2644     interrupt handler and ... here.
2645     If the top frame on the stack is a lisp frame, discard it
2646     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2647     just adjust ts.srr0 to skip over the UUO.
2648  */
2649  {
2650    lisp_frame *tos = (lisp_frame *)ts.r1,
2651      *next_frame = tos->backlink;
2652   
2653    if (tos == (next_frame -1)) {
2654      ts.srr0 = tos->savelr;
2655      ts.r1 = (LispObj) next_frame;
2656    } else {
2657      ts.srr0 += 4;
2658    }
2659  }
2660  thread_set_state(thread, 
2661#ifdef PPC64
2662                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2663#else
2664                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2665#endif
2666                   (thread_state_t)&ts,
2667#ifdef PPC64
2668                   PPC_THREAD_STATE64_COUNT
2669#else
2670                   PPC_THREAD_STATE_COUNT
2671#endif
2672                   );
2673
2674  return 0;
2675}
2676
2677/*
2678  This function runs in the exception handling thread.  It's
2679  called (by this precise name) from the library function "exc_server()"
2680  when the thread's exception ports are set up.  (exc_server() is called
2681  via mach_msg_server(), which is a function that waits for and dispatches
2682  on exception messages from the Mach kernel.)
2683
2684  This checks to see if the exception was caused by a pseudo_sigreturn()
2685  UUO; if so, it arranges for the thread to have its state restored
2686  from the specified context.
2687
2688  Otherwise, it tries to map the exception to a signal number and
2689  arranges that the thread run a "pseudo signal handler" to handle
2690  the exception.
2691
2692  Some exceptions could and should be handled here directly.
2693*/
2694
2695kern_return_t
2696catch_exception_raise(mach_port_t exception_port,
2697                      mach_port_t thread,
2698                      mach_port_t task, 
2699                      exception_type_t exception,
2700                      exception_data_t code_vector,
2701                      mach_msg_type_number_t code_count)
2702{
2703  int signum = 0, code = *code_vector, code1;
2704  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2705  kern_return_t kret;
2706
2707#ifdef DEBUG_MACH_EXCEPTIONS
2708  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2709#endif
2710
2711  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2712    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2713  } 
2714  if ((exception == EXC_BAD_INSTRUCTION) &&
2715      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2716      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2717       (code1 == (int)enable_fp_exceptions) ||
2718       (code1 == (int)disable_fp_exceptions))) {
2719    if (code1 == (int)pseudo_sigreturn) {
2720      kret = do_pseudo_sigreturn(thread, tcr);
2721#if 0
2722      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2723#endif
2724       
2725    } else if (code1 == (int)enable_fp_exceptions) {
2726      kret = thread_set_fp_exceptions_enabled(thread, true);
2727    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2728  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2729    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
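    /* 17 == KERN_INVALID_RIGHT; any non-KERN_SUCCESS reply presumably
       makes the kernel treat the exception as unhandled here, so that
       it propagates to any other handler. */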
2730    kret = 17;
2731  } else {
2732    switch (exception) {
2733    case EXC_BAD_ACCESS:
2734      signum = SIGSEGV;
2735      break;
2736       
2737    case EXC_BAD_INSTRUCTION:
2738      signum = SIGILL;
2739      break;
2740     
2741    case EXC_SOFTWARE:
2742      if (code == EXC_PPC_TRAP) {
2743        signum = SIGTRAP;
2744      }
2745      break;
2746     
2747    case EXC_ARITHMETIC:
2748      signum = SIGFPE;
2749      break;
2750
2751    default:
2752      break;
2753    }
2754    if (signum) {
2755      kret = setup_signal_frame(thread,
2756                                (void *)pseudo_signal_handler,
2757                                signum,
2758                                code,
2759                                tcr);
2760#if 0
2761      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2762#endif
2763
2764    } else {
2765      kret = 17;
2766    }
2767  }
2768
2769  return kret;
2770}
2771
2772
2773
2774typedef struct {
2775  mach_msg_header_t Head;
2776  /* start of the kernel processed data */
2777  mach_msg_body_t msgh_body;
2778  mach_msg_port_descriptor_t thread;
2779  mach_msg_port_descriptor_t task;
2780  /* end of the kernel processed data */
2781  NDR_record_t NDR;
2782  exception_type_t exception;
2783  mach_msg_type_number_t codeCnt;
2784  integer_t code[2];
2785  mach_msg_trailer_t trailer;
2786} exceptionRequest;
2787
2788
2789boolean_t
2790openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2791{
2792  static NDR_record_t _NDR = {0};
2793  kern_return_t handled;
2794  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2795  exceptionRequest *req = (exceptionRequest *) in;
2796
2797  reply->NDR = _NDR;
2798
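  /* Build the MIG reply header by hand; by MIG convention, a reply's
     msgh_id is the request's msgh_id + 100. */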
2799  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2800  out->msgh_remote_port = in->msgh_remote_port;
2801  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2802  out->msgh_local_port = MACH_PORT_NULL;
2803  out->msgh_id = in->msgh_id+100;
2804
2805  /* Could handle other exception flavors in the range 2401-2403 */
2806
2807
2808  if (in->msgh_id != 2401) {
2809    reply->RetCode = MIG_BAD_ID;
2810    return FALSE;
2811  }
2812  handled = catch_exception_raise(req->Head.msgh_local_port,
2813                                  req->thread.name,
2814                                  req->task.name,
2815                                  req->exception,
2816                                  req->code,
2817                                  req->codeCnt);
2818  reply->RetCode = handled;
2819  return TRUE;
2820}
2821
2822/*
2823  The initial function for an exception-handling thread.
2824*/
2825
2826void *
2827exception_handler_proc(void *arg)
2828{
2829  extern boolean_t exc_server();
2830  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2831
2832  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2833  /* Should never return. */
2834  abort();
2835}
2836
2837
2838
2839mach_port_t
2840mach_exception_port_set()
2841{
2842  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2843  kern_return_t kret; 
2844  if (__exception_port_set == MACH_PORT_NULL) {
2845    kret = mach_port_allocate(mach_task_self(),
2846                              MACH_PORT_RIGHT_PORT_SET,
2847                              &__exception_port_set);
2848    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2849    create_system_thread(0,
2850                         NULL,
2851                         exception_handler_proc, 
2852                         (void *)((natural)__exception_port_set));
2853  }
2854  return __exception_port_set;
2855}
2856
2857/*
2858  Set up a new thread to handle those exceptions specified by
2859  the mask "which".  This involves creating a special Mach
2860  message port, telling the Mach kernel to send exception
2861  messages for the calling thread to that port, and setting
2862  up a handler thread which listens for and responds to
2863  those messages.
2864
2865*/
2866
2867/*
2868  Establish the lisp thread's TCR as its exception port, and determine
2869  whether any other ports have been established by foreign code for
2870  exceptions that lisp cares about.
2871
2872  If this happens at all, it should happen on return from foreign
2873  code and on entry to lisp code via a callback.
2874
2875  This is a lot of trouble (and overhead) to support Java, or other
2876  embeddable systems that clobber their caller's thread exception ports.
2877 
2878*/
2879kern_return_t
2880tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2881{
2882  kern_return_t kret;
2883  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2884  int i;
2885  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2886  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2887  exception_mask_t mask = 0;
2888
2889  kret = thread_swap_exception_ports(thread,
2890                                     LISP_EXCEPTIONS_HANDLED_MASK,
2891                                     lisp_port,
2892                                     EXCEPTION_DEFAULT,
2893                                     THREAD_STATE_NONE,
2894                                     fxs->masks,
2895                                     &n,
2896                                     fxs->ports,
2897                                     fxs->behaviors,
2898                                     fxs->flavors);
2899  if (kret == KERN_SUCCESS) {
2900    fxs->foreign_exception_port_count = n;
2901    for (i = 0; i < n; i ++) {
2902      foreign_port = fxs->ports[i];
2903
2904      if ((foreign_port != lisp_port) &&
2905          (foreign_port != MACH_PORT_NULL)) {
2906        mask |= fxs->masks[i];
2907      }
2908    }
2909    tcr->foreign_exception_status = (int) mask;
2910  }
2911  return kret;
2912}
2913
2914kern_return_t
2915tcr_establish_lisp_exception_port(TCR *tcr)
2916{
2917  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2918}
2919
2920/*
2921  Do this when calling out to or returning from foreign code, if
2922  any conflicting foreign exception ports were established when we
2923  last entered lisp code.
2924*/
2925kern_return_t
2926restore_foreign_exception_ports(TCR *tcr)
2927{
2928  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2929 
2930  if (m) {
2931    MACH_foreign_exception_state *fxs  = 
2932      (MACH_foreign_exception_state *) tcr->native_thread_info;
2933    int i, n = fxs->foreign_exception_port_count;
2934    exception_mask_t tm;
2935
2936    for (i = 0; i < n; i++) {
2937      if ((tm = fxs->masks[i]) & m) {
2938        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2939                                   tm,
2940                                   fxs->ports[i],
2941                                   fxs->behaviors[i],
2942                                   fxs->flavors[i]);
2943      }
2944    }
2945  }
2946  return KERN_SUCCESS;
2947}
2948
2949/*
2950  This assumes that a Mach port (to be used as the thread's exception port) whose
2951  "name" matches the TCR's 32-bit address has already been allocated.
2952*/
2953
2954kern_return_t
2955setup_mach_exception_handling(TCR *tcr)
2956{
2957  mach_port_t
2958    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2959    task_self = mach_task_self();
2960  kern_return_t kret;
2961
2962  kret = mach_port_insert_right(task_self,
2963                                thread_exception_port,
2964                                thread_exception_port,
2965                                MACH_MSG_TYPE_MAKE_SEND);
2966  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2967
2968  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2969  if (kret == KERN_SUCCESS) {
2970    mach_port_t exception_port_set = mach_exception_port_set();
2971
2972    kret = mach_port_move_member(task_self,
2973                                 thread_exception_port,
2974                                 exception_port_set);
2975  }
2976  return kret;
2977}
2978
2979void
2980darwin_exception_init(TCR *tcr)
2981{
2982  void tcr_monitor_exception_handling(TCR*, Boolean);
2983  kern_return_t kret;
2984  MACH_foreign_exception_state *fxs = 
2985    calloc(1, sizeof(MACH_foreign_exception_state));
2986 
2987  tcr->native_thread_info = (void *) fxs;
2988
2989  if ((kret = setup_mach_exception_handling(tcr))
2990      != KERN_SUCCESS) {
2991    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
2992    terminate_lisp();
2993  }
2994  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
2995  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
2996}
2997
2998/*
2999  The tcr is the "name" of the corresponding thread's exception port.
3000  Destroying the port should remove it from all port sets of which it's
3001  a member (notably, the exception port set.)
3002*/
3003void
3004darwin_exception_cleanup(TCR *tcr)
3005{
3006  void *fxs = tcr->native_thread_info;
3007  extern Boolean use_mach_exception_handling;
3008
3009  if (fxs) {
3010    tcr->native_thread_info = NULL;
3011    free(fxs);
3012  }
3013  if (use_mach_exception_handling) {
3014    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3015    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3016  }
3017}
3018
3019
3020Boolean
3021suspend_mach_thread(mach_port_t mach_thread)
3022{
3023  kern_return_t status;
3024  Boolean aborted = false;
3025 
3026  do {
3027    aborted = false;
3028    status = thread_suspend(mach_thread);
3029    if (status == KERN_SUCCESS) {
3030      status = thread_abort_safely(mach_thread);
3031      if (status == KERN_SUCCESS) {
3032        aborted = true;
3033      } else {
3034        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3035        thread_resume(mach_thread);
3036      }
3037    } else {
3038      return false;
3039    }
3040  } while (! aborted);
3041  return true;
3042}
3043
3044/*
3045  Only do this if pthread_kill indicated that the pthread isn't
3046  listening to signals anymore, as can happen as soon as pthread_exit()
3047  is called on Darwin.  The thread could still call out to lisp as it
3048  is exiting, so we need another way to suspend it in this case.
3049*/
3050Boolean
3051mach_suspend_tcr(TCR *tcr)
3052{
3053  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3054  ExceptionInformation *pseudosigcontext;
3055  Boolean result = false;
3056 
3057  result = suspend_mach_thread(mach_thread);
3058  if (result) {
3059    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3060    pseudosigcontext->uc_onstack = 0;
3061    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3062    tcr->suspend_context = pseudosigcontext;
3063  }
3064  return result;
3065}
3066
3067void
3068mach_resume_tcr(TCR *tcr)
3069{
3070  ExceptionInformation *xp;
3071  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3072 
3073  xp = tcr->suspend_context;
3074#ifdef DEBUG_MACH_EXCEPTIONS
3075  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3076          tcr, tcr->pending_exception_context);
3077#endif
3078  tcr->suspend_context = NULL;
3079  restore_mach_thread_state(mach_thread, xp);
3080#ifdef DEBUG_MACH_EXCEPTIONS
3081  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3082          tcr, tcr->pending_exception_context);
3083#endif
3084  thread_resume(mach_thread);
3085}
3086
3087void
3088fatal_mach_error(char *format, ...)
3089{
3090  va_list args;
3091  char s[512];
3092 
3093
3094  va_start(args, format);
3095  vsnprintf(s, sizeof(s),format, args);
3096  va_end(args);
3097
3098  Fatal("Mach error", s);
3099}
3100
3101void
3102pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3103{
3104  interrupt_handler(signum, NULL, context);
3105}
3106
3107int
3108mach_raise_thread_interrupt(TCR *target)
3109{
3110  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3111  kern_return_t kret;
3112  Boolean result = false;
3113  TCR *current = get_tcr(false);
3114  thread_basic_info_data_t info; 
3115  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3116
3117  LOCK(lisp_global(TCR_AREA_LOCK), current);
3118
3119  if (suspend_mach_thread(mach_thread)) {
3120    if (thread_info(mach_thread,
3121                    THREAD_BASIC_INFO,
3122                    (thread_info_t)&info,
3123                    &info_count) == KERN_SUCCESS) {
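      /* Only interpose if ours is the only suspension in effect; a
         deeper suspend count suggests the thread was already stopped
         for some other reason (the GC, for instance). */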
3124      if (info.suspend_count == 1) {
3125        if ((target->valence == TCR_STATE_LISP) &&
3126            (!target->unwinding) &&
3127            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3128          kret = setup_signal_frame(mach_thread,
3129                                    (void *)pseudo_interrupt_handler,
3130                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3131                                    0,
3132                                    target);
3133          if (kret == KERN_SUCCESS) {
3134            result = true;
3135          }
3136        }
3137      }
3138    }
3139    if (! result) {
3140      target->interrupt_pending = 1 << fixnumshift;
3141    }
3142    thread_resume(mach_thread);
3143   
3144  }
3145  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3146  return 0;
3147}
3148
3149#endif