source: trunk/source/lisp-kernel/ppc-exceptions.c @ 8579

Last change on this file since 8579 was 8579, checked in by gb, 13 years ago

tcr-level suspend/resume/interrupt: trap into the kernel, don't call into
it. This (mostly) has to do with a Leopard bug workaround and this
commit only implements the PPC side of things. x8664-related changes
and new images coming "soon", for some value of "soon".

/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL. 

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence. 

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
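/* With 32-bit unsigned arithmetic these masks evaluate to
   MSR_FE0_MASK == 0x800 and MSR_FE1_MASK == 0x100: the two MSR bits
   that select the floating-point exception mode (disabled, imprecise,
   or precise). */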
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif
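/* Hedged usage sketch (illustrative only, not kernel code): the intended
   pattern is to run lisp code in "precise" mode so FP traps arrive
   synchronously, and to disable them around foreign code that may set
   FPSCR exception bits benignly.  "foreign_thunk" is hypothetical. */
static void
example_call_with_fp_exceptions_disabled(void (*foreign_thunk)(void))
{
  disable_fp_exceptions();      /* PR_FP_EXC_DISABLED on Linux */
  foreign_thunk();
  enable_fp_exceptions();       /* back to PR_FP_EXC_PRECISE */
}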

/*
  Handle exceptions.

*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);






int
page_size = 4096;

int
log2_page_size = 12;





/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
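/* Illustrative sketch (assumptions, not kernel code) of why
   "(signed_natural)-((short)prev_instr)" recovers the decrement: a D-form
   "addi RT,RA,SI" carries a signed 16-bit immediate in its low half, so
   casting the opcode to short yields SI.  The "example_" helpers below
   mirror what RT_field/RA_field extract from a 32-bit opcode. */
static unsigned example_rt_field(opcode instr) { return (instr >> 21) & 0x1f; }
static unsigned example_ra_field(opcode instr) { return (instr >> 16) & 0x1f; }
static int      example_si_field(opcode instr) { return (short)(instr & 0xffff); }
/* For "addi allocptr,allocptr,-16", example_si_field() returns -16, so the
   function above reports a 16-byte allocation. */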


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
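/* For orientation, a schematic consing sequence (hedged: the mnemonics are
   illustrative, not verbatim compiler output):

     subi  allocptr,allocptr,(sizeof(cons)-fulltag_cons) ; allocate & tag
     twllt allocptr,allocbase        ; ALLOC_TRAP_INSTRUCTION on overflow
     stw   ry,<cdr>(allocptr)        ; STORE_CDR_ALLOCPTR_INSTRUCTION
     stw   rx,<car>(allocptr)        ; STORE_CAR_ALLOCPTR_INSTRUCTION
     mr    rz,allocptr               ; result register <- tagged cons
     ...                             ; UNTAG_ALLOCPTR_INSTRUCTION

   finish_allocating_cons() can resume anywhere after the trap and replays
   the remaining stores/assignment against the exception context. */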

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr & STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
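/* Accounting note: allocptr moves downward from allocbase as objects are
   allocated, so the bytes consumed since the last synchronization point are
   (last - current): e.g. if last_allocptr was #x10000 and the thread's
   allocptr is now #xFF00, 0x100 bytes were allocated in between. */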

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
        release_readonly_area();
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* One of the thread's stacks just overflowed (reg says which).
     Signal an error_stack_overflow condition; the thread will keep
     trapping until all stacks are well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
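/* Hedged sketch of the rounding arithmetic above, assuming (as its uses here
   imply) that align_to_power_of_2(n, k), defined elsewhere in the kernel,
   rounds n up to a multiple of 2**k: */
static natural
example_align_up(natural n, natural log2_quantum)
{
  natural mask = (((natural)1) << log2_quantum) - 1;
  return (n + mask) & ~mask;  /* e.g. example_align_up(0x12345, 16) == 0x20000 */
}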


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

int
gc_like_from_xp(ExceptionInformation *xp, 
                int(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  ExceptionInformation* other_xp;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}






protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}




OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);

}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call. We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif




/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.)  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that the case of two or more threads writing to the same
  page (before either of them can run this handler) is benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
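/* Standalone sketch of the opcode test above: a D-form "stw rs,d(ra)" puts
   major opcode 36 in its top 6 bits (cf. the #if 0 table in is_write_fault),
   so recognizing one is a shift and compare.  The "example_" name is ours. */
static int
example_is_stw_d_form(opcode instr)
{
  return (instr >> 26) == 36;   /* 36 == major_opcode_STW */
}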






OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}


int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but typically unavailable on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info,
                       int old_valence)
{
  unsigned oldMQ;
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;


  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
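/* Note on the shift: "delta" is a byte count, but xpPC() is typed as a
   pointer to 32-bit opcodes, so the pointer arithmetic advances by delta/4
   instructions; adjust_exception_pc(xp, 4) skips exactly one instruction. */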


/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;

}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
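/* Field-layout note: errnum is taken from bits 16-25 of the UUO, i.e. it
   overlays the ra and rt fields decoded above; that's why the error cases
   pass rb (which doesn't overlap errnum) to handle_error as the operand
   register. */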

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
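/* The "<< 2" converts the code vector's element count (32-bit opcodes) into
   a byte length, so the range test above compares byte addresses. */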

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  sigset_t mask;
  natural  callback_ptr, i;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}






/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  unsigned  instr, err_arg1 = 0, err_arg2 = 0, err_arg3 = 0;
  int       ra, rs, fn_reg = 0;
  char *    error_msg = NULL;
  char      name[kNameBufLen];
  LispObj   cmain = nrs_CMAIN.vcell;
  Boolean   event_poll_p = false;
  int old_interrupt_level = 0;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Classify the trap at the exception PC: return 1 for a conditional trap
   (to be handled via handle_trap), 0 for an unconditional one (left to
   the debugger). */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
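/* Examples: "twgt ra,rb" (TO_GT alone) is conditional; "tweq r5,r5" (TO_EQ
   with identical registers) always traps, as does a TO mask like
   TO_LT|TO_EQ|TO_GT, which covers every possible comparison outcome. */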

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   pname;
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
} 

void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }


  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if (noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence)) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
1824
1825/*
1826  If it looks like we're in the middle of an atomic operation, make
1827  it seem as if that operation is either complete or hasn't started
1828  yet.
1829
1830  The cases handled include:
1831
1832  a) storing into a newly-allocated lisp frame on the stack.
1833  b) marking a newly-allocated TSP frame as containing "raw" data.
1834  c) consing: the GC has its own ideas about how this should be
1835     handled, but other callers would be best advised to back
1836     up or move forward, according to whether we're in the middle
1837     of allocating a cons cell or allocating a uvector.
1838  d) a STMW to the vsp
1839  e) EGC write-barrier subprims.
1840*/
1841
1842extern opcode
1843  egc_write_barrier_start,
1844  egc_write_barrier_end, 
1845  egc_store_node_conditional, 
1846  egc_set_hash_key,
1847  egc_gvset,
1848  egc_rplaca,
1849  egc_rplacd;
1850
1851
1852extern opcode ffcall_return_window, ffcall_return_window_end;
1853
1854void
1855pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1856{
1857  pc program_counter = xpPC(xp);
1858  opcode instr = *program_counter;
1859  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1860  LispObj cur_allocptr = xpGPR(xp, allocptr);
1861  int allocptr_tag = fulltag_of(cur_allocptr);
1862 
1863
1864
1865  if ((program_counter < &egc_write_barrier_end) && 
1866      (program_counter >= &egc_write_barrier_start)) {
1867    LispObj *ea = 0, val, root;
1868    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1869    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1870
1871    if (program_counter >= &egc_store_node_conditional) {
1872      if ((program_counter == &egc_store_node_conditional) || ! (xpCCR(xp) & 0x20000000)) {
1873        /* The conditional store either hasn't been attempted yet, or
1874           has failed.  No need to adjust the PC, or do memoization. */
1875        return;
1876      }
1877      val = xpGPR(xp,arg_z);
1878      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1879      xpGPR(xp,arg_z) = t_value;
1880      need_store = false;
1881    } else if (program_counter >= &egc_set_hash_key) {
1882      root = xpGPR(xp,arg_x);
1883      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1884      need_memoize_root = true;
1885    } else if (program_counter >= &egc_gvset) {
1886      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1887      val = xpGPR(xp,arg_z);
1888    } else if (program_counter >= &egc_rplacd) {
1889      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1890      val = xpGPR(xp,arg_z);
1891    } else {                      /* egc_rplaca */
1892      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1893      val = xpGPR(xp,arg_z);
1894    }
1895    if (need_store) {
1896      *ea = val;
1897    }
1898    if (need_check_memo) {
1899      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1900      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1901          ((LispObj)ea < val)) {
1902        atomic_set_bit(refbits, bitnumber);
1903        if (need_memoize_root) {
1904          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1905          atomic_set_bit(refbits, bitnumber);
1906        }
1907      }
1908    }
1909    set_xpPC(xp, xpLR(xp));
1910    return;
1911  }
1912
1913
1914  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
1915    LispObj tsp_val = xpGPR(xp,tsp);
1916   
1917    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1918    adjust_exception_pc(xp, 4);
1919    return;
1920  }
1921 
1922  if (frame->backlink == (frame+1)) {
1923    if (
1924#ifdef PPC64
1925        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1926        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1927#else
1928        (major_opcode_p(instr, major_opcode_STW)) && 
1929#endif
1930        (RA_field(instr) == sp) &&
1931        /* There are a few places in the runtime that store into
1932           a previously-allocated frame atop the stack when
1933           throwing values around.  We only care about the case
1934           where the frame was newly allocated, in which case
1935           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1936           a few instructions before the current program counter.
1937           (The whole point here is that a newly allocated frame
1938           might contain random values that we don't want the
1939           GC to see; a previously allocated frame should already
1940           be completely initialized.)
1941        */
1942        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1943         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1944         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1945#ifdef PPC64
1946      int disp = DS_field(instr);
1947#else     
1948      int disp = D_field(instr);
1949#endif
1950
1951
1952      if (disp < (4*node_size)) {
1953#if 0
1954        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1955#endif
1956        frame->savevsp = 0;
1957        if (disp < (3*node_size)) {
1958          frame->savelr = 0;
1959          if (disp == node_size) {
1960            frame->savefn = 0;
1961          }
1962        }
1963      }
1964      return;
1965    }
1966  }
1967
1968  if (allocptr_tag != tag_fixnum) {
1969    signed_natural disp = allocptr_displacement(xp);
1970
1971    if (disp) {
1972      /* Being architecturally "at" the alloc trap doesn't tell
1973         us much (in particular, it doesn't tell us whether
1974         or not the thread has committed to taking the trap
1975         and is waiting for the exception lock (or waiting
1976         for the Mach exception thread to tell it how bad
1977         things are) or is about to execute a conditional
1978         trap.)
1979         Regardless of which case applies, we want the
1980         other thread to take (or finish taking) the
1981         trap, and we don't want it to consider its
1982         current allocptr to be valid.
1983         The difference between this case (suspend other
1984         thread for GC) and the previous case (suspend
1985         current thread for interrupt) is solely a
1986         matter of what happens after we leave this
1987         function: some non-current thread will stay
1988         suspended until the GC finishes, then take
1989         (or start processing) the alloc trap.   The
1990         current thread will go off and do PROCESS-INTERRUPT
1991         or something, and may return from the interrupt
1992         and need to finish the allocation that got interrupted.
1993      */
1994
1995      if (alloc_disp) {
1996        *alloc_disp = disp;
1997        xpGPR(xp,allocptr) += disp;
1998        /* Leave the PC at the alloc trap.  When the interrupt
1999           handler returns, it'll decrement allocptr by disp
2000           and the trap may or may not be taken.
2001        */
2002      } else {
2003        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2004        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2005        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2006      }
2007    } else {
2008#ifdef DEBUG
2009      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2010#endif
2011      /* If we're already past the alloc_trap, finish allocating
2012         the object. */
2013      if (allocptr_tag == fulltag_cons) {
2014        finish_allocating_cons(xp);
2015#ifdef DEBUG
2016          fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
2017                  tcr);
2018#endif
2019      } else {
2020        if (allocptr_tag == fulltag_misc) {
2021#ifdef DEBUG
2022          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
2023                  tcr);
2024#endif
2025          finish_allocating_uvector(xp);
2026        } else {
2027          Bug(xp, "what's being allocated here ?");
2028        }
2029      }
2030      /* Whatever we finished allocating, reset allocptr/allocbase to
2031         VOID_ALLOCPTR */
2032      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2033    }
2034    return;
2035  }
2036
2037  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2038    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2039    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2040#if 0
2041        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2042#endif
2043
2044    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2045      deref(frame,idx) = 0;
2046    }
2047    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2048    return;
2049  }
2050
2051#ifndef PPC64
2052  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2053      (RA_field(instr) == vsp)) {
2054    int r;
2055    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2056   
2057    for (r = RS_field(instr); r <= 31; r++) {
2058      *vspptr++ = xpGPR(xp,r);
2059    }
2060    adjust_exception_pc(xp, 4);
2061  }
2062#endif
2063}
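
/* A hedged usage sketch of pc_luser_xp's alloc_disp out-parameter,
   condensed from interrupt_handler() below: if the thread was
   interrupted mid-allocation, pc_luser_xp bumps allocptr forward by
   the displacement and reports it, and the caller must undo that
   adjustment once the interrupt has been processed. */
#if 0
  signed_natural disp = 0;

  pc_luser_xp(context, tcr, &disp);
  /* ... run the interrupt handler while allocptr looks valid ... */
  if (disp) {
    xpGPR(context, allocptr) -= disp;  /* re-arm the allocation trap */
  }
#endif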
2064
2065void
2066interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2067{
2068  TCR *tcr = get_interrupt_tcr(false);
2069  if (tcr) {
2070    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2071      tcr->interrupt_pending = 1 << fixnumshift;
2072    } else {
2073      LispObj cmain = nrs_CMAIN.vcell;
2074
2075      if ((fulltag_of(cmain) == fulltag_misc) &&
2076          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2077        /*
2078           This thread can (allegedly) take an interrupt now.
2079           It's tricky to do that if we're executing
2080           foreign code (especially Linuxthreads code, much
2081           of which isn't reentrant.)
2082           If we're unwinding the stack, we also want to defer
2083           the interrupt.
2084        */
2085        if ((tcr->valence != TCR_STATE_LISP) ||
2086            (tcr->unwinding != 0)) {
2087          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2088        } else {
2089          xframe_list xframe_link;
2090          int old_valence;
2091          signed_natural disp=0;
2092         
2093          pc_luser_xp(context, tcr, &disp);
2094          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2095          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2096#ifdef DEBUG
2097          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2098#endif
2099          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2100          if (disp) {
2101            xpGPR(context,allocptr) -= disp;
2102          }
2103          unlock_exception_lock_in_handler(tcr);
2104#ifdef DEBUG
2105          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2106#endif
2107          exit_signal_handler(tcr, old_valence);
2108        }
2109      }
2110    }
2111  }
2112#ifdef DARWIN
2113    DarwinSigReturn(context);
2114#endif
2115}
2116
2117
2118
2119void
2120install_signal_handler(int signo, void *handler)
2121{
2122  struct sigaction sa;
2123 
2124  sa.sa_sigaction = (void *)handler;
2125  sigfillset(&sa.sa_mask);
2126  sa.sa_flags = 
2127    0 /* SA_RESTART */
2128    | SA_SIGINFO
2129#ifdef DARWIN
2130#ifdef PPC64
2131    | SA_64REGSET
2132#endif
2133#endif
2134    ;
2135
2136  sigaction(signo, &sa, NULL);
2137}
2138
2139void
2140install_pmcl_exception_handlers()
2141{
2142#ifdef DARWIN
2143  extern Boolean use_mach_exception_handling;
2144#endif
2145
2146  Boolean install_signal_handlers_for_exceptions =
2147#ifdef DARWIN
2148    !use_mach_exception_handling
2149#else
2150    true
2151#endif
2152    ;
2153  if (install_signal_handlers_for_exceptions) {
2154    extern int no_sigtrap;
2155    install_signal_handler(SIGILL, (void *)signal_handler);
2156    if (no_sigtrap != 1) {
2157      install_signal_handler(SIGTRAP, (void *)signal_handler);
2158    }
2159    install_signal_handler(SIGBUS,  (void *)signal_handler);
2160    install_signal_handler(SIGSEGV, (void *)signal_handler);
2161    install_signal_handler(SIGFPE, (void *)signal_handler);
2162  }
2163 
2164  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2165                         (void *)interrupt_handler);
2166  signal(SIGPIPE, SIG_IGN);
2167}
2168
2169void
2170quit_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2171{
2172  TCR *tcr = get_tcr(false);
2173  area *a;
2174  sigset_t mask;
2175 
2176  sigemptyset(&mask);
2177
2178  if (tcr) {
2179    tcr->valence = TCR_STATE_FOREIGN;
2180    a = tcr->vs_area;
2181    if (a) {
2182      a->active = a->high;
2183    }
2184    a = tcr->ts_area;
2185    if (a) {
2186      a->active = a->high;
2187    }
2188    a = tcr->cs_area;
2189    if (a) {
2190      a->active = a->high;
2191    }
2192  }
2193 
2194  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2195  pthread_exit(NULL);
2196}
2197
2198void
2199thread_signal_setup()
2200{
2201  thread_suspend_signal = SIG_SUSPEND_THREAD;
2202
2203  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2204  install_signal_handler(SIGQUIT, (void *)quit_handler);
2205}
2206
2207
2208
2209void
2210unprotect_all_areas()
2211{
2212  protected_area_ptr p;
2213
2214  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2215    unprotect_area(p);
2216  }
2217}
2218
2219/*
2220  A binding subprim has just done "twlle limit_regno,idx_regno" and
2221  the trap's been taken.  Extend the tcr's tlb so that the index will
2222  be in bounds and the new limit will be on a page boundary, filling
2223  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2224  the tcr fields and the registers in the xp and return true if this
2225  all works, false otherwise.
2226
2227  Note that the tlb was allocated via malloc, so realloc can do some
2228  of the hard work.
2229*/
2230Boolean
2231extend_tcr_tlb(TCR *tcr, 
2232               ExceptionInformation *xp, 
2233               unsigned limit_regno,
2234               unsigned idx_regno)
2235{
2236  unsigned
2237    index = (unsigned) (xpGPR(xp,idx_regno)),
2238    old_limit = tcr->tlb_limit,
2239    new_limit = align_to_power_of_2(index+1,12),
2240    new_bytes = new_limit-old_limit;
2241  LispObj
2242    *old_tlb = tcr->tlb_pointer,
2243    *new_tlb = realloc(old_tlb, new_limit),
2244    *work;
2245
2246  if (new_tlb == NULL) {
2247    return false;
2248  }
2249 
2250  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2251
2252  while (new_bytes) {
2253    *work++ = no_thread_local_binding_marker;
2254    new_bytes -= sizeof(LispObj);
2255  }
2256  tcr->tlb_pointer = new_tlb;
2257  tcr->tlb_limit = new_limit;
2258  xpGPR(xp, limit_regno) = new_limit;
2259  return true;
2260}
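
/* A worked example of the limit arithmetic above, assuming that
   align_to_power_of_2(n, 12) rounds n up to the next multiple of
   2^12 (so that the new limit lands on a page boundary): */
#if 0
  /* Suppose the trapping index is 5000 and tlb_limit is 4096: */
  natural new_limit = align_to_power_of_2(5000 + 1, 12); /* => 8192 */
  natural new_bytes = new_limit - 4096;                  /* => 4096 */
  /* realloc the tlb to 8192 bytes, fill the new page with
     no_thread_local_binding_marker, then publish the new limit in
     the tcr and in the trapping thread's limit register. */
#endif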
2261
2262
2263
2264void
2265exception_init()
2266{
2267  install_pmcl_exception_handlers();
2268}
2269
2270
2271
2272
2273
2274#ifdef DARWIN
2275
2276
2277#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2278#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2279
2280
2281#if USE_MACH_EXCEPTION_LOCK
2282pthread_mutex_t _mach_exception_lock, *mach_exception_lock;
2283#endif
2284
2285#define LISP_EXCEPTIONS_HANDLED_MASK \
2286 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2287
2288/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2289#define NUM_LISP_EXCEPTIONS_HANDLED 4
2290
2291typedef struct {
2292  int foreign_exception_port_count;
2293  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2294  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2295  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2296  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2297} MACH_foreign_exception_state;
2298
2299
2300
2301
2302/*
2303  Mach's exception mechanism works a little better than its signal
2304  mechanism (and, not incidentally, it gets along with GDB a lot
2305  better.)
2306
2307  Initially, we install an exception handler to handle each native
2308  thread's exceptions.  This process involves creating a distinguished
2309  thread which listens for kernel exception messages on a set of
2310  0 or more thread exception ports.  As threads are created, they're
2311  added to that port set; a thread's exception port is destroyed
2312  (and therefore removed from the port set) when the thread exits.
2313
2314  A few exceptions can be handled directly in the handler thread;
2315  others require that we resume the user thread (and that the
2316  exception thread resumes listening for exceptions.)  The user
2317  thread might eventually want to return to the original context
2318  (possibly modified somewhat.)
2319
2320  As it turns out, the simplest way to force the faulting user
2321  thread to handle its own exceptions is to do pretty much what
2322  signal() does: the exception handling thread sets up a sigcontext
2323  on the user thread's stack and forces the user thread to resume
2324  execution as if a signal handler had been called with that
2325  context as an argument.  We can use a distinguished UUO at a
2326  distinguished address to do something like sigreturn(); that'll
2327  have the effect of resuming the user thread's execution in
2328  the (pseudo-) signal context.
2329
2330  Since:
2331    a) we have miles of code in C and in Lisp that knows how to
2332    deal with Linux sigcontexts
2333    b) Linux sigcontexts contain a little more useful information
2334    (the DAR, DSISR, etc.) than their Darwin counterparts
2335    c) we have to create a sigcontext ourselves when calling out
2336    to the user thread: we aren't really generating a signal, just
2337    leveraging existing signal-handling code.
2338
2339  we create a Linux sigcontext struct.
2340
2341  Simple?  Hopefully, from the outside, it is ...
2342
2343  We want the process of passing a thread's own context to it to
2344  appear to be atomic: in particular, we don't want the GC to suspend
2345  a thread that's had an exception but has not yet had its user-level
2346  exception handler called, and we don't want the thread's exception
2347  context to be modified by a GC while the Mach handler thread is
2348  copying it around.  On Linux (and on Jaguar), we avoid this issue
2349  because (a) the kernel sets up the user-level signal handler and
2350  (b) the signal handler blocks signals (including the signal used
2351  by the GC to suspend threads) until tcr->xframe is set up.
2352
2353  The GC and the Mach server thread therefore contend for the lock
2354  "mach_exception_lock".  The Mach server thread holds the lock
2355  when copying exception information between the kernel and the
2356  user thread; the GC holds this lock during most of its execution
2357  (delaying exception processing until it can be done without
2358  GC interference.)
2359
2360*/
2361
2362#ifdef PPC64
2363#define C_REDZONE_LEN           320
2364#define C_STK_ALIGN             32
2365#else
2366#define C_REDZONE_LEN           224
2367#define C_STK_ALIGN             16
2368#endif
2369#define C_PARAMSAVE_LEN         64
2370#define C_LINKAGE_LEN           48
2371
2372#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
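
/* TRUNC_DOWN(a,b,c) subtracts b from a and rounds the result down to
   a multiple of c.  A worked example with a hypothetical stack
   pointer, skipping the 32-bit red zone with 16-byte alignment: */
#if 0
  natural sp = 0xbffff508;
  sp = TRUNC_DOWN(sp, C_REDZONE_LEN, C_STK_ALIGN);
  /* 0xbffff508 - 224 = 0xbffff428, which rounds down to the
     16-byte-aligned address 0xbffff420. */
#endif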
2373
2374void
2375fatal_mach_error(char *format, ...);
2376
2377#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);} } while (0)
2378
2379
2380void
2381restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2382{
2383  int i, j;
2384  kern_return_t kret;
2385  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2386
2387  /* Set the thread's FP state from the pseudosigcontext */
2388  kret = thread_set_state(thread,
2389                          PPC_FLOAT_STATE,
2390                          (thread_state_t)&(mc->__fs),
2391                          PPC_FLOAT_STATE_COUNT);
2392
2393  MACH_CHECK_ERROR("setting thread FP state", kret);
2394
2395  /* The thread'll be as good as new ... */
2396#ifdef PPC64
2397  kret = thread_set_state(thread,
2398                          PPC_THREAD_STATE64,
2399                          (thread_state_t)&(mc->__ss),
2400                          PPC_THREAD_STATE64_COUNT);
2401#else
2402  kret = thread_set_state(thread, 
2403                          MACHINE_THREAD_STATE,
2404                          (thread_state_t)&(mc->__ss),
2405                          MACHINE_THREAD_STATE_COUNT);
2406#endif
2407  MACH_CHECK_ERROR("setting thread state", kret);
2408} 
2409
2410/* This code runs in the exception handling thread, in response
2411   to an attempt to execute the UUO at "pseudo_sigreturn" (i.e.,
2412   in response to a call to pseudo_sigreturn() from the specified
2413   user thread.)
2414   Find that context (the user thread's R3 points to it), then
2415   use that context to set the user thread's state.  When this
2416   function's caller returns, the Mach kernel will resume the
2417   user thread.
2418*/
2419
2420kern_return_t
2421do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2422{
2423  ExceptionInformation *xp;
2424
2425#ifdef DEBUG_MACH_EXCEPTIONS
2426  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2427#endif
2428  xp = tcr->pending_exception_context;
2429  if (xp) {
2430    tcr->pending_exception_context = NULL;
2431    tcr->valence = TCR_STATE_LISP;
2432    restore_mach_thread_state(thread, xp);
2433    raise_pending_interrupt(tcr);
2434  } else {
2435    Bug(NULL, "no xp here!\n");
2436  }
2437#ifdef DEBUG_MACH_EXCEPTIONS
2438  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2439#endif
2440  return KERN_SUCCESS;
2441} 
2442
2443ExceptionInformation *
2444create_thread_context_frame(mach_port_t thread, 
2445                            natural *new_stack_top)
2446{
2447#ifdef PPC64
2448  ppc_thread_state64_t ts;
2449#else
2450  ppc_thread_state_t ts;
2451#endif
2452  mach_msg_type_number_t thread_state_count;
2453  kern_return_t result;
2454  int i,j;
2455  ExceptionInformation *pseudosigcontext;
2456  MCONTEXT_T mc;
2457  natural stackp, backlink;
2458
2459#ifdef PPC64
2460  thread_state_count = PPC_THREAD_STATE64_COUNT;
2461  result = thread_get_state(thread,
2462                            PPC_THREAD_STATE64,
2463                            (thread_state_t)&ts,
2464                            &thread_state_count);
2465#else
2466  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2467  result = thread_get_state(thread, 
2468                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2469                            (thread_state_t)&ts,
2470                            &thread_state_count);
2471#endif
2472 
2473  if (result != KERN_SUCCESS) {
2474    get_tcr(true);
2475    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2476  }
2477  stackp = ts.__r1;
2478  backlink = stackp;
2479  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2480  stackp -= sizeof(*pseudosigcontext);
2481  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2482
2483  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2484  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2485  memmove(&(mc->__ss),&ts,sizeof(ts));
2486
2487  thread_state_count = PPC_FLOAT_STATE_COUNT;
2488  thread_get_state(thread,
2489                   PPC_FLOAT_STATE,
2490                   (thread_state_t)&(mc->__fs),
2491                   &thread_state_count);
2492
2493
2494#ifdef PPC64
2495  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2496#else
2497  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2498#endif
2499  thread_get_state(thread,
2500#ifdef PPC64
2501                   PPC_EXCEPTION_STATE64,
2502#else
2503                   PPC_EXCEPTION_STATE,
2504#endif
2505                   (thread_state_t)&(mc->__es),
2506                   &thread_state_count);
2507
2508
2509  UC_MCONTEXT(pseudosigcontext) = mc;
2510  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2511  stackp -= C_LINKAGE_LEN;
2512  *(natural *)ptr_from_lispobj(stackp) = backlink;
2513  if (new_stack_top) {
2514    *new_stack_top = stackp;
2515  }
2516  return pseudosigcontext;
2517}
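
/* The frame built above, sketched (from the code, not from any ABI
   document) with higher addresses at the top:

       +------------------------------+ <- original __r1
       | red zone (C_REDZONE_LEN)     |
       +------------------------------+
       | ExceptionInformation         | <- pseudosigcontext
       +------------------------------+
       | MCONTEXT_T (__ss, __fs,      | <- mc = UC_MCONTEXT(pseudosigcontext)
       |   __es thread states)        |
       +------------------------------+
       | param save (C_PARAMSAVE_LEN) |
       +------------------------------+
       | linkage (C_LINKAGE_LEN);     | <- *new_stack_top, the new __r1;
       |   word 0 = backlink          |    its first word points at the
       +------------------------------+    original stack pointer.     */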
2518
2519/*
2520  This code sets up the user thread so that it executes a "pseudo-signal
2521  handler" function when it resumes.  Create a linux sigcontext struct
2522  on the thread's stack and pass it as an argument to the pseudo-signal
2523  handler.
2524
2525  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2526  which will restore the thread's context.
2527
2528  If the handler invokes code that throws (or otherwise never sigreturn()'s
2529  to the context), that's fine.
2530
2531  Actually, check that: throw (and variants) may need to be careful and
2532  pop the tcr's xframe list until it's younger than any frame being
2533  entered.
2534*/
2535
2536int
2537setup_signal_frame(mach_port_t thread,
2538                   void *handler_address,
2539                   int signum,
2540                   int code,
2541                   TCR *tcr)
2542{
2543#ifdef PPC64
2544  ppc_thread_state64_t ts;
2545#else
2546  ppc_thread_state_t ts;
2547#endif
2548  mach_msg_type_number_t thread_state_count;
2549  ExceptionInformation *pseudosigcontext;
2550  int i, j, old_valence = tcr->valence;
2551  kern_return_t result;
2552  natural stackp;
2553
2554#ifdef DEBUG_MACH_EXCEPTIONS
2555  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2556#endif
2557  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2558  pseudosigcontext->uc_onstack = 0;
2559  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2560  tcr->pending_exception_context = pseudosigcontext;
2561  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2562 
2563
2564  /*
2565     We've now created a sigcontext on the thread's
2566     stack.  Set things up so that we call the handler (with appropriate
2567     args) when the thread's resumed.
2568  */
2569
2570  ts.__srr0 = (natural) handler_address;
2571  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2572  ts.__r1 = stackp;
2573  ts.__r3 = signum;
2574  ts.__r4 = (natural)pseudosigcontext;
2575  ts.__r5 = (natural)tcr;
2576  ts.__r6 = (natural)old_valence;
2577  ts.__lr = (natural)pseudo_sigreturn;
2578
2579
2580#ifdef PPC64
2581  ts.__r13 = xpGPR(pseudosigcontext,13);
2582  thread_set_state(thread,
2583                   PPC_THREAD_STATE64,
2584                   (thread_state_t)&ts,
2585                   PPC_THREAD_STATE64_COUNT);
2586#else
2587  thread_set_state(thread, 
2588                   MACHINE_THREAD_STATE,
2589                   (thread_state_t)&ts,
2590                   MACHINE_THREAD_STATE_COUNT);
2591#endif
2592#ifdef DEBUG_MACH_EXCEPTIONS
2593  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2594#endif
2595  return 0;
2596}
2597
2598
2599void
2600pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2601{
2602  signal_handler(signum, NULL, context, tcr, old_valence);
2603} 
2604
2605
2606int
2607thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2608{
2609#ifdef PPC64
2610  ppc_thread_state64_t ts;
2611#else
2612  ppc_thread_state_t ts;
2613#endif
2614  mach_msg_type_number_t thread_state_count;
2615
2616#ifdef PPC64
2617  thread_state_count = PPC_THREAD_STATE64_COUNT;
2618#else
2619  thread_state_count = PPC_THREAD_STATE_COUNT;
2620#endif
2621  thread_get_state(thread, 
2622#ifdef PPC64
2623                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2624#else
2625                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2626#endif
2627                   (thread_state_t)&ts,
2628                   &thread_state_count);
2629  if (enabled) {
2630    ts.__srr1 |= MSR_FE0_FE1_MASK;
2631  } else {
2632    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2633  }
2634  /*
2635     Hack-o-rama warning (isn't it about time for such a warning?):
2636     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2637     Our handler for lisp's use of pthread_kill() pushes a phony
2638     lisp frame on the stack and forces the context to resume at
2639     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2640     lisp frame contains the -real- address that process_interrupt
2641     should have returned to, and the fact that it's in a lisp
2642     frame should convince the GC to notice that address if it
2643     runs in the tiny time window between returning from our
2644     interrupt handler and ... here.
2645     If the top frame on the stack is a lisp frame, discard it
2646     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2647     just adjust ts.srr0 to skip over the UUO.
2648  */
2649  {
2650    lisp_frame *tos = (lisp_frame *)ts.__r1,
2651      *next_frame = tos->backlink;
2652   
2653    if (tos == (next_frame -1)) {
2654      ts.__srr0 = tos->savelr;
2655      ts.__r1 = (LispObj) next_frame;
2656    } else {
2657      ts.__srr0 += 4;
2658    }
2659  }
2660  thread_set_state(thread, 
2661#ifdef PPC64
2662                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2663#else
2664                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2665#endif
2666                   (thread_state_t)&ts,
2667#ifdef PPC64
2668                   PPC_THREAD_STATE64_COUNT
2669#else
2670                   PPC_THREAD_STATE_COUNT
2671#endif
2672                   );
2673
2674  return 0;
2675}
2676
2677/*
2678  This function runs in the exception handling thread.  It's
2679  called (by this precise name) from the library function "exc_server()"
2680  when the thread's exception ports are set up.  (exc_server() is called
2681  via mach_msg_server(), which waits for and dispatches on exception
2682  messages from the Mach kernel; openmcl_exc_server() below plays that role.)
2683
2684  This checks to see if the exception was caused by a pseudo_sigreturn()
2685  UUO; if so, it arranges for the thread to have its state restored
2686  from the specified context.
2687
2688  Otherwise, it tries to map the exception to a signal number and
2689  arranges that the thread run a "pseudo signal handler" to handle
2690  the exception.
2691
2692  Some exceptions could and should be handled here directly.
2693*/
2694
2695kern_return_t
2696catch_exception_raise(mach_port_t exception_port,
2697                      mach_port_t thread,
2698                      mach_port_t task, 
2699                      exception_type_t exception,
2700                      exception_data_t code_vector,
2701                      mach_msg_type_number_t code_count)
2702{
2703  int signum = 0, code = *code_vector, code1;
2704  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2705  kern_return_t kret;
2706
2707#ifdef DEBUG_MACH_EXCEPTIONS
2708  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2709#endif
2710
2711  if (
2712#if USE_MACH_EXCEPTION_LOCK
2713    pthread_mutex_trylock(mach_exception_lock) == 0
2714#else
2715    1
2716#endif
2717    ) {
2718    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2719      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2720    } 
2721    if ((exception == EXC_BAD_INSTRUCTION) &&
2722        (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2723        (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2724         (code1 == (int)enable_fp_exceptions) ||
2725         (code1 == (int)disable_fp_exceptions))) {
2726      if (code1 == (int)pseudo_sigreturn) {
2727        kret = do_pseudo_sigreturn(thread, tcr);
2728#if 0
2729      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2730#endif
2731       
2732      } else if (code1 == (int)enable_fp_exceptions) {
2733        kret = thread_set_fp_exceptions_enabled(thread, true);
2734      } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2735    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2736      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2737      kret = 17;
2738    } else {
2739      switch (exception) {
2740      case EXC_BAD_ACCESS:
2741        signum = SIGSEGV;
2742        break;
2743       
2744      case EXC_BAD_INSTRUCTION:
2745        signum = SIGILL;
2746        break;
2747     
2748      case EXC_SOFTWARE:
2749        if (code == EXC_PPC_TRAP) {
2750          signum = SIGTRAP;
2751        }
2752        break;
2753     
2754      case EXC_ARITHMETIC:
2755        signum = SIGFPE;
2756        break;
2757
2758      default:
2759        break;
2760      }
2761      if (signum) {
2762        kret = setup_signal_frame(thread,
2763                                  (void *)pseudo_signal_handler,
2764                                  signum,
2765                                  code,
2766                                  tcr);
2767#if 0
2768      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2769#endif
2770
2771      } else {
2772        kret = 17;
2773      }
2774    }
2775#if USE_MACH_EXCEPTION_LOCK
2776#ifdef DEBUG_MACH_EXCEPTIONS
2777    fprintf(stderr, "releasing Mach exception lock in exception thread\n");
2778#endif
2779    pthread_mutex_unlock(mach_exception_lock);
2780#endif
2781  } else {
2782    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2783#if 0
2784    fprintf(stderr, "deferring pending exception in 0x%x\n", tcr);
2785#endif
2786    kret = 0;
2787    if (tcr == gc_tcr) {
2788      int i;
2789      write(1, "exception in GC thread. Sleeping for 60 seconds\n",sizeof("exception in GC thread. Sleeping for 60 seconds\n")-1);
2790      for (i = 0; i < 60; i++) {
2791        sleep(1);
2792      }
2793      _exit(EX_SOFTWARE);
2794    }
2795  }
2796  return kret;
2797}
2798
2799
2800
2801typedef struct {
2802  mach_msg_header_t Head;
2803  /* start of the kernel processed data */
2804  mach_msg_body_t msgh_body;
2805  mach_msg_port_descriptor_t thread;
2806  mach_msg_port_descriptor_t task;
2807  /* end of the kernel processed data */
2808  NDR_record_t NDR;
2809  exception_type_t exception;
2810  mach_msg_type_number_t codeCnt;
2811  integer_t code[2];
2812  mach_msg_trailer_t trailer;
2813} exceptionRequest;
2814
2815
2816boolean_t
2817openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2818{
2819  static NDR_record_t _NDR = {0};
2820  kern_return_t handled;
2821  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2822  exceptionRequest *req = (exceptionRequest *) in;
2823
2824  reply->NDR = _NDR;
2825
2826  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2827  out->msgh_remote_port = in->msgh_remote_port;
2828  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2829  out->msgh_local_port = MACH_PORT_NULL;
2830  out->msgh_id = in->msgh_id+100;
2831
2832  /* Could handle other exception flavors in the range 2401-2403 */
2833
2834
2835  if (in->msgh_id != 2401) {
2836    reply->RetCode = MIG_BAD_ID;
2837    return FALSE;
2838  }
2839  handled = catch_exception_raise(req->Head.msgh_local_port,
2840                                  req->thread.name,
2841                                  req->task.name,
2842                                  req->exception,
2843                                  req->code,
2844                                  req->codeCnt);
2845  reply->RetCode = handled;
2846  return TRUE;
2847}
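
/* A note on the MIG conventions used above (our understanding, not
   authoritative): request msgh_id 2401 is the exception_raise
   message, and its reply id is the request id plus 100 (2501).  A
   RetCode of KERN_SUCCESS tells the kernel to resume the faulting
   thread; a nonzero RetCode (such as the 17 returned when
   propagating an exception) presumably makes the kernel treat the
   exception as unhandled and pass it along to any other handlers. */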
2848
2849/*
2850  The initial function for an exception-handling thread.
2851*/
2852
2853void *
2854exception_handler_proc(void *arg)
2855{
2856  extern boolean_t exc_server();
2857  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2858
2859  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2860  /* Should never return. */
2861  abort();
2862}
2863
2864
2865
2866mach_port_t
2867mach_exception_port_set()
2868{
2869  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2870  kern_return_t kret; 
2871  if (__exception_port_set == MACH_PORT_NULL) {
2872#if USE_MACH_EXCEPTION_LOCK
2873    mach_exception_lock = &_mach_exception_lock;
2874    pthread_mutex_init(mach_exception_lock, NULL);
2875#endif
2876    kret = mach_port_allocate(mach_task_self(),
2877                              MACH_PORT_RIGHT_PORT_SET,
2878                              &__exception_port_set);
2879    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2880    create_system_thread(0,
2881                         NULL,
2882                         exception_handler_proc, 
2883                         (void *)((natural)__exception_port_set));
2884  }
2885  return __exception_port_set;
2886}
2887
2888/*
2889  Set up a new thread to handle those exceptions specified by
2890  the mask "which".  This involves creating a special Mach
2891  message port, telling the Mach kernel to send exception
2892  messages for the calling thread to that port, and setting
2893  up a handler thread which listens for and responds to
2894  those messages.
2895
2896*/
2897
2898/*
2899  Establish the lisp thread's TCR as its exception port, and determine
2900  whether any other ports have been established by foreign code for
2901  exceptions that lisp cares about.
2902
2903  If this happens at all, it should happen on return from foreign
2904  code and on entry to lisp code via a callback.
2905
2906  This is a lot of trouble (and overhead) to support Java, or other
2907  embeddable systems that clobber their caller's thread exception ports.
2908 
2909*/
2910kern_return_t
2911tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2912{
2913  kern_return_t kret;
2914  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2915  int i;
2916  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2917  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2918  exception_mask_t mask = 0;
2919
2920  kret = thread_swap_exception_ports(thread,
2921                                     LISP_EXCEPTIONS_HANDLED_MASK,
2922                                     lisp_port,
2923                                     EXCEPTION_DEFAULT,
2924                                     THREAD_STATE_NONE,
2925                                     fxs->masks,
2926                                     &n,
2927                                     fxs->ports,
2928                                     fxs->behaviors,
2929                                     fxs->flavors);
2930  if (kret == KERN_SUCCESS) {
2931    fxs->foreign_exception_port_count = n;
2932    for (i = 0; i < n; i ++) {
2933      foreign_port = fxs->ports[i];
2934
2935      if ((foreign_port != lisp_port) &&
2936          (foreign_port != MACH_PORT_NULL)) {
2937        mask |= fxs->masks[i];
2938      }
2939    }
2940    tcr->foreign_exception_status = (int) mask;
2941  }
2942  return kret;
2943}
2944
2945kern_return_t
2946tcr_establish_lisp_exception_port(TCR *tcr)
2947{
2948  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2949}
2950
2951/*
2952  Do this when calling out to or returning from foreign code, if
2953  any conflicting foreign exception ports were established when we
2954  last entered lisp code.
2955*/
2956kern_return_t
2957restore_foreign_exception_ports(TCR *tcr)
2958{
2959  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2960 
2961  if (m) {
2962    MACH_foreign_exception_state *fxs  = 
2963      (MACH_foreign_exception_state *) tcr->native_thread_info;
2964    int i, n = fxs->foreign_exception_port_count;
2965    exception_mask_t tm;
2966
2967    for (i = 0; i < n; i++) {
2968      if ((tm = fxs->masks[i]) & m) {
2969        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2970                                   tm,
2971                                   fxs->ports[i],
2972                                   fxs->behaviors[i],
2973                                   fxs->flavors[i]);
2974      }
2975    }
2976  }
2977  return KERN_SUCCESS;
2978}
2979
2980/*
2981  This assumes that a Mach port (to be used as the thread's exception port) whose
2982  "name" matches the TCR's 32-bit address has already been allocated.
2983*/
2984
2985kern_return_t
2986setup_mach_exception_handling(TCR *tcr)
2987{
2988  mach_port_t
2989    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2990    target_thread = pthread_mach_thread_np((pthread_t)ptr_from_lispobj(tcr->osid)),
2991    task_self = mach_task_self();
2992  kern_return_t kret;
2993
2994  kret = mach_port_insert_right(task_self,
2995                                thread_exception_port,
2996                                thread_exception_port,
2997                                MACH_MSG_TYPE_MAKE_SEND);
2998  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2999
3000  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3001  if (kret == KERN_SUCCESS) {
3002    mach_port_t exception_port_set = mach_exception_port_set();
3003
3004    kret = mach_port_move_member(task_self,
3005                                 thread_exception_port,
3006                                 exception_port_set);
3007  }
3008  return kret;
3009}
3010
3011void
3012darwin_exception_init(TCR *tcr)
3013{
3014  void tcr_monitor_exception_handling(TCR*, Boolean);
3015  kern_return_t kret;
3016  MACH_foreign_exception_state *fxs = 
3017    calloc(1, sizeof(MACH_foreign_exception_state));
3018 
3019  tcr->native_thread_info = (void *) fxs;
3020
3021  if ((kret = setup_mach_exception_handling(tcr))
3022      != KERN_SUCCESS) {
3023    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
3024    terminate_lisp();
3025  }
3026  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
3027  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
3028}
3029
3030/*
3031  The tcr is the "name" of the corresponding thread's exception port.
3032  Destroying the port should remove it from all port sets of which it's
3033  a member (notably, the exception port set.)
3034*/
3035void
3036darwin_exception_cleanup(TCR *tcr)
3037{
3038  void *fxs = tcr->native_thread_info;
3039  extern Boolean use_mach_exception_handling;
3040
3041  if (fxs) {
3042    tcr->native_thread_info = NULL;
3043    free(fxs);
3044  }
3045  if (use_mach_exception_handling) {
3046    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3047    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3048  }
3049}
3050
3051
3052Boolean
3053suspend_mach_thread(mach_port_t mach_thread)
3054{
3055  kern_return_t status;
3056  Boolean aborted = false;
3057 
3058  do {
3059    aborted = false;
3060    status = thread_suspend(mach_thread);
3061    if (status == KERN_SUCCESS) {
3062      status = thread_abort_safely(mach_thread);
3063      if (status == KERN_SUCCESS) {
3064        aborted = true;
3065      } else {
3066        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3067        thread_resume(mach_thread);
3068      }
3069    } else {
3070      return false;
3071    }
3072  } while (! aborted);
3073  return true;
3074}
3075
3076/*
3077  Only do this if pthread_kill indicated that the pthread isn't
3078  listening to signals anymore, as can happen as soon as pthread_exit()
3079  is called on Darwin.  The thread could still call out to lisp as it
3080  is exiting, so we need another way to suspend it in this case.
3081*/
3082Boolean
3083mach_suspend_tcr(TCR *tcr)
3084{
3085  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3086  ExceptionInformation *pseudosigcontext;
3087  Boolean result = false;
3088 
3089  result = suspend_mach_thread(mach_thread);
3090  if (result) {
3091    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3092    pseudosigcontext->uc_onstack = 0;
3093    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3094    tcr->suspend_context = pseudosigcontext;
3095  }
3096  return result;
3097}
3098
3099void
3100mach_resume_tcr(TCR *tcr)
3101{
3102  ExceptionInformation *xp;
3103  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3104 
3105  xp = tcr->suspend_context;
3106#ifdef DEBUG_MACH_EXCEPTIONS
3107  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3108          tcr, tcr->pending_exception_context);
3109#endif
3110  tcr->suspend_context = NULL;
3111  restore_mach_thread_state(mach_thread, xp);
3112#ifdef DEBUG_MACH_EXCEPTIONS
3113  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3114          tcr, tcr->pending_exception_context);
3115#endif
3116  thread_resume(mach_thread);
3117}
3118
3119void
3120fatal_mach_error(char *format, ...)
3121{
3122  va_list args;
3123  char s[512];
3124 
3125
3126  va_start(args, format);
3127  vsnprintf(s, sizeof(s),format, args);
3128  va_end(args);
3129
3130  Fatal("Mach error", s);
3131}
3132
3133void
3134pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3135{
3136  interrupt_handler(signum, NULL, context);
3137}
3138
3139int
3140mach_raise_thread_interrupt(TCR *target)
3141{
3142  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3143  kern_return_t kret;
3144  Boolean result = false;
3145  TCR *current = get_tcr(false);
3146  thread_basic_info_data_t info; 
3147  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3148
3149  LOCK(lisp_global(TCR_AREA_LOCK), current);
3150#if USE_MACH_EXCEPTION_LOCK
3151  pthread_mutex_lock(mach_exception_lock);
3152#endif
3153
3154  if (suspend_mach_thread(mach_thread)) {
3155    if (thread_info(mach_thread,
3156                    THREAD_BASIC_INFO,
3157                    (thread_info_t)&info,
3158                    &info_count) == KERN_SUCCESS) {
3159      if (info.suspend_count == 1) {
3160        if ((target->valence == TCR_STATE_LISP) &&
3161            (!target->unwinding) &&
3162            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3163          kret = setup_signal_frame(mach_thread,
3164                                    (void *)pseudo_interrupt_handler,
3165                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3166                                    0,
3167                                    target);
3168          if (kret == KERN_SUCCESS) {
3169            result = true;
3170          }
3171        }
3172      }
3173    }
3174    if (! result) {
3175      target->interrupt_pending = 1 << fixnumshift;
3176    }
3177    thread_resume(mach_thread);
3178   
3179  }
3180#if USE_MACH_EXCEPTION_LOCK
3181  pthread_mutex_unlock(mach_exception_lock);
3182#endif
3183  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3184  return 0;
3185}
3186
3187#endif