source: trunk/ccl/lisp-kernel/ppc-exceptions.c @ 7137

Last change on this file since 7137 was 7137, checked in by gb, 13 years ago

Lose "HeapHighWaterMark?", which was supposed to allow us to track the
high end of a range of pages that'd been written to and not freed (so
we'd zero them lazily when they were reallocated.) Such a scheme would
really need to track both ends of such a range, and the old scheme wound
up being overly zealous and often zeroed pages that were already zeroed.
Zero the range between the old free pointer and the new one after each
GC, instead.

At least partly address ticket:101, by doing GROW-DYNAMIC-AREA more
carefully.

/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
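
/* Illustrative note (not in the original source): the two forms matched
   above correspond to allocation sequences like

       subf allocptr,nbytes,allocptr   ; allocptr -= nbytes (byte count in RA)
       addi allocptr,allocptr,-N       ; N a small signed 16-bit constant

   so the displacement is read out of RA in the subf case and out of the
   sign-extended immediate in the addi case. */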

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
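
/* Sketch (assumed, for illustration only): a typical inline cons sequence
   that the loop above replays looks something like

       <allocation trap>               ; allocptr decremented, tagged cons
       stw rcar,car_offset(allocptr)   ; initialize the car
       stw rcdr,cdr_offset(allocptr)   ; initialize the cdr
       mr  result,allocptr             ; publish the tagged pointer
       <untag allocptr>                ; UNTAG_ALLOCPTR_INSTRUCTION

   Each store is performed against the already-reserved cell, and the PC
   is finally advanced past the untagging instruction. */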

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
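
/* Note (illustrative): allocation proceeds downward, with allocptr
   decreasing from a segment's high end toward allocbase (see
   new_heap_segment below), so the bytes consumed since the last sync
   are (last - current), a non-negative quantity when allocptr moved. */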

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
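    /* Note (illustrative, assuming align_to_power_of_2 aligns to a
       multiple of 2^log2_heap_segment_size): the expression above pads
       the requested threshold so that the effective threshold is always
       a whole number of heap segments. */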
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when GC was disabled.  That failed,
         so try a full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      if (selector == GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE) {
        LispObj aligned_arg = align_to_power_of_2(arg, log2_nbits_in_word);
        signed_natural
          delta_dnodes = ((signed_natural) aligned_arg) -
          ((signed_natural) tenured_area->static_dnodes);
        change_hons_area_size_from_xp(xp, delta_dnodes*dnode_size);
        xpGPR(xp, imm0) = tenured_area->static_dnodes;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack named by "reg" just overflowed its soft limit.  Signal an
     error_stack_overflow condition; the stack stays in soft-overflow
     mode until all stacks are back well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
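
/* Note (illustrative): when extension is allowed, the loop above backs off
   geometrically.  It first asks for the needed room plus the full GC
   threshold as slack, then retries with half as much slack (re-aligned to
   the allocation quantum), giving up once the slack would drop below 4MB.
   With GC inhibited it asks for no slack and fails after a single attempt. */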

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

int
gc_like_from_xp(ExceptionInformation *xp,
                int(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  ExceptionInformation* other_xp;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
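  /* Note (illustrative, based on how the count is used here): negating a
     positive GC_INHIBIT_COUNT above records that a GC was requested while
     GC was inhibited, so the deferred collection can be noticed and run
     when the count is restored. */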
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns the number of bytes freed by invoking the GC. */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}
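
/* Note (illustrative): the return value counts both the space reclaimed in
   place (oldfree - newfree) and any change in the area's size
   (newend - oldend); a heap that shrank contributes negatively through
   the second term. */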

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

int
change_hons_area_size_from_xp(ExceptionInformation *xp, signed_natural delta_in_bytes)
{
  return gc_like_from_xp(xp, change_hons_area_size, delta_in_bytes);
}

protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   };

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux, and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
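
/* Note (illustrative): (1 << 25) selects DSISR bit 6 in IBM big-endian bit
   numbering, which the architecture sets for faults caused by store
   (rather than load) accesses. */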

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}

int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
  }
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  }
  return false;
}
#endif

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes; the page will be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational
  references, since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}

OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond. */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

int
altivec_present = 1;

/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* The rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
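  /* Note (illustrative): ((1 << 6) - 2) == 0x3e masks the minor-opcode
     field in bits 1..5 while ignoring bit 0 (the Rc bit); 22 is the minor
     opcode shared by fsqrt and fsqrts. */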
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  unsigned oldMQ;
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
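
/* Note (illustrative): xpPC yields an opcode (32-bit word) pointer, so the
   byte-sized delta is converted to a word count before being added. */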

/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float).
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;

  case UUO_INTERR:
    status = handle_error(xp, errnum, rb, 0,  where);
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the PC itself.
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
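
/* Note (illustrative): in the code-vector case, the index computed above
   is a word offset into the code vector's data (misc_data_offset skips
   the header), so the PC can be reconstructed as
   code_vector + misc_data_offset + (index << 2). */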

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  sigset_t mask;
  natural  callback_ptr, i;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* Callback to (symbol-value cmain) if it is a macptr,
   otherwise report the cause and function name to the console.
   Returns noErr if the exception was handled OK. */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  unsigned  instr, err_arg1 = 0, err_arg2 = 0, err_arg3 = 0;
  int       ra, rs, fn_reg = 0;
  char *    error_msg = NULL;
  char      name[kNameBufLen];
  LispObj   cmain = nrs_CMAIN.vcell;
  Boolean   event_poll_p = false;
  int old_interrupt_level = 0;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
  */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}

/* Look at up to TRAP_LOOKUP_TRIES instructions before the trap
   instruction for a pattern.  Stop if a subtag_code_vector header is
   encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}

void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Decide whether a trap instruction is conditional, i.e. whether it
   might not trap.  */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI): unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let the
                                   debugger have it */
}
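
/* Illustrative examples (not from the original source): "tw 31,r0,r0"
   sets all five TO bits and compares a register with itself, so it always
   traps (unconditional); "twi TO_GT,nargs,0" traps only when nargs > 0,
   so it's conditional. */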

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   pname;
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}

/*
   The current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}

void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       to preempt us until we've set the TCR's xframe slot to include
       the current exception context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler.)
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
1784
1785/*
1786  If it looks like we're in the middle of an atomic operation, make
1787  it seem as if that operation is either complete or hasn't started
1788  yet.
1789
1790  The cases handled include:
1791
1792  a) storing into a newly-allocated lisp frame on the stack.
1793  b) marking a newly-allocated TSP frame as containing "raw" data.
1794  c) consing: the GC has its own ideas about how this should be
1795     handled, but other callers would be best advised to back
1796     up or move forward, according to whether we're in the middle
1797     of allocating a cons cell or allocating a uvector.
1798  d) a STMW to the vsp
1799  e) EGC write-barrier subprims.
1800*/
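
/* As a sketch of how the consing case is used by a caller (this
   mirrors interrupt_handler() later in this file; nothing here is
   new mechanism):

       signed_natural disp = 0;
       pc_luser_xp(context, tcr, &disp);
       (... run the interrupt handler ...)
       if (disp) {
         xpGPR(context,allocptr) -= disp;
       }

   i.e., allocptr is temporarily repaired around the handler and the
   allocation trap is re-armed when the handler returns. */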
1801
1802extern opcode
1803  egc_write_barrier_start,
1804  egc_write_barrier_end, 
1805  egc_store_node_conditional, 
1806  egc_set_hash_key,
1807  egc_gvset,
1808  egc_rplaca,
1809  egc_rplacd;
1810
1811
1812extern opcode ffcall_return_window, ffcall_return_window_end;
1813
1814void
1815pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1816{
1817  pc program_counter = xpPC(xp);
1818  opcode instr = *program_counter;
1819  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1820  LispObj cur_allocptr = xpGPR(xp, allocptr);
1821  int allocptr_tag = fulltag_of(cur_allocptr);
1822 
1823
1824
1825  if ((program_counter < &egc_write_barrier_end) && 
1826      (program_counter >= &egc_write_barrier_start)) {
1827    LispObj *ea = 0, val, root;
1828    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1829    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1830
1831    if (program_counter >= &egc_store_node_conditional) {
1832      if ((program_counter == &egc_store_node_conditional) || ! (xpCCR(xp) & 0x20000000)) {
1833        /* The conditional store either hasn't been attempted yet, or
1834           has failed.  No need to adjust the PC, or do memoization. */
1835        return;
1836      }
1837      val = xpGPR(xp,arg_z);
1838      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1839      xpGPR(xp,arg_z) = t_value;
1840      need_store = false;
1841    } else if (program_counter >= &egc_set_hash_key) {
1842      root = xpGPR(xp,arg_x);
1843      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1844      need_memoize_root = true;
1845    } else if (program_counter >= &egc_gvset) {
1846      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1847      val = xpGPR(xp,arg_z);
1848    } else if (program_counter >= &egc_rplacd) {
1849      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1850      val = xpGPR(xp,arg_z);
1851    } else {                      /* egc_rplaca */
1852      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1853      val = xpGPR(xp,arg_z);
1854    }
1855    if (need_store) {
1856      *ea = val;
1857    }
1858    if (need_check_memo) {
1859      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1860      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1861          ((LispObj)ea < val)) {
1862        atomic_set_bit(refbits, bitnumber);
1863        if (need_memoize_root) {
1864          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1865          atomic_set_bit(refbits, bitnumber);
1866        }
1867      }
1868    }
1869    set_xpPC(xp, xpLR(xp));
1870    return;
1871  }
1872
1873
1874  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
1875    LispObj tsp_val = xpGPR(xp,tsp);
1876   
1877    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1878    adjust_exception_pc(xp, 4);
1879    return;
1880  }
1881 
1882  if (frame->backlink == (frame+1)) {
1883    if (
1884#ifdef PPC64
1885        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1886        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1887#else
1888        (major_opcode_p(instr, major_opcode_STW)) && 
1889#endif
1890        (RA_field(instr) == sp) &&
1891        /* There are a few places in the runtime that store into
1892           a previously-allocated frame atop the stack when
1893           throwing values around.  We only care about the case
1894           where the frame was newly allocated, in which case
1895           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1896           a few instructions before the current program counter.
1897           (The whole point here is that a newly allocated frame
1898           might contain random values that we don't want the
1899           GC to see; a previously allocated frame should already
1900           be completely initialized.)
1901        */
1902        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1903         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1904         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1905#ifdef PPC64
1906      int disp = DS_field(instr);
1907#else     
1908      int disp = D_field(instr);
1909#endif
1910
1911
1912      if (disp < (4*node_size)) {
1913#if 0
1914        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1915#endif
1916        frame->savevsp = 0;
1917        if (disp < (3*node_size)) {
1918          frame->savelr = 0;
1919          if (disp == node_size) {
1920            frame->savefn = 0;
1921          }
1922        }
1923      }
1924      return;
1925    }
1926  }
1927
1928  if (allocptr_tag != tag_fixnum) {
1929    signed_natural disp = allocptr_displacement(xp);
1930
1931    if (disp) {
1932      /* Being architecturally "at" the alloc trap doesn't tell
1933         us much (in particular, it doesn't tell us whether
1934         or not the thread has committed to taking the trap
1935         and is waiting for the exception lock (or waiting
1936         for the Mach exception thread to tell it how bad
1937         things are) or is about to execute a conditional
1938         trap.)
1939         Regardless of which case applies, we want the
1940         other thread to take (or finish taking) the
1941         trap, and we don't want it to consider its
1942         current allocptr to be valid.
1943         The difference between this case (suspend other
1944         thread for GC) and the previous case (suspend
1945         current thread for interrupt) is solely a
1946         matter of what happens after we leave this
1947         function: some non-current thread will stay
1948         suspended until the GC finishes, then take
1949         (or start processing) the alloc trap.   The
1950         current thread will go off and do PROCESS-INTERRUPT
1951         or something, and may return from the interrupt
1952         and need to finish the allocation that got interrupted.
1953      */
1954
1955      if (alloc_disp) {
1956        *alloc_disp = disp;
1957        xpGPR(xp,allocptr) += disp;
1958        /* Leave the PC at the alloc trap.  When the interrupt
1959           handler returns, it'll decrement allocptr by disp
1960           and the trap may or may not be taken.
1961        */
1962      } else {
1963        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1964        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
1965        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1966      }
1967    } else {
1968#ifdef DEBUG
1969      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
1970#endif
1971      /* If we're already past the alloc_trap, finish allocating
1972         the object. */
1973      if (allocptr_tag == fulltag_cons) {
1974        finish_allocating_cons(xp);
1975#ifdef DEBUG
1976          fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
1977                  tcr);
1978#endif
1979      } else {
1980        if (allocptr_tag == fulltag_misc) {
1981#ifdef DEBUG
1982          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
1983                  tcr);
1984#endif
1985          finish_allocating_uvector(xp);
1986        } else {
1987          Bug(xp, "what's being allocated here?");
1988        }
1989      }
1990      /* Whatever we finished allocating, reset allocptr/allocbase to
1991         VOID_ALLOCPTR */
1992      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
1993    }
1994    return;
1995  }
1996
1997  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
1998    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
1999    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2000#if 0
2001        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2002#endif
2003
2004    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2005      deref(frame,idx) = 0;
2006    }
2007    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2008    return;
2009  }
2010
2011#ifndef PPC64
2012  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2013      (RA_field(instr) == vsp)) {
2014    int r;
2015    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2016   
2017    for (r = RS_field(instr); r <= 31; r++) {
2018      *vspptr++ = xpGPR(xp,r);
2019    }
2020    adjust_exception_pc(xp, 4);
2021  }
2022#endif
2023}
2024
2025void
2026interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2027{
2028  TCR *tcr = get_interrupt_tcr(false);
2029  if (tcr) {
2030    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2031      tcr->interrupt_pending = 1 << fixnumshift;
2032    } else {
2033      LispObj cmain = nrs_CMAIN.vcell;
2034
2035      if ((fulltag_of(cmain) == fulltag_misc) &&
2036          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2037        /*
2038           This thread can (allegedly) take an interrupt now.
2039           It's tricky to do that if we're executing
2040           foreign code (especially Linuxthreads code, much
2041           of which isn't reentrant.)
2042           If we're unwinding the stack, we also want to defer
2043           the interrupt.
2044        */
2045        if ((tcr->valence != TCR_STATE_LISP) ||
2046            (tcr->unwinding != 0)) {
2047          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2048        } else {
2049          xframe_list xframe_link;
2050          int old_valence;
2051          signed_natural disp=0;
2052         
2053          pc_luser_xp(context, tcr, &disp);
2054          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2055          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2056#ifdef DEBUG
2057          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2058#endif
2059          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2060          if (disp) {
2061            xpGPR(context,allocptr) -= disp;
2062          }
2063          unlock_exception_lock_in_handler(tcr);
2064#ifdef DEBUG
2065          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2066#endif
2067          exit_signal_handler(tcr, old_valence);
2068        }
2069      }
2070    }
2071  }
2072#ifdef DARWIN
2073    DarwinSigReturn(context);
2074#endif
2075}
2076
2077
2078
2079void
2080install_signal_handler(int signo, void *handler)
2081{
2082  struct sigaction sa;
2083 
2084  sa.sa_sigaction = (void *)handler;
2085  sigfillset(&sa.sa_mask);
2086  sa.sa_flags = 
2087    SA_RESTART
2088    | SA_SIGINFO
2089#ifdef DARWIN
2090#ifdef PPC64
2091    | SA_64REGSET
2092#endif
2093#endif
2094    ;
2095
2096  sigaction(signo, &sa, NULL);
2097}
2098
2099void
2100install_pmcl_exception_handlers()
2101{
2102#ifdef DARWIN
2103  extern Boolean use_mach_exception_handling;
2104#endif
2105
2106  Boolean install_signal_handlers_for_exceptions =
2107#ifdef DARWIN
2108    !use_mach_exception_handling
2109#else
2110    true
2111#endif
2112    ;
2113  if (install_signal_handlers_for_exceptions) {
2114    extern int no_sigtrap;
2115    install_signal_handler(SIGILL, (void *)signal_handler);
2116    if (no_sigtrap != 1) {
2117      install_signal_handler(SIGTRAP, (void *)signal_handler);
2118    }
2119    install_signal_handler(SIGBUS,  (void *)signal_handler);
2120    install_signal_handler(SIGSEGV, (void *)signal_handler);
2121    install_signal_handler(SIGFPE, (void *)signal_handler);
2122  }
2123 
2124  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2125                         (void *)interrupt_handler);
2126  signal(SIGPIPE, SIG_IGN);
2127}
2128
2129void
2130quit_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2131{
2132  TCR *tcr = get_tcr(false);
2133  area *a;
2134  sigset_t mask;
2135 
2136  sigemptyset(&mask);
2137
2138  if (tcr) {
2139    tcr->valence = TCR_STATE_FOREIGN;
2140    a = tcr->vs_area;
2141    if (a) {
2142      a->active = a->high;
2143    }
2144    a = tcr->ts_area;
2145    if (a) {
2146      a->active = a->high;
2147    }
2148    a = tcr->cs_area;
2149    if (a) {
2150      a->active = a->high;
2151    }
2152  }
2153 
2154  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2155  pthread_exit(NULL);
2156}
2157
2158void
2159thread_signal_setup()
2160{
2161  thread_suspend_signal = SIG_SUSPEND_THREAD;
2162  thread_resume_signal = SIG_RESUME_THREAD;
2163
2164  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2165  install_signal_handler(thread_resume_signal,  (void *) suspend_resume_handler);
2166  install_signal_handler(SIGQUIT, (void *)quit_handler);
2167}
2168
2169
2170
2171void
2172unprotect_all_areas()
2173{
2174  protected_area_ptr p;
2175
2176  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2177    unprotect_area(p);
2178  }
2179}
2180
2181/*
2182  A binding subprim has just done "twlle limit_regno,idx_regno" and
2183  the trap's been taken.  Extend the tcr's tlb so that the index will
2184  be in bounds and the new limit will be on a page boundary, filling
2185  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2186  the tcr fields and the registers in the xp and return true if this
2187  all works, false otherwise.
2188
2189  Note that the tlb was allocated via malloc, so realloc can do some
2190  of the hard work.
2191*/
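/* A worked example (assuming align_to_power_of_2(n,12) rounds n up
   to a multiple of 2^12 = 4096): if the faulting index is 5000 and
   tcr->tlb_limit is 4096, new_limit = align_to_power_of_2(5001,12)
   = 8192; realloc() grows the tlb to 8192 bytes and the 4096 new
   bytes are filled with no_thread_local_binding_marker. */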
2192Boolean
2193extend_tcr_tlb(TCR *tcr, 
2194               ExceptionInformation *xp, 
2195               unsigned limit_regno,
2196               unsigned idx_regno)
2197{
2198  unsigned
2199    index = (unsigned) (xpGPR(xp,idx_regno)),
2200    old_limit = tcr->tlb_limit,
2201    new_limit = align_to_power_of_2(index+1,12),
2202    new_bytes = new_limit-old_limit;
2203  LispObj
2204    *old_tlb = tcr->tlb_pointer,
2205    *new_tlb = realloc(old_tlb, new_limit),
2206    *work;
2207
2208  if (new_tlb == NULL) {
2209    return false;
2210  }
2211 
2212  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2213
2214  while (new_bytes) {
2215    *work++ = no_thread_local_binding_marker;
2216    new_bytes -= sizeof(LispObj);
2217  }
2218  tcr->tlb_pointer = new_tlb;
2219  tcr->tlb_limit = new_limit;
2220  xpGPR(xp, limit_regno) = new_limit;
2221  return true;
2222}
2223
2224
2225
2226void
2227exception_init()
2228{
2229  install_pmcl_exception_handlers();
2230}
2231
2232
2233
2234
2235
2236#ifdef DARWIN
2237
2238
2239#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2240#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2241
2242
2243#if USE_MACH_EXCEPTION_LOCK
2244pthread_mutex_t _mach_exception_lock, *mach_exception_lock;
2245#endif
2246
2247#define LISP_EXCEPTIONS_HANDLED_MASK \
2248 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2249
2250/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2251#define NUM_LISP_EXCEPTIONS_HANDLED 4
2252
2253typedef struct {
2254  int foreign_exception_port_count;
2255  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2256  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2257  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2258  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2259} MACH_foreign_exception_state;
2260
2261
2262
2263
2264/*
2265  Mach's exception mechanism works a little better than its signal
2266  mechanism (and, not incidentally, it gets along with GDB a lot
2267  better.
2268
2269  Initially, we install an exception handler to handle each native
2270  thread's exceptions.  This process involves creating a distinguished
2271  thread which listens for kernel exception messages on a set of
2272  0 or more thread exception ports.  As threads are created, they're
2273  added to that port set; a thread's exception port is destroyed
2274  (and therefore removed from the port set) when the thread exits.
2275
2276  A few exceptions can be handled directly in the handler thread;
2277  others require that we resume the user thread (and that the
2278  exception thread resumes listening for exceptions.)  The user
2279  thread might eventually want to return to the original context
2280  (possibly modified somewhat.)
2281
2282  As it turns out, the simplest way to force the faulting user
2283  thread to handle its own exceptions is to do pretty much what
2284  signal() does: the exception handling thread sets up a sigcontext
2285  on the user thread's stack and forces the user thread to resume
2286  execution as if a signal handler had been called with that
2287  context as an argument.  We can use a distinguished UUO at a
2288  distinguished address to do something like sigreturn(); that'll
2289  have the effect of resuming the user thread's execution in
2290  the (pseudo-) signal context.
2291
2292  Since:
2293    a) we have miles of code in C and in Lisp that knows how to
2294    deal with Linux sigcontexts
2295    b) Linux sigcontexts contain a little more useful information
2296    (the DAR, DSISR, etc.) than their Darwin counterparts
2297    c) we have to create a sigcontext ourselves when calling out
2298    to the user thread: we aren't really generating a signal, just
2299    leveraging existing signal-handling code.
2300
2301  we create a Linux sigcontext struct.
2302
2303  Simple?  Hopefully from the outside it is ...
2304
2305  We want the process of passing a thread's own context to it to
2306  appear to be atomic: in particular, we don't want the GC to suspend
2307  a thread that's had an exception but has not yet had its user-level
2308  exception handler called, and we don't want the thread's exception
2309  context to be modified by a GC while the Mach handler thread is
2310  copying it around.  On Linux (and on Jaguar), we avoid this issue
2311  because (a) the kernel sets up the user-level signal handler and
2312  (b) the signal handler blocks signals (including the signal used
2313  by the GC to suspend threads) until tcr->xframe is set up.
2314
2315  The GC and the Mach server thread therefore contend for the lock
2316  "mach_exception_lock".  The Mach server thread holds the lock
2317  when copying exception information between the kernel and the
2318  user thread; the GC holds this lock during most of its execution
2319  (delaying exception processing until it can be done without
2320  GC interference.)
2321
2322*/
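
/* In outline, the code below implements this round trip:

   1. a lisp thread faults; the kernel suspends it and queues an
      exception message on the thread's exception port;
   2. exception_handler_proc()/openmcl_exc_server() receive the
      message and call catch_exception_raise();
   3. catch_exception_raise() maps the exception to a signal number
      and calls setup_signal_frame(), which builds a (Linux-style)
      sigcontext on the user thread's stack and redirects the thread
      to pseudo_signal_handler();
   4. the handler "returns to" pseudo_sigreturn(); the resulting
      EXC_BAD_INSTRUCTION is recognized by catch_exception_raise(),
      which calls do_pseudo_sigreturn() to restore the saved context.
*/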
2323
2324#ifdef PPC64
2325#define C_REDZONE_LEN           320
2326#define C_STK_ALIGN             32
2327#else
2328#define C_REDZONE_LEN           224
2329#define C_STK_ALIGN             16
2330#endif
2331#define C_PARAMSAVE_LEN         64
2332#define C_LINKAGE_LEN           48
2333
2334#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
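/* E.g., with the 32-bit values above (C_REDZONE_LEN = 224,
   C_STK_ALIGN = 16): TRUNC_DOWN(4096, C_REDZONE_LEN, C_STK_ALIGN)
   is ((4096-224)/16)*16 = 3872, i.e. step down past the red zone,
   then truncate to a 16-byte boundary. */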
2335
2336void
2337fatal_mach_error(char *format, ...);
2338
2339#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2340
2341
2342void
2343restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2344{
2345  int i, j;
2346  kern_return_t kret;
2347  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2348
2349  /* Set the thread's FP state from the pseudosigcontext */
2350  kret = thread_set_state(thread,
2351                          PPC_FLOAT_STATE,
2352                          (thread_state_t)&(mc->__fs),
2353                          PPC_FLOAT_STATE_COUNT);
2354
2355  MACH_CHECK_ERROR("setting thread FP state", kret);
2356
2357  /* The thread'll be as good as new ... */
2358#ifdef PPC64
2359  kret = thread_set_state(thread,
2360                          PPC_THREAD_STATE64,
2361                          (thread_state_t)&(mc->__ss),
2362                          PPC_THREAD_STATE64_COUNT);
2363#else
2364  kret = thread_set_state(thread, 
2365                          MACHINE_THREAD_STATE,
2366                          (thread_state_t)&(mc->__ss),
2367                          MACHINE_THREAD_STATE_COUNT);
2368#endif
2369  MACH_CHECK_ERROR("setting thread state", kret);
2370} 
2371
2372/* This code runs in the exception handling thread, in response
2373   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2374   in response to a call to pseudo_sigreturn() from the specified
2375   user thread.)
2376   Find that context (the user thread's R3 points to it), then
2377   use that context to set the user thread's state.  When this
2378   function's caller returns, the Mach kernel will resume the
2379   user thread.
2380*/
2381
2382kern_return_t
2383do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2384{
2385  ExceptionInformation *xp;
2386
2387#ifdef DEBUG_MACH_EXCEPTIONS
2388  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2389#endif
2390  xp = tcr->pending_exception_context;
2391  if (xp) {
2392    tcr->pending_exception_context = NULL;
2393    tcr->valence = TCR_STATE_LISP;
2394    restore_mach_thread_state(thread, xp);
2395    raise_pending_interrupt(tcr);
2396  } else {
2397    Bug(NULL, "no xp here!\n");
2398  }
2399#ifdef DEBUG_MACH_EXCEPTIONS
2400  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2401#endif
2402  return KERN_SUCCESS;
2403} 
2404
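/* Sketch of what create_thread_context_frame() (below) builds on the
   user thread's stack (stack grows downward):

       old SP (ts.__r1)                 <- saved as "backlink"
       [C_REDZONE_LEN bytes of red zone]
       ExceptionInformation (pseudosigcontext)
       MCONTEXT_T mc (__ss, __fs, __es thread states)
       [C_PARAMSAVE_LEN parameter-save area]
       [C_LINKAGE_LEN linkage area; word at new SP = backlink]
       new SP                           <- returned in *new_stack_top
*/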
2405ExceptionInformation *
2406create_thread_context_frame(mach_port_t thread, 
2407                            natural *new_stack_top)
2408{
2409#ifdef PPC64
2410  ppc_thread_state64_t ts;
2411#else
2412  ppc_thread_state_t ts;
2413#endif
2414  mach_msg_type_number_t thread_state_count;
2415  kern_return_t result;
2416  int i,j;
2417  ExceptionInformation *pseudosigcontext;
2418  MCONTEXT_T mc;
2419  natural stackp, backlink;
2420
2421#ifdef PPC64
2422  thread_state_count = PPC_THREAD_STATE64_COUNT;
2423  result = thread_get_state(thread,
2424                            PPC_THREAD_STATE64,
2425                            (thread_state_t)&ts,
2426                            &thread_state_count);
2427#else
2428  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2429  result = thread_get_state(thread, 
2430                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2431                            (thread_state_t)&ts,
2432                            &thread_state_count);
2433#endif
2434 
2435  if (result != KERN_SUCCESS) {
2436    get_tcr(true);
2437    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2438  }
2439  stackp = ts.__r1;
2440  backlink = stackp;
2441  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2442  stackp -= sizeof(*pseudosigcontext);
2443  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2444
2445  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2446  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2447  bcopy(&ts,&(mc->__ss),sizeof(ts));
2448
2449  thread_state_count = PPC_FLOAT_STATE_COUNT;
2450  thread_get_state(thread,
2451                   PPC_FLOAT_STATE,
2452                   (thread_state_t)&(mc->__fs),
2453                   &thread_state_count);
2454
2455
2456#ifdef PPC64
2457  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2458#else
2459  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2460#endif
2461  thread_get_state(thread,
2462#ifdef PPC64
2463                   PPC_EXCEPTION_STATE64,
2464#else
2465                   PPC_EXCEPTION_STATE,
2466#endif
2467                   (thread_state_t)&(mc->__es),
2468                   &thread_state_count);
2469
2470
2471  UC_MCONTEXT(pseudosigcontext) = mc;
2472  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2473  stackp -= C_LINKAGE_LEN;
2474  *(natural *)ptr_from_lispobj(stackp) = backlink;
2475  if (new_stack_top) {
2476    *new_stack_top = stackp;
2477  }
2478  return pseudosigcontext;
2479}
2480
2481/*
2482  This code sets up the user thread so that it executes a "pseudo-signal
2483  handler" function when it resumes.  Create a linux sigcontext struct
2484  on the thread's stack and pass it as an argument to the pseudo-signal
2485  handler.
2486
2487  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2488  which will restore the thread's context.
2489
2490  If the handler invokes code that throws (or otherwise never sigreturn()'s
2491  to the context), that's fine.
2492
2493  Actually, check that: throw (and variants) may need to be careful and
2494  pop the tcr's xframe list until it's younger than any frame being
2495  entered.
2496*/
2497
2498int
2499setup_signal_frame(mach_port_t thread,
2500                   void *handler_address,
2501                   int signum,
2502                   int code,
2503                   TCR *tcr)
2504{
2505#ifdef PPC64
2506  ppc_thread_state64_t ts;
2507#else
2508  ppc_thread_state_t ts;
2509#endif
2510  mach_msg_type_number_t thread_state_count;
2511  ExceptionInformation *pseudosigcontext;
2512  int i, j, old_valence = tcr->valence;
2513  kern_return_t result;
2514  natural stackp;
2515
2516#ifdef DEBUG_MACH_EXCEPTIONS
2517  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2518#endif
2519  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2520  pseudosigcontext->uc_onstack = 0;
2521  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2522  tcr->pending_exception_context = pseudosigcontext;
2523  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2524 
2525
2526  /*
2527     We've now created a sigcontext on the thread's
2528     stack.  Set things up so that we call the handler (with appropriate
2529     args) when the thread's resumed.
2530  */
2531
2532  ts.__srr0 = (natural) handler_address;
2533  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2534  ts.__r1 = stackp;
2535  ts.__r3 = signum;
2536  ts.__r4 = (natural)pseudosigcontext;
2537  ts.__r5 = (natural)tcr;
2538  ts.__r6 = (natural)old_valence;
2539  ts.__lr = (natural)pseudo_sigreturn;
2540
2541
2542#ifdef PPC64
2543  ts.__r13 = xpGPR(pseudosigcontext,13);
2544  thread_set_state(thread,
2545                   PPC_THREAD_STATE64,
2546                   (thread_state_t)&ts,
2547                   PPC_THREAD_STATE64_COUNT);
2548#else
2549  thread_set_state(thread, 
2550                   MACHINE_THREAD_STATE,
2551                   (thread_state_t)&ts,
2552                   MACHINE_THREAD_STATE_COUNT);
2553#endif
2554#ifdef DEBUG_MACH_EXCEPTIONS
2555  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2556#endif
2557  return 0;
2558}
2559
2560
2561void
2562pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2563{
2564  signal_handler(signum, NULL, context, tcr, old_valence);
2565} 
2566
2567
2568int
2569thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2570{
2571#ifdef PPC64
2572  ppc_thread_state64_t ts;
2573#else
2574  ppc_thread_state_t ts;
2575#endif
2576  mach_msg_type_number_t thread_state_count;
2577
2578#ifdef PPC64
2579  thread_state_count = PPC_THREAD_STATE64_COUNT;
2580#else
2581  thread_state_count = PPC_THREAD_STATE_COUNT;
2582#endif
2583  thread_get_state(thread, 
2584#ifdef PPC64
2585                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2586#else
2587                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2588#endif
2589                   (thread_state_t)&ts,
2590                   &thread_state_count);
2591  if (enabled) {
2592    ts.__srr1 |= MSR_FE0_FE1_MASK;
2593  } else {
2594    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2595  }
2596  /*
2597     Hack-o-rama warning (isn't it about time for such a warning?):
2598     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2599     Our handler for lisp's use of pthread_kill() pushes a phony
2600     lisp frame on the stack and forces the context to resume at
2601     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2602     lisp frame contains the -real- address that process_interrupt
2603     should have returned to, and the fact that it's in a lisp
2604     frame should convince the GC to notice that address if it
2605     runs in the tiny time window between returning from our
2606     interrupt handler and ... here.
2607     If the top frame on the stack is a lisp frame, discard it
2608     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2609     just adjust ts.srr0 to skip over the UUO.
2610  */
2611  {
2612    lisp_frame *tos = (lisp_frame *)ts.__r1,
2613      *next_frame = tos->backlink;
2614   
2615    if (tos == (next_frame -1)) {
2616      ts.__srr0 = tos->savelr;
2617      ts.__r1 = (LispObj) next_frame;
2618    } else {
2619      ts.__srr0 += 4;
2620    }
2621  }
2622  thread_set_state(thread, 
2623#ifdef PPC64
2624                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2625#else
2626                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2627#endif
2628                   (thread_state_t)&ts,
2629#ifdef PPC64
2630                   PPC_THREAD_STATE64_COUNT
2631#else
2632                   PPC_THREAD_STATE_COUNT
2633#endif
2634                   );
2635
2636  return 0;
2637}
2638
2639/*
2640  This function runs in the exception handling thread.  It's
2641  called (by this precise name) by an exception-message dispatcher
2642  (classically the library "exc_server()"; here, openmcl_exc_server()
2643  below), which mach_msg_server() invokes as it waits for and
2644  dispatches on exception messages from the Mach kernel.
2645
2646  This checks to see if the exception was caused by a pseudo_sigreturn()
2647  UUO; if so, it arranges for the thread to have its state restored
2648  from the specified context.
2649
2650  Otherwise, it tries to map the exception to a signal number and
2651  arranges that the thread run a "pseudo signal handler" to handle
2652  the exception.
2653
2654  Some exceptions could and should be handled here directly.
2655*/
2656
2657kern_return_t
2658catch_exception_raise(mach_port_t exception_port,
2659                      mach_port_t thread,
2660                      mach_port_t task, 
2661                      exception_type_t exception,
2662                      exception_data_t code_vector,
2663                      mach_msg_type_number_t code_count)
2664{
2665  int signum = 0, code = *code_vector, code1;
2666  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2667  kern_return_t kret;
2668
2669#ifdef DEBUG_MACH_EXCEPTIONS
2670  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2671#endif
2672
2673  if (
2674#if USE_MACH_EXCEPTION_LOCK
2675    pthread_mutex_trylock(mach_exception_lock) == 0
2676#else
2677    1
2678#endif
2679    ) {
2680    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2681      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2682    } 
2683    if ((exception == EXC_BAD_INSTRUCTION) &&
2684        (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2685        (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2686         (code1 == (int)enable_fp_exceptions) ||
2687         (code1 == (int)disable_fp_exceptions))) {
2688      if (code1 == (int)pseudo_sigreturn) {
2689        kret = do_pseudo_sigreturn(thread, tcr);
2690#if 0
2691      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2692#endif
2693       
2694      } else if (code1 == (int)enable_fp_exceptions) {
2695        kret = thread_set_fp_exceptions_enabled(thread, true);
2696      } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2697    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2698      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2699      kret = 17;
2700    } else {
2701      switch (exception) {
2702      case EXC_BAD_ACCESS:
2703        signum = SIGSEGV;
2704        break;
2705       
2706      case EXC_BAD_INSTRUCTION:
2707        signum = SIGILL;
2708        break;
2709     
2710      case EXC_SOFTWARE:
2711        if (code == EXC_PPC_TRAP) {
2712          signum = SIGTRAP;
2713        }
2714        break;
2715     
2716      case EXC_ARITHMETIC:
2717        signum = SIGFPE;
2718        break;
2719
2720      default:
2721        break;
2722      }
2723      if (signum) {
2724        kret = setup_signal_frame(thread,
2725                                  (void *)pseudo_signal_handler,
2726                                  signum,
2727                                  code,
2728                                  tcr);
2729#if 0
2730      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2731#endif
2732
2733      } else {
2734        kret = 17;
2735      }
2736    }
2737#if USE_MACH_EXCEPTION_LOCK
2738#ifdef DEBUG_MACH_EXCEPTIONS
2739    fprintf(stderr, "releasing Mach exception lock in exception thread\n");
2740#endif
2741    pthread_mutex_unlock(mach_exception_lock);
2742#endif
2743  } else {
2744    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2745#if 0
2746    fprintf(stderr, "deferring pending exception in 0x%x\n", tcr);
2747#endif
2748    kret = 0;
2749    if (tcr == gc_tcr) {
2750      int i;
2751      write(1, "exception in GC thread.  Sleeping for 60 seconds\n", sizeof("exception in GC thread.  Sleeping for 60 seconds\n")-1);
2752      for (i = 0; i < 60; i++) {
2753        sleep(1);
2754      }
2755      _exit(EX_SOFTWARE);
2756    }
2757  }
2758  return kret;
2759}
2760
2761
2762
2763typedef struct {
2764  mach_msg_header_t Head;
2765  /* start of the kernel processed data */
2766  mach_msg_body_t msgh_body;
2767  mach_msg_port_descriptor_t thread;
2768  mach_msg_port_descriptor_t task;
2769  /* end of the kernel processed data */
2770  NDR_record_t NDR;
2771  exception_type_t exception;
2772  mach_msg_type_number_t codeCnt;
2773  integer_t code[2];
2774  mach_msg_trailer_t trailer;
2775} exceptionRequest;
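
/* msgh_id 2401 is the standard Mach "exception_raise" request,
   corresponding to EXCEPTION_DEFAULT behavior; ids 2402 and 2403 are
   the _state and _state_identity variants alluded to below. */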
2776
2777
2778boolean_t
2779openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2780{
2781  static NDR_record_t _NDR = {0};
2782  kern_return_t handled;
2783  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2784  exceptionRequest *req = (exceptionRequest *) in;
2785
2786  reply->NDR = _NDR;
2787
2788  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2789  out->msgh_remote_port = in->msgh_remote_port;
2790  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2791  out->msgh_local_port = MACH_PORT_NULL;
2792  out->msgh_id = in->msgh_id+100;
2793
2794  /* Could handle other exception flavors in the range 2401-2403 */
2795
2796
2797  if (in->msgh_id != 2401) {
2798    reply->RetCode = MIG_BAD_ID;
2799    return FALSE;
2800  }
2801  handled = catch_exception_raise(req->Head.msgh_local_port,
2802                                  req->thread.name,
2803                                  req->task.name,
2804                                  req->exception,
2805                                  req->code,
2806                                  req->codeCnt);
2807  reply->RetCode = handled;
2808  return TRUE;
2809}
2810
2811/*
2812  The initial function for an exception-handling thread.
2813*/
2814
2815void *
2816exception_handler_proc(void *arg)
2817{
2818  extern boolean_t exc_server();
2819  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2820
2821  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2822  /* Should never return. */
2823  abort();
2824}
2825
2826
2827
2828mach_port_t
2829mach_exception_port_set()
2830{
2831  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2832  kern_return_t kret; 
2833  if (__exception_port_set == MACH_PORT_NULL) {
2834#if USE_MACH_EXCEPTION_LOCK
2835    mach_exception_lock = &_mach_exception_lock;
2836    pthread_mutex_init(mach_exception_lock, NULL);
2837#endif
2838    kret = mach_port_allocate(mach_task_self(),
2839                              MACH_PORT_RIGHT_PORT_SET,
2840                              &__exception_port_set);
2841    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2842    create_system_thread(0,
2843                         NULL,
2844                         exception_handler_proc, 
2845                         (void *)((natural)__exception_port_set));
2846  }
2847  return __exception_port_set;
2848}
2849
2850/*
2851  Setup a new thread to handle those exceptions specified by
2852  the mask "which".  This involves creating a special Mach
2853  message port, telling the Mach kernel to send exception
2854  messages for the calling thread to that port, and setting
2855  up a handler thread which listens for and responds to
2856  those messages.
2857
2858*/
2859
2860/*
2861  Establish the lisp thread's TCR as its exception port, and determine
2862  whether any other ports have been established by foreign code for
2863  exceptions that lisp cares about.
2864
2865  If this happens at all, it should happen on return from foreign
2866  code and on entry to lisp code via a callback.
2867
2868  This is a lot of trouble (and overhead) to support Java, or other
2869  embeddable systems that clobber their caller's thread exception ports.
2870 
2871*/
2872kern_return_t
2873tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2874{
2875  kern_return_t kret;
2876  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2877  int i;
2878  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2879  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2880  exception_mask_t mask = 0;
2881
2882  kret = thread_swap_exception_ports(thread,
2883                                     LISP_EXCEPTIONS_HANDLED_MASK,
2884                                     lisp_port,
2885                                     EXCEPTION_DEFAULT,
2886                                     THREAD_STATE_NONE,
2887                                     fxs->masks,
2888                                     &n,
2889                                     fxs->ports,
2890                                     fxs->behaviors,
2891                                     fxs->flavors);
2892  if (kret == KERN_SUCCESS) {
2893    fxs->foreign_exception_port_count = n;
2894    for (i = 0; i < n; i ++) {
2895      foreign_port = fxs->ports[i];
2896
2897      if ((foreign_port != lisp_port) &&
2898          (foreign_port != MACH_PORT_NULL)) {
2899        mask |= fxs->masks[i];
2900      }
2901    }
2902    tcr->foreign_exception_status = (int) mask;
2903  }
2904  return kret;
2905}
2906
2907kern_return_t
2908tcr_establish_lisp_exception_port(TCR *tcr)
2909{
2910  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2911}
2912
2913/*
2914  Do this when calling out to or returning from foreign code, if
2915  any conflicting foreign exception ports were established when we
2916  last entered lisp code.
2917*/
2918kern_return_t
2919restore_foreign_exception_ports(TCR *tcr)
2920{
2921  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2922 
2923  if (m) {
2924    MACH_foreign_exception_state *fxs  = 
2925      (MACH_foreign_exception_state *) tcr->native_thread_info;
2926    int i, n = fxs->foreign_exception_port_count;
2927    exception_mask_t tm;
2928
2929    for (i = 0; i < n; i++) {
2930      if ((tm = fxs->masks[i]) & m) {
2931        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2932                                   tm,
2933                                   fxs->ports[i],
2934                                   fxs->behaviors[i],
2935                                   fxs->flavors[i]);
2936      }
2937    }
2938  }
2939  return KERN_SUCCESS;
}
2940                                   
2941
2942/*
2943  This assumes that a Mach port (to be used as the thread's exception port) whose
2944  "name" matches the TCR's 32-bit address has already been allocated.
2945*/
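
/* A minimal sketch of that allocation (the actual call site is
   elsewhere in the runtime; this is illustrative, not the exact
   code):

       kern_return_t kret =
         mach_port_allocate_name(mach_task_self(),
                                 MACH_PORT_RIGHT_RECEIVE,
                                 (mach_port_name_t)((natural)tcr));
*/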
2946
2947kern_return_t
2948setup_mach_exception_handling(TCR *tcr)
2949{
2950  mach_port_t
2951    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2952    target_thread = pthread_mach_thread_np((pthread_t)ptr_from_lispobj(tcr->osid)),
2953    task_self = mach_task_self();
2954  kern_return_t kret;
2955
2956  kret = mach_port_insert_right(task_self,
2957                                thread_exception_port,
2958                                thread_exception_port,
2959                                MACH_MSG_TYPE_MAKE_SEND);
2960  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2961
2962  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2963  if (kret == KERN_SUCCESS) {
2964    mach_port_t exception_port_set = mach_exception_port_set();
2965
2966    kret = mach_port_move_member(task_self,
2967                                 thread_exception_port,
2968                                 exception_port_set);
2969  }
2970  return kret;
2971}
2972
2973void
2974darwin_exception_init(TCR *tcr)
2975{
2976  void tcr_monitor_exception_handling(TCR*, Boolean);
2977  kern_return_t kret;
2978  MACH_foreign_exception_state *fxs = 
2979    calloc(1, sizeof(MACH_foreign_exception_state));
2980 
2981  tcr->native_thread_info = (void *) fxs;
2982
2983  if ((kret = setup_mach_exception_handling(tcr))
2984      != KERN_SUCCESS) {
2985    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
2986    terminate_lisp();
2987  }
2988  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
2989  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
2990}
2991
2992/*
2993  The tcr is the "name" of the corresponding thread's exception port.
2994  Destroying the port should remove it from all port sets of which it's
2995  a member (notably, the exception port set.)
2996*/
2997void
2998darwin_exception_cleanup(TCR *tcr)
2999{
3000  void *fxs = tcr->native_thread_info;
3001  extern Boolean use_mach_exception_handling;
3002
3003  if (fxs) {
3004    tcr->native_thread_info = NULL;
3005    free(fxs);
3006  }
3007  if (use_mach_exception_handling) {
3008    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3009    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3010  }
3011}
3012
3013
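/* Suspend a Mach thread and use thread_abort_safely() to abort any
   interruptible kernel operation it may be blocked in, presumably so
   that the register state read or rewritten later is the thread's
   own.  thread_abort_safely() can fail when the thread isn't at a
   safe point; in that case the thread is resumed and the
   suspend/abort sequence is retried. */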
3014Boolean
3015suspend_mach_thread(mach_port_t mach_thread)
3016{
3017  kern_return_t status;
3018  Boolean aborted = false;
3019 
3020  do {
3021    aborted = false;
3022    status = thread_suspend(mach_thread);
3023    if (status == KERN_SUCCESS) {
3024      status = thread_abort_safely(mach_thread);
3025      if (status == KERN_SUCCESS) {
3026        aborted = true;
3027      } else {
3028        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3029        thread_resume(mach_thread);
3030      }
3031    } else {
3032      return false;
3033    }
3034  } while (! aborted);
3035  return true;
3036}
3037
3038/*
3039  Only do this if pthread_kill indicated that the pthread isn't
3040  listening to signals anymore, as can happen as soon as pthread_exit()
3041  is called on Darwin.  The thread could still call out to lisp as it
3042  is exiting, so we need another way to suspend it in this case.
3043*/
3044Boolean
3045mach_suspend_tcr(TCR *tcr)
3046{
3047  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3048  ExceptionInformation *pseudosigcontext;
3049  Boolean result = false;
3050 
3051  result = suspend_mach_thread(mach_thread);
3052  if (result) {
3053    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3054    pseudosigcontext->uc_onstack = 0;
3055    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3056    tcr->suspend_context = pseudosigcontext;
3057  }
3058  return result;
3059}
3060
3061void
3062mach_resume_tcr(TCR *tcr)
3063{
3064  ExceptionInformation *xp;
3065  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3066 
3067  xp = tcr->suspend_context;
3068#ifdef DEBUG_MACH_EXCEPTIONS
3069  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3070          tcr, tcr->pending_exception_context);
3071#endif
3072  tcr->suspend_context = NULL;
3073  restore_mach_thread_state(mach_thread, xp);
3074#ifdef DEBUG_MACH_EXCEPTIONS
3075  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3076          tcr, tcr->pending_exception_context);
3077#endif
3078  thread_resume(mach_thread);
3079}
3080
3081void
3082fatal_mach_error(char *format, ...)
3083{
3084  va_list args;
3085  char s[512];
3086 
3087
3088  va_start(args, format);
3089  vsnprintf(s, sizeof(s),format, args);
3090  va_end(args);
3091
3092  Fatal("Mach error", s);
3093}
3094
3095void
3096pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3097{
3098  interrupt_handler(signum, NULL, context);
3099}
3100
3101int
3102mach_raise_thread_interrupt(TCR *target)
3103{
3104  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3105  kern_return_t kret;
3106  Boolean result = false;
3107  TCR *current = get_tcr(false);
3108  thread_basic_info_data_t info; 
3109  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3110
3111  LOCK(lisp_global(TCR_AREA_LOCK), current);
3112#if USE_MACH_EXCEPTION_LOCK
3113  pthread_mutex_lock(mach_exception_lock);
3114#endif
3115
3116  if (suspend_mach_thread(mach_thread)) {
3117    if (thread_info(mach_thread,
3118                    THREAD_BASIC_INFO,
3119                    (thread_info_t)&info,
3120                    &info_count) == KERN_SUCCESS) {
3121      if (info.suspend_count == 1) {
3122        if ((target->valence == TCR_STATE_LISP) &&
3123            (!target->unwinding) &&
3124            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3125          kret = setup_signal_frame(mach_thread,
3126                                    (void *)pseudo_interrupt_handler,
3127                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3128                                    0,
3129                                    target);
3130          if (kret == KERN_SUCCESS) {
3131            result = true;
3132          }
3133        }
3134      }
3135    }
3136    if (! result) {
3137      target->interrupt_pending = 1 << fixnumshift;
3138    }
3139    thread_resume(mach_thread);
3140   
3141  }
3142#if USE_MACH_EXCEPTION_LOCK
3143  pthread_mutex_unlock(mach_exception_lock);
3144#endif
3145  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3146  return 0;
3147}
3148
3149#endif