source: branches/working-0711/ccl/lisp-kernel/ppc-exceptions.c @ 9978

Last change on this file since 9978 was 9978, checked in by gb, 11 years ago

Trunk changes: new traps for thread ops, handle writes to readonly
area by unprotecting the page.
Use traditional register names on Darwin.

/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL. 

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence. 

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
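/* FE0 is MSR bit 0x800 and FE1 is bit 0x100; together they select the
   floating-point exception mode (both clear: exceptions disabled;
   both set: precise mode; the mixed settings are the two imprecise
   modes). */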
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif
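/* (The kernel's prctl.h also defines two intermediate modes --
   nonrecoverable and asynchronous -- between DISABLED and PRECISE;
   this code only ever asks for the two extremes.) */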

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
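/* For reference, the recognized allocation sequences look roughly
   like this (a sketch; "rN" is whichever register holds the size,
   and the trap is a "trap unless allocptr is still >= allocbase"
   test):

       subf allocptr,rN,allocptr     ; variable-sized case, or ...
       addi allocptr,allocptr,-disp  ; ... small constant case
       twllt allocptr,allocbase      ; ALLOC_TRAP_INSTRUCTION

   so the displacement is recovered either from rN or from the addi
   immediate. */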


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
   
    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
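/* A sketch of the window this emulates (register names and the exact
   untagging encoding are illustrative):

       stw rX,<car offset>(allocptr)   ; initialize car and/or cdr
       stw rY,<cdr offset>(allocptr)
       mr  rZ,allocptr                 ; result register <- tagged cons
       clrrwi allocptr,allocptr,<ntagbits>  ; UNTAG_ALLOCPTR_INSTRUCTION

   The loop stops once the untagging instruction has been emulated. */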

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }
 
  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr, 
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
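/* (Allocation proceeds downward, from the segment's high end toward
   allocbase, so "last - current" is the nonnegative number of bytes
   consumed since bytes_allocated was last brought up to date.) */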

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* A full GC was requested while GC was disabled and had to be
         deferred; do the full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
        release_readonly_area();
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  adjust_exception_pc(xp,4);
  return 0;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack addressed by "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition; the soft limit stays
     lowered until all stacks are back under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;
 
  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}
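/* Worked example (a sketch): if a->softlimit is 0x01010000 and the
   soft protected area starts at 0x01000000, the proposed new limit
   0x0100f000 is still >= start + 16384 (0x01004000), so the soft
   region shrinks by one 4K page and is then re-protected. */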

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception. */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
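/* Example of the sizing arithmetic (a sketch, assuming a 64KB
   allocation quantum, i.e. log2_allocation_quantum == 16): with
   oldlimit == 0x30001000 and need == 0x500,

     newlimit = align(0x30001000) + align(0x500)
              = 0x30010000 + 0x10000 = 0x30020000

   The thread then allocates downward from allocptr == newlimit
   toward allocbase == oldlimit. */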

 
void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

int
gc_like_from_xp(ExceptionInformation *xp, 
                int(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
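  /* A positive GC_INHIBIT_COUNT means GC is currently disabled;
     negating it records that a GC was requested while disabled, so
     that the deferred GC can be noticed and run once GC is
     re-enabled. */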
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }


  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler
 *protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };


Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux, and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

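  /* Per this revision's change log: a write fault that doesn't hit a
     known protected area but falls inside the readonly area is
     handled by simply unprotecting the affected page. */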
  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}



OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
 
}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
      */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
   } else {
    return do_vsp_overflow(xp, addr);
   }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.

  Note that the case of two or more threads writing to the same
  page (before either of them can run this handler) is benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained), none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);
   
    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);
     
      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}


OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

   
int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but typically not available on Motorola/Freescale systems.
*/               
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
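  /* fsqrt/fsqrts are A-form instructions whose extended opcode --
     bits 1..5 of the word, i.e. (instruction >> 1) & 0x1f -- is 22;
     the mask ((1 << 6) - 2) == 0x3e below selects exactly those bits
     while ignoring the rc bit in bit 0. */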
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum, 
                       ExceptionInformation *xp, 
                       TCR *tcr, 
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;


  program_counter = xpPC(xp);
 
  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

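/* "delta" is a byte count, but xpPC is an (opcode *), so convert it
   to a word count before doing the pointer arithmetic. */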
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;
 
  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
 
  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */
 
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;
 
  return err;

}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }
 
  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
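/* For example (a sketch): if "where" points at the third instruction
   word of fn's code vector (8 bytes past misc_data_offset), lisp is
   passed register_number == fn and index == 2, rather than a raw
   PC. */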

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* Callback to (symbol-value cmain) if it is a macptr;
   otherwise report cause and function name to console.
   Returns noErr if the exception was handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:
     
     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.
     
     b) If we're already in "yellow zone" mode, then:
     
     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible). If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.
     
     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.
     
     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;
     
    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }
   
    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before the trap instr for a
   pattern.  Stop if a subtag_code_vector header is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Determine whether a trap instruction is conditional (might not
   trap) or will always trap.  */

int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask, and either the register
       operands (for "tw") are the same, or both of the signed or
       both of the unsigned inequality bits must also be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
1673OSStatus
1674handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
1675{
1676  LispObj   errdisp = nrs_ERRDISP.vcell;
1677
1678  if ((fulltag_of(errdisp) == fulltag_misc) &&
1679      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1680    /* errdisp is a macptr, we can call back to lisp */
1681    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
1682    return(0);
1683    }
1684
1685  return(-1);
1686}
1687               
1688
1689/*
1690   Current thread has all signals masked.  Before unmasking them,
1691   make it appear that the current thread has been suspended.
1692   (This is to handle the case where another thread is trying
1693   to GC before this thread is able to sieze the exception lock.)
1694*/
1695int
1696prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1697{
1698  int old_valence = tcr->valence;
1699
1700  tcr->pending_exception_context = context;
1701  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1702
1703  ALLOW_EXCEPTIONS(context);
1704  return old_valence;
1705} 
1706
1707void
1708wait_for_exception_lock_in_handler(TCR *tcr, 
1709                                   ExceptionInformation *context,
1710                                   xframe_list *xf)
1711{
1712
1713  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1714#ifdef DEBUG
1715  fprintf(stderr, "0x%x has exception lock\n", tcr);
1716#endif
1717  xf->curr = context;
1718  xf->prev = tcr->xframe;
1719  tcr->xframe =  xf;
1720  tcr->pending_exception_context = NULL;
1721  tcr->valence = TCR_STATE_FOREIGN; 
1722}
1723
1724void
1725unlock_exception_lock_in_handler(TCR *tcr)
1726{
1727  tcr->pending_exception_context = tcr->xframe->curr;
1728  tcr->xframe = tcr->xframe->prev;
1729  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1730#ifdef DEBUG
1731  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
1732#endif
1733  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1734}
1735
1736/*
1737   If an interrupt is pending on exception exit, try to ensure
1738   that the thread sees it as soon as it's able to run.
1739*/
1740void
1741raise_pending_interrupt(TCR *tcr)
1742{
1743  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1744    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1745  }
1746}
1747
1748void
1749exit_signal_handler(TCR *tcr, int old_valence)
1750{
1751  sigset_t mask;
1752  sigfillset(&mask);
1753 
1754  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1755  tcr->valence = old_valence;
1756  tcr->pending_exception_context = NULL;
1757}
1758
1759
1760void
1761signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1762{
1763  xframe_list xframe_link;
1764
1765#ifdef DARWIN
1766  if (running_under_rosetta) {
1767    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
1768  }
1769#endif
1770  if (!use_mach_exception_handling) {
1771   
1772    tcr = (TCR *) get_interrupt_tcr(false);
1773 
1774    /* The signal handler's entered with all signals (notably the
1775       thread_suspend signal) blocked.  Don't allow any other signals
1776       (notably the thread_suspend signal) to preempt us until we've
1777       set the TCR's xframe slot to include the current exception
1778       context.
1779    */
1780   
1781    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1782  }
1783
1784  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1785    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1786    pthread_kill(pthread_self(), thread_suspend_signal);
1787  }
1788
1789 
1790  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1791  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1792    char msg[512];
1793    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1794    if (lisp_Debugger(context, info, signum, false, msg)) {
1795      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1796    }
1797  }
1798
1799  unlock_exception_lock_in_handler(tcr);
1800
1801  /* This thread now looks like a thread that was suspended while
1802     executing lisp code.  If some other thread gets the exception
1803     lock and GCs, the context (this thread's suspend_context) will
1804     be updated.  (That's only of concern if it happens before we
1805     can return to the kernel/to the Mach exception handler).
1806  */
1807  if (!use_mach_exception_handling) {
1808    exit_signal_handler(tcr, old_valence);
1809    raise_pending_interrupt(tcr);
1810  }
1811}
1812
1813/*
1814  If it looks like we're in the middle of an atomic operation, make
1815  it seem as if that operation is either complete or hasn't started
1816  yet.
1817
1818  The cases handled include:
1819
1820  a) storing into a newly-allocated lisp frame on the stack.
1821  b) marking a newly-allocated TSP frame as containing "raw" data.
1822  c) consing: the GC has its own ideas about how this should be
1823     handled, but other callers would be best advised to back
1824     up or move forward, according to whether we're in the middle
1825     of allocating a cons cell or allocating a uvector.
1826  d) a STMW to the vsp
1827  e) EGC write-barrier subprims.
1828*/
1829
1830extern opcode
1831  egc_write_barrier_start,
1832  egc_write_barrier_end, 
1833  egc_store_node_conditional, 
1834  egc_set_hash_key,
1835  egc_gvset,
1836  egc_rplaca,
1837  egc_rplacd;
1838
1839
1840extern opcode ffcall_return_window, ffcall_return_window_end;
1841
1842void
1843pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1844{
1845  pc program_counter = xpPC(xp);
1846  opcode instr = *program_counter;
1847  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1848  LispObj cur_allocptr = xpGPR(xp, allocptr);
1849  int allocptr_tag = fulltag_of(cur_allocptr);
1850 
1851
1852
1853  if ((program_counter < &egc_write_barrier_end) && 
1854      (program_counter >= &egc_write_barrier_start)) {
1855    LispObj *ea = 0, val = 0, root = 0;
1856    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1857    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1858
1859    if (program_counter >= &egc_store_node_conditional) {
1860      if ((program_counter == &egc_store_node_conditional) || ! (xpCCR(xp) & 0x20000000)) {
1861        /* The conditional store either hasn't been attempted yet, or
1862           has failed.  No need to adjust the PC, or do memoization. */
1863        return;
1864      }
1865      val = xpGPR(xp,arg_z);
1866      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1867      xpGPR(xp,arg_z) = t_value;
1868      need_store = false;
1869    } else if (program_counter >= &egc_set_hash_key) {
1870      root = xpGPR(xp,arg_x);
1871      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1872      need_memoize_root = true;
1873    } else if (program_counter >= &egc_gvset) {
1874      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1875      val = xpGPR(xp,arg_z);
1876    } else if (program_counter >= &egc_rplacd) {
1877      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1878      val = xpGPR(xp,arg_z);
1879    } else {                      /* egc_rplaca */
1880      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1881      val = xpGPR(xp,arg_z);
1882    }
1883    if (need_store) {
1884      *ea = val;
1885    }
1886    if (need_check_memo) {
1887      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1888      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1889          ((LispObj)ea < val)) {
1890        atomic_set_bit(refbits, bitnumber);
1891        if (need_memoize_root) {
1892          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1893          atomic_set_bit(refbits, bitnumber);
1894        }
1895      }
1896    }
1897    set_xpPC(xp, xpLR(xp));
1898    return;
1899  }
1900
1901
1902  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
1903    LispObj tsp_val = xpGPR(xp,tsp);
1904   
1905    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1906    adjust_exception_pc(xp, 4);
1907    return;
1908  }
1909 
1910  if (frame->backlink == (frame+1)) {
1911    if (
1912#ifdef PPC64
1913        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1914        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1915#else
1916        (major_opcode_p(instr, major_opcode_STW)) && 
1917#endif
1918        (RA_field(instr) == sp) &&
1919        /* There are a few places in the runtime that store into
1920           a previously-allocated frame atop the stack when
1921           throwing values around.  We only care about the case
1922           where the frame was newly allocated, in which case
1923           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1924           a few instructions before the current program counter.
1925           (The whole point here is that a newly allocated frame
1926           might contain random values that we don't want the
1927           GC to see; a previously allocated frame should already
1928           be completely initialized.)
1929        */
1930        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1931         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1932         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1933#ifdef PPC64
1934      int disp = DS_field(instr);
1935#else     
1936      int disp = D_field(instr);
1937#endif
1938
1939
1940      if (disp < (4*node_size)) {
1941#if 0
1942        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1943#endif
1944        frame->savevsp = 0;
1945        if (disp < (3*node_size)) {
1946          frame->savelr = 0;
1947          if (disp == node_size) {
1948            frame->savefn = 0;
1949          }
1950        }
1951      }
1952      return;
1953    }
1954  }
1955
1956  if (allocptr_tag != tag_fixnum) {
1957    signed_natural disp = allocptr_displacement(xp);
1958
1959    if (disp) {
1960      /* Being architecturally "at" the alloc trap doesn't tell
1961         us much (in particular, it doesn't tell us whether
1962         or not the thread has committed to taking the trap
1963         and is waiting for the exception lock (or waiting
1964         for the Mach exception thread to tell it how bad
1965         things are) or is about to execute a conditional
1966         trap.
1967         Regardless of which case applies, we want the
1968         other thread to take (or finish taking) the
1969         trap, and we don't want it to consider its
1970         current allocptr to be valid.
1971         The difference between this case (suspend other
1972         thread for GC) and the previous case (suspend
1973         current thread for interrupt) is solely a
1974         matter of what happens after we leave this
1975         function: some non-current thread will stay
1976         suspended until the GC finishes, then take
1977         (or start processing) the alloc trap.   The
1978         current thread will go off and do PROCESS-INTERRUPT
1979         or something, and may return from the interrupt
1980         and need to finish the allocation that got interrupted.
1981      */
1982
1983      if (alloc_disp) {
1984        *alloc_disp = disp;
1985        xpGPR(xp,allocptr) += disp;
1986        /* Leave the PC at the alloc trap.  When the interrupt
1987           handler returns, it'll decrement allocptr by disp
1988           and the trap may or may not be taken.
1989        */
1990      } else {
1991        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1992        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
1993        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1994      }
1995    } else {
1996#ifdef DEBUG
1997      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
1998#endif
1999      /* If we're already past the alloc_trap, finish allocating
2000         the object. */
2001      if (allocptr_tag == fulltag_cons) {
2002        finish_allocating_cons(xp);
2003#ifdef DEBUG
2004          fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
2005                  tcr);
2006#endif
2007      } else {
2008        if (allocptr_tag == fulltag_misc) {
2009#ifdef DEBUG
2010          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
2011                  tcr);
2012#endif
2013          finish_allocating_uvector(xp);
2014        } else {
2015          Bug(xp, "what's being allocated here ?");
2016        }
2017      }
2018      /* Whatever we finished allocating, reset allocptr/allocbase to
2019         VOID_ALLOCPTR */
2020      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2021    }
2022    return;
2023  }
2024
2025  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2026    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2027    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2028#if 0
2029        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2030#endif
2031
2032    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2033      deref(frame,idx) = 0;
2034    }
2035    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2036    return;
2037  }
2038
2039#ifndef PPC64
2040  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2041      (RA_field(instr) == vsp)) {
2042    int r;
2043    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2044   
2045    for (r = RS_field(instr); r <= 31; r++) {
2046      *vspptr++ = xpGPR(xp,r);
2047    }
2048    adjust_exception_pc(xp, 4);
2049  }
2050#endif
2051}
2052
2053void
2054interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2055{
2056  TCR *tcr = get_interrupt_tcr(false);
2057  if (tcr) {
2058    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2059      tcr->interrupt_pending = 1 << fixnumshift;
2060    } else {
2061      LispObj cmain = nrs_CMAIN.vcell;
2062
2063      if ((fulltag_of(cmain) == fulltag_misc) &&
2064          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2065        /*
2066           This thread can (allegedly) take an interrupt now.
2067           It's tricky to do that if we're executing
2068           foreign code (especially Linuxthreads code, much
2069           of which isn't reentrant.)
2070           If we're unwinding the stack, we also want to defer
2071           the interrupt.
2072        */
2073        if ((tcr->valence != TCR_STATE_LISP) ||
2074            (tcr->unwinding != 0)) {
2075          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2076        } else {
2077          xframe_list xframe_link;
2078          int old_valence;
2079          signed_natural disp=0;
2080         
2081          pc_luser_xp(context, tcr, &disp);
2082          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2083          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2084#ifdef DEBUG
2085          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2086#endif
2087          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2088          if (disp) {
2089            xpGPR(context,allocptr) -= disp;
2090          }
2091          unlock_exception_lock_in_handler(tcr);
2092#ifdef DEBUG
2093          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2094#endif
2095          exit_signal_handler(tcr, old_valence);
2096        }
2097      }
2098    }
2099  }
2100#ifdef DARWIN
2101    DarwinSigReturn(context);
2102#endif
2103}
2104
2105
2106
2107void
2108install_signal_handler(int signo, void *handler)
2109{
2110  struct sigaction sa;
2111 
2112  sa.sa_sigaction = (void *)handler;
2113  sigfillset(&sa.sa_mask);
2114  sa.sa_flags = 
2115    0 /* SA_RESTART */
2116    | SA_SIGINFO
2117#ifdef DARWIN
2118#ifdef PPC64
2119    | SA_64REGSET
2120#endif
2121#endif
2122    ;
2123
2124  sigaction(signo, &sa, NULL);
2125}
2126
2127void
2128install_pmcl_exception_handlers()
2129{
2130#ifdef DARWIN
2131  extern Boolean use_mach_exception_handling;
2132#endif
2133
2134  Boolean install_signal_handlers_for_exceptions =
2135#ifdef DARWIN
2136    !use_mach_exception_handling
2137#else
2138    true
2139#endif
2140    ;
2141  if (install_signal_handlers_for_exceptions) {
2142    extern int no_sigtrap;
2143    install_signal_handler(SIGILL, (void *)signal_handler);
2144    if (no_sigtrap != 1) {
2145      install_signal_handler(SIGTRAP, (void *)signal_handler);
2146    }
2147    install_signal_handler(SIGBUS,  (void *)signal_handler);
2148    install_signal_handler(SIGSEGV, (void *)signal_handler);
2149    install_signal_handler(SIGFPE, (void *)signal_handler);
2150  }
2151 
2152  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2153                         (void *)interrupt_handler);
2154  signal(SIGPIPE, SIG_IGN);
2155}
2156
2157void
2158quit_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2159{
2160  TCR *tcr = get_tcr(false);
2161  area *a;
2162  sigset_t mask;
2163 
2164  sigemptyset(&mask);
2165
2166  if (tcr) {
2167    tcr->valence = TCR_STATE_FOREIGN;
2168    a = tcr->vs_area;
2169    if (a) {
2170      a->active = a->high;
2171    }
2172    a = tcr->ts_area;
2173    if (a) {
2174      a->active = a->high;
2175    }
2176    a = tcr->cs_area;
2177    if (a) {
2178      a->active = a->high;
2179    }
2180  }
2181 
2182  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2183  pthread_exit(NULL);
2184}
2185
2186void
2187thread_signal_setup()
2188{
2189  thread_suspend_signal = SIG_SUSPEND_THREAD;
2190
2191  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2192  install_signal_handler(SIGQUIT, (void *)quit_handler);
2193}
2194
2195
2196
2197void
2198unprotect_all_areas()
2199{
2200  protected_area_ptr p;
2201
2202  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2203    unprotect_area(p);
2204  }
2205}
2206
2207/*
2208  A binding subprim has just done "twlle limit_regno,idx_regno" and
2209  the trap's been taken.  Extend the tcr's tlb so that the index will
2210  be in bounds and the new limit will be on a page boundary, filling
2211  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2212  the tcr fields and the registers in the xp and return true if this
2213  all works, false otherwise.
2214
2215  Note that the tlb was allocated via malloc, so realloc can do some
2216  of the hard work.
2217*/
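/* For example (illustrative numbers): if the index register holds
   0x1008 and tlb_limit was 0x1000, align_to_power_of_2(0x1009,12)
   yields a new_limit of 0x2000; the 0x1000 new bytes are realloc'd
   and filled with no_thread_local_binding_marker below. */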
2218Boolean
2219extend_tcr_tlb(TCR *tcr, 
2220               ExceptionInformation *xp, 
2221               unsigned limit_regno,
2222               unsigned idx_regno)
2223{
2224  unsigned
2225    index = (unsigned) (xpGPR(xp,idx_regno)),
2226    old_limit = tcr->tlb_limit,
2227    new_limit = align_to_power_of_2(index+1,12),
2228    new_bytes = new_limit-old_limit;
2229  LispObj
2230    *old_tlb = tcr->tlb_pointer,
2231    *new_tlb = realloc(old_tlb, new_limit),
2232    *work;
2233
2234  if (new_tlb == NULL) {
2235    return false;
2236  }
2237 
2238  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2239
2240  while (new_bytes) {
2241    *work++ = no_thread_local_binding_marker;
2242    new_bytes -= sizeof(LispObj);
2243  }
2244  tcr->tlb_pointer = new_tlb;
2245  tcr->tlb_limit = new_limit;
2246  xpGPR(xp, limit_regno) = new_limit;
2247  return true;
2248}
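/* A minimal sketch (not compiled) of a caller: "limit_reg" and
   "idx_reg" are hypothetical stand-ins for whatever registers the
   binding subprim's twlle actually named. */
#if 0
if (!extend_tcr_tlb(tcr, xp, limit_reg, idx_reg)) {
  Bug(xp, "Can't extend thread-local binding vector");
}
#endif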
2249
2250
2251
2252void
2253exception_init()
2254{
2255  install_pmcl_exception_handlers();
2256}
2257
2258
2259
2260
2261
2262#ifdef DARWIN
2263
2264
2265#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2266#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2267
2268
2269#if USE_MACH_EXCEPTION_LOCK
2270pthread_mutex_t _mach_exception_lock, *mach_exception_lock;
2271#endif
2272
2273#define LISP_EXCEPTIONS_HANDLED_MASK \
2274 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2275
2276/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2277#define NUM_LISP_EXCEPTIONS_HANDLED 4
2278
2279typedef struct {
2280  int foreign_exception_port_count;
2281  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2282  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2283  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2284  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2285} MACH_foreign_exception_state;
2286
2287
2288
2289
2290/*
2291  Mach's exception mechanism works a little better than its signal
2292  mechanism (and, not incidentally, it gets along with GDB a lot
2293  better.)
2294
2295  Initially, we install an exception handler to handle each native
2296  thread's exceptions.  This process involves creating a distinguished
2297  thread which listens for kernel exception messages on a set of
2298  0 or more thread exception ports.  As threads are created, they're
2299  added to that port set; a thread's exception port is destroyed
2300  (and therefore removed from the port set) when the thread exits.
2301
2302  A few exceptions can be handled directly in the handler thread;
2303  others require that we resume the user thread (and that the
2304  exception thread resumes listening for exceptions.)  The user
2305  thread might eventually want to return to the original context
2306  (possibly modified somewhat.)
2307
2308  As it turns out, the simplest way to force the faulting user
2309  thread to handle its own exceptions is to do pretty much what
2310   signal() does: the exception handling thread sets up a sigcontext
2311  on the user thread's stack and forces the user thread to resume
2312  execution as if a signal handler had been called with that
2313  context as an argument.  We can use a distinguished UUO at a
2314  distinguished address to do something like sigreturn(); that'll
2315  have the effect of resuming the user thread's execution in
2316  the (pseudo-) signal context.
2317
2318  Since:
2319    a) we have miles of code in C and in Lisp that knows how to
2320    deal with Linux sigcontexts
2321    b) Linux sigcontexts contain a little more useful information
2322    (the DAR, DSISR, etc.) than their Darwin counterparts
2323    c) we have to create a sigcontext ourselves when calling out
2324    to the user thread: we aren't really generating a signal, just
2325    leveraging existing signal-handling code.
2326
2327  we create a Linux sigcontext struct.
2328
2329  Simple?  Hopefully from the outside it is ...
2330
2331  We want the process of passing a thread's own context to it to
2332  appear to be atomic: in particular, we don't want the GC to suspend
2333  a thread that's had an exception but has not yet had its user-level
2334  exception handler called, and we don't want the thread's exception
2335  context to be modified by a GC while the Mach handler thread is
2336  copying it around.  On Linux (and on Jaguar), we avoid this issue
2337  because (a) the kernel sets up the user-level signal handler and
2338  (b) the signal handler blocks signals (including the signal used
2339  by the GC to suspend threads) until tcr->xframe is set up.
2340
2341  The GC and the Mach server thread therefore contend for the lock
2342  "mach_exception_lock".  The Mach server thread holds the lock
2343  when copying exception information between the kernel and the
2344  user thread; the GC holds this lock during most of its execution
2345  (delaying exception processing until it can be done without
2346  GC interference.)
2347
2348*/
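/* The round trip, schematically: the user thread faults; the kernel
   sends a message to the thread's exception port; exception_handler_proc
   (via openmcl_exc_server) calls catch_exception_raise;
   setup_signal_frame points the user thread at pseudo_signal_handler;
   that handler "returns" through the pseudo_sigreturn UUO, which raises
   a new exception whose handling (do_pseudo_sigreturn) restores the
   saved context.  This is just a summary of the code below. */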
2349
2350#ifdef PPC64
2351#define C_REDZONE_LEN           320
2352#define C_STK_ALIGN             32
2353#else
2354#define C_REDZONE_LEN           224
2355#define C_STK_ALIGN             16
2356#endif
2357#define C_PARAMSAVE_LEN         64
2358#define C_LINKAGE_LEN           48
2359
2360#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
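/* TRUNC_DOWN(a,b,c) moves a down by at least b bytes and rounds the
   result down to a multiple of c.  E.g. TRUNC_DOWN(0x1000, 224, 16)
   = ((0x1000-224)/16)*16 = 0xf20. */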
2361
2362void
2363fatal_mach_error(char *format, ...);
2364
2365#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2366
2367
2368void
2369restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2370{
2371  kern_return_t kret;
2372  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2373
2374  /* Set the thread's FP state from the pseudosigcontext */
2375  kret = thread_set_state(thread,
2376                          PPC_FLOAT_STATE,
2377                          (thread_state_t)&(mc->fs),
2378                          PPC_FLOAT_STATE_COUNT);
2379
2380  MACH_CHECK_ERROR("setting thread FP state", kret);
2381
2382  /* The thread'll be as good as new ... */
2383#ifdef PPC64
2384  kret = thread_set_state(thread,
2385                          PPC_THREAD_STATE64,
2386                          (thread_state_t)&(mc->ss),
2387                          PPC_THREAD_STATE64_COUNT);
2388#else
2389  kret = thread_set_state(thread, 
2390                          MACHINE_THREAD_STATE,
2391                          (thread_state_t)&(mc->ss),
2392                          MACHINE_THREAD_STATE_COUNT);
2393#endif
2394  MACH_CHECK_ERROR("setting thread state", kret);
2395} 
2396
2397/* This code runs in the exception handling thread, in response
2398   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2399   in response to a call to pseudo_sigreturn() from the specified
2400   user thread.)
2401   Find that context (the user thread's R3 points to it), then
2402   use that context to set the user thread's state.  When this
2403   function's caller returns, the Mach kernel will resume the
2404   user thread.
2405*/
2406
2407kern_return_t
2408do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2409{
2410  ExceptionInformation *xp;
2411
2412#ifdef DEBUG_MACH_EXCEPTIONS
2413  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2414#endif
2415  xp = tcr->pending_exception_context;
2416  if (xp) {
2417    tcr->pending_exception_context = NULL;
2418    tcr->valence = TCR_STATE_LISP;
2419    restore_mach_thread_state(thread, xp);
2420    raise_pending_interrupt(tcr);
2421  } else {
2422    Bug(NULL, "no xp here!\n");
2423  }
2424#ifdef DEBUG_MACH_EXCEPTIONS
2425  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2426#endif
2427  return KERN_SUCCESS;
2428} 
2429
2430ExceptionInformation *
2431create_thread_context_frame(mach_port_t thread, 
2432                            natural *new_stack_top)
2433{
2434#ifdef PPC64
2435  ppc_thread_state64_t ts;
2436#else
2437  ppc_thread_state_t ts;
2438#endif
2439  mach_msg_type_number_t thread_state_count;
2440  kern_return_t result;
2441  ExceptionInformation *pseudosigcontext;
2442  MCONTEXT_T mc;
2443  natural stackp, backlink;
2444
2445#ifdef PPC64
2446  thread_state_count = PPC_THREAD_STATE64_COUNT;
2447  result = thread_get_state(thread,
2448                            PPC_THREAD_STATE64,
2449                            (thread_state_t)&ts,
2450                            &thread_state_count);
2451#else
2452  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2453  result = thread_get_state(thread, 
2454                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2455                            (thread_state_t)&ts,
2456                            &thread_state_count);
2457#endif
2458 
2459  if (result != KERN_SUCCESS) {
2460    get_tcr(true);
2461    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2462  }
2463  stackp = ts.r1;
2464  backlink = stackp;
2465  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2466  stackp -= sizeof(*pseudosigcontext);
2467  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2468
2469  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2470  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2471  memmove(&(mc->ss),&ts,sizeof(ts));
2472
2473  thread_state_count = PPC_FLOAT_STATE_COUNT;
2474  thread_get_state(thread,
2475                   PPC_FLOAT_STATE,
2476                   (thread_state_t)&(mc->fs),
2477                   &thread_state_count);
2478
2479
2480#ifdef PPC64
2481  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2482#else
2483  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2484#endif
2485  thread_get_state(thread,
2486#ifdef PPC64
2487                   PPC_EXCEPTION_STATE64,
2488#else
2489                   PPC_EXCEPTION_STATE,
2490#endif
2491                   (thread_state_t)&(mc->es),
2492                   &thread_state_count);
2493
2494
2495  UC_MCONTEXT(pseudosigcontext) = mc;
2496  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2497  stackp -= C_LINKAGE_LEN;
2498  *(natural *)ptr_from_lispobj(stackp) = backlink;
2499  if (new_stack_top) {
2500    *new_stack_top = stackp;
2501  }
2502  return pseudosigcontext;
2503}
2504
2505/*
2506  This code sets up the user thread so that it executes a "pseudo-signal
2507  handler" function when it resumes.  Create a linux sigcontext struct
2508  on the thread's stack and pass it as an argument to the pseudo-signal
2509  handler.
2510
2511  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2512  which will restore the thread's context.
2513
2514  If the handler invokes code that throws (or otherwise never sigreturn()'s
2515  to the context), that's fine.
2516
2517  Actually, check that: throw (and variants) may need to be careful and
2518  pop the tcr's xframe list until it's younger than any frame being
2519  entered.
2520*/
2521
2522int
2523setup_signal_frame(mach_port_t thread,
2524                   void *handler_address,
2525                   int signum,
2526                   int code,
2527                   TCR *tcr)
2528{
2529#ifdef PPC64
2530  ppc_thread_state64_t ts;
2531#else
2532  ppc_thread_state_t ts;
2533#endif
2534  ExceptionInformation *pseudosigcontext;
2535  int old_valence = tcr->valence;
2536  natural stackp;
2537
2538#ifdef DEBUG_MACH_EXCEPTIONS
2539  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2540#endif
2541  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2542  pseudosigcontext->uc_onstack = 0;
2543  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2544  tcr->pending_exception_context = pseudosigcontext;
2545  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2546 
2547
2548  /*
2549     It seems like we've created a  sigcontext on the thread's
2550     stack.  Set things up so that we call the handler (with appropriate
2551     args) when the thread's resumed.
2552  */
2553
2554  ts.srr0 = (natural) handler_address;
2555  ts.srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2556  ts.r1 = stackp;
2557  ts.r3 = signum;
2558  ts.r4 = (natural)pseudosigcontext;
2559  ts.r5 = (natural)tcr;
2560  ts.r6 = (natural)old_valence;
2561  ts.lr = (natural)pseudo_sigreturn;
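  /* Returning normally from the handler therefore "calls"
     pseudo_sigreturn; its distinguished UUO re-enters the exception
     thread (see catch_exception_raise), which restores the saved
     context via do_pseudo_sigreturn. */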
2562
2563
2564#ifdef PPC64
2565  ts.r13 = xpGPR(pseudosigcontext,13);
2566  thread_set_state(thread,
2567                   PPC_THREAD_STATE64,
2568                   (thread_state_t)&ts,
2569                   PPC_THREAD_STATE64_COUNT);
2570#else
2571  thread_set_state(thread, 
2572                   MACHINE_THREAD_STATE,
2573                   (thread_state_t)&ts,
2574                   MACHINE_THREAD_STATE_COUNT);
2575#endif
2576#ifdef DEBUG_MACH_EXCEPTIONS
2577  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2578#endif
2579  return 0;
2580}
2581
2582
2583void
2584pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2585{
2586  signal_handler(signum, NULL, context, tcr, old_valence);
2587} 
2588
2589
2590int
2591thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2592{
2593#ifdef PPC64
2594  ppc_thread_state64_t ts;
2595#else
2596  ppc_thread_state_t ts;
2597#endif
2598  mach_msg_type_number_t thread_state_count;
2599
2600#ifdef PPC64
2601  thread_state_count = PPC_THREAD_STATE64_COUNT;
2602#else
2603  thread_state_count = PPC_THREAD_STATE_COUNT;
2604#endif
2605  thread_get_state(thread, 
2606#ifdef PPC64
2607                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2608#else
2609                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2610#endif
2611                   (thread_state_t)&ts,
2612                   &thread_state_count);
2613  if (enabled) {
2614    ts.srr1 |= MSR_FE0_FE1_MASK;
2615  } else {
2616    ts.srr1 &= ~MSR_FE0_FE1_MASK;
2617  }
2618  /*
2619     Hack-o-rama warning (isn't it about time for such a warning?):
2620     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2621     Our handler for lisp's use of pthread_kill() pushes a phony
2622     lisp frame on the stack and forces the context to resume at
2623     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2624     lisp frame contains the -real- address that process_interrupt
2625     should have returned to, and the fact that it's in a lisp
2626     frame should convince the GC to notice that address if it
2627     runs in the tiny time window between returning from our
2628     interrupt handler and ... here.
2629     If the top frame on the stack is a lisp frame, discard it
2630     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2631     just adjust ts.srr0 to skip over the UUO.
2632  */
2633  {
2634    lisp_frame *tos = (lisp_frame *)ts.r1,
2635      *next_frame = tos->backlink;
2636   
2637    if (tos == (next_frame -1)) {
2638      ts.srr0 = tos->savelr;
2639      ts.r1 = (LispObj) next_frame;
2640    } else {
2641      ts.srr0 += 4;
2642    }
2643  }
2644  thread_set_state(thread, 
2645#ifdef PPC64
2646                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2647#else
2648                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2649#endif
2650                   (thread_state_t)&ts,
2651#ifdef PPC64
2652                   PPC_THREAD_STATE64_COUNT
2653#else
2654                   PPC_THREAD_STATE_COUNT
2655#endif
2656                   );
2657
2658  return 0;
2659}
2660
2661/*
2662  This function runs in the exception handling thread.  It's
2663  called (by this precise name) from the library function "exc_server()"
2664  when the thread's exception ports are set up.  (exc_server() is called
2665  via mach_msg_server(), which is a function that waits for and dispatches
2666  on exception messages from the Mach kernel.)
2667
2668  This checks to see if the exception was caused by a pseudo_sigreturn()
2669  UUO; if so, it arranges for the thread to have its state restored
2670  from the specified context.
2671
2672  Otherwise, it tries to map the exception to a signal number and
2673  arranges that the thread run a "pseudo signal handler" to handle
2674  the exception.
2675
2676  Some exceptions could and should be handled here directly.
2677*/
2678
2679kern_return_t
2680catch_exception_raise(mach_port_t exception_port,
2681                      mach_port_t thread,
2682                      mach_port_t task, 
2683                      exception_type_t exception,
2684                      exception_data_t code_vector,
2685                      mach_msg_type_number_t code_count)
2686{
2687  int signum = 0, code = *code_vector, code1;
2688  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2689  kern_return_t kret;
2690
2691#ifdef DEBUG_MACH_EXCEPTIONS
2692  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2693#endif
2694
2695  if (
2696#if USE_MACH_EXCEPTION_LOCK
2697    pthread_mutex_trylock(mach_exception_lock) == 0
2698#else
2699    1
2700#endif
2701    ) {
2702    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2703      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2704    } 
2705    if ((exception == EXC_BAD_INSTRUCTION) &&
2706        (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2707        (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2708         (code1 == (int)enable_fp_exceptions) ||
2709         (code1 == (int)disable_fp_exceptions))) {
2710      if (code1 == (int)pseudo_sigreturn) {
2711        kret = do_pseudo_sigreturn(thread, tcr);
2712#if 0
2713      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2714#endif
2715       
2716      } else if (code1 == (int)enable_fp_exceptions) {
2717        kret = thread_set_fp_exceptions_enabled(thread, true);
2718      } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2719    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2720      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
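      /* Any non-KERN_SUCCESS value will do here: openmcl_exc_server
         passes it back as the MIG RetCode, telling the kernel the
         exception wasn't handled, so it's propagated (e.g. to a
         debugger or the host's default handler). */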
2721      kret = 17;
2722    } else {
2723      switch (exception) {
2724      case EXC_BAD_ACCESS:
2725        signum = SIGSEGV;
2726        break;
2727       
2728      case EXC_BAD_INSTRUCTION:
2729        signum = SIGILL;
2730        break;
2731     
2732      case EXC_SOFTWARE:
2733        if (code == EXC_PPC_TRAP) {
2734          signum = SIGTRAP;
2735        }
2736        break;
2737     
2738      case EXC_ARITHMETIC:
2739        signum = SIGFPE;
2740        break;
2741
2742      default:
2743        break;
2744      }
2745      if (signum) {
2746        kret = setup_signal_frame(thread,
2747                                  (void *)pseudo_signal_handler,
2748                                  signum,
2749                                  code,
2750                                  tcr);
2751#if 0
2752      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2753#endif
2754
2755      } else {
2756        kret = 17;
2757      }
2758    }
2759#if USE_MACH_EXCEPTION_LOCK
2760#ifdef DEBUG_MACH_EXCEPTIONS
2761    fprintf(stderr, "releasing Mach exception lock in exception thread\n");
2762#endif
2763    pthread_mutex_unlock(mach_exception_lock);
2764#endif
2765  } else {
2766    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2767#if 0
2768    fprintf(stderr, "deferring pending exception in 0x%x\n", tcr);
2769#endif
2770    kret = 0;
2771    if (tcr == gc_tcr) {
2772      int i;
2773      write(1, "exception in GC thread. Sleeping for 60 seconds\n", sizeof("exception in GC thread. Sleeping for 60 seconds\n")-1);
2774      for (i = 0; i < 60; i++) {
2775        sleep(1);
2776      }
2777      _exit(EX_SOFTWARE);
2778    }
2779  }
2780  return kret;
2781}
2782
2783
2784
2785typedef struct {
2786  mach_msg_header_t Head;
2787  /* start of the kernel processed data */
2788  mach_msg_body_t msgh_body;
2789  mach_msg_port_descriptor_t thread;
2790  mach_msg_port_descriptor_t task;
2791  /* end of the kernel processed data */
2792  NDR_record_t NDR;
2793  exception_type_t exception;
2794  mach_msg_type_number_t codeCnt;
2795  integer_t code[2];
2796  mach_msg_trailer_t trailer;
2797} exceptionRequest;
2798
2799
2800boolean_t
2801openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2802{
2803  static NDR_record_t _NDR = {0};
2804  kern_return_t handled;
2805  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2806  exceptionRequest *req = (exceptionRequest *) in;
2807
2808  reply->NDR = _NDR;
2809
2810  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2811  out->msgh_remote_port = in->msgh_remote_port;
2812  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2813  out->msgh_local_port = MACH_PORT_NULL;
2814  out->msgh_id = in->msgh_id+100;
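  /* MIG convention: a reply's msgh_id is the request's id + 100.
     Request id 2401 is the plain exception_raise message; 2402 and
     2403 are the state-carrying variants mentioned below. */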
2815
2816  /* Could handle other exception flavors in the range 2401-2403 */
2817
2818
2819  if (in->msgh_id != 2401) {
2820    reply->RetCode = MIG_BAD_ID;
2821    return FALSE;
2822  }
2823  handled = catch_exception_raise(req->Head.msgh_local_port,
2824                                  req->thread.name,
2825                                  req->task.name,
2826                                  req->exception,
2827                                  req->code,
2828                                  req->codeCnt);
2829  reply->RetCode = handled;
2830  return TRUE;
2831}
2832
2833/*
2834  The initial function for an exception-handling thread.
2835*/
2836
2837void *
2838exception_handler_proc(void *arg)
2839{
2840  extern boolean_t exc_server();
2841  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2842
2843  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2844  /* Should never return. */
2845  abort();
2846}
2847
2848
2849
2850mach_port_t
2851mach_exception_port_set()
2852{
2853  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2854  kern_return_t kret; 
2855  if (__exception_port_set == MACH_PORT_NULL) {
2856#if USE_MACH_EXCEPTION_LOCK
2857    mach_exception_lock = &_mach_exception_lock;
2858    pthread_mutex_init(mach_exception_lock, NULL);
2859#endif
2860    kret = mach_port_allocate(mach_task_self(),
2861                              MACH_PORT_RIGHT_PORT_SET,
2862                              &__exception_port_set);
2863    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2864    create_system_thread(0,
2865                         NULL,
2866                         exception_handler_proc, 
2867                         (void *)((natural)__exception_port_set));
2868  }
2869  return __exception_port_set;
2870}
2871
2872/*
2873  Set up a new thread to handle those exceptions specified by
2874  the mask "which".  This involves creating a special Mach
2875  message port, telling the Mach kernel to send exception
2876  messages for the calling thread to that port, and setting
2877  up a handler thread which listens for and responds to
2878  those messages.
2879
2880*/
2881
2882/*
2883  Establish the lisp thread's TCR as its exception port, and determine
2884  whether any other ports have been established by foreign code for
2885  exceptions that lisp cares about.
2886
2887  If this happens at all, it should happen on return from foreign
2888  code and on entry to lisp code via a callback.
2889
2890  This is a lot of trouble (and overhead) to support Java, or other
2891  embeddable systems that clobber their caller's thread exception ports.
2892 
2893*/
2894kern_return_t
2895tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2896{
2897  kern_return_t kret;
2898  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2899  int i;
2900  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2901  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2902  exception_mask_t mask = 0;
2903
2904  kret = thread_swap_exception_ports(thread,
2905                                     LISP_EXCEPTIONS_HANDLED_MASK,
2906                                     lisp_port,
2907                                     EXCEPTION_DEFAULT,
2908                                     THREAD_STATE_NONE,
2909                                     fxs->masks,
2910                                     &n,
2911                                     fxs->ports,
2912                                     fxs->behaviors,
2913                                     fxs->flavors);
2914  if (kret == KERN_SUCCESS) {
2915    fxs->foreign_exception_port_count = n;
2916    for (i = 0; i < n; i ++) {
2917      foreign_port = fxs->ports[i];
2918
2919      if ((foreign_port != lisp_port) &&
2920          (foreign_port != MACH_PORT_NULL)) {
2921        mask |= fxs->masks[i];
2922      }
2923    }
2924    tcr->foreign_exception_status = (int) mask;
2925  }
2926  return kret;
2927}
2928
2929kern_return_t
2930tcr_establish_lisp_exception_port(TCR *tcr)
2931{
2932  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2933}
2934
2935/*
2936  Do this when calling out to or returning from foreign code, if
2937  any conflicting foreign exception ports were established when we
2938  last entered lisp code.
2939*/
2940kern_return_t
2941restore_foreign_exception_ports(TCR *tcr)
2942{
2943  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2944 
2945  if (m) {
2946    MACH_foreign_exception_state *fxs  = 
2947      (MACH_foreign_exception_state *) tcr->native_thread_info;
2948    int i, n = fxs->foreign_exception_port_count;
2949    exception_mask_t tm;
2950
2951    for (i = 0; i < n; i++) {
2952      if ((tm = fxs->masks[i]) & m) {
2953        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2954                                   tm,
2955                                   fxs->ports[i],
2956                                   fxs->behaviors[i],
2957                                   fxs->flavors[i]);
2958      }
2959    }
2960  }
2961  return KERN_SUCCESS;
2962}
2963
2964/*
2965  This assumes that a Mach port (to be used as the thread's exception port) whose
2966  "name" matches the TCR's 32-bit address has already been allocated.
2967*/
2968
2969kern_return_t
2970setup_mach_exception_handling(TCR *tcr)
2971{
2972  mach_port_t
2973    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2974    task_self = mach_task_self();
2975  kern_return_t kret;
2976
2977  kret = mach_port_insert_right(task_self,
2978                                thread_exception_port,
2979                                thread_exception_port,
2980                                MACH_MSG_TYPE_MAKE_SEND);
2981  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2982
2983  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2984  if (kret == KERN_SUCCESS) {
2985    mach_port_t exception_port_set = mach_exception_port_set();
2986
2987    kret = mach_port_move_member(task_self,
2988                                 thread_exception_port,
2989                                 exception_port_set);
2990  }
2991  return kret;
2992}
2993
2994void
2995darwin_exception_init(TCR *tcr)
2996{
2997  void tcr_monitor_exception_handling(TCR*, Boolean);
2998  kern_return_t kret;
2999  MACH_foreign_exception_state *fxs = 
3000    calloc(1, sizeof(MACH_foreign_exception_state));
3001 
3002  tcr->native_thread_info = (void *) fxs;
3003
3004  if ((kret = setup_mach_exception_handling(tcr))
3005      != KERN_SUCCESS) {
3006    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
3007    terminate_lisp();
3008  }
3009  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
3010  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
3011}
3012
3013/*
3014  The tcr is the "name" of the corresponding thread's exception port.
3015  Destroying the port should remove it from all port sets of which it's
3016  a member (notably, the exception port set.)
3017*/
3018void
3019darwin_exception_cleanup(TCR *tcr)
3020{
3021  void *fxs = tcr->native_thread_info;
3022  extern Boolean use_mach_exception_handling;
3023
3024  if (fxs) {
3025    tcr->native_thread_info = NULL;
3026    free(fxs);
3027  }
3028  if (use_mach_exception_handling) {
3029    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3030    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3031  }
3032}
3033
3034
3035Boolean
3036suspend_mach_thread(mach_port_t mach_thread)
3037{
3038  kern_return_t status;
3039  Boolean aborted = false;
3040 
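  /* Suspend the thread, then abort any kernel operation it may be
     blocked in so that its user-mode register state is well defined.
     If the abort can't be done safely, resume the thread and retry
     the whole suspend/abort sequence. */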
3041  do {
3042    aborted = false;
3043    status = thread_suspend(mach_thread);
3044    if (status == KERN_SUCCESS) {
3045      status = thread_abort_safely(mach_thread);
3046      if (status == KERN_SUCCESS) {
3047        aborted = true;
3048      } else {
3049        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3050        thread_resume(mach_thread);
3051      }
3052    } else {
3053      return false;
3054    }
3055  } while (! aborted);
3056  return true;
3057}
3058
3059/*
3060  Only do this if pthread_kill indicated that the pthread isn't
3061  listening to signals anymore, as can happen as soon as pthread_exit()
3062  is called on Darwin.  The thread could still call out to lisp as it
3063  is exiting, so we need another way to suspend it in this case.
3064*/
3065Boolean
3066mach_suspend_tcr(TCR *tcr)
3067{
3068  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3069  ExceptionInformation *pseudosigcontext;
3070  Boolean result = false;
3071 
3072  result = suspend_mach_thread(mach_thread);
3073  if (result) {
3074    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3075    pseudosigcontext->uc_onstack = 0;
3076    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3077    tcr->suspend_context = pseudosigcontext;
3078  }
3079  return result;
3080}
3081
3082void
3083mach_resume_tcr(TCR *tcr)
3084{
3085  ExceptionInformation *xp;
3086  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3087 
3088  xp = tcr->suspend_context;
3089#ifdef DEBUG_MACH_EXCEPTIONS
3090  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3091          tcr, tcr->pending_exception_context);
3092#endif
3093  tcr->suspend_context = NULL;
3094  restore_mach_thread_state(mach_thread, xp);
3095#ifdef DEBUG_MACH_EXCEPTIONS
3096  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3097          tcr, tcr->pending_exception_context);
3098#endif
3099  thread_resume(mach_thread);
3100}
3101
3102void
3103fatal_mach_error(char *format, ...)
3104{
3105  va_list args;
3106  char s[512];
3107 
3108
3109  va_start(args, format);
3110  vsnprintf(s, sizeof(s),format, args);
3111  va_end(args);
3112
3113  Fatal("Mach error", s);
3114}
3115
3116void
3117pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3118{
3119  interrupt_handler(signum, NULL, context);
3120}
3121
3122int
3123mach_raise_thread_interrupt(TCR *target)
3124{
3125  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3126  kern_return_t kret;
3127  Boolean result = false;
3128  TCR *current = get_tcr(false);
3129  thread_basic_info_data_t info; 
3130  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3131
3132  LOCK(lisp_global(TCR_AREA_LOCK), current);
3133#if USE_MACH_EXCEPTION_LOCK
3134  pthread_mutex_lock(mach_exception_lock);
3135#endif
3136
3137  if (suspend_mach_thread(mach_thread)) {
3138    if (thread_info(mach_thread,
3139                    THREAD_BASIC_INFO,
3140                    (thread_info_t)&info,
3141                    &info_count) == KERN_SUCCESS) {
3142      if (info.suspend_count == 1) {
3143        if ((target->valence == TCR_STATE_LISP) &&
3144            (!target->unwinding) &&
3145            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3146          kret = setup_signal_frame(mach_thread,
3147                                    (void *)pseudo_interrupt_handler,
3148                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3149                                    0,
3150                                    target);
3151          if (kret == KERN_SUCCESS) {
3152            result = true;
3153          }
3154        }
3155      }
3156    }
3157    if (! result) {
3158      target->interrupt_pending = 1 << fixnumshift;
3159    }
3160    thread_resume(mach_thread);
3161   
3162  }
3163#if USE_MACH_EXCEPTION_LOCK
3164  pthread_mutex_unlock(mach_exception_lock);
3165#endif
3166  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3167  return 0;
3168}
3169
3170#endif