source: trunk/ccl/lisp-kernel/ppc-exceptions.c @ 7779

Last change on this file since 7779 was 7779, checked in by gb, 13 years ago

lisp_Debugger() takes an extra Boolean "in foreign context" arg, rather
than trying to set a bit in the exception code (which might be negative.)

Add a (T)hread info command to kernel debugger.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.5 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "Threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif
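
/* A minimal sketch (not part of the kernel; the function name is
   hypothetical): a caller that wanted to know whether the prctl is
   actually honored could check its return value.  The wrappers above
   deliberately ignore failure, per the comment above. */
#if 0
#include <sys/prctl.h>

static int
fp_exceptions_controllable(void)
{
  /* A kernel that doesn't know PR_SET_FPEXC fails (typically EINVAL). */
  return prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) == 0;
}
#endif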

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
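
/* For reference, the two instruction shapes recognized above look like
   (mnemonics illustrative):

     subf allocptr,rX,allocptr    ; rX holds a large byte count
   or
     addi allocptr,allocptr,-N    ; N a small constant

   each followed by the allocation trap, so the displacement is either
   the current value of RA or the negated, sign-extended 16-bit
   immediate. */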


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
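
/* For reference, the window being emulated above typically looks like
   this (illustrative; register choices and the exact untagging
   instruction vary):

     <allocation trap>
     stw rY,cons.car(allocptr)     ; STORE_CAR_ALLOCPTR_INSTRUCTION
     stw rZ,cons.cdr(allocptr)     ; STORE_CDR_ALLOCPTR_INSTRUCTION
     mr  rR,allocptr               ; result <- allocptr (still tagged)
     clrrwi allocptr,allocptr,...  ; UNTAG_ALLOCPTR_INSTRUCTION

   The loop performs each store into the cons cell directly, assigns
   the tagged pointer to the result register, then untags allocptr and
   resumes execution past the untagging instruction. */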

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

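/* Allocation on this port proceeds by decrementing allocptr (see
   allocptr_displacement above), so the number of bytes consumed since
   the last note was taken is (previous allocptr - current allocptr),
   as accumulated below. */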
void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    /* Couldn't allocate the object.  If it's smaller than some arbitrary
       size (say 128K bytes), signal a "chronically out-of-memory" condition;
       else signal an "allocation request failed" condition.
    */
    xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      if (selector == GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE) {
        LispObj aligned_arg = align_to_power_of_2(arg, log2_nbits_in_word);
        signed_natural
          delta_dnodes = ((signed_natural) aligned_arg) -
          ((signed_natural) tenured_area->static_dnodes);
        change_hons_area_size_from_xp(xp, delta_dnodes*dnode_size);
        xpGPR(xp, imm0) = tenured_area->static_dnodes;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}
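
/* Worked example of the SET_LISP_HEAP_THRESHOLD rounding above,
   assuming (hypothetically) heap_segment_size = 64KB, i.e.
   log2_heap_segment_size = 16: arg = 100000 gives
   align_to_power_of_2(99999 + 65535, 16) = 196608, i.e. the threshold
   is rounded up to three whole segments. */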


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack associated with "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition; the limit stays relaxed
     (see restore_soft_stack_limit) until all stacks are well under
     their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
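
/* Worked example of the limit arithmetic above, assuming
   (hypothetically) a 64KB allocation quantum (log2_allocation_quantum
   = 16): with a->active = 0x123400 and need = 0x30000,

     newlimit = align_to_power_of_2(0x123400,16) +
                align_to_power_of_2(0x30000,16)
              = 0x130000 + 0x30000 = 0x160000.

   Allocation then proceeds by decrementing allocptr (0x160000) toward
   allocbase (the old value of a->active). */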

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  }
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

int
gc_like_from_xp(ExceptionInformation *xp,
                int(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  ExceptionInformation* other_xp;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

int
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(stderr, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(stderr, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}
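
/* The return value above counts both space reclaimed within the area
   (oldfree - newfree) and any change in the area's high end
   (newend - oldend).  E.g. reclaiming 1MB while the heap is shrunk by
   256KB reports 1MB - 256KB = 768KB freed. */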

int
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  int status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

int
change_hons_area_size_from_xp(ExceptionInformation *xp, signed_natural delta_in_bytes)
{
  return gc_like_from_xp(xp, change_hons_area_size, delta_in_bytes);
}


protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   };

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
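
/* For reference: the (1 << 25) test above corresponds to DSISR bit 6
   in IBM's MSB-is-bit-0 numbering, which is set when the faulting
   access was a store; trap type 0x300 is a DSI (data storage
   interrupt), and (as the expression above shows) Darwin's xpTRAP
   reports the trap code scaled down by 0x100. */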

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
    spPtr = spPtr->backlink;
  }
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  }
  return false;
}
#endif

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
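
/* Concretely (the Lisp-level example is illustrative): a store like
   (setf (svref old-vector i) fresh-cons) faults on the protected
   page; the stw's RS holds a fulltag_cons pointer above
   tenured_area->high, i.e. into ephemeral space, so the store is
   performed here and true is returned to tell the caller the write
   needn't be re-executed. */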


OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

int
altivec_present = 1;

/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  unsigned oldMQ;
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  /* xpPC is an (opcode *), so scale the byte delta to instruction words */
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
  if (i < 0) {
    /* Scanned TRAP_LOOKUP_TRIES instructions without finding an FPU
       opcode; give up rather than report a garbage error number. */
    return -1;
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rt = 0x1f & (the_uuo >> 21),
    ra = 0x1f & (the_uuo >> 16),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;

  case UUO_INTERR:
    status = handle_error(xp, errnum, rb, 0,  where);
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap. Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  sigset_t mask;
  natural  callback_ptr, i;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  unsigned  instr, err_arg1 = 0, err_arg2 = 0, err_arg3 = 0;
  int       ra, rs, fn_reg = 0;
  char *    error_msg = NULL;
  char      name[kNameBufLen];
  LispObj   cmain = nrs_CMAIN.vcell;
  Boolean   event_poll_p = false;
  int old_interrupt_level = 0;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}

/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}

void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}

/* Return 1 if the trap instruction is conditional; 0 if it traps
   unconditionally, or isn't a tw/td or twi/tdi at all. */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask, and either the register
       operands (if "tw") are the same, or both of the signed or
       both of the unsigned inequality bits must also be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
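
/* Examples (32-bit mnemonics): "trap" (tw 31,r0,r0) sets every TO bit,
   so it traps unconditionally; "tweq rA,rA" compares a register with
   itself with TO_EQ set, also unconditional; "twlt rA,rB" can't trap
   when the operands are equal, so it's conditional. */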

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   pname;
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);

  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}

void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    Boolean foreign = (old_valence != TCR_STATE_LISP);
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, foreign, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
1785
1786/*
1787  If it looks like we're in the middle of an atomic operation, make
1788  it seem as if that operation is either complete or hasn't started
1789  yet.
1790
1791  The cases handled include:
1792
1793  a) storing into a newly-allocated lisp frame on the stack.
1794  b) marking a newly-allocated TSP frame as containing "raw" data.
1795  c) consing: the GC has its own ideas about how this should be
1796     handled, but other callers would be best advised to back
1797     up or move forward, according to whether we're in the middle
1798     of allocating a cons cell or allocating a uvector.
1799  d) a STMW to the vsp
1800  e) EGC write-barrier subprims.
1801*/
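
/* A hedged illustration of case (c): the consing sequence the compiler
   emits looks roughly like this (PPC32; the exact mnemonics and the
   object size are illustrative assumptions, not a quote of the real
   code):

       subi   allocptr,allocptr,8   ; decrement allocptr by the object size
       twllt  allocptr,allocbase    ; alloc trap: taken if we crossed allocbase
       ...                          ; initialize tag/contents of the new object

   A thread can be suspended at any point in that sequence; pc_luser_xp()
   below either backs allocptr up or finishes the allocation. */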
1802
1803extern opcode
1804  egc_write_barrier_start,
1805  egc_write_barrier_end, 
1806  egc_store_node_conditional, 
1807  egc_set_hash_key,
1808  egc_gvset,
1809  egc_rplaca,
1810  egc_rplacd;
1811
1812
1813extern opcode ffcall_return_window, ffcall_return_window_end;
1814
1815void
1816pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1817{
1818  pc program_counter = xpPC(xp);
1819  opcode instr = *program_counter;
1820  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1821  LispObj cur_allocptr = xpGPR(xp, allocptr);
1822  int allocptr_tag = fulltag_of(cur_allocptr);
1823 
1824
1825
1826  if ((program_counter < &egc_write_barrier_end) && 
1827      (program_counter >= &egc_write_barrier_start)) {
1828    LispObj *ea = 0, val, root;
1829    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1830    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1831
1832    if (program_counter >= &egc_store_node_conditional) {
1833      if ((program_counter == &egc_store_node_conditional) || ! (xpCCR(xp) & 0x20000000)) {
1834        /* The conditional store either hasn't been attempted yet, or
1835           has failed.  No need to adjust the PC, or do memoization. */
1836        return;
1837      }
1838      val = xpGPR(xp,arg_z);
1839      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1840      xpGPR(xp,arg_z) = t_value;
1841      need_store = false;
1842    } else if (program_counter >= &egc_set_hash_key) {
1843      root = xpGPR(xp,arg_x);
1844      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1845      need_memoize_root = true;
1846    } else if (program_counter >= &egc_gvset) {
1847      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1848      val = xpGPR(xp,arg_z);
1849    } else if (program_counter >= &egc_rplacd) {
1850      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1851      val = xpGPR(xp,arg_z);
1852    } else {                      /* egc_rplaca */
1853      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1854      val = xpGPR(xp,arg_z);
1855    }
1856    if (need_store) {
1857      *ea = val;
1858    }
1859    if (need_check_memo) {
1860      natural  bitnumber = area_dnode(ea, lisp_global(HEAP_START));
1861      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1862          ((LispObj)ea < val)) {
1863        atomic_set_bit(refbits, bitnumber);
1864        if (need_memoize_root) {
1865          bitnumber = area_dnode(root, lisp_global(HEAP_START));
1866          atomic_set_bit(refbits, bitnumber);
1867        }
1868      }
1869    }
1870    set_xpPC(xp, xpLR(xp));
1871    return;
1872  }
1873
1874
1875  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
1876    LispObj tsp_val = xpGPR(xp,tsp);
1877   
1878    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
1879    adjust_exception_pc(xp, 4);
1880    return;
1881  }
1882 
1883  if (frame->backlink == (frame+1)) {
1884    if (
1885#ifdef PPC64
1886        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
1887        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
1888#else
1889        (major_opcode_p(instr, major_opcode_STW)) && 
1890#endif
1891        (RA_field(instr) == sp) &&
1892        /* There are a few places in the runtime that store into
1893           a previously-allocated frame atop the stack when
1894           throwing values around.  We only care about the case
1895           where the frame was newly allocated, in which case
1896           there must have been a CREATE_LISP_FRAME_INSTRUCTION
1897           a few instructions before the current program counter.
1898           (The whole point here is that a newly allocated frame
1899           might contain random values that we don't want the
1900           GC to see; a previously allocated frame should already
1901           be completely initialized.)
1902        */
1903        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
1904         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
1905         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
1906#ifdef PPC64
1907      int disp = DS_field(instr);
1908#else     
1909      int disp = D_field(instr);
1910#endif
1911
1912
1913      if (disp < (4*node_size)) {
1914#if 0
1915        fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
1916#endif
1917        frame->savevsp = 0;
1918        if (disp < (3*node_size)) {
1919          frame->savelr = 0;
1920          if (disp == node_size) {
1921            frame->savefn = 0;
1922          }
1923        }
1924      }
1925      return;
1926    }
1927  }
1928
1929  if (allocptr_tag != tag_fixnum) {
1930    signed_natural disp = allocptr_displacement(xp);
1931
1932    if (disp) {
1933      /* Being architecturally "at" the alloc trap doesn't tell
1934         us much (in particular, it doesn't tell us whether
1935         or not the thread has committed to taking the trap
1936         and is waiting for the exception lock (or waiting
1937         for the Mach exception thread to tell it how bad
1938         things are) or is about to execute a conditional
1939         trap.)
1940         Regardless of which case applies, we want the
1941         other thread to take (or finish taking) the
1942         trap, and we don't want it to consider its
1943         current allocptr to be valid.
1944         The difference between this case (suspend other
1945         thread for GC) and the previous case (suspend
1946         current thread for interrupt) is solely a
1947         matter of what happens after we leave this
1948         function: some non-current thread will stay
1949         suspended until the GC finishes, then take
1950         (or start processing) the alloc trap.   The
1951         current thread will go off and do PROCESS-INTERRUPT
1952         or something, and may return from the interrupt
1953         and need to finish the allocation that got interrupted.
1954      */
1955
1956      if (alloc_disp) {
1957        *alloc_disp = disp;
1958        xpGPR(xp,allocptr) += disp;
1959        /* Leave the PC at the alloc trap.  When the interrupt
1960           handler returns, it'll decrement allocptr by disp
1961           and the trap may or may not be taken.
1962        */
1963      } else {
1964        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
1965        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
1966        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
1967      }
1968    } else {
1969#ifdef DEBUG
1970      fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
1971#endif
1972      /* If we're already past the alloc_trap, finish allocating
1973         the object. */
1974      if (allocptr_tag == fulltag_cons) {
1975        finish_allocating_cons(xp);
1976#ifdef DEBUG
1977          fprintf(stderr, "finish allocating cons in TCR = #x%x\n",
1978                  tcr);
1979#endif
1980      } else {
1981        if (allocptr_tag == fulltag_misc) {
1982#ifdef DEBUG
1983          fprintf(stderr, "finish allocating uvector in TCR = #x%x\n",
1984                  tcr);
1985#endif
1986          finish_allocating_uvector(xp);
1987        } else {
1988          Bug(xp, "what's being allocated here ?");
1989        }
1990      }
1991      /* Whatever we finished allocating, reset allocptr/allocbase to
1992         VOID_ALLOCPTR */
1993      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
1994    }
1995    return;
1996  }
1997
1998  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
1999    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2000    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2001#if 0
2002        fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2003#endif
2004
2005    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2006      deref(frame,idx) = 0;
2007    }
2008    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2009    return;
2010  }
2011
2012#ifndef PPC64
2013  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2014      (RA_field(instr) == vsp)) {
2015    int r;
2016    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2017   
2018    for (r = RS_field(instr); r <= 31; r++) {
2019      *vspptr++ = xpGPR(xp,r);
2020    }
2021    adjust_exception_pc(xp, 4);
2022  }
2023#endif
2024}
2025
2026void
2027interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2028{
2029  TCR *tcr = get_interrupt_tcr(false);
2030  if (tcr) {
2031    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2032      tcr->interrupt_pending = 1 << fixnumshift;
2033    } else {
2034      LispObj cmain = nrs_CMAIN.vcell;
2035
2036      if ((fulltag_of(cmain) == fulltag_misc) &&
2037          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2038        /*
2039           This thread can (allegedly) take an interrupt now.
2040           It's tricky to do that if we're executing
2041           foreign code (especially Linuxthreads code, much
2042           of which isn't reentrant.)
2043           If we're unwinding the stack, we also want to defer
2044           the interrupt.
2045        */
2046        if ((tcr->valence != TCR_STATE_LISP) ||
2047            (tcr->unwinding != 0)) {
2048          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2049        } else {
2050          xframe_list xframe_link;
2051          int old_valence;
2052          signed_natural disp=0;
2053         
2054          pc_luser_xp(context, tcr, &disp);
2055          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2056          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2057#ifdef DEBUG
2058          fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
2059#endif
2060          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2061          if (disp) {
2062            xpGPR(context,allocptr) -= disp;
2063          }
2064          unlock_exception_lock_in_handler(tcr);
2065#ifdef DEBUG
2066          fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
2067#endif
2068          exit_signal_handler(tcr, old_valence);
2069        }
2070      }
2071    }
2072  }
2073#ifdef DARWIN
2074    DarwinSigReturn(context);
2075#endif
2076}
2077
2078
2079
2080void
2081install_signal_handler(int signo, void *handler)
2082{
2083  struct sigaction sa;
2084 
2085  sa.sa_sigaction = (void *)handler;
2086  sigfillset(&sa.sa_mask);
2087  sa.sa_flags = 
2088    SA_RESTART
2089    | SA_SIGINFO
2090#ifdef DARWIN
2091#ifdef PPC64
2092    | SA_64REGSET
2093#endif
2094#endif
2095    ;
2096
2097  sigaction(signo, &sa, NULL);
2098}
2099
2100void
2101install_pmcl_exception_handlers()
2102{
2103#ifdef DARWIN
2104  extern Boolean use_mach_exception_handling;
2105#endif
2106
2107  Boolean install_signal_handlers_for_exceptions =
2108#ifdef DARWIN
2109    !use_mach_exception_handling
2110#else
2111    true
2112#endif
2113    ;
2114  if (install_signal_handlers_for_exceptions) {
2115    extern int no_sigtrap;
2116    install_signal_handler(SIGILL, (void *)signal_handler);
2117    if (no_sigtrap != 1) {
2118      install_signal_handler(SIGTRAP, (void *)signal_handler);
2119    }
2120    install_signal_handler(SIGBUS,  (void *)signal_handler);
2121    install_signal_handler(SIGSEGV, (void *)signal_handler);
2122    install_signal_handler(SIGFPE, (void *)signal_handler);
2123  }
2124 
2125  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2126                         (void *)interrupt_handler);
2127  signal(SIGPIPE, SIG_IGN);
2128}
2129
2130void
2131quit_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2132{
2133  TCR *tcr = get_tcr(false);
2134  area *a;
2135  sigset_t mask;
2136 
2137  sigemptyset(&mask);
2138
2139  if (tcr) {
2140    tcr->valence = TCR_STATE_FOREIGN;
2141    a = tcr->vs_area;
2142    if (a) {
2143      a->active = a->high;
2144    }
2145    a = tcr->ts_area;
2146    if (a) {
2147      a->active = a->high;
2148    }
2149    a = tcr->cs_area;
2150    if (a) {
2151      a->active = a->high;
2152    }
2153  }
2154 
2155  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2156  pthread_exit(NULL);
2157}
2158
2159void
2160thread_signal_setup()
2161{
2162  thread_suspend_signal = SIG_SUSPEND_THREAD;
2163  thread_resume_signal = SIG_RESUME_THREAD;
2164
2165  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
2166  install_signal_handler(thread_resume_signal,  (void *) suspend_resume_handler);
2167  install_signal_handler(SIGQUIT, (void *)quit_handler);
2168}
2169
2170
2171
2172void
2173unprotect_all_areas()
2174{
2175  protected_area_ptr p;
2176
2177  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2178    unprotect_area(p);
2179  }
2180}
2181
2182/*
2183  A binding subprim has just done "twlle limit_regno,idx_regno" and
2184  the trap's been taken.  Extend the tcr's tlb so that the index will
2185  be in bounds and the new limit will be on a page boundary, filling
2186  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2187  the tcr fields and the registers in the xp and return true if this
2188  all works, false otherwise.
2189
2190  Note that the tlb was allocated via malloc, so realloc can do some
2191  of the hard work.
2192*/
2193Boolean
2194extend_tcr_tlb(TCR *tcr, 
2195               ExceptionInformation *xp, 
2196               unsigned limit_regno,
2197               unsigned idx_regno)
2198{
2199  unsigned
2200    index = (unsigned) (xpGPR(xp,idx_regno)),
2201    old_limit = tcr->tlb_limit,
2202    new_limit = align_to_power_of_2(index+1,12),
2203    new_bytes = new_limit-old_limit;
2204  LispObj
2205    *old_tlb = tcr->tlb_pointer,
2206    *new_tlb = realloc(old_tlb, new_limit),
2207    *work;
2208
2209  if (new_tlb == NULL) {
2210    return false;
2211  }
2212 
2213  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2214
2215  while (new_bytes) {
2216    *work++ = no_thread_local_binding_marker;
2217    new_bytes -= sizeof(LispObj);
2218  }
2219  tcr->tlb_pointer = new_tlb;
2220  tcr->tlb_limit = new_limit;
2221  xpGPR(xp, limit_regno) = new_limit;
2222  return true;
2223}
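
/* A worked example, assuming align_to_power_of_2(n, 12) rounds n up to
   the next multiple of 2^12: a faulting index of 0x1234 with an old
   limit of 0x1000 gives new_limit = 0x2000 and new_bytes = 0x1000, so
   exactly one new page gets filled with no_thread_local_binding_marker
   above before the tlb pointer/limit and the xp's limit register are
   updated. */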
2224
2225
2226
2227void
2228exception_init()
2229{
2230  install_pmcl_exception_handlers();
2231}
2232
2233
2234
2235
2236
2237#ifdef DARWIN
2238
2239
2240#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2241#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2242
2243
2244#if USE_MACH_EXCEPTION_LOCK
2245pthread_mutex_t _mach_exception_lock, *mach_exception_lock;
2246#endif
2247
2248#define LISP_EXCEPTIONS_HANDLED_MASK \
2249 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2250
2251/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2252#define NUM_LISP_EXCEPTIONS_HANDLED 4
2253
2254typedef struct {
2255  int foreign_exception_port_count;
2256  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2257  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2258  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2259  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2260} MACH_foreign_exception_state;
2261
2262
2263
2264
2265/*
2266  Mach's exception mechanism works a little better than its signal
2267  mechanism (and, not incidentally, it gets along with GDB a lot
2268  better.
2269
2270  Initially, we install an exception handler to handle each native
2271  thread's exceptions.  This process involves creating a distinguished
2272  thread which listens for kernel exception messages on a set of
2273  0 or more thread exception ports.  As threads are created, they're
2274  added to that port set; a thread's exception port is destroyed
2275  (and therefore removed from the port set) when the thread exits.
2276
2277  A few exceptions can be handled directly in the handler thread;
2278  others require that we resume the user thread (and that the
2279  exception thread resumes listening for exceptions.)  The user
2280  thread might eventually want to return to the original context
2281  (possibly modified somewhat.)
2282
2283  As it turns out, the simplest way to force the faulting user
2284  thread to handle its own exceptions is to do pretty much what
2285  signal() does: the exception handling thread sets up a sigcontext
2286  on the user thread's stack and forces the user thread to resume
2287  execution as if a signal handler had been called with that
2288  context as an argument.  We can use a distinguished UUO at a
2289  distinguished address to do something like sigreturn(); that'll
2290  have the effect of resuming the user thread's execution in
2291  the (pseudo-) signal context.
2292
2293  Since:
2294    a) we have miles of code in C and in Lisp that knows how to
2295    deal with Linux sigcontexts
2296    b) Linux sigcontexts contain a little more useful information
2297    (the DAR, DSISR, etc.) than their Darwin counterparts
2298    c) we have to create a sigcontext ourselves when calling out
2299    to the user thread: we aren't really generating a signal, just
2300    leveraging existing signal-handling code.
2301
2302  we create a Linux sigcontext struct.
2303
2304  Simple?  Hopefully from the outside it is ...
2305
2306  We want the process of passing a thread's own context to it to
2307  appear to be atomic: in particular, we don't want the GC to suspend
2308  a thread that's had an exception but has not yet had its user-level
2309  exception handler called, and we don't want the thread's exception
2310  context to be modified by a GC while the Mach handler thread is
2311  copying it around.  On Linux (and on Jaguar), we avoid this issue
2312  because (a) the kernel sets up the user-level signal handler and
2313  (b) the signal handler blocks signals (including the signal used
2314  by the GC to suspend threads) until tcr->xframe is set up.
2315
2316  The GC and the Mach server thread therefore contend for the lock
2317  "mach_exception_lock".  The Mach server thread holds the lock
2318  when copying exception information between the kernel and the
2319  user thread; the GC holds this lock during most of its execution
2320  (delaying exception processing until it can be done without
2321  GC interference.)
2322
2323*/
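
/* A minimal sketch of the plumbing described above, using only standard
   Mach calls.  Error checking, the TCR-as-port-name trick, and the
   sigcontext forgery are omitted, and "my_exc_server" is a stand-in for
   a dispatcher like openmcl_exc_server() below.  Illustrative only. */
#if 0
#include <mach/mach.h>

extern boolean_t my_exc_server(mach_msg_header_t *, mach_msg_header_t *);

static void *
exception_listener(void *arg)
{
  /* Wait for and dispatch kernel exception messages forever. */
  mach_msg_server(my_exc_server, 2048, (mach_port_t)(natural)arg, 0);
  return NULL;
}

static void
watch_thread(mach_port_t portset, thread_act_t thread)
{
  mach_port_t port;

  /* Make a receive right and give ourselves a send right on it ... */
  mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
  mach_port_insert_right(mach_task_self(), port, port,
                         MACH_MSG_TYPE_MAKE_SEND);
  /* ... install it as the thread's exception port ... */
  thread_set_exception_ports(thread, LISP_EXCEPTIONS_HANDLED_MASK,
                             port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
  /* ... and add it to the set that the listener thread drains. */
  mach_port_move_member(mach_task_self(), port, portset);
}
#endif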
2324
2325#ifdef PPC64
2326#define C_REDZONE_LEN           320
2327#define C_STK_ALIGN             32
2328#else
2329#define C_REDZONE_LEN           224
2330#define C_STK_ALIGN             16
2331#endif
2332#define C_PARAMSAVE_LEN         64
2333#define C_LINKAGE_LEN           48
2334
2335#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
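/* e.g. TRUNC_DOWN(1000, 100, 16) == ((1000-100)/16)*16 == 896:
   back off by b bytes, then round down to a multiple of c. */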
2336
2337void
2338fatal_mach_error(char *format, ...);
2339
2340#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2341
2342
2343void
2344restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2345{
2346  int i, j;
2347  kern_return_t kret;
2348  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2349
2350  /* Set the thread's FP state from the pseudosigcontext */
2351  kret = thread_set_state(thread,
2352                          PPC_FLOAT_STATE,
2353                          (thread_state_t)&(mc->__fs),
2354                          PPC_FLOAT_STATE_COUNT);
2355
2356  MACH_CHECK_ERROR("setting thread FP state", kret);
2357
2358  /* The thread'll be as good as new ... */
2359#ifdef PPC64
2360  kret = thread_set_state(thread,
2361                          PPC_THREAD_STATE64,
2362                          (thread_state_t)&(mc->__ss),
2363                          PPC_THREAD_STATE64_COUNT);
2364#else
2365  kret = thread_set_state(thread, 
2366                          MACHINE_THREAD_STATE,
2367                          (thread_state_t)&(mc->__ss),
2368                          MACHINE_THREAD_STATE_COUNT);
2369#endif
2370  MACH_CHECK_ERROR("setting thread state", kret);
2371} 
2372
2373/* This code runs in the exception handling thread, in response
2374   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2375   in response to a call to pseudo_sigreturn() from the specified
2376   user thread.)
2377   Find that context (the user thread's R3 points to it), then
2378   use that context to set the user thread's state.  When this
2379   function's caller returns, the Mach kernel will resume the
2380   user thread.
2381*/
2382
2383kern_return_t
2384do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2385{
2386  ExceptionInformation *xp;
2387
2388#ifdef DEBUG_MACH_EXCEPTIONS
2389  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
2390#endif
2391  xp = tcr->pending_exception_context;
2392  if (xp) {
2393    tcr->pending_exception_context = NULL;
2394    tcr->valence = TCR_STATE_LISP;
2395    restore_mach_thread_state(thread, xp);
2396    raise_pending_interrupt(tcr);
2397  } else {
2398    Bug(NULL, "no xp here!\n");
2399  }
2400#ifdef DEBUG_MACH_EXCEPTIONS
2401  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
2402#endif
2403  return KERN_SUCCESS;
2404} 
2405
2406ExceptionInformation *
2407create_thread_context_frame(mach_port_t thread, 
2408                            natural *new_stack_top)
2409{
2410#ifdef PPC64
2411  ppc_thread_state64_t ts;
2412#else
2413  ppc_thread_state_t ts;
2414#endif
2415  mach_msg_type_number_t thread_state_count;
2416  kern_return_t result;
2417  int i,j;
2418  ExceptionInformation *pseudosigcontext;
2419  MCONTEXT_T mc;
2420  natural stackp, backlink;
2421
2422#ifdef PPC64
2423  thread_state_count = PPC_THREAD_STATE64_COUNT;
2424  result = thread_get_state(thread,
2425                            PPC_THREAD_STATE64,
2426                            (thread_state_t)&ts,
2427                            &thread_state_count);
2428#else
2429  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2430  result = thread_get_state(thread, 
2431                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2432                            (thread_state_t)&ts,
2433                            &thread_state_count);
2434#endif
2435 
2436  if (result != KERN_SUCCESS) {
2437    get_tcr(true);
2438    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2439  }
2440  stackp = ts.__r1;
2441  backlink = stackp;
2442  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2443  stackp -= sizeof(*pseudosigcontext);
2444  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2445
2446  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2447  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2448  bcopy(&ts,&(mc->__ss),sizeof(ts));
2449
2450  thread_state_count = PPC_FLOAT_STATE_COUNT;
2451  thread_get_state(thread,
2452                   PPC_FLOAT_STATE,
2453                   (thread_state_t)&(mc->__fs),
2454                   &thread_state_count);
2455
2456
2457#ifdef PPC64
2458  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2459#else
2460  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2461#endif
2462  thread_get_state(thread,
2463#ifdef PPC64
2464                   PPC_EXCEPTION_STATE64,
2465#else
2466                   PPC_EXCEPTION_STATE,
2467#endif
2468                   (thread_state_t)&(mc->__es),
2469                   &thread_state_count);
2470
2471
2472  UC_MCONTEXT(pseudosigcontext) = mc;
2473  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2474  stackp -= C_LINKAGE_LEN;
2475  *(natural *)ptr_from_lispobj(stackp) = backlink;
2476  if (new_stack_top) {
2477    *new_stack_top = stackp;
2478  }
2479  return pseudosigcontext;
2480}
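
/* For reference, the frame built above on the user thread's stack,
   from higher to lower addresses (illustrative):

       +------------------------------+ <- interrupted __r1 (saved as backlink)
       | red zone (C_REDZONE_LEN)     |
       +------------------------------+
       | ExceptionInformation         | <- pseudosigcontext
       +------------------------------+
       | MCONTEXT_T (__ss/__fs/__es)  | <- mc = UC_MCONTEXT(pseudosigcontext)
       +------------------------------+
       | param save (C_PARAMSAVE_LEN) |
       | linkage (C_LINKAGE_LEN)      |
       +------------------------------+ <- *new_stack_top; word 0 = backlink
*/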
2481
2482/*
2483  This code sets up the user thread so that it executes a "pseudo-signal
2484  handler" function when it resumes.  Create a linux sigcontext struct
2485  on the thread's stack and pass it as an argument to the pseudo-signal
2486  handler.
2487
2488  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2489  which will restore the thread's context.
2490
2491  If the handler invokes code that throws (or otherwise never sigreturn()'s
2492  to the context), that's fine.
2493
2494  Actually, check that: throw (and variants) may need to be careful and
2495  pop the tcr's xframe list until it's younger than any frame being
2496  entered.
2497*/
2498
2499int
2500setup_signal_frame(mach_port_t thread,
2501                   void *handler_address,
2502                   int signum,
2503                   int code,
2504                   TCR *tcr)
2505{
2506#ifdef PPC64
2507  ppc_thread_state64_t ts;
2508#else
2509  ppc_thread_state_t ts;
2510#endif
2511  mach_msg_type_number_t thread_state_count;
2512  ExceptionInformation *pseudosigcontext;
2513  int i, j, old_valence = tcr->valence;
2514  kern_return_t result;
2515  natural stackp;
2516
2517#ifdef DEBUG_MACH_EXCEPTIONS
2518  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
2519#endif
2520  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2521  pseudosigcontext->uc_onstack = 0;
2522  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2523  tcr->pending_exception_context = pseudosigcontext;
2524  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2525 
2526
2527  /*
2528     At this point we've created a sigcontext on the thread's
2529     stack.  Set things up so that we call the handler (with appropriate
2530     args) when the thread's resumed.
2531  */
2532
2533  ts.__srr0 = (natural) handler_address;
2534  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2535  ts.__r1 = stackp;
2536  ts.__r3 = signum;
2537  ts.__r4 = (natural)pseudosigcontext;
2538  ts.__r5 = (natural)tcr;
2539  ts.__r6 = (natural)old_valence;
2540  ts.__lr = (natural)pseudo_sigreturn;
2541
2542
2543#ifdef PPC64
2544  ts.__r13 = xpGPR(pseudosigcontext,13);
2545  thread_set_state(thread,
2546                   PPC_THREAD_STATE64,
2547                   (thread_state_t)&ts,
2548                   PPC_THREAD_STATE64_COUNT);
2549#else
2550  thread_set_state(thread, 
2551                   MACHINE_THREAD_STATE,
2552                   (thread_state_t)&ts,
2553                   MACHINE_THREAD_STATE_COUNT);
2554#endif
2555#ifdef DEBUG_MACH_EXCEPTIONS
2556  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2557#endif
2558  return 0;
2559}
2560
2561
2562void
2563pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2564{
2565  signal_handler(signum, NULL, context, tcr, old_valence);
2566} 
2567
2568
2569int
2570thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2571{
2572#ifdef PPC64
2573  ppc_thread_state64_t ts;
2574#else
2575  ppc_thread_state_t ts;
2576#endif
2577  mach_msg_type_number_t thread_state_count;
2578
2579#ifdef PPC64
2580  thread_state_count = PPC_THREAD_STATE64_COUNT;
2581#else
2582  thread_state_count = PPC_THREAD_STATE_COUNT;
2583#endif
2584  thread_get_state(thread, 
2585#ifdef PPC64
2586                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2587#else
2588                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2589#endif
2590                   (thread_state_t)&ts,
2591                   &thread_state_count);
2592  if (enabled) {
2593    ts.__srr1 |= MSR_FE0_FE1_MASK;
2594  } else {
2595    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2596  }
2597  /*
2598     Hack-o-rama warning (isn't it about time for such a warning?):
2599     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2600     Our handler for lisp's use of pthread_kill() pushes a phony
2601     lisp frame on the stack and forces the context to resume at
2602     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2603     lisp frame contains the -real- address that process_interrupt
2604     should have returned to, and the fact that it's in a lisp
2605     frame should convince the GC to notice that address if it
2606     runs in the tiny time window between returning from our
2607     interrupt handler and ... here.
2608     If the top frame on the stack is a lisp frame, discard it
2609     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2610     just adjust ts.srr0 to skip over the UUO.
2611  */
2612  {
2613    lisp_frame *tos = (lisp_frame *)ts.__r1,
2614      *next_frame = tos->backlink;
2615   
2616    if (tos == (next_frame -1)) {
2617      ts.__srr0 = tos->savelr;
2618      ts.__r1 = (LispObj) next_frame;
2619    } else {
2620      ts.__srr0 += 4;
2621    }
2622  }
2623  thread_set_state(thread, 
2624#ifdef PPC64
2625                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2626#else
2627                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2628#endif
2629                   (thread_state_t)&ts,
2630#ifdef PPC64
2631                   PPC_THREAD_STATE64_COUNT
2632#else
2633                   PPC_THREAD_STATE_COUNT
2634#endif
2635                   );
2636
2637  return 0;
2638}
2639
2640/*
2641  This function runs in the exception handling thread.  It's
2642  called (by this precise name) from the library function "exc_server()"
2643  when the thread's exception ports are set up.  (exc_server() is called
2644  via mach_msg_server(), which is a function that waits for and dispatches
2645  on exception messages from the Mach kernel.)
2646
2647  This checks to see if the exception was caused by a pseudo_sigreturn()
2648  UUO; if so, it arranges for the thread to have its state restored
2649  from the specified context.
2650
2651  Otherwise, it tries to map the exception to a signal number and
2652  arranges that the thread run a "pseudo signal handler" to handle
2653  the exception.
2654
2655  Some exceptions could and should be handled here directly.
2656*/
2657
2658kern_return_t
2659catch_exception_raise(mach_port_t exception_port,
2660                      mach_port_t thread,
2661                      mach_port_t task, 
2662                      exception_type_t exception,
2663                      exception_data_t code_vector,
2664                      mach_msg_type_number_t code_count)
2665{
2666  int signum = 0, code = *code_vector, code1;
2667  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2668  kern_return_t kret;
2669
2670#ifdef DEBUG_MACH_EXCEPTIONS
2671  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
2672#endif
2673
2674  if (
2675#if USE_MACH_EXCEPTION_LOCK
2676    pthread_mutex_trylock(mach_exception_lock) == 0
2677#else
2678    1
2679#endif
2680    ) {
2681    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2682      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2683    } 
2684    if ((exception == EXC_BAD_INSTRUCTION) &&
2685        (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2686        (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2687         (code1 == (int)enable_fp_exceptions) ||
2688         (code1 == (int)disable_fp_exceptions))) {
2689      if (code1 == (int)pseudo_sigreturn) {
2690        kret = do_pseudo_sigreturn(thread, tcr);
2691#if 0
2692      fprintf(stderr, "Exception return in 0x%x\n",tcr);
2693#endif
2694       
2695      } else if (code1 == (int)enable_fp_exceptions) {
2696        kret = thread_set_fp_exceptions_enabled(thread, true);
2697      } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2698    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2699      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2700      kret = 17;        /* non-KERN_SUCCESS: exception not handled here */
2701    } else {
2702      switch (exception) {
2703      case EXC_BAD_ACCESS:
2704        signum = SIGSEGV;
2705        break;
2706       
2707      case EXC_BAD_INSTRUCTION:
2708        signum = SIGILL;
2709        break;
2710     
2711      case EXC_SOFTWARE:
2712        if (code == EXC_PPC_TRAP) {
2713          signum = SIGTRAP;
2714        }
2715        break;
2716     
2717      case EXC_ARITHMETIC:
2718        signum = SIGFPE;
2719        break;
2720
2721      default:
2722        break;
2723      }
2724      if (signum) {
2725        kret = setup_signal_frame(thread,
2726                                  (void *)pseudo_signal_handler,
2727                                  signum,
2728                                  code,
2729                                  tcr);
2730#if 0
2731      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
2732#endif
2733
2734      } else {
2735        kret = 17;      /* non-KERN_SUCCESS: exception not handled here */
2736      }
2737    }
2738#if USE_MACH_EXCEPTION_LOCK
2739#ifdef DEBUG_MACH_EXCEPTIONS
2740    fprintf(stderr, "releasing Mach exception lock in exception thread\n");
2741#endif
2742    pthread_mutex_unlock(mach_exception_lock);
2743#endif
2744  } else {
2745    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2746#if 0
2747    fprintf(stderr, "deferring pending exception in 0x%x\n", tcr);
2748#endif
2749    kret = 0;
2750    if (tcr == gc_tcr) {
2751      int i;
2752      write(1, "exception in GC thread.  Sleeping for 60 seconds\n", sizeof("exception in GC thread.  Sleeping for 60 seconds\n")-1);
2753      for (i = 0; i < 60; i++) {
2754        sleep(1);
2755      }
2756      _exit(EX_SOFTWARE);
2757    }
2758  }
2759  return kret;
2760}
2761
2762
2763
2764typedef struct {
2765  mach_msg_header_t Head;
2766  /* start of the kernel processed data */
2767  mach_msg_body_t msgh_body;
2768  mach_msg_port_descriptor_t thread;
2769  mach_msg_port_descriptor_t task;
2770  /* end of the kernel processed data */
2771  NDR_record_t NDR;
2772  exception_type_t exception;
2773  mach_msg_type_number_t codeCnt;
2774  integer_t code[2];
2775  mach_msg_trailer_t trailer;
2776} exceptionRequest;
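
/* This mirrors the MIG-generated Request structure for exception_raise
   (msgh_id 2401 in the "exc" subsystem); openmcl_exc_server() below
   plays the role that the MIG-generated exc_server() otherwise would,
   and follows the MIG convention of reply id = request id + 100. */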
2777
2778
2779boolean_t
2780openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2781{
2782  static NDR_record_t _NDR = {0};
2783  kern_return_t handled;
2784  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2785  exceptionRequest *req = (exceptionRequest *) in;
2786
2787  reply->NDR = _NDR;
2788
2789  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2790  out->msgh_remote_port = in->msgh_remote_port;
2791  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2792  out->msgh_local_port = MACH_PORT_NULL;
2793  out->msgh_id = in->msgh_id+100;
2794
2795  /* Could handle other exception flavors in the range 2401-2403 */
2796
2797
2798  if (in->msgh_id != 2401) {
2799    reply->RetCode = MIG_BAD_ID;
2800    return FALSE;
2801  }
2802  handled = catch_exception_raise(req->Head.msgh_local_port,
2803                                  req->thread.name,
2804                                  req->task.name,
2805                                  req->exception,
2806                                  req->code,
2807                                  req->codeCnt);
2808  reply->RetCode = handled;
2809  return TRUE;
2810}
2811
2812/*
2813  The initial function for an exception-handling thread.
2814*/
2815
2816void *
2817exception_handler_proc(void *arg)
2818{
2819  extern boolean_t exc_server();
2820  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2821
2822  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2823  /* Should never return. */
2824  abort();
2825}
2826
2827
2828
2829mach_port_t
2830mach_exception_port_set()
2831{
2832  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2833  kern_return_t kret; 
2834  if (__exception_port_set == MACH_PORT_NULL) {
2835#if USE_MACH_EXCEPTION_LOCK
2836    mach_exception_lock = &_mach_exception_lock;
2837    pthread_mutex_init(mach_exception_lock, NULL);
2838#endif
2839    kret = mach_port_allocate(mach_task_self(),
2840                              MACH_PORT_RIGHT_PORT_SET,
2841                              &__exception_port_set);
2842    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2843    create_system_thread(0,
2844                         NULL,
2845                         exception_handler_proc, 
2846                         (void *)((natural)__exception_port_set));
2847  }
2848  return __exception_port_set;
2849}
2850
2851/*
2852  Set up a new thread to handle those exceptions specified by
2853  the mask "which".  This involves creating a special Mach
2854  message port, telling the Mach kernel to send exception
2855  messages for the calling thread to that port, and setting
2856  up a handler thread which listens for and responds to
2857  those messages.
2858
2859*/
2860
2861/*
2862  Establish the lisp thread's TCR as its exception port, and determine
2863  whether any other ports have been established by foreign code for
2864  exceptions that lisp cares about.
2865
2866  If this happens at all, it should happen on return from foreign
2867  code and on entry to lisp code via a callback.
2868
2869  This is a lot of trouble (and overhead) to support Java, or other
2870  embeddable systems that clobber their caller's thread exception ports.
2871 
2872*/
2873kern_return_t
2874tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2875{
2876  kern_return_t kret;
2877  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2878  int i;
2879  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2880  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2881  exception_mask_t mask = 0;
2882
2883  kret = thread_swap_exception_ports(thread,
2884                                     LISP_EXCEPTIONS_HANDLED_MASK,
2885                                     lisp_port,
2886                                     EXCEPTION_DEFAULT,
2887                                     THREAD_STATE_NONE,
2888                                     fxs->masks,
2889                                     &n,
2890                                     fxs->ports,
2891                                     fxs->behaviors,
2892                                     fxs->flavors);
2893  if (kret == KERN_SUCCESS) {
2894    fxs->foreign_exception_port_count = n;
2895    for (i = 0; i < n; i ++) {
2896      foreign_port = fxs->ports[i];
2897
2898      if ((foreign_port != lisp_port) &&
2899          (foreign_port != MACH_PORT_NULL)) {
2900        mask |= fxs->masks[i];
2901      }
2902    }
2903    tcr->foreign_exception_status = (int) mask;
2904  }
2905  return kret;
2906}
2907
2908kern_return_t
2909tcr_establish_lisp_exception_port(TCR *tcr)
2910{
2911  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2912}
2913
2914/*
2915  Do this when calling out to or returning from foreign code, if
2916  any conflicting foreign exception ports were established when we
2917  last entered lisp code.
2918*/
2919kern_return_t
2920restore_foreign_exception_ports(TCR *tcr)
2921{
2922  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2923 
2924  if (m) {
2925    MACH_foreign_exception_state *fxs  = 
2926      (MACH_foreign_exception_state *) tcr->native_thread_info;
2927    int i, n = fxs->foreign_exception_port_count;
2928    exception_mask_t tm;
2929
2930    for (i = 0; i < n; i++) {
2931      if ((tm = fxs->masks[i]) & m) {
2932        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2933                                   tm,
2934                                   fxs->ports[i],
2935                                   fxs->behaviors[i],
2936                                   fxs->flavors[i]);
2937      }
2938    }
2939  }
  return KERN_SUCCESS;
2940}
2941                                   
2942
2943/*
2944  This assumes that a Mach port (to be used as the thread's exception port) whose
2945  "name" matches the TCR's 32-bit address has already been allocated.
2946*/
2947
2948kern_return_t
2949setup_mach_exception_handling(TCR *tcr)
2950{
2951  mach_port_t
2952    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2953    target_thread = pthread_mach_thread_np((pthread_t)ptr_from_lispobj(tcr->osid)),
2954    task_self = mach_task_self();
2955  kern_return_t kret;
2956
2957  kret = mach_port_insert_right(task_self,
2958                                thread_exception_port,
2959                                thread_exception_port,
2960                                MACH_MSG_TYPE_MAKE_SEND);
2961  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2962
2963  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2964  if (kret == KERN_SUCCESS) {
2965    mach_port_t exception_port_set = mach_exception_port_set();
2966
2967    kret = mach_port_move_member(task_self,
2968                                 thread_exception_port,
2969                                 exception_port_set);
2970  }
2971  return kret;
2972}
2973
2974void
2975darwin_exception_init(TCR *tcr)
2976{
2977  void tcr_monitor_exception_handling(TCR*, Boolean);
2978  kern_return_t kret;
2979  MACH_foreign_exception_state *fxs = 
2980    calloc(1, sizeof(MACH_foreign_exception_state));
2981 
2982  tcr->native_thread_info = (void *) fxs;
2983
2984  if ((kret = setup_mach_exception_handling(tcr))
2985      != KERN_SUCCESS) {
2986    fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret);
2987    terminate_lisp();
2988  }
2989  lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports;
2990  lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port;
2991}
2992
2993/*
2994  The tcr is the "name" of the corresponding thread's exception port.
2995  Destroying the port should remove it from all port sets of which it's
2996  a member (notably, the exception port set.)
2997*/
2998void
2999darwin_exception_cleanup(TCR *tcr)
3000{
3001  void *fxs = tcr->native_thread_info;
3002  extern Boolean use_mach_exception_handling;
3003
3004  if (fxs) {
3005    tcr->native_thread_info = NULL;
3006    free(fxs);
3007  }
3008  if (use_mach_exception_handling) {
3009    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3010    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3011  }
3012}
3013
3014
3015Boolean
3016suspend_mach_thread(mach_port_t mach_thread)
3017{
3018  kern_return_t status;
3019  Boolean aborted = false;
3020 
3021  do {
3022    aborted = false;
3023    status = thread_suspend(mach_thread);
3024    if (status == KERN_SUCCESS) {
3025      status = thread_abort_safely(mach_thread);
3026      if (status == KERN_SUCCESS) {
3027        aborted = true;
3028      } else {
3029        fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread);
3030        thread_resume(mach_thread);
3031      }
3032    } else {
3033      return false;
3034    }
3035  } while (! aborted);
3036  return true;
3037}
3038
3039/*
3040  Only do this if pthread_kill indicated that the pthread isn't
3041  listening to signals anymore, as can happen as soon as pthread_exit()
3042  is called on Darwin.  The thread could still call out to lisp as it
3043  is exiting, so we need another way to suspend it in this case.
3044*/
3045Boolean
3046mach_suspend_tcr(TCR *tcr)
3047{
3048  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3049  ExceptionInformation *pseudosigcontext;
3050  Boolean result = false;
3051 
3052  result = suspend_mach_thread(mach_thread);
3053  if (result) {
3054    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3055    pseudosigcontext->uc_onstack = 0;
3056    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3057    tcr->suspend_context = pseudosigcontext;
3058  }
3059  return result;
3060}
3061
3062void
3063mach_resume_tcr(TCR *tcr)
3064{
3065  ExceptionInformation *xp;
3066  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3067 
3068  xp = tcr->suspend_context;
3069#ifdef DEBUG_MACH_EXCEPTIONS
3070  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3071          tcr, tcr->pending_exception_context);
3072#endif
3073  tcr->suspend_context = NULL;
3074  restore_mach_thread_state(mach_thread, xp);
3075#ifdef DEBUG_MACH_EXCEPTIONS
3076  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3077          tcr, tcr->pending_exception_context);
3078#endif
3079  thread_resume(mach_thread);
3080}
3081
3082void
3083fatal_mach_error(char *format, ...)
3084{
3085  va_list args;
3086  char s[512];
3087 
3088
3089  va_start(args, format);
3090  vsnprintf(s, sizeof(s),format, args);
3091  va_end(args);
3092
3093  Fatal("Mach error", s);
3094}
3095
3096void
3097pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3098{
3099  interrupt_handler(signum, NULL, context);
3100}
3101
3102int
3103mach_raise_thread_interrupt(TCR *target)
3104{
3105  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3106  kern_return_t kret;
3107  Boolean result = false;
3108  TCR *current = get_tcr(false);
3109  thread_basic_info_data_t info; 
3110  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3111
3112  LOCK(lisp_global(TCR_AREA_LOCK), current);
3113#if USE_MACH_EXCEPTION_LOCK
3114  pthread_mutex_lock(mach_exception_lock);
3115#endif
3116
3117  if (suspend_mach_thread(mach_thread)) {
3118    if (thread_info(mach_thread,
3119                    THREAD_BASIC_INFO,
3120                    (thread_info_t)&info,
3121                    &info_count) == KERN_SUCCESS) {
3122      if (info.suspend_count == 1) {
3123        if ((target->valence == TCR_STATE_LISP) &&
3124            (!target->unwinding) &&
3125            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3126          kret = setup_signal_frame(mach_thread,
3127                                    (void *)pseudo_interrupt_handler,
3128                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3129                                    0,
3130                                    target);
3131          if (kret == KERN_SUCCESS) {
3132            result = true;
3133          }
3134        }
3135      }
3136    }
3137    if (! result) {
3138      target->interrupt_pending = 1 << fixnumshift;
3139    }
3140    thread_resume(mach_thread);
3141   
3142  }
3143#if USE_MACH_EXCEPTION_LOCK
3144  pthread_mutex_unlock(mach_exception_lock);
3145#endif
3146  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3147  return 0;
3148}
3149
3150#endif